author		Ingo Molnar <mingo@elte.hu>	2011-06-04 12:13:06 +0200
committer	Ingo Molnar <mingo@elte.hu>	2011-06-04 12:13:06 +0200
commit		710054ba25c0d1f8f41c22ce13ba336503fb5318 (patch)
tree		f9b09b722bf511841539173d946f90a20fc2e59a
parent		74c355fbdfedd3820046dba4f537876cea54c207 (diff)
parent		b273fa9716aa1564bee88ceee62f9042981cdc81 (diff)
download	op-kernel-dev-710054ba25c0d1f8f41c22ce13ba336503fb5318.zip
		op-kernel-dev-710054ba25c0d1f8f41c22ce13ba336503fb5318.tar.gz
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent
-rw-r--r--Documentation/DocBook/mtdnand.tmpl3
-rw-r--r--Documentation/RCU/trace.txt17
-rw-r--r--Documentation/acpi/method-customizing.txt5
-rw-r--r--Documentation/arm/Booting33
-rw-r--r--Documentation/arm/Samsung/Overview.txt2
-rw-r--r--Documentation/devicetree/booting-without-of.txt48
-rw-r--r--Documentation/dmaengine.txt97
-rw-r--r--Documentation/feature-removal-schedule.txt36
-rw-r--r--Documentation/filesystems/Locking4
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/laptops/acer-wmi.txt184
-rw-r--r--Documentation/lockstat.txt36
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas14
-rw-r--r--Documentation/virtual/lguest/Makefile2
-rw-r--r--Documentation/virtual/lguest/lguest.c22
-rw-r--r--MAINTAINERS33
-rw-r--r--Makefile10
-rw-r--r--arch/alpha/include/asm/unistd.h3
-rw-r--r--arch/alpha/kernel/systbls.S1
-rw-r--r--arch/arm/Kconfig29
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/configs/at572d940hfek_defconfig358
-rw-r--r--arch/arm/configs/at91sam9261_defconfig (renamed from arch/arm/configs/at91sam9261ek_defconfig)85
-rw-r--r--arch/arm/configs/at91sam9263_defconfig (renamed from arch/arm/configs/at91sam9263ek_defconfig)84
-rw-r--r--arch/arm/configs/exynos4_defconfig2
-rw-r--r--arch/arm/configs/neocore926_defconfig104
-rw-r--r--arch/arm/configs/s5p6442_defconfig65
-rw-r--r--arch/arm/configs/usb-a9263_defconfig106
-rw-r--r--arch/arm/include/asm/fiq.h23
-rw-r--r--arch/arm/include/asm/mach/arch.h9
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/prom.h37
-rw-r--r--arch/arm/include/asm/setup.h4
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/devtree.c145
-rw-r--r--arch/arm/kernel/fiq.c45
-rw-r--r--arch/arm/kernel/fiqasm.S49
-rw-r--r--arch/arm/kernel/head-common.S24
-rw-r--r--arch/arm/kernel/head.S15
-rw-r--r--arch/arm/kernel/setup.c90
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/lib/lib1funcs.S25
-rw-r--r--arch/arm/mach-at91/Kconfig40
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/at572d940hf.c377
-rw-r--r--arch/arm/mach-at91/at572d940hf_devices.c970
-rw-r--r--arch/arm/mach-at91/at91cap9.c41
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c24
-rw-r--r--arch/arm/mach-at91/at91rm9200.c53
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c24
-rw-r--r--arch/arm/mach-at91/at91sam9260.c48
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c26
-rw-r--r--arch/arm/mach-at91/at91sam9261.c41
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c21
-rw-r--r--arch/arm/mach-at91/at91sam9263.c39
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c20
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c64
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c27
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c40
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c23
-rw-r--r--arch/arm/mach-at91/at91x40.c5
-rw-r--r--arch/arm/mach-at91/board-1arm.c12
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c6
-rw-r--r--arch/arm/mach-at91/board-at572d940hf_ek.c326
-rw-r--r--arch/arm/mach-at91/board-cam60.c6
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c13
-rw-r--r--arch/arm/mach-at91/board-carmeva.c8
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c6
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c12
-rw-r--r--arch/arm/mach-at91/board-csb337.c8
-rw-r--r--arch/arm/mach-at91/board-csb637.c8
-rw-r--r--arch/arm/mach-at91/board-eb01.c4
-rw-r--r--arch/arm/mach-at91/board-eb9200.c8
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c12
-rw-r--r--arch/arm/mach-at91/board-eco920.c32
-rw-r--r--arch/arm/mach-at91/board-flexibity.c6
-rw-r--r--arch/arm/mach-at91/board-foxg20.c6
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c8
-rw-r--r--arch/arm/mach-at91/board-kafa.c12
-rw-r--r--arch/arm/mach-at91/board-kb9202.c13
-rw-r--r--arch/arm/mach-at91/board-neocore926.c6
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c8
-rw-r--r--arch/arm/mach-at91/board-picotux200.c8
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c8
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c8
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c6
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c17
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c6
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c6
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c18
-rw-r--r--arch/arm/mach-at91/board-usb-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-usb-a9263.c6
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c12
-rw-r--r--arch/arm/mach-at91/clock.c69
-rw-r--r--arch/arm/mach-at91/clock.h20
-rw-r--r--arch/arm/mach-at91/generic.h30
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf_matrix.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at91cap9.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91rm9200.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91x40.h2
-rw-r--r--arch/arm/mach-at91/include/mach/board.h6
-rw-r--r--arch/arm/mach-at91/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h15
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h15
-rw-r--r--arch/arm/mach-at91/include/mach/memory.h2
-rw-r--r--arch/arm/mach-at91/include/mach/stamp9g20.h2
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h25
-rw-r--r--arch/arm/mach-at91/include/mach/timex.h5
-rw-r--r--arch/arm/mach-davinci/da850.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c16
-rw-r--r--arch/arm/mach-davinci/devices.c3
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-exynos4/Kconfig2
-rw-r--r--arch/arm/mach-exynos4/Makefile3
-rw-r--r--arch/arm/mach-exynos4/cpuidle.c86
-rw-r--r--arch/arm/mach-exynos4/mach-nuri.c89
-rw-r--r--arch/arm/mach-gemini/board-wbd111.c7
-rw-r--r--arch/arm/mach-gemini/board-wbd222.c7
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-netx/fb.c1
-rw-r--r--arch/arm/mach-nomadik/Kconfig1
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-s3c2410/mach-amlm5900.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-tct_hammer.c6
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c20
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h48
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h60
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h53
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h49
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h44
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h71
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h42
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h74
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h40
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h36
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h54
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h70
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h69
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h46
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c34
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c0.c7
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c1.c7
-rw-r--r--arch/arm/mach-s3c64xx/sleep.S8
-rw-r--r--arch/arm/mach-s5p6442/Kconfig25
-rw-r--r--arch/arm/mach-s5p6442/Makefile24
-rw-r--r--arch/arm/mach-s5p6442/Makefile.boot2
-rw-r--r--arch/arm/mach-s5p6442/clock.c420
-rw-r--r--arch/arm/mach-s5p6442/cpu.c143
-rw-r--r--arch/arm/mach-s5p6442/dev-audio.c217
-rw-r--r--arch/arm/mach-s5p6442/dev-spi.c121
-rw-r--r--arch/arm/mach-s5p6442/dma.c105
-rw-r--r--arch/arm/mach-s5p6442/include/mach/debug-macro.S35
-rw-r--r--arch/arm/mach-s5p6442/include/mach/dma.h26
-rw-r--r--arch/arm/mach-s5p6442/include/mach/entry-macro.S48
-rw-r--r--arch/arm/mach-s5p6442/include/mach/gpio.h123
-rw-r--r--arch/arm/mach-s5p6442/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-s5p6442/include/mach/io.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/irqs.h87
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h76
-rw-r--r--arch/arm/mach-s5p6442/include/mach/memory.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/pwm-clock.h70
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-clock.h104
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-irq.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/spi-clocks.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/system.h23
-rw-r--r--arch/arm/mach-s5p6442/include/mach/tick.h26
-rw-r--r--arch/arm/mach-s5p6442/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/uncompress.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/vmalloc.h17
-rw-r--r--arch/arm/mach-s5p6442/init.c44
-rw-r--r--arch/arm/mach-s5p6442/mach-smdk6442.c102
-rw-r--r--arch/arm/mach-s5p6442/setup-i2c0.c28
-rw-r--r--arch/arm/mach-s5pc100/Makefile2
-rw-r--r--arch/arm/mach-s5pv210/Makefile2
-rw-r--r--arch/arm/mach-u300/Makefile2
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c16
-rw-r--r--arch/arm/mach-ux500/devices-common.h10
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h28
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h34
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h3
-rw-r--r--arch/arm/mm/cache-v6.S1
-rw-r--r--arch/arm/mm/cache-v7.S2
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c15
-rw-r--r--arch/arm/mm/mm.h7
-rw-r--r--arch/arm/mm/mmu.c9
-rw-r--r--arch/arm/mm/proc-v6.S4
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/plat-nomadik/Kconfig5
-rw-r--r--arch/arm/plat-nomadik/Makefile1
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h2
-rw-r--r--arch/arm/plat-omap/Makefile2
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h103
-rw-r--r--arch/arm/plat-s5p/Kconfig2
-rw-r--r--arch/arm/plat-s5p/cpu.c10
-rw-r--r--arch/arm/plat-s5p/include/plat/s5p6442.h33
-rw-r--r--arch/arm/plat-samsung/Makefile1
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/debug-macro.S2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h1
-rw-r--r--arch/avr32/include/asm/unistd.h3
-rw-r--r--arch/avr32/kernel/syscall_table.S1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c4
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h6
-rw-r--r--arch/blackfin/include/asm/gptimers.h18
-rw-r--r--arch/blackfin/include/asm/unistd.h4
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c36
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF514.h16
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h4
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h52
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c4
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h79
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h93
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h94
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h19
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c4
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h52
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/blackfin/mm/maccess.c4
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c10
-rw-r--r--arch/cris/arch-v10/kernel/entry.S1
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c7
-rw-r--r--arch/cris/arch-v32/kernel/entry.S1
-rw-r--r--arch/cris/include/asm/unistd.h3
-rw-r--r--arch/frv/include/asm/unistd.h3
-rw-r--r--arch/frv/kernel/entry.S1
-rw-r--r--arch/h8300/include/asm/unistd.h3
-rw-r--r--arch/h8300/kernel/syscalls.S1
-rw-r--r--arch/ia64/include/asm/unistd.h4
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/m32r/include/asm/unistd.h3
-rw-r--r--arch/m32r/kernel/syscall_table.S1
-rw-r--r--arch/m68k/include/asm/unistd.h3
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/microblaze/include/asm/unistd.h3
-rw-r--r--arch/microblaze/kernel/prom.c2
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c11
-rw-r--r--arch/mips/include/asm/prom.h3
-rw-r--r--arch/mips/include/asm/unistd.h15
-rw-r--r--arch/mips/kernel/prom.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/txx9/generic/setup.c3
-rw-r--r--arch/mn10300/include/asm/unistd.h3
-rw-r--r--arch/mn10300/kernel/entry.S1
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c3
-rw-r--r--arch/s390/include/asm/pgtable.h16
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/mm/maccess.c4
-rw-r--r--arch/s390/mm/pgtable.c23
-rw-r--r--arch/sh/include/asm/unistd_32.h3
-rw-r--r--arch/sh/include/asm/unistd_64.h3
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/syscalls_64.S1
-rw-r--r--arch/sparc/include/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/tile/include/asm/hardwall.h15
-rw-r--r--arch/tile/kernel/Makefile2
-rw-r--r--arch/tile/kernel/hardwall.c90
-rw-r--r--arch/tile/kernel/proc.c73
-rw-r--r--arch/tile/kernel/sysfs.c185
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/desc.h152
-rw-r--r--arch/x86/include/asm/idle.h2
-rw-r--r--arch/x86/include/asm/mmu.h4
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/unistd_32.h3
-rw-r--r--arch/x86/include/asm/unistd_64.h2
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h590
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h71
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1012
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c40
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c7
-rw-r--r--arch/x86/kernel/cpu/bugs.c1
-rw-r--r--arch/x86/kernel/cpu/common.c9
-rw-r--r--arch/x86/kernel/ftrace.c12
-rw-r--r--arch/x86/kernel/process.c43
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kernel/syscall_table_32.S1
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/mm/fault.c35
-rw-r--r--arch/x86/oprofile/op_model_amd.c95
-rw-r--r--arch/x86/platform/efi/efi.c45
-rw-r--r--arch/x86/platform/efi/efi_64.c5
-rw-r--r--arch/x86/platform/uv/tlb_uv.c1484
-rw-r--r--arch/x86/platform/uv/uv_time.c16
-rw-r--r--arch/xtensa/include/asm/unistd.h4
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acconfig.h1
-rw-r--r--drivers/acpi/acpica/acevents.h17
-rw-r--r--drivers/acpi/acpica/acglobal.h13
-rw-r--r--drivers/acpi/acpica/amlcode.h15
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evglock.c335
-rw-r--r--drivers/acpi/acpica/evmisc.c303
-rw-r--r--drivers/acpi/acpica/evregion.c121
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c13
-rw-r--r--drivers/acpi/acpica/excreate.c3
-rw-r--r--drivers/acpi/acpica/nsrepair.c13
-rw-r--r--drivers/acpi/acpica/utdecode.c5
-rw-r--r--drivers/acpi/acpica/utmutex.c12
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/custom_method.c100
-rw-r--r--drivers/acpi/debugfs.c92
-rw-r--r--drivers/acpi/ec.c19
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/osl.c33
-rw-r--r--drivers/acpi/processor_core.c12
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/sysfs.c8
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/paride/pcd.c1
-rw-r--r--drivers/block/virtio_blk.c91
-rw-r--r--drivers/cdrom/viocd.c1
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/TODO14
-rw-r--r--drivers/dma/at_hdmac.c376
-rw-r--r--drivers/dma/at_hdmac_regs.h30
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dw_dmac.c272
-rw-r--r--drivers/dma/dw_dmac_regs.h2
-rw-r--r--drivers/dma/intel_mid_dma.c17
-rw-r--r--drivers/dma/ioat/dma_v2.c8
-rw-r--r--drivers/dma/iop-adma.c6
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/pch_dma.c96
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/gpio/Kconfig38
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/gpio-exynos4.c (renamed from arch/arm/mach-exynos4/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-nomadik.c (renamed from arch/arm/plat-nomadik/gpio.c)65
-rw-r--r--drivers/gpio/gpio-omap.c (renamed from arch/arm/plat-omap/gpio.c)105
-rw-r--r--drivers/gpio/gpio-plat-samsung.c (renamed from arch/arm/plat-samsung/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-s5pc100.c (renamed from arch/arm/mach-s5pc100/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-s5pv210.c (renamed from arch/arm/mach-s5pv210/gpiolib.c)0
-rw-r--r--drivers/gpio/gpio-u300.c (renamed from arch/arm/mach-u300/gpio.c)0
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpio/langwell_gpio.c65
-rw-r--r--drivers/gpio/pca953x.c249
-rw-r--r--drivers/gpio/pch_gpio.c2
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/md/dm-io.c27
-rw-r--r--drivers/md/dm-kcopyd.c168
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-snap-persistent.c13
-rw-r--r--drivers/md/dm-snap.c10
-rw-r--r--drivers/md/dm-table.c23
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/misc/kgdbts.c5
-rw-r--r--drivers/mmc/host/mmci.c25
-rw-r--r--drivers/mtd/Kconfig18
-rw-r--r--drivers/mtd/Makefile3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/doc2000.c4
-rw-r--r--drivers/mtd/devices/doc2001.c4
-rw-r--r--drivers/mtd/devices/doc2001plus.c4
-rw-r--r--drivers/mtd/devices/lart.c9
-rw-r--r--drivers/mtd/devices/m25p80.c109
-rw-r--r--drivers/mtd/devices/ms02-nv.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c45
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/devices/phram.c4
-rw-r--r--drivers/mtd/devices/pmc551.c6
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c8
-rw-r--r--drivers/mtd/maps/Kconfig23
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c4
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c21
-rw-r--r--drivers/mtd/maps/cdb89712.c12
-rw-r--r--drivers/mtd/maps/ceiva.c6
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c20
-rw-r--r--drivers/mtd/maps/dilnetpc.c9
-rw-r--r--drivers/mtd/maps/dmv182.c4
-rw-r--r--drivers/mtd/maps/edb7312.c26
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c7
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c27
-rw-r--r--drivers/mtd/maps/h720x-flash.c6
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c22
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/ixp2000.c4
-rw-r--r--drivers/mtd/maps/ixp4xx.c16
-rw-r--r--drivers/mtd/maps/l440gx.c4
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c45
-rw-r--r--drivers/mtd/maps/mbx860.c6
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c12
-rw-r--r--drivers/mtd/maps/octagon-5066.c4
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c34
-rw-r--r--drivers/mtd/maps/physmap_of.c30
-rw-r--r--drivers/mtd/maps/plat-ram.c24
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c18
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/rpxlite.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c21
-rw-r--r--drivers/mtd/maps/sbc_gxx.c4
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c6
-rw-r--r--drivers/mtd/maps/scx200_docflash.c16
-rw-r--r--drivers/mtd/maps/solutionengine.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/maps/tqm8xxl.c20
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/maps/tsunami_flash.c4
-rw-r--r--drivers/mtd/maps/uclinux.c12
-rw-r--r--drivers/mtd/maps/vmax301.c4
-rw-r--r--drivers/mtd/maps/vmu-flash.c4
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c15
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c55
-rw-r--r--drivers/mtd/mtdconcat.c4
-rw-r--r--drivers/mtd/mtdcore.c167
-rw-r--r--drivers/mtd/mtdcore.h6
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/ams-delta.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c13
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/autcpu12.c16
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c7
-rw-r--r--drivers/mtd/nand/cafe_nand.c11
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c19
-rw-r--r--drivers/mtd/nand/davinci_nand.c51
-rw-r--r--drivers/mtd/nand/denali.c247
-rw-r--r--drivers/mtd/nand/denali.h373
-rw-r--r--drivers/mtd/nand/diskonchip.c18
-rw-r--r--drivers/mtd/nand/edb7312.c9
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c12
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c25
-rw-r--r--drivers/mtd/nand/gpio.c4
-rw-r--r--drivers/mtd/nand/h1910.c5
-rw-r--r--drivers/mtd/nand/jz4740_nand.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c64
-rw-r--r--drivers/mtd/nand/nand_base.c18
-rw-r--r--drivers/mtd/nand/nand_bbt.c27
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c65
-rw-r--r--drivers/mtd/nand/nomadik_nand.c7
-rw-r--r--drivers/mtd/nand/nuc900_nand.c4
-rw-r--r--drivers/mtd/nand/omap2.c32
-rw-r--r--drivers/mtd/nand/orion_nand.c14
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c12
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c15
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c13
-rw-r--r--drivers/mtd/nand/rtc_from4.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c75
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c16
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c10
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c14
-rw-r--r--drivers/mtd/onenand/Kconfig1
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/mtd/onenand/onenand_base.c54
-rw-r--r--drivers/mtd/onenand/onenand_sim.c3
-rw-r--r--drivers/mtd/onenand/samsung.c12
-rw-r--r--drivers/mtd/ubi/gluebi.c6
-rw-r--r--drivers/net/sfc/mtd.c6
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/oprofile/event_buffer.h2
-rw-r--r--drivers/oprofile/oprof.c2
-rw-r--r--drivers/pci/dmar.c7
-rw-r--r--drivers/pci/intel-iommu.c240
-rw-r--r--drivers/pci/iova.c12
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/acer-wmi.c184
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/asus-laptop.c34
-rw-r--r--drivers/platform/x86/asus-wmi.c22
-rw-r--r--drivers/platform/x86/asus_acpi.c77
-rw-r--r--drivers/platform/x86/compal-laptop.c36
-rw-r--r--drivers/platform/x86/dell-laptop.c12
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c3
-rw-r--r--drivers/platform/x86/dell-wmi.c17
-rw-r--r--drivers/platform/x86/eeepc-laptop.c21
-rw-r--r--drivers/platform/x86/eeepc-wmi.c14
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c39
-rw-r--r--drivers/platform/x86/hdaps.c19
-rw-r--r--drivers/platform/x86/hp-wmi.c43
-rw-r--r--drivers/platform/x86/ibm_rtl.c23
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c5
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c72
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c607
-rw-r--r--drivers/platform/x86/intel_oaktrail.c396
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c14
-rw-r--r--drivers/platform/x86/msi-laptop.c23
-rw-r--r--drivers/platform/x86/msi-wmi.c45
-rw-r--r--drivers/platform/x86/sony-laptop.c106
-rw-r--r--drivers/platform/x86/tc1100-wmi.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c490
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c59
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c11
-rw-r--r--drivers/platform/x86/wmi.c10
-rw-r--r--drivers/platform/x86/xo15-ebook.c5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h1
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c11
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c26
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/fcoe/fcoe.c58
-rw-r--r--drivers/scsi/fcoe/fcoe.h10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c133
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c40
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_disc.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c16
-rw-r--r--drivers/scsi/libfc/fc_libfc.h1
-rw-r--r--drivers/scsi/libsas/sas_ata.c60
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_phy.c4
-rw-r--r--drivers/scsi/libsas/sas_port.c21
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h43
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c312
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h87
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h501
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c545
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1659
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c93
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c83
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c102
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h11
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h23
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c77
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c87
-rw-r--r--drivers/scsi/scsi_proc.c5
-rw-r--r--drivers/scsi/scsi_trace.c4
-rw-r--r--drivers/scsi/sd.c82
-rw-r--r--drivers/scsi/ultrastor.c2
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi_bfin_sport.c952
-rw-r--r--drivers/spi/tle62x0.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c25
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c29
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tmr.c7
-rw-r--r--drivers/target/target_core_transport.c68
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c8
-rw-r--r--drivers/thermal/thermal_sys.c10
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/pci-quirks.c63
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/xhci-pci.c26
-rw-r--r--drivers/usb/host/xhci-ring.c89
-rw-r--r--drivers/usb/host/xhci.c240
-rw-r--r--drivers/usb/host/xhci.h22
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/test.c6
-rw-r--r--drivers/vhost/vhost.c138
-rw-r--r--drivers/vhost/vhost.h21
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_ring.c53
-rw-r--r--fs/9p/vfs_inode.c4
-rw-r--r--fs/affs/namei.c5
-rw-r--r--fs/afs/dir.c5
-rw-r--r--fs/attr.c7
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/bfs/dir.c3
-rw-r--r--fs/bio.c16
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/cifs/cifsacl.c3
-rw-r--r--fs/coda/dir.c5
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/ecryptfs/crypto.c74
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h26
-rw-r--r--fs/ecryptfs/file.c2
-rw-r--r--fs/ecryptfs/inode.c286
-rw-r--r--fs/ecryptfs/main.c84
-rw-r--r--fs/ecryptfs/super.c16
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/fat/namei_msdos.c5
-rw-r--r--fs/fat/namei_vfat.c5
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/hfs/dir.c6
-rw-r--r--fs/hfsplus/dir.c8
-rw-r--r--fs/hostfs/hostfs_kern.c5
-rw-r--r--fs/hpfs/namei.c5
-rw-r--r--fs/inode.c54
-rw-r--r--fs/jffs2/dir.c9
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/scan.c19
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/namei.c5
-rw-r--r--fs/logfs/dir.c5
-rw-r--r--fs/minix/namei.c5
-rw-r--r--fs/namei.c44
-rw-r--r--fs/ncpfs/dir.c15
-rw-r--r--fs/nfs/Kconfig10
-rw-r--r--fs/nfs/Makefile4
-rw-r--r--fs/nfs/callback.h17
-rw-r--r--fs/nfs/callback_proc.c51
-rw-r--r--fs/nfs/callback_xdr.c96
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/delegation.c14
-rw-r--r--fs/nfs/dir.c9
-rw-r--r--fs/nfs/inode.c11
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs4filelayout.c38
-rw-r--r--fs/nfs/nfs4filelayout.h8
-rw-r--r--fs/nfs/nfs4filelayoutdev.c119
-rw-r--r--fs/nfs/nfs4proc.c107
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/nfs4xdr.c132
-rw-r--r--fs/nfs/nfsroot.c2
-rw-r--r--fs/nfs/objlayout/Kbuild5
-rw-r--r--fs/nfs/objlayout/objio_osd.c1057
-rw-r--r--fs/nfs/objlayout/objlayout.c712
-rw-r--r--fs/nfs/objlayout/objlayout.h187
-rw-r--r--fs/nfs/objlayout/pnfs_osd_xdr_cli.c412
-rw-r--r--fs/nfs/pagelist.c62
-rw-r--r--fs/nfs/pnfs.c342
-rw-r--r--fs/nfs/pnfs.h117
-rw-r--r--fs/nfs/pnfs_dev.c270
-rw-r--r--fs/nfs/read.c9
-rw-r--r--fs/nfs/super.c25
-rw-r--r--fs/nfs/write.c10
-rw-r--r--fs/nfsd/export.c6
-rw-r--r--fs/nfsd/nfs3proc.c2
-rw-r--r--fs/nfsd/nfs3xdr.c2
-rw-r--r--fs/nfsd/nfs4proc.c73
-rw-r--r--fs/nfsd/nfs4state.c42
-rw-r--r--fs/nfsd/nfs4xdr.c11
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/vfs.c33
-rw-r--r--fs/nfsd/vfs.h6
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/nilfs2/namei.c5
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/omfs/dir.c11
-rw-r--r--fs/proc/base.c9
-rw-r--r--fs/reiserfs/namei.c5
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c1
-rw-r--r--fs/squashfs/export.c2
-rw-r--r--fs/squashfs/fragment.c2
-rw-r--r--fs/squashfs/id.c2
-rw-r--r--fs/squashfs/super.c6
-rw-r--r--fs/sysv/namei.c5
-rw-r--r--fs/ubifs/dir.c5
-rw-r--r--fs/ubifs/shrinker.c3
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/udf/namei.c5
-rw-r--r--fs/ufs/namei.c5
-rw-r--r--fs/xattr.c23
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c3
-rw-r--r--include/acpi/acpiosxf.h3
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actypes.h25
-rw-r--r--include/acpi/processor.h7
-rw-r--r--include/asm-generic/gpio.h10
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/atomic.h13
-rw-r--r--include/linux/cpuset.h2
-rw-r--r--include/linux/device-mapper.h6
-rw-r--r--include/linux/dm-io.h3
-rw-r--r--include/linux/dm-kcopyd.h3
-rw-r--r--include/linux/dma_remapping.h4
-rw-r--r--include/linux/dw_dmac.h1
-rw-r--r--include/linux/efi.h1
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/ftrace_event.h12
-rw-r--r--include/linux/gpio.h8
-rw-r--r--include/linux/mm_types.h14
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/mtd/mtd.h17
-rw-r--r--include/linux/mtd/nand.h4
-rw-r--r--include/linux/mtd/partitions.h16
-rw-r--r--include/linux/mtd/physmap.h5
-rw-r--r--include/linux/nfs4.h9
-rw-r--r--include/linux/nfs_page.h2
-rw-r--r--include/linux/nfs_xdr.h23
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pm_qos_params.h4
-rw-r--r--include/linux/pnfs_osd_xdr.h345
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sunrpc/msg_prot.h1
-rw-r--r--include/linux/sunrpc/svcsock.h1
-rw-r--r--include/linux/sunrpc/xdr.h2
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/virtio.h9
-rw-r--r--include/linux/virtio_9p.h25
-rw-r--r--include/linux/virtio_balloon.h25
-rw-r--r--include/linux/virtio_blk.h25
-rw-r--r--include/linux/virtio_config.h25
-rw-r--r--include/linux/virtio_console.h26
-rw-r--r--include/linux/virtio_ids.h24
-rw-r--r--include/linux/virtio_net.h25
-rw-r--r--include/linux/virtio_pci.h23
-rw-r--r--include/linux/virtio_ring.h52
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_tcq.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric_ops.h1
-rw-r--r--include/target/target_core_transport.h1
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/ftrace.h13
-rw-r--r--init/main.c2
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/core.c8
-rw-r--r--kernel/fork.c42
-rw-r--r--kernel/jump_label.c18
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/pm_qos_params.c37
-rw-r--r--kernel/rcutree.c208
-rw-r--r--kernel/rcutree.h30
-rw-r--r--kernel/rcutree_plugin.h33
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/sched.c56
-rw-r--r--kernel/sched_fair.c5
-rw-r--r--kernel/sched_rt.c10
-rw-r--r--kernel/sched_stats.h4
-rw-r--r--kernel/trace/ftrace.c31
-rw-r--r--kernel/trace/ring_buffer.c10
-rw-r--r--kernel/trace/trace.h15
-rw-r--r--kernel/trace/trace_events.c7
-rw-r--r--kernel/trace/trace_output.c27
-rw-r--r--kernel/watchdog.c9
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/maccess.c8
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/rmap.c22
-rw-r--r--mm/shmem.c2
-rw-r--r--net/sunrpc/clnt.c29
-rw-r--r--net/sunrpc/rpcb_clnt.c97
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/svcsock.c336
-rw-r--r--net/sunrpc/xdr.c19
-rw-r--r--net/sunrpc/xprtsock.c435
-rw-r--r--scripts/recordmcount.h8
-rwxr-xr-xscripts/tags.sh6
-rw-r--r--security/apparmor/lsm.c3
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/soc/samsung/Kconfig4
-rw-r--r--sound/soc/samsung/smdk_wm8580.c2
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/builtin-report.c23
-rw-r--r--tools/perf/builtin-script.c1
-rw-r--r--tools/perf/builtin-test.c2
-rw-r--r--tools/perf/builtin-top.c37
-rw-r--r--tools/perf/util/event.c31
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c68
-rw-r--r--tools/perf/util/evlist.h6
-rw-r--r--tools/perf/util/evsel.c26
-rw-r--r--tools/perf/util/evsel.h7
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/include/linux/const.h1
-rw-r--r--tools/perf/util/python.c14
-rw-r--r--tools/perf/util/session.c12
-rw-r--r--tools/perf/util/symbol.c48
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/virtio/virtio_test.c19
883 files changed, 22951 insertions, 14873 deletions
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 6f242d5..17910e2 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -189,8 +189,7 @@ static void __iomem *baseaddr;
<title>Partition defines</title>
<para>
If you want to divide your device into partitions, then
- enable the configuration switch CONFIG_MTD_PARTITIONS and define
- a partitioning scheme suitable to your board.
+ define a partitioning scheme suitable to your board.
</para>
<programlisting>
#define NUM_PARTITIONS 2
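(Illustrative sketch, not part of this patch: a board-specific partitioning
scheme of the kind referred to above is normally expressed as an array of
struct mtd_partition. The partition names, offsets and sizes below are
placeholders.)

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition partition_info[NUM_PARTITIONS] = {
	{
		.name	= "Flash partition 1",	/* hypothetical layout */
		.offset	= 0,
		.size	= 8 * 1024 * 1024,
	},
	{
		.name	= "Flash partition 2",
		.offset	= MTDPART_OFS_APPEND,	/* directly after partition 1 */
		.size	= MTDPART_SIZ_FULL,	/* rest of the device */
	},
};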
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index c078ad4..8173cec 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -99,18 +99,11 @@ o "qp" indicates that RCU still expects a quiescent state from
o "dt" is the current value of the dyntick counter that is incremented
when entering or leaving dynticks idle state, either by the
- scheduler or by irq. The number after the "/" is the interrupt
- nesting depth when in dyntick-idle state, or one greater than
- the interrupt-nesting depth otherwise.
-
- This field is displayed only for CONFIG_NO_HZ kernels.
-
-o "dn" is the current value of the dyntick counter that is incremented
- when entering or leaving dynticks idle state via NMI. If both
- the "dt" and "dn" values are even, then this CPU is in dynticks
- idle mode and may be ignored by RCU. If either of these two
- counters is odd, then RCU must be alert to the possibility of
- an RCU read-side critical section running on this CPU.
+ scheduler or by irq. This number is even if the CPU is in
+ dyntick idle mode and odd otherwise. The number after the first
+ "/" is the interrupt nesting depth when in dyntick-idle state,
+ or one greater than the interrupt-nesting depth otherwise.
+ The number after the second "/" is the NMI nesting depth.
This field is displayed only for CONFIG_NO_HZ kernels.
diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt
index 3e1d25a..5f55373 100644
--- a/Documentation/acpi/method-customizing.txt
+++ b/Documentation/acpi/method-customizing.txt
@@ -66,3 +66,8 @@ Note: We can use a kernel with multiple custom ACPI method running,
But each individual write to debugfs can implement a SINGLE
method override. i.e. if we want to insert/override multiple
ACPI methods, we need to redo step c) ~ g) for multiple times.
+
+Note: Be aware that root can mis-use this driver to modify arbitrary
+ memory and gain additional rights, if root's privileges got
+ restricted (for example if root is not allowed to load additional
+ modules after boot).
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 7685029..4e686a2 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -65,13 +65,19 @@ looks at the connected hardware is beyond the scope of this document.
The boot loader must ultimately be able to provide a MACH_TYPE_xxx
value to the kernel. (see linux/arch/arm/tools/mach-types).
-
-4. Setup the kernel tagged list
--------------------------------
+4. Setup boot data
+------------------
Existing boot loaders: OPTIONAL, HIGHLY RECOMMENDED
New boot loaders: MANDATORY
+The boot loader must provide either a tagged list or a dtb image for
+passing configuration data to the kernel. The physical address of the
+boot data is passed to the kernel in register r2.
+
+4a. Setup the kernel tagged list
+--------------------------------
+
The boot loader must create and initialise the kernel tagged list.
A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE.
The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag
@@ -101,6 +107,24 @@ The tagged list must be placed in a region of memory where neither
the kernel decompressor nor initrd 'bootp' program will overwrite
it. The recommended placement is in the first 16KiB of RAM.
+4b. Setup the device tree
+-------------------------
+
+The boot loader must load a device tree image (dtb) into system ram
+at a 64bit aligned address and initialize it with the boot data. The
+dtb format is documented in Documentation/devicetree/booting-without-of.txt.
+The kernel will look for the dtb magic value of 0xd00dfeed at the dtb
+physical address to determine if a dtb has been passed instead of a
+tagged list.
+
+The boot loader must pass at a minimum the size and location of the
+system memory, and the root filesystem location. The dtb must be
+placed in a region of memory where the kernel decompressor will not
+overwrite it. The recommended placement is in the first 16KiB of RAM
+with the caveat that it may not be located at physical address 0 since
+the kernel interprets a value of 0 in r2 to mean neither a tagged list
+nor a dtb were passed.
+
5. Calling the kernel image
---------------------------
@@ -125,7 +149,8 @@ In either case, the following conditions must be met:
- CPU register settings
r0 = 0,
r1 = machine type number discovered in (3) above.
- r2 = physical address of tagged list in system RAM.
+ r2 = physical address of tagged list in system RAM, or
+ physical address of device tree block (dtb) in system RAM
- CPU mode
All forms of interrupts must be disabled (IRQs and FIQs)
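(Loader-side illustration, not part of this patch: on ARM the first three C
function arguments are passed in r0-r2, so a boot loader written in C can hand
over control roughly as below once the conditions above are met. The function
names and the C-level jump are assumptions of the sketch, not kernel code.)

typedef void (*kernel_entry_fn)(unsigned long zero,
				unsigned long machine_type,
				unsigned long boot_data);

static void boot_linux(unsigned long kernel_phys,
		       unsigned long machine_type,
		       unsigned long boot_data_phys)
{
	kernel_entry_fn entry = (kernel_entry_fn)kernel_phys;

	/*
	 * boot_data_phys is either the tagged list or a 64-bit aligned dtb;
	 * it must not be 0, since r2 == 0 means no boot data was passed.
	 */
	entry(0, machine_type, boot_data_phys);	/* r0 = 0, r1, r2 */
}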
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
index c3094ea..658abb2 100644
--- a/Documentation/arm/Samsung/Overview.txt
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -14,7 +14,6 @@ Introduction
- S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
- S3C64XX: S3C6400 and S3C6410
- S5P6440
- - S5P6442
- S5PC100
- S5PC110 / S5PV210
@@ -36,7 +35,6 @@ Configuration
unifying all the SoCs into one kernel.
s5p6440_defconfig - S5P6440 specific default configuration
- s5p6442_defconfig - S5P6442 specific default configuration
s5pc100_defconfig - S5PC100 specific default configuration
s5pc110_defconfig - S5PC110 specific default configuration
s5pv210_defconfig - S5PV210 specific default configuration
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 50619a0..7c1329d 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -12,8 +12,9 @@ Table of Contents
=================
I - Introduction
- 1) Entry point for arch/powerpc
- 2) Entry point for arch/x86
+ 1) Entry point for arch/arm
+ 2) Entry point for arch/powerpc
+ 3) Entry point for arch/x86
II - The DT block format
1) Header
@@ -148,7 +149,46 @@ upgrades without significantly impacting the kernel code or cluttering
it with special cases.
-1) Entry point for arch/powerpc
+1) Entry point for arch/arm
+---------------------------
+
+ There is one single entry point to the kernel, at the start
+ of the kernel image. That entry point supports two calling
+ conventions. A summary of the interface is described here. A full
+ description of the boot requirements is documented in
+ Documentation/arm/Booting
+
+ a) ATAGS interface. Minimal information is passed from firmware
+ to the kernel with a tagged list of predefined parameters.
+
+ r0 : 0
+
+ r1 : Machine type number
+
+ r2 : Physical address of tagged list in system RAM
+
+ b) Entry with a flattened device-tree block. Firmware loads the
+ physical address of the flattened device tree block (dtb) into r2,
+ r1 is not used, but it is considered good practise to use a valid
+ machine number as described in Documentation/arm/Booting.
+
+ r0 : 0
+
+ r1 : Valid machine type number. When using a device tree,
+ a single machine type number will often be assigned to
+ represent a class or family of SoCs.
+
+ r2 : physical pointer to the device-tree block
+ (defined in chapter II) in RAM. Device tree can be located
+ anywhere in system RAM, but it should be aligned on a 64 bit
+ boundary.
+
+ The kernel will differentiate between ATAGS and device tree booting by
+ reading the memory pointed to by r2 and looking for either the flattened
+ device tree block magic value (0xd00dfeed) or the ATAG_CORE value at
+ offset 0x4 from r2 (0x54410001).
+
+2) Entry point for arch/powerpc
-------------------------------
There is one single entry point to the kernel, at the start
@@ -226,7 +266,7 @@ it with special cases.
cannot support both configurations with Book E and configurations
with classic Powerpc architectures.
-2) Entry point for arch/x86
+3) Entry point for arch/x86
-------------------------------
There is one single 32bit entry point to the kernel at code32_start,
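(Illustrative sketch, not part of this patch: the ATAGS-versus-dtb detection
described in the arch/arm section above can be expressed as a small helper
that peeks at the memory r2 points to. The function and macro names are
invented for the example.)

#include <linux/types.h>
#include <asm/byteorder.h>

#define FDT_MAGIC	0xd00dfeed	/* dtb magic, stored big-endian */
#define ATAG_CORE_TAG	0x54410001	/* first ATAG id, at offset 0x4 */

/* returns 1 for a dtb, 0 for an ATAG list, -1 if r2 points at neither */
static int boot_data_is_dtb(const void *boot_data)
{
	const __be32 *be = boot_data;
	const u32 *native = boot_data;

	if (be32_to_cpu(be[0]) == FDT_MAGIC)
		return 1;
	if (native[1] == ATAG_CORE_TAG)
		return 0;
	return -1;
}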
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 0c1c2f6..5a0cb1e 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -1 +1,96 @@
-See Documentation/crypto/async-tx-api.txt
+ DMA Engine API Guide
+ ====================
+
+ Vinod Koul <vinod dot koul at intel.com>
+
+NOTE: For DMA Engine usage in async_tx please see:
+ Documentation/crypto/async-tx-api.txt
+
+
+Below is a guide to device driver writers on how to use the Slave-DMA API of the
+DMA Engine. This is applicable only for slave DMA usage only.
+
+The slave DMA usage consists of following steps
+1. Allocate a DMA slave channel
+2. Set slave and controller specific parameters
+3. Get a descriptor for transaction
+4. Submit the transaction and wait for callback notification
+
+1. Allocate a DMA slave channel
+Channel allocation is slightly different in the slave DMA context, client
+drivers typically need a channel from a particular DMA controller only and even
+in some cases a specific channel is desired. To request a channel
+dma_request_channel() API is used.
+
+Interface:
+struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+ dma_filter_fn filter_fn,
+ void *filter_param);
+where dma_filter_fn is defined as:
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+When the optional 'filter_fn' parameter is set to NULL dma_request_channel
+simply returns the first channel that satisfies the capability mask. Otherwise,
+when the mask parameter is insufficient for specifying the necessary channel,
+the filter_fn routine can be used to disposition the available channels in the
+system. The filter_fn routine is called once for each free channel in the
+system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags
+that channel to be the return value from dma_request_channel. A channel
+allocated via this interface is exclusive to the caller, until
+dma_release_channel() is called.
+
+2. Set slave and controller specific parameters
+Next step is always to pass some specific information to the DMA driver. Most of
+the generic information which a slave DMA can use is in struct dma_slave_config.
+It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA
+burst lengths etc. If some DMA controllers have more parameters to be sent then
+they should try to embed struct dma_slave_config in their controller specific
+structure. That gives flexibility to client to pass more parameters, if
+required.
+
+Interface:
+int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+
+3. Get a descriptor for transaction
+For slave usage the various modes of slave transfers supported by the
+DMA-engine are:
+slave_sg - DMA a list of scatter gather buffers from/to a peripheral
+dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
+ operation is explicitly stopped.
+The non NULL return of this transfer API represents a "descriptor" for the given
+transaction.
+
+Interface:
+struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
+struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction);
+
+4. Submit the transaction and wait for callback notification
+To schedule the transaction to be scheduled by dma device, the "descriptor"
+returned in above (3) needs to be submitted.
+To tell the dma driver that a transaction is ready to be serviced, the
+descriptor->submit() callback needs to be invoked. This chains the descriptor to
+the pending queue.
+The transactions in the pending queue can be activated by calling the
+issue_pending API. If channel is idle then the first transaction in queue is
+started and subsequent ones queued up.
+On completion of the DMA operation the next in queue is submitted and a tasklet
+triggered. The tasklet would then call the client driver completion callback
+routine for notification, if set.
+Interface:
+void dma_async_issue_pending(struct dma_chan *chan);
+
+==============================================================================
+
+Additional usage notes for dma driver writers
+1/ Although DMA engine specifies that completion callback routines cannot submit
+any new operations, but typically for slave DMA subsequent transaction may not
+be available for submit prior to callback routine being called. This requirement
+is not a requirement for DMA-slave devices. But they should take care to drop
+the spin-lock they might be holding before calling the callback routine
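(Illustrative sketch, not part of this patch: putting the four steps above
together, a slave-DMA client might look roughly like the following. The FIFO
address, bus width, burst size and completion plumbing are placeholder
choices.)

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/errno.h>

static void xfer_done(void *arg)
{
	complete(arg);		/* step 4: completion callback */
}

static int do_slave_tx(struct scatterlist *sgl, unsigned int sg_len,
		       dma_addr_t fifo_addr, struct completion *done)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* 1. allocate a slave channel; no filter_fn, any DMA_SLAVE channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* 2. pass the slave and controller specific parameters */
	dmaengine_slave_config(chan, &cfg);

	/* 3. get a descriptor for a scatter-gather slave transaction */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	desc->callback = xfer_done;
	desc->callback_param = done;

	/* 4. submit to the pending queue and kick it off */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	wait_for_completion(done);
	dma_release_channel(chan);
	return 0;
}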
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ff31b1c..1a9446b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,42 @@ be removed from this file.
---------------------------
+What: x86 floppy disable_hlt
+When: 2012
+Why: ancient workaround of dubious utility clutters the
+ code used by everybody else.
+Who: Len Brown <len.brown@intel.com>
+
+---------------------------
+
+What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
+When: 2012
+Why: This optional sub-feature of APM is of dubious reliability,
+ and ancient APM laptops are likely better served by calling HLT.
+ Deleting CONFIG_APM_CPU_IDLE allows x86 to stop exporting
+ the pm_idle function pointer to modules.
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
+What: x86_32 "no-hlt" cmdline param
+When: 2012
+Why: remove a branch from idle path, simplify code used by everybody.
+ This option disabled the use of HLT in idle and machine_halt()
+ for hardware that was flakey 15-years ago. Today we have
+ "idle=poll" that removed HLT from idle, and so if such a machine
+ is still running the upstream kernel, "idle=poll" is likely sufficient.
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
+What: x86 "idle=mwait" cmdline param
+When: 2012
+Why: simplify x86 idle code
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
What: PRISM54
When: 2.6.34
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 61b31ac..57d827d6 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -104,7 +104,7 @@ of the locking scheme for directory operations.
prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
@@ -126,7 +126,7 @@ locking rules:
s_umount
alloc_inode:
destroy_inode:
-dirty_inode: (must not sleep)
+dirty_inode:
write_inode:
drop_inode: !!!inode->i_lock!!!
evict_inode:
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 21a7dc4..88b9f55 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -211,7 +211,7 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, int);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
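(Illustrative sketch, not part of this patch: with the new prototype the
'flags' argument carries the I_DIRTY_* bits given to __mark_inode_dirty(), so
a filesystem can react only to inode-metadata dirtying. "examplefs" and its
logging helper are invented names.)

#include <linux/fs.h>

static void examplefs_log_inode(struct inode *inode)
{
	/* hypothetical helper: open a transaction and log the inode */
}

static void examplefs_dirty_inode(struct inode *inode, int flags)
{
	/* act only when inode metadata was dirtied, not for data pages */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
		examplefs_log_inode(inode);
}

static const struct super_operations examplefs_sops = {
	.dirty_inode	= examplefs_dirty_inode,
	/* other super_operations methods omitted */
};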
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5438a2d..d9a203b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -999,7 +999,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
With this option on every unmap_single operation will
result in a hardware IOTLB flush operation as opposed
to batching them for performance.
-
+ sp_off [Default Off]
+ By default, super page will be supported if Intel IOMMU
+ has the capability. With this option, super page will
+ not be supported.
intremap= [X86-64, Intel-IOMMU]
Format: { on (default) | off | nosid }
on enable Interrupt Remapping (default)
diff --git a/Documentation/laptops/acer-wmi.txt b/Documentation/laptops/acer-wmi.txt
deleted file mode 100644
index 4beafa6..0000000
--- a/Documentation/laptops/acer-wmi.txt
+++ /dev/null
@@ -1,184 +0,0 @@
-Acer Laptop WMI Extras Driver
-http://code.google.com/p/aceracpi
-Version 0.3
-4th April 2009
-
-Copyright 2007-2009 Carlos Corbacho <carlos@strangeworlds.co.uk>
-
-acer-wmi is a driver to allow you to control various parts of your Acer laptop
-hardware under Linux which are exposed via ACPI-WMI.
-
-This driver completely replaces the old out-of-tree acer_acpi, which I am
-currently maintaining for bug fixes only on pre-2.6.25 kernels. All development
-work is now focused solely on acer-wmi.
-
-Disclaimer
-**********
-
-Acer and Wistron have provided nothing towards the development acer_acpi or
-acer-wmi. All information we have has been through the efforts of the developers
-and the users to discover as much as possible about the hardware.
-
-As such, I do warn that this could break your hardware - this is extremely
-unlikely of course, but please bear this in mind.
-
-Background
-**********
-
-acer-wmi is derived from acer_acpi, originally developed by Mark
-Smith in 2005, then taken over by Carlos Corbacho in 2007, in order to activate
-the wireless LAN card under a 64-bit version of Linux, as acerhk[1] (the
-previous solution to the problem) relied on making 32 bit BIOS calls which are
-not possible in kernel space from a 64 bit OS.
-
-[1] acerhk: http://www.cakey.de/acerhk/
-
-Supported Hardware
-******************
-
-NOTE: The Acer Aspire One is not supported hardware. It cannot work with
-acer-wmi until Acer fix their ACPI-WMI implementation on them, so has been
-blacklisted until that happens.
-
-Please see the website for the current list of known working hardware:
-
-http://code.google.com/p/aceracpi/wiki/SupportedHardware
-
-If your laptop is not listed, or listed as unknown, and works with acer-wmi,
-please contact me with a copy of the DSDT.
-
-If your Acer laptop doesn't work with acer-wmi, I would also like to see the
-DSDT.
-
-To send me the DSDT, as root/sudo:
-
-cat /sys/firmware/acpi/tables/DSDT > dsdt
-
-And send me the resulting 'dsdt' file.
-
-Usage
-*****
-
-On Acer laptops, acer-wmi should already be autoloaded based on DMI matching.
-For non-Acer laptops, until WMI based autoloading support is added, you will
-need to manually load acer-wmi.
-
-acer-wmi creates /sys/devices/platform/acer-wmi, and fills it with various
-files whose usage is detailed below, which enables you to control some of the
-following (varies between models):
-
-* the wireless LAN card radio
-* inbuilt Bluetooth adapter
-* inbuilt 3G card
-* mail LED of your laptop
-* brightness of the LCD panel
-
-Wireless
-********
-
-With regards to wireless, all acer-wmi does is enable the radio on the card. It
-is not responsible for the wireless LED - once the radio is enabled, this is
-down to the wireless driver for your card. So the behaviour of the wireless LED,
-once you enable the radio, will depend on your hardware and driver combination.
-
-e.g. With the BCM4318 on the Acer Aspire 5020 series:
-
-ndiswrapper: Light blinks on when transmitting
-b43: Solid light, blinks off when transmitting
-
-Wireless radio control is unconditionally enabled - all Acer laptops that support
-acer-wmi come with built-in wireless. However, should you feel so inclined to
-ever wish to remove the card, or swap it out at some point, please get in touch
-with me, as we may well be able to gain some data on wireless card detection.
-
-The wireless radio is exposed through rfkill.
-
-Bluetooth
-*********
-
-For bluetooth, this is an internal USB dongle, so once enabled, you will get
-a USB device connection event, and a new USB device appears. When you disable
-bluetooth, you get the reverse - a USB device disconnect event, followed by the
-device disappearing again.
-
-Bluetooth is autodetected by acer-wmi, so if you do not have a bluetooth module
-installed in your laptop, this file won't exist (please be aware that it is
-quite common for Acer not to fit bluetooth to their laptops - so just because
-you have a bluetooth button on the laptop, doesn't mean that bluetooth is
-installed).
-
-For the adventurously minded - if you want to buy an internal bluetooth
-module off the internet that is compatible with your laptop and fit it, then
-it will work just fine with acer-wmi.
-
-Bluetooth is exposed through rfkill.
-
-3G
-**
-
-3G is currently not autodetected, so the 'threeg' file is always created under
-sysfs. So far, no-one in possession of an Acer laptop with 3G built-in appears to
-have tried Linux, or reported back, so we don't have any information on this.
-
-If you have an Acer laptop that does have a 3G card in, please contact me so we
-can properly detect these, and find out a bit more about them.
-
-To read the status of the 3G card (0=off, 1=on):
-cat /sys/devices/platform/acer-wmi/threeg
-
-To enable the 3G card:
-echo 1 > /sys/devices/platform/acer-wmi/threeg
-
-To disable the 3G card:
-echo 0 > /sys/devices/platform/acer-wmi/threeg
-
-To set the state of the 3G card when loading acer-wmi, pass:
-threeg=X (where X is 0 or 1)
-
-Mail LED
-********
-
-This can be found in most older Acer laptops supported by acer-wmi, and many
-newer ones - it is built into the 'mail' button, and blinks when active.
-
-On newer (WMID) laptops though, we have no way of detecting the mail LED. If
-your laptop identifies itself in dmesg as a WMID model, then please try loading
-acer_acpi with:
-
-force_series=2490
-
-This will use a known alternative method of reading/ writing the mail LED. If
-it works, please report back to me with the DMI data from your laptop so this
-can be added to acer-wmi.
-
-The LED is exposed through the LED subsystem, and can be found in:
-
-/sys/devices/platform/acer-wmi/leds/acer-wmi::mail/
-
-The mail LED is autodetected, so if you don't have one, the LED device won't
-be registered.
-
-Backlight
-*********
-
-The backlight brightness control is available on all acer-wmi supported
-hardware. The maximum brightness level is usually 15, but on some newer laptops
-it's 10 (this is again autodetected).
-
-The backlight is exposed through the backlight subsystem, and can be found in:
-
-/sys/devices/platform/acer-wmi/backlight/acer-wmi/
-
-Credits
-*******
-
-Olaf Tauber, who did the real hard work when he developed acerhk
-http://www.cakey.de/acerhk/
-All the authors of laptop ACPI modules in the kernel, whose work
-was an inspiration in the early days of acer_acpi
-Mathieu Segaud, who solved the problem with having to modprobe the driver
-twice in acer_acpi 0.2.
-Jim Ramsay, who added support for the WMID interface
-Mark Smith, who started the original acer_acpi
-
-And the many people who have used both acer_acpi and acer-wmi.
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 9c0a80d..cef00d4 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -12,8 +12,9 @@ Because things like lock contention can severely impact performance.
- HOW
Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that. The graph below shows the relation between
-the lock functions and the various hooks therein.
+lock classes. We build on that (see Documentation/lockdep-design.txt).
+The graph below shows the relation between the lock functions and the various
+hooks therein.
__acquire
|
@@ -128,6 +129,37 @@ points are the points we're contending with.
The integer part of the time values is in us.
+When dealing with nested locks, subclasses may appear:
+
+32...............................................................................................................................................................................................
+33
+34 &rq->lock: 13128 13128 0.43 190.53 103881.26 97454 3453404 0.00 401.11 13224683.11
+35 ---------
+36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+40 ---------
+41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44 &rq->lock 893 [<ffffffff81340524>] schedule+0x157/0x7b8
+45
+46...............................................................................................................................................................................................
+47
+48 &rq->lock/1: 11526 11488 0.33 388.73 136294.31 21461 38404 0.00 37.93 109388.53
+49 -----------
+50 &rq->lock/1 11526 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+51 -----------
+52 &rq->lock/1 5645 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53 &rq->lock/1 1224 [<ffffffff81340524>] schedule+0x157/0x7b8
+54 &rq->lock/1 4336 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55 &rq->lock/1 181 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+
+Line 48 shows statistics for the second subclass (/1) of the &rq->lock class
+(subclass numbering starts from 0); as line 50 suggests, this is because
+double_rq_lock actually acquires a nested lock of two spinlocks.
+
View the top contending locks:
# grep : /proc/lock_stat | head
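
The "/1" subclass in the example above comes from the nesting annotation on
the inner lock acquisition. A simplified sketch of such a helper, assuming the
usual lockdep annotation pattern (the scheduler's real double_rq_lock() also
handles the rq1 == rq2 case, which is omitted here):

    /*
     * Take two runqueue locks in address order; the inner acquisition is
     * annotated as a separate subclass, which lockstat then reports as
     * "&rq->lock/1".
     */
    static void double_rq_lock(struct rq *rq1, struct rq *rq2)
    {
            if (rq1 < rq2) {
                    raw_spin_lock(&rq1->lock);
                    raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
            } else {
                    raw_spin_lock(&rq2->lock);
                    raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
            }
    }
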
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 4d9ce73..9ed1d9d 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
+Release Date : Wed. May 11, 2011 17:00:00 PST 2011 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.05.38-rc1
+Old Version : 00.00.05.34-rc1
+ 1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
+ 2. Remove un-used function megasas_return_cmd_for_smid().
+ 3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
+ 4. Disable interrupts/free_irq() in megasas_shutdown().
+ 5. Fix bug where AENs could be lost in probe() and resume().
+ 6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
+ IO.
+ 7. Add 1078 OCR support.
+-------------------------------------------------------------------------------
Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/virtual/lguest/Makefile b/Documentation/virtual/lguest/Makefile
index bebac6b..0ac3420 100644
--- a/Documentation/virtual/lguest/Makefile
+++ b/Documentation/virtual/lguest/Makefile
@@ -1,5 +1,5 @@
# This creates the demonstration utility "lguest" which runs a Linux guest.
-# Missing headers? Add "-I../../include -I../../arch/x86/include"
+# Missing headers? Add "-I../../../include -I../../../arch/x86/include"
CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
all: lguest
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index d9da7e1..cd9d6af 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -49,7 +49,7 @@
#include <linux/virtio_rng.h>
#include <linux/virtio_ring.h>
#include <asm/bootparam.h>
-#include "../../include/linux/lguest_launcher.h"
+#include "../../../include/linux/lguest_launcher.h"
/*L:110
* We can ignore the 42 include files we need for this program, but I do want
* to draw attention to the use of kernel-style types.
@@ -135,9 +135,6 @@ struct device {
/* Is it operational */
bool running;
- /* Does Guest want an intrrupt on empty? */
- bool irq_on_empty;
-
/* Device-specific data. */
void *priv;
};
@@ -637,10 +634,7 @@ static void trigger_irq(struct virtqueue *vq)
/* If they don't want an interrupt, don't send one... */
if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
- /* ... unless they've asked us to force one on empty. */
- if (!vq->dev->irq_on_empty
- || lg_last_avail(vq) != vq->vring.avail->idx)
- return;
+ return;
}
/* Send the Guest an interrupt tell them we used something up. */
@@ -1057,15 +1051,6 @@ static void create_thread(struct virtqueue *vq)
close(vq->eventfd);
}
-static bool accepted_feature(struct device *dev, unsigned int bit)
-{
- const u8 *features = get_feature_bits(dev) + dev->feature_len;
-
- if (dev->feature_len < bit / CHAR_BIT)
- return false;
- return features[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT));
-}
-
static void start_device(struct device *dev)
{
unsigned int i;
@@ -1079,8 +1064,6 @@ static void start_device(struct device *dev)
verbose(" %02x", get_feature_bits(dev)
[dev->feature_len+i]);
- dev->irq_on_empty = accepted_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
-
for (vq = dev->vq; vq; vq = vq->next) {
if (vq->service)
create_thread(vq);
@@ -1564,7 +1547,6 @@ static void setup_tun_net(char *arg)
/* Set up the tun device. */
configure_device(ipfd, tapif, ip);
- add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
/* Expect Guest to handle everything except UFO */
add_feature(dev, VIRTIO_NET_F_CSUM);
add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
diff --git a/MAINTAINERS b/MAINTAINERS
index a33b115..29801f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -223,10 +223,8 @@ S: Maintained
F: drivers/platform/x86/acerhdf.c
ACER WMI LAPTOP EXTRAS
-M: Carlos Corbacho <carlos@strangeworlds.co.uk>
-L: aceracpi@googlegroups.com (subscribers-only)
+M: Joey Lee <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
-W: http://code.google.com/p/aceracpi
S: Maintained
F: drivers/platform/x86/acer-wmi.c
@@ -271,10 +269,8 @@ S: Supported
F: drivers/acpi/video.c
ACPI WMI DRIVER
-M: Carlos Corbacho <carlos@strangeworlds.co.uk>
L: platform-driver-x86@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Maintained
+S: Orphan
F: drivers/platform/x86/wmi.c
AD1889 ALSA SOUND DRIVER
@@ -2178,6 +2174,8 @@ M: Dan Williams <dan.j.williams@intel.com>
S: Supported
F: drivers/dma/
F: include/linux/dma*
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
+T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
@@ -3031,9 +3029,8 @@ S: Maintained
F: drivers/net/wireless/hostap/
HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
-M: Carlos Corbacho <carlos@strangeworlds.co.uk>
L: platform-driver-x86@vger.kernel.org
-S: Odd Fixes
+S: Orphan
F: drivers/platform/x86/tc1100-wmi.c
HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
@@ -5451,6 +5448,13 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial
+SYNOPSYS DESIGNWARE DMAC DRIVER
+M: Viresh Kumar <viresh.kumar@st.com>
+S: Maintained
+F: include/linux/dw_dmac.h
+F: drivers/dma/dw_dmac_regs.h
+F: drivers/dma/dw_dmac.c
+
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
@@ -5515,7 +5519,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <James.Bottomley@suse.de>
+M: "James E.J. Bottomley" <JBottomley@parallels.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6084,6 +6088,17 @@ F: Documentation/filesystems/sysv-fs.txt
F: fs/sysv/
F: include/linux/sysv_fs.h
+TARGET SUBSYSTEM
+M: Nicholas A. Bellinger <nab@linux-iscsi.org>
+L: linux-scsi@vger.kernel.org
+L: http://groups.google.com/group/linux-iscsi-target-dev
+W: http://www.linux-iscsi.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
+S: Supported
+F: drivers/target/
+F: include/target/
+F: Documentation/target/
+
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <balbir@linux.vnet.ibm.com>
S: Maintained
diff --git a/Makefile b/Makefile
index 529d93f..afb8e0d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
-VERSION = 2
-PATCHLEVEL = 6
-SUBLEVEL = 39
-EXTRAVERSION =
-NAME = Flesh-Eating Bats with Fangs
+VERSION = 3
+PATCHLEVEL = 0
+SUBLEVEL = 0
+EXTRAVERSION = -rc1
+NAME = Sneaky Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index b183416..4ac48a0 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -456,10 +456,11 @@
#define __NR_open_by_handle_at 498
#define __NR_clock_adjtime 499
#define __NR_syncfs 500
+#define __NR_setns 501
#ifdef __KERNEL__
-#define NR_SYSCALLS 501
+#define NR_SYSCALLS 502
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 15f999d..b9c28f3 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -519,6 +519,7 @@ sys_call_table:
.quad sys_open_by_handle_at
.quad sys_clock_adjtime
.quad sys_syncfs /* 500 */
+ .quad sys_setns
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7275009..9adc278 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -294,6 +294,8 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
+ select CLKDEV_LOOKUP
+ select ARM_PATCH_PHYS_VIRT if MMU
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -730,16 +732,6 @@ config ARCH_S5P64X0
Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
SMDK6450.
-config ARCH_S5P6442
- bool "Samsung S5P6442"
- select CPU_V6
- select GENERIC_GPIO
- select HAVE_CLK
- select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_WATCHDOG if WATCHDOG
- help
- Samsung S5P6442 CPU based systems
-
config ARCH_S5PC100
bool "Samsung S5PC100"
select GENERIC_GPIO
@@ -991,8 +983,6 @@ endif
source "arch/arm/mach-s5p64x0/Kconfig"
-source "arch/arm/mach-s5p6442/Kconfig"
-
source "arch/arm/mach-s5pc100/Kconfig"
source "arch/arm/mach-s5pv210/Kconfig"
@@ -1399,7 +1389,6 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
- depends on !ARCH_MSM
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
@@ -1420,7 +1409,7 @@ source kernel/Kconfig.preempt
config HZ
int
default 200 if ARCH_EBSA110 || ARCH_S3C2410 || ARCH_S5P64X0 || \
- ARCH_S5P6442 || ARCH_S5PV210 || ARCH_EXYNOS4
+ ARCH_S5PV210 || ARCH_EXYNOS4
default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
default AT91_TIMER_HZ if ARCH_AT91
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
@@ -1516,6 +1505,9 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
+config HAVE_ARCH_PFN_VALID
+ def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
+
config HIGHMEM
bool "High Memory Support"
depends on MMU
@@ -1683,6 +1675,13 @@ endmenu
menu "Boot options"
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_EARLY_FLATTREE
+ help
+ Include support for flattened device tree machine descriptions.
+
# Compressed boot loader in ROM. Yes, we really want to ask about
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
@@ -2021,7 +2020,7 @@ menu "Power management options"
source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
- depends on !ARCH_S5P64X0 && !ARCH_S5P6442 && !ARCH_S5PC100
+ depends on !ARCH_S5P64X0 && !ARCH_S5PC100
depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
def_bool y
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 25750bc..f5b2b39 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -176,7 +176,6 @@ machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c24
machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0
-machine-$(CONFIG_ARCH_S5P6442) := s5p6442
machine-$(CONFIG_ARCH_S5PC100) := s5pc100
machine-$(CONFIG_ARCH_S5PV210) := s5pv210
machine-$(CONFIG_ARCH_EXYNOS4) := exynos4
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index ea5ee4d..4b71766 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -7,7 +7,7 @@ config ARM_VIC
config ARM_VIC_NR
int
default 4 if ARCH_S5PV210
- default 3 if ARCH_S5P6442 || ARCH_S5PC100
+ default 3 if ARCH_S5PC100
default 2
depends on ARM_VIC
help
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
deleted file mode 100644
index 1b1158a..0000000
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ /dev/null
@@ -1,358 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-AT572D940HF"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT572D940HF=y
-CONFIG_MACH_AT572D940HFEB=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_CMDLINE="mem=48M console=ttyS0 initrd=0x21100000,3145728 root=/dev/ram0 rw ip=172.16.1.181"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_FPE_NWFPE_XP=y
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_NET_PKTGEN=m
-CONFIG_NET_TCPPROBE=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_VCAN=m
-CONFIG_CAN_DEBUG_DEVICES=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_DEBUG=y
-CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
-CONFIG_MTD_OOPS=m
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PLATRAM=m
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTD_BLOCK2MTD=m
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-CONFIG_MTD_NAND_DISKONCHIP=m
-CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_PLATFORM=m
-CONFIG_MTD_ALAUDA=m
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_ATMEL_TCLIB=y
-CONFIG_ATMEL_SSC=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_DS1682=m
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_SCSI_TGT=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-CONFIG_MACVLAN=m
-CONFIG_EQUALIZER=m
-CONFIG_TUN=m
-CONFIG_VETH=m
-CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_USB_ZD1201=m
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_GPIO=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_MOUSE_APPLETOUCH=m
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_MOUSE_GPIO=m
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=m
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_RAW=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_N_HDLC=m
-CONFIG_SPECIALIX=m
-CONFIG_STALDRV=y
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=y
-CONFIG_R3964=m
-CONFIG_RAW_DRIVER=m
-CONFIG_TCG_TPM=m
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_SPIDEV=m
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-# CONFIG_SND_PCM_OSS_PLUGINS is not set
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_DYNAMIC_MINORS=y
-# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_USB_AUDIO=m
-CONFIG_SND_USB_CAIAQ=m
-CONFIG_SND_USB_CAIAQ_INPUT=y
-CONFIG_HID=m
-CONFIG_HIDRAW=y
-CONFIG_USB_HID=m
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_KBD=m
-CONFIG_USB_MOUSE=m
-CONFIG_HID_A4TECH=m
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_LOGITECH=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_ISD200=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_LIBUSUAL=y
-CONFIG_USB_SERIAL=m
-CONFIG_USB_EZUSB=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_DEBUG=m
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-CONFIG_USB_ADUTUX=m
-CONFIG_USB_TEST=m
-CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_GADGET_DEBUG_FS=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_AT91=m
-CONFIG_MMC_SPI=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
-CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_DRV_DS1307=m
-CONFIG_RTC_DRV_DS1305=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD_DEBUG=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_CHECK=y
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_INOTIFY=y
-CONFIG_FUSE_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_CMODE_FAVOURLZO=y
-CONFIG_CRAMFS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_LDM_DEBUG=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_NLS_DEFAULT="cp437"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
diff --git a/arch/arm/configs/at91sam9261ek_defconfig b/arch/arm/configs/at91sam9261_defconfig
index b46025b..ade6b2f 100644
--- a/arch/arm/configs/at91sam9261ek_defconfig
+++ b/arch/arm/configs/at91sam9261_defconfig
@@ -1,9 +1,13 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
+CONFIG_NAMESPACES=y
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -15,18 +19,27 @@ CONFIG_ARCH_AT91SAM9261=y
CONFIG_MACH_AT91SAM9261EK=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_VFP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_CFG80211=y
+CONFIG_LIB80211=y
+CONFIG_MAC80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -34,8 +47,12 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -45,12 +62,27 @@ CONFIG_NET_ETHERNET=y
CONFIG_DM9000=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_ZD1201=m
+CONFIG_RTL8187=m
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_SPI=m
+CONFIG_RT2X00=m
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_ZD1211RW=m
+CONFIG_INPUT_POLLDEV=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_HW_RANDOM=y
@@ -65,31 +97,62 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_LCDC=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+CONFIG_SND_AT73C213=y
+CONFIG_SND_USB_AUDIO=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
CONFIG_MMC_AT91=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
+CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_FTRACE=y
+CONFIG_CRC_CCITT=m
diff --git a/arch/arm/configs/at91sam9263ek_defconfig b/arch/arm/configs/at91sam9263_defconfig
index 8a04d6f..1cf9626 100644
--- a/arch/arm/configs/at91sam9263ek_defconfig
+++ b/arch/arm/configs/at91sam9263_defconfig
@@ -1,9 +1,13 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
+CONFIG_NAMESPACES=y
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -13,53 +17,81 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9263=y
CONFIG_MACH_AT91SAM9263EK=y
+CONFIG_MACH_USB_A9263=y
+CONFIG_MACH_NEOCORE926=y
CONFIG_MTD_AT91_DATAFLASH_CARD=y
# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
+CONFIG_AUTO_ZRELADDR=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_PWM=y
+CONFIG_ATMEL_TCLIB=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+CONFIG_SMSC_PHY=y
+CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_ZD1201=m
+CONFIG_INPUT_POLLDEV=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_HW_RANDOM=y
@@ -74,8 +106,25 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_LCDC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+CONFIG_SND_ATMEL_AC97C=y
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=m
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
@@ -83,24 +132,37 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
+CONFIG_SDIO_UART=m
CONFIG_MMC_AT91=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_ATMEL_PWM=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
+CONFIG_FUSE_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_FTRACE=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
index 2ffba24..da53ff3 100644
--- a/arch/arm/configs/exynos4_defconfig
+++ b/arch/arm/configs/exynos4_defconfig
@@ -8,7 +8,9 @@ CONFIG_ARCH_EXYNOS4=y
CONFIG_S3C_LOWLEVEL_UART_PORT=1
CONFIG_MACH_SMDKC210=y
CONFIG_MACH_SMDKV310=y
+CONFIG_MACH_ARMLEX4210=y
CONFIG_MACH_UNIVERSAL_C210=y
+CONFIG_MACH_NURI=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/arch/arm/configs/neocore926_defconfig b/arch/arm/configs/neocore926_defconfig
deleted file mode 100644
index 462dd18..0000000
--- a/arch/arm/configs/neocore926_defconfig
+++ /dev/null
@@ -1,104 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9263=y
-CONFIG_MACH_NEOCORE926=y
-CONFIG_MTD_AT91_DATAFLASH_CARD=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_NFTL=y
-CONFIG_NFTL_RW=y
-CONFIG_MTD_BLOCK2MTD=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_ATMEL_PWM=y
-CONFIG_ATMEL_TCLIB=y
-CONFIG_SCSI=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_SERIAL_ATMEL_PDC is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_FB=y
-CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_ATMEL_LCDC=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_LOGO=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_SDIO_UART=y
-CONFIG_MMC_AT91=m
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig
deleted file mode 100644
index 0e92a78..0000000
--- a/arch/arm/configs/s5p6442_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_S5P6442=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
-CONFIG_MACH_SMDK6442=y
-CONFIG_CPU_32v6K=y
-CONFIG_AEABI=y
-CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
-CONFIG_FPE_NWFPE=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_NR_UARTS=3
-CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_S3C_UART=1
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/usb-a9263_defconfig b/arch/arm/configs/usb-a9263_defconfig
deleted file mode 100644
index ee82d09..0000000
--- a/arch/arm/configs/usb-a9263_defconfig
+++ /dev/null
@@ -1,106 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9263=y
-CONFIG_MACH_USB_A9263=y
-CONFIG_AT91_SLOW_CLOCK=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS0,115200"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_EVBUG=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_FUSE_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index 2242ce2..d493d0b 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -4,6 +4,13 @@
* Support for FIQ on ARM architectures.
* Written by Philip Blundell <philb@gnu.org>, 1998
* Re-written by Russell King
+ *
+ * NOTE: The FIQ mode registers are not magically preserved across
+ * suspend/resume.
+ *
+ * Drivers which require these registers to be preserved across power
+ * management operations must implement appropriate suspend/resume handlers to
+ * save and restore them.
*/
#ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
extern int claim_fiq(struct fiq_handler *f);
extern void release_fiq(struct fiq_handler *f);
extern void set_fiq_handler(void *start, unsigned int length);
-extern void set_fiq_regs(struct pt_regs *regs);
-extern void get_fiq_regs(struct pt_regs *regs);
extern void enable_fiq(int fiq);
extern void disable_fiq(int fiq);
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+ __set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+ __get_fiq_regs(&regs->ARM_r8);
+}
+
#endif
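
A sketch of the save/restore duty described in the new header comment, using
the syscore hooks; the my_* names are illustrative, and a real driver would
still have to register the ops with register_syscore_ops():

    #include <linux/syscore_ops.h>
    #include <asm/fiq.h>

    static struct pt_regs my_saved_fiq_regs;

    static int my_fiq_suspend(void)
    {
            get_fiq_regs(&my_saved_fiq_regs);       /* copy banked r8-r14 out */
            return 0;
    }

    static void my_fiq_resume(void)
    {
            set_fiq_regs(&my_saved_fiq_regs);       /* reload them after wake-up */
    }

    static struct syscore_ops my_fiq_syscore_ops = {
            .suspend        = my_fiq_suspend,
            .resume         = my_fiq_resume,
    };
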
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index bf13b81..946f4d7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -18,6 +18,8 @@ struct machine_desc {
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */
+ const char **dt_compat; /* array of device tree
+ * 'compatible' strings */
unsigned int nr_irqs; /* number of IRQs */
@@ -48,6 +50,13 @@ struct machine_desc {
extern struct machine_desc *machine_desc;
/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+/*
* Set of macros to define architecture features. This is built into
* a table by the linker.
*/
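
The new dt_compat field is meant to be filled in by board code. A hypothetical
fragment (the board name and compatible string are made up, and all other
machine_desc fields are omitted):

    static const char *acme_board_dt_compat[] = {
            "acme,example-board",   /* matched against the DT root node */
            NULL,
    };

    /* inside the board's MACHINE_START(...)/MACHINE_END block: */
            .dt_compat      = acme_board_dt_compat,
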
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f51a695..ac75d08 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -197,7 +197,7 @@ typedef unsigned long pgprot_t;
typedef struct page *pgtable_t;
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
#endif
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
new file mode 100644
index 0000000..11b8708
--- /dev/null
+++ b/arch/arm/include/asm/prom.h
@@ -0,0 +1,37 @@
+/*
+ * arch/arm/include/asm/prom.h
+ *
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASMARM_PROM_H
+#define __ASMARM_PROM_H
+
+#ifdef CONFIG_OF
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+ return;
+}
+
+extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern void arm_dt_memblock_reserve(void);
+
+#else /* CONFIG_OF */
+
+static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+{
+ return NULL;
+}
+
+static inline void arm_dt_memblock_reserve(void) { }
+
+#endif /* CONFIG_OF */
+#endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 95176af..ee2ad8a 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -217,6 +217,10 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
+extern int arm_add_memory(phys_addr_t start, unsigned long size);
+extern void early_print(const char *str, ...);
+extern void dump_machine_table(void);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d2b514f..e42d96a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -70,6 +70,7 @@ extern void platform_smp_prepare_cpus(unsigned int);
*/
struct secondary_data {
unsigned long pgdir;
+ unsigned long swapper_pg_dir;
void *stack;
};
extern struct secondary_data secondary_data;
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 87dbe3e..2c04ed5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -400,6 +400,8 @@
#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
#define __NR_syncfs (__NR_SYSCALL_BASE+373)
+#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
+#define __NR_setns (__NR_SYSCALL_BASE+375)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8d95446..a5b31af 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_ARCH_ACORN) += ecard.o
-obj-$(CONFIG_FIQ) += fiq.o
+obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
+obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 7fbf28c..80f7896 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -383,6 +383,8 @@
CALL(sys_open_by_handle_at)
CALL(sys_clock_adjtime)
CALL(sys_syncfs)
+ CALL(sys_sendmmsg)
+/* 375 */ CALL(sys_setns)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
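
With the table slot and __NR_setns definition wired up, userspace can reach
the new system call through syscall(2) until a libc wrapper appears. An
illustrative sketch (the /proc path and the EABI number below are assumptions
based on the hunks above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_setns
    #define __NR_setns 375          /* ARM EABI value from the table above */
    #endif

    int main(void)
    {
            int fd = open("/proc/1/ns/net", O_RDONLY);      /* join init's net namespace */

            if (fd < 0 || syscall(__NR_setns, fd, 0) < 0) {
                    perror("setns");
                    return 1;
            }
            return 0;
    }
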
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
new file mode 100644
index 0000000..a701e42
--- /dev/null
+++ b/arch/arm/kernel/devtree.c
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/arm/kernel/devtree.c
+ *
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ arm_add_memory(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return alloc_bootmem_align(size, align);
+}
+
+void __init arm_dt_memblock_reserve(void)
+{
+ u64 *reserve_map, base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ /* Reserve the dtb region */
+ memblock_reserve(virt_to_phys(initial_boot_params),
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ /*
+ * Process the reserve map. This will probably overlap the initrd
+ * and dtb locations which are already reserved, but overlapping
+ * doesn't hurt anything.
+ */
+ reserve_map = ((void*)initial_boot_params) +
+ be32_to_cpu(initial_boot_params->off_mem_rsvmap);
+ while (1) {
+ base = be64_to_cpup(reserve_map++);
+ size = be64_to_cpup(reserve_map++);
+ if (!size)
+ break;
+ memblock_reserve(base, size);
+ }
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt_phys: physical address of dt blob
+ *
+ * If a dtb was passed to the kernel in r2, then use it to choose the
+ * correct machine_desc and to set up the system.
+ */
+struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+{
+ struct boot_param_header *devtree;
+ struct machine_desc *mdesc, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model;
+
+ devtree = phys_to_virt(dt_phys);
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ /* Search the mdescs for the 'best' compatible value match */
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ early_print("\nError: unrecognized/unsupported "
+ "device tree compatible list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ while (size > 0) {
+ early_print("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ early_print("]\n\n");
+
+ dump_machine_table(); /* does not return */
+ }
+
+ model = of_get_flat_dt_prop(dt_root, "model", NULL);
+ if (!model)
+ model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+ if (!model)
+ model = "<unknown>";
+ pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ /* Change machine number to match the mdesc we're using */
+ __machine_arch_type = mdesc_best->nr;
+
+ return mdesc_best;
+}
+
+/**
+ * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
+ *
+ * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
+ * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
+ * supported.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index e72dc34..4c164ec 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -89,47 +89,6 @@ void set_fiq_handler(void *start, unsigned int length)
flush_icache_range(0x1c, 0x1c + length);
}
-/*
- * Taking an interrupt in FIQ mode is death, so both these functions
- * disable irqs for the duration. Note - these functions are almost
- * entirely coded in assembly.
- */
-void __naked set_fiq_regs(struct pt_regs *regs)
-{
- register unsigned long tmp;
- asm volatile (
- "mov ip, sp\n\
- stmfd sp!, {fp, ip, lr, pc}\n\
- sub fp, ip, #4\n\
- mrs %0, cpsr\n\
- msr cpsr_c, %2 @ select FIQ mode\n\
- mov r0, r0\n\
- ldmia %1, {r8 - r14}\n\
- msr cpsr_c, %0 @ return to SVC mode\n\
- mov r0, r0\n\
- ldmfd sp, {fp, sp, pc}"
- : "=&r" (tmp)
- : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
-}
-
-void __naked get_fiq_regs(struct pt_regs *regs)
-{
- register unsigned long tmp;
- asm volatile (
- "mov ip, sp\n\
- stmfd sp!, {fp, ip, lr, pc}\n\
- sub fp, ip, #4\n\
- mrs %0, cpsr\n\
- msr cpsr_c, %2 @ select FIQ mode\n\
- mov r0, r0\n\
- stmia %1, {r8 - r14}\n\
- msr cpsr_c, %0 @ return to SVC mode\n\
- mov r0, r0\n\
- ldmfd sp, {fp, sp, pc}"
- : "=&r" (tmp)
- : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
-}
-
int claim_fiq(struct fiq_handler *f)
{
int ret = 0;
@@ -174,8 +133,8 @@ void disable_fiq(int fiq)
}
EXPORT_SYMBOL(set_fiq_handler);
-EXPORT_SYMBOL(set_fiq_regs);
-EXPORT_SYMBOL(get_fiq_regs);
+EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
+EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(claim_fiq);
EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
new file mode 100644
index 0000000..207f9d6
--- /dev/null
+++ b/arch/arm/kernel/fiqasm.S
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/arm/kernel/fiqasm.S
+ *
+ * Derived from code originally in linux/arch/arm/kernel/fiq.c:
+ *
+ * Copyright (C) 1998 Russell King
+ * Copyright (C) 1998, 1999 Phil Blundell
+ * Copyright (C) 2011, Linaro Limited
+ *
+ * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
+ *
+ * FIQ support re-written by Russell King to be more generic
+ *
+ * v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Taking an interrupt in FIQ mode is death, so both these functions
+ * disable irqs for the duration.
+ */
+
+ENTRY(__set_fiq_regs)
+ mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+ mrs r1, cpsr
+ msr cpsr_c, r2 @ select FIQ mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ ldmia r0!, {r8 - r12}
+ ldr sp, [r0], #4
+ ldr lr, [r0]
+ msr cpsr_c, r1 @ return to SVC mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ mov pc, lr
+ENDPROC(__set_fiq_regs)
+
+ENTRY(__get_fiq_regs)
+ mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+ mrs r1, cpsr
+ msr cpsr_c, r2 @ select FIQ mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ stmia r0!, {r8 - r12}
+ str sp, [r0], #4
+ str lr, [r0]
+ msr cpsr_c, r1 @ return to SVC mode
+ mov r0, r0 @ avoid hazard prior to ARMv4
+ mov pc, lr
+ENDPROC(__get_fiq_regs)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index c84b57d..854bd22 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -15,6 +15,12 @@
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define OF_DT_MAGIC 0xd00dfeed
+#else
+#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
+#endif
+
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
@@ -28,20 +34,26 @@
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
- * that the ATAG_CORE marker is first and present. Future revisions
+ * that the ATAG_CORE marker is first and present. If CONFIG_OF_FLATTREE
+ * is selected, then it will also accept a dtb pointer. Future revisions
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
* Returns:
- * r2 either valid atags pointer, or zero
+ * r2 either valid atags pointer, valid dtb pointer, or zero
* r5, r6 corrupted
*/
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f
- ldr r5, [r2, #0] @ is first tag ATAG_CORE?
- cmp r5, #ATAG_CORE_SIZE
+ ldr r5, [r2, #0]
+#ifdef CONFIG_OF_FLATTREE
+ ldr r6, =OF_DT_MAGIC @ is it a DTB?
+ cmp r5, r6
+ beq 2f
+#endif
+ cmp r5, #ATAG_CORE_SIZE @ is first tag ATAG_CORE?
cmpne r5, #ATAG_CORE_SIZE_EMPTY
bne 1f
ldr r5, [r2, #4]
@@ -49,7 +61,7 @@ __vet_atags:
cmp r5, r6
bne 1f
- mov pc, lr @ atag pointer is ok
+2: mov pc, lr @ atag/dtb pointer is ok
1: mov r2, #0
mov pc, lr
@@ -61,7 +73,7 @@ ENDPROC(__vet_atags)
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
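
Two magic literals appear above because the DTB header stores 0xd00dfeed big-endian in memory, while this pre-MMU assembly compares a raw word load against a constant. A C-level equivalent of the new acceptance test, using helpers that only become usable later in boot, would look roughly like this (sketch, not kernel code):

#include <linux/types.h>
#include <linux/of_fdt.h>	/* struct boot_param_header, OF_DT_HEADER */
#include <asm/byteorder.h>

/* Mirrors the OF_DT_MAGIC test added to __vet_atags: accept r2 if it points
 * at a flattened device tree rather than an ATAG_CORE list. */
static bool looks_like_dtb(const void *blob)
{
	const struct boot_param_header *hdr = blob;

	return be32_to_cpu(hdr->magic) == OF_DT_HEADER;	/* 0xd00dfeed */
}
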
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c9173cf..278c1b0 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -59,7 +59,7 @@
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
- * r1 = machine nr, r2 = atags pointer.
+ * r1 = machine nr, r2 = atags or dtb pointer.
*
* This code is mostly position independent, so if you link the kernel at
* 0xc0008000, you call this at __pa(0xc0008000).
@@ -91,7 +91,7 @@ ENTRY(stext)
#endif
/*
- * r1 = machine no, r2 = atags,
+ * r1 = machine no, r2 = atags or dtb,
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
@@ -113,6 +113,7 @@ ENTRY(stext)
ldr r13, =__mmap_switched @ address to jump to after
@ mmu has been enabled
adr lr, BSYM(1f) @ return (PIC) address
+ mov r8, r4 @ set TTBR1 to swapper_pg_dir
ARM( add pc, r10, #PROCINFO_INITFUNC )
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
@@ -302,8 +303,10 @@ ENTRY(secondary_startup)
*/
adr r4, __secondary_data
ldmia r4, {r5, r7, r12} @ address to jump to after
- sub r4, r4, r5 @ mmu has been enabled
- ldr r4, [r7, r4] @ get secondary_data.pgdir
+ sub lr, r4, r5 @ mmu has been enabled
+ ldr r4, [r7, lr] @ get secondary_data.pgdir
+ add r7, r7, #4
+ ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
adr lr, BSYM(__enable_mmu) @ return address
mov r13, r12 @ __secondary_switched address
ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@@ -339,7 +342,7 @@ __secondary_data:
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags or dtb pointer
* r4 = page table pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
@@ -376,7 +379,7 @@ ENDPROC(__enable_mmu)
*
* r0 = cp#15 control register
* r1 = machine ID
- * r2 = atags pointer
+ * r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6dce209..ed11fb0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -20,6 +20,7 @@
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
+#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
@@ -42,6 +43,7 @@
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
+#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
@@ -309,7 +311,7 @@ static void __init cacheid_init(void)
*/
extern struct proc_info_list *lookup_processor_type(unsigned int);
-static void __init early_print(const char *str, ...)
+void __init early_print(const char *str, ...)
{
extern void printascii(const char *);
char buf[256];
@@ -439,25 +441,12 @@ void cpu_init(void)
: "r14");
}
-static struct machine_desc * __init setup_machine(unsigned int nr)
+void __init dump_machine_table(void)
{
- extern struct machine_desc __arch_info_begin[], __arch_info_end[];
struct machine_desc *p;
- /*
- * locate machine in the list of supported machines.
- */
- for (p = __arch_info_begin; p < __arch_info_end; p++)
- if (nr == p->nr) {
- printk("Machine: %s\n", p->name);
- return p;
- }
-
- early_print("\n"
- "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
- "Available machine support:\n\nID (hex)\tNAME\n", nr);
-
- for (p = __arch_info_begin; p < __arch_info_end; p++)
+ early_print("Available machine support:\n\nID (hex)\tNAME\n");
+ for_each_machine_desc(p)
early_print("%08x\t%s\n", p->nr, p->name);
early_print("\nPlease check your kernel config and/or bootloader.\n");
@@ -466,7 +455,7 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
/* can't use cpu_relax() here as it may require MMU setup */;
}
-static int __init arm_add_memory(phys_addr_t start, unsigned long size)
+int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
@@ -801,23 +790,29 @@ static void __init squash_mem_tags(struct tag *tag)
tag->hdr.tag = ATAG_NONE;
}
-void __init setup_arch(char **cmdline_p)
+static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
struct tag *tags = (struct tag *)&init_tags;
- struct machine_desc *mdesc;
+ struct machine_desc *mdesc = NULL, *p;
char *from = default_command_line;
init_tags.mem.start = PHYS_OFFSET;
- unwind_init();
-
- setup_processor();
- mdesc = setup_machine(machine_arch_type);
- machine_desc = mdesc;
- machine_name = mdesc->name;
+ /*
+ * locate machine in the list of supported machines.
+ */
+ for_each_machine_desc(p)
+ if (nr == p->nr) {
+ printk("Machine: %s\n", p->name);
+ mdesc = p;
+ break;
+ }
- if (mdesc->soft_reboot)
- reboot_setup("s");
+ if (!mdesc) {
+ early_print("\nError: unrecognized/unsupported machine ID"
+ " (r1 = 0x%08x).\n\n", nr);
+ dump_machine_table(); /* does not return */
+ }
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
@@ -849,8 +844,17 @@ void __init setup_arch(char **cmdline_p)
if (tags->hdr.tag != ATAG_CORE)
convert_to_tag_list(tags);
#endif
- if (tags->hdr.tag != ATAG_CORE)
+
+ if (tags->hdr.tag != ATAG_CORE) {
+#if defined(CONFIG_OF)
+ /*
+ * If CONFIG_OF is set, then assume this is a reasonably
+ * modern system that should pass boot parameters
+ */
+ early_print("Warning: Neither atags nor dtb found\n");
+#endif
tags = (struct tag *)&init_tags;
+ }
if (mdesc->fixup)
mdesc->fixup(mdesc, tags, &from, &meminfo);
@@ -862,14 +866,34 @@ void __init setup_arch(char **cmdline_p)
parse_tags(tags);
}
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ return mdesc;
+}
+
+
+void __init setup_arch(char **cmdline_p)
+{
+ struct machine_desc *mdesc;
+
+ unwind_init();
+
+ setup_processor();
+ mdesc = setup_machine_fdt(__atags_pointer);
+ if (!mdesc)
+ mdesc = setup_machine_tags(machine_arch_type);
+ machine_desc = mdesc;
+ machine_name = mdesc->name;
+
+ if (mdesc->soft_reboot)
+ reboot_setup("s");
+
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
- /* parse_early_param needs a boot_command_line */
- strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
-
/* populate cmd_line too for later use, preserving boot_command_line */
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
@@ -881,6 +905,8 @@ void __init setup_arch(char **cmdline_p)
paging_init(mdesc);
request_standard_resources(mdesc);
+ unflatten_device_tree();
+
#ifdef CONFIG_SMP
if (is_smp())
smp_init_cpus();
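
With this change setup_arch() tries the device tree first and only falls back to the ATAG path. setup_machine_fdt() itself lives in the new arch/arm/kernel/devtree.c (145 lines in the diffstat) and is not reproduced in this excerpt; its rough shape, paraphrased from the calling convention visible here, is:

/* Paraphrased outline only; see arch/arm/kernel/devtree.c for the real code. */
struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
{
	struct boot_param_header *devtree;
	struct machine_desc *mdesc_best = NULL;

	if (!dt_phys)
		return NULL;			/* bootloader passed nothing in r2 */

	devtree = phys_to_virt(dt_phys);
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
		return NULL;			/* not a DTB: setup_machine_tags() runs */

	/* ... match the root "compatible" strings against each machine_desc,
	 * record memory banks from /memory and bootargs from /chosen, and
	 * stash the blob for the later unflatten_device_tree() call ... */

	return mdesc_best;
}
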
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d439a8f..344e52b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -105,6 +105,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
+ secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
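
The extra loads in secondary_startup above rely on swapper_pg_dir sitting directly after pgdir; the field is added to struct secondary_data in asm/smp.h (a one-line diffstat change not shown here). The implied layout is roughly:

/* Layout implied by the head.S/smp.c hunks; the authoritative definition is
 * in arch/arm/include/asm/smp.h. */
struct secondary_data {
	unsigned long pgdir;		/* phys address of the boot page tables (loaded via r4) */
	unsigned long swapper_pg_dir;	/* phys address of swapper_pg_dir, handed to TTBR1 via r8 */
	void *stack;			/* initial SVC stack for the incoming CPU */
};
extern struct secondary_data secondary_data;
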
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 6dc0648..c562f64 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -35,7 +35,7 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-
+#include <asm/unwind.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
@@ -207,6 +207,7 @@ Boston, MA 02111-1307, USA. */
ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
+UNWIND(.fnstart)
subs r2, r1, #1
moveq pc, lr
@@ -230,10 +231,12 @@ ENTRY(__aeabi_uidiv)
mov r0, r0, lsr r2
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
ENTRY(__umodsi3)
+UNWIND(.fnstart)
subs r2, r1, #1 @ compare divisor with 1
bcc Ldiv0
@@ -247,10 +250,12 @@ ENTRY(__umodsi3)
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__umodsi3)
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
+UNWIND(.fnstart)
cmp r1, #0
eor ip, r0, r1 @ save the sign of the result.
@@ -287,10 +292,12 @@ ENTRY(__aeabi_idiv)
rsbmi r0, r0, #0
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
ENTRY(__modsi3)
+UNWIND(.fnstart)
cmp r1, #0
beq Ldiv0
@@ -310,11 +317,14 @@ ENTRY(__modsi3)
rsbmi r0, r0, #0
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__modsi3)
#ifdef CONFIG_AEABI
ENTRY(__aeabi_uidivmod)
+UNWIND(.fnstart)
+UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_uidiv
@@ -323,10 +333,12 @@ ENTRY(__aeabi_uidivmod)
sub r1, r1, r3
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
-
+UNWIND(.fnstart)
+UNWIND(.save {r0, r1, ip, lr} )
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_idiv
ldmfd sp!, {r1, r2, ip, lr}
@@ -334,15 +346,18 @@ ENTRY(__aeabi_idivmod)
sub r1, r1, r3
mov pc, lr
+UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
#endif
Ldiv0:
-
+UNWIND(.fnstart)
+UNWIND(.pad #4)
+UNWIND(.save {lr})
str lr, [sp, #-8]!
bl __div0
mov r0, #0 @ About as wrong as it could be.
ldr pc, [sp], #8
-
-
+UNWIND(.fnend)
+ENDPROC(Ldiv0)
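
The UNWIND() annotations cost nothing on non-unwinding builds: asm/unwind.h (now included above) compiles them away unless CONFIG_ARM_UNWIND is set, in which case .fnstart/.save/.pad/.fnend emit EHABI unwind-table entries so a backtrace can step through these division helpers. Paraphrased from the header:

/* Paraphrased from arch/arm/include/asm/unwind.h */
#ifdef CONFIG_ARM_UNWIND
#define UNWIND(code...)		code	/* annotation reaches the assembler */
#else
#define UNWIND(code...)			/* expands to nothing */
#endif
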
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 2d299bf..2248467 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -3,9 +3,6 @@ if ARCH_AT91
config HAVE_AT91_DATAFLASH_CARD
bool
-config HAVE_NAND_ATMEL_BUSWIDTH_16
- bool
-
config HAVE_AT91_USART3
bool
@@ -85,11 +82,6 @@ config ARCH_AT91CAP9
select HAVE_FB_ATMEL
select HAVE_NET_MACB
-config ARCH_AT572D940HF
- bool "AT572D940HF"
- select CPU_ARM926T
- select GENERIC_CLOCKEVENTS
-
config ARCH_AT91X40
bool "AT91x40"
select ARCH_USES_GETTIMEOFFSET
@@ -209,7 +201,6 @@ comment "AT91SAM9260 / AT91SAM9XE Board Type"
config MACH_AT91SAM9260EK
bool "Atmel AT91SAM9260-EK / AT91SAM9XE Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9260-EK or AT91SAM9XE Evaluation Kit
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3933>
@@ -270,7 +261,6 @@ comment "AT91SAM9261 Board Type"
config MACH_AT91SAM9261EK
bool "Atmel AT91SAM9261-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9261-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3820>
@@ -286,7 +276,6 @@ comment "AT91SAM9G10 Board Type"
config MACH_AT91SAM9G10EK
bool "Atmel AT91SAM9G10-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G10-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4588>
@@ -302,7 +291,6 @@ comment "AT91SAM9263 Board Type"
config MACH_AT91SAM9263EK
bool "Atmel AT91SAM9263-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9263-EK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4057>
@@ -343,7 +331,6 @@ comment "AT91SAM9G20 Board Type"
config MACH_AT91SAM9G20EK
bool "Atmel AT91SAM9G20-EK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit
that embeds only one SD/MMC slot.
@@ -351,7 +338,6 @@ config MACH_AT91SAM9G20EK
config MACH_AT91SAM9G20EK_2MMC
depends on MACH_AT91SAM9G20EK
bool "Atmel AT91SAM9G20-EK Evaluation Kit with 2 SD/MMC Slots"
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
with 2 SD/MMC Slots. This is the case for AT91SAM9G20-EK rev. C and
@@ -416,7 +402,6 @@ comment "AT91SAM9G45 Board Type"
config MACH_AT91SAM9M10G45EK
bool "Atmel AT91SAM9M10G45-EK Evaluation Kits"
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91SAM9G45-EKES Evaluation Kit.
"ES" at the end of the name means that this board is an
@@ -433,7 +418,6 @@ comment "AT91CAP9 Board Type"
config MACH_AT91CAP9ADK
bool "Atmel AT91CAP9A-DK Evaluation Kit"
select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
help
Select this if you are using Atmel's AT91CAP9A-DK Evaluation Kit.
<http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4138>
@@ -442,23 +426,6 @@ endif
# ----------------------------------------------------------
-if ARCH_AT572D940HF
-
-comment "AT572D940HF Board Type"
-
-config MACH_AT572D940HFEB
- bool "AT572D940HF-EK"
- depends on ARCH_AT572D940HF
- select HAVE_AT91_DATAFLASH_CARD
- select HAVE_NAND_ATMEL_BUSWIDTH_16
- help
- Select this if you are using Atmel's AT572D940HF-EK evaluation kit.
- <http://www.atmel.com/products/diopsis/default.asp>
-
-endif
-
-# ----------------------------------------------------------
-
if ARCH_AT91X40
comment "AT91X40 Board Type"
@@ -483,13 +450,6 @@ config MTD_AT91_DATAFLASH_CARD
help
Enable support for the DataFlash card.
-config MTD_NAND_ATMEL_BUSWIDTH_16
- bool "Enable 16-bit data bus interface to NAND flash"
- depends on HAVE_NAND_ATMEL_BUSWIDTH_16
- help
- On AT91SAM926x boards both types of NAND flash can be present
- (8 and 16 bit data bus width).
-
# ----------------------------------------------------------
comment "AT91 Feature Selections"
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index a83835e..9696623 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devi
obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
# AT91RM9200 board-specific support
@@ -78,9 +77,6 @@ obj-$(CONFIG_MACH_AT91SAM9M10G45EK) += board-sam9m10g45ek.o
# AT91CAP9 board-specific support
obj-$(CONFIG_MACH_AT91CAP9ADK) += board-cap9adk.o
-# AT572D940HF board-specific support
-obj-$(CONFIG_MACH_AT572D940HFEB) += board-at572d940hf_ek.o
-
# AT91X40 board-specific support
obj-$(CONFIG_MACH_AT91EB01) += board-eb01.o
diff --git a/arch/arm/mach-at91/at572d940hf.c b/arch/arm/mach-at91/at572d940hf.c
deleted file mode 100644
index a6b9c68..0000000
--- a/arch/arm/mach-at91/at572d940hf.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * arch/arm/mach-at91/at572d940hf.c
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-
-#include <asm/mach/irq.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <mach/at572d940hf.h>
-#include <mach/at91_pmc.h>
-#include <mach/at91_rstc.h>
-
-#include "generic.h"
-#include "clock.h"
-
-static struct map_desc at572d940hf_io_desc[] __initdata = {
- {
- .virtual = AT91_VA_BASE_SYS,
- .pfn = __phys_to_pfn(AT91_BASE_SYS),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = AT91_IO_VIRT_BASE - AT572D940HF_SRAM_SIZE,
- .pfn = __phys_to_pfn(AT572D940HF_SRAM_BASE),
- .length = AT572D940HF_SRAM_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-/* --------------------------------------------------------------------
- * Clocks
- * -------------------------------------------------------------------- */
-
-/*
- * The peripheral clocks.
- */
-static struct clk pioA_clk = {
- .name = "pioA_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOA,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk pioB_clk = {
- .name = "pioB_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOB,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk pioC_clk = {
- .name = "pioC_clk",
- .pmc_mask = 1 << AT572D940HF_ID_PIOC,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk macb_clk = {
- .name = "macb_clk",
- .pmc_mask = 1 << AT572D940HF_ID_EMAC,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart0_clk = {
- .name = "usart0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart1_clk = {
- .name = "usart1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk usart2_clk = {
- .name = "usart2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_US2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk mmc_clk = {
- .name = "mci_clk",
- .pmc_mask = 1 << AT572D940HF_ID_MCI,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk udc_clk = {
- .name = "udc_clk",
- .pmc_mask = 1 << AT572D940HF_ID_UDP,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk twi0_clk = {
- .name = "twi0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TWI0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk spi0_clk = {
- .name = "spi0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SPI0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk spi1_clk = {
- .name = "spi1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SPI1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc0_clk = {
- .name = "ssc0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc1_clk = {
- .name = "ssc1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc2_clk = {
- .name = "ssc2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc0_clk = {
- .name = "tc0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc1_clk = {
- .name = "tc1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk tc2_clk = {
- .name = "tc2_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TC2,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ohci_clk = {
- .name = "ohci_clk",
- .pmc_mask = 1 << AT572D940HF_ID_UHP,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk ssc3_clk = {
- .name = "ssc3_clk",
- .pmc_mask = 1 << AT572D940HF_ID_SSC3,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk twi1_clk = {
- .name = "twi1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_TWI1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk can0_clk = {
- .name = "can0_clk",
- .pmc_mask = 1 << AT572D940HF_ID_CAN0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk can1_clk = {
- .name = "can1_clk",
- .pmc_mask = 1 << AT572D940HF_ID_CAN1,
- .type = CLK_TYPE_PERIPHERAL,
-};
-static struct clk mAgicV_clk = {
- .name = "mAgicV_clk",
- .pmc_mask = 1 << AT572D940HF_ID_MSIRQ0,
- .type = CLK_TYPE_PERIPHERAL,
-};
-
-
-static struct clk *periph_clocks[] __initdata = {
- &pioA_clk,
- &pioB_clk,
- &pioC_clk,
- &macb_clk,
- &usart0_clk,
- &usart1_clk,
- &usart2_clk,
- &mmc_clk,
- &udc_clk,
- &twi0_clk,
- &spi0_clk,
- &spi1_clk,
- &ssc0_clk,
- &ssc1_clk,
- &ssc2_clk,
- &tc0_clk,
- &tc1_clk,
- &tc2_clk,
- &ohci_clk,
- &ssc3_clk,
- &twi1_clk,
- &can0_clk,
- &can1_clk,
- &mAgicV_clk,
- /* irq0 .. irq2 */
-};
-
-/*
- * The five programmable clocks.
- * You must configure pin multiplexing to bring these signals out.
- */
-static struct clk pck0 = {
- .name = "pck0",
- .pmc_mask = AT91_PMC_PCK0,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 0,
-};
-static struct clk pck1 = {
- .name = "pck1",
- .pmc_mask = AT91_PMC_PCK1,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 1,
-};
-static struct clk pck2 = {
- .name = "pck2",
- .pmc_mask = AT91_PMC_PCK2,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 2,
-};
-static struct clk pck3 = {
- .name = "pck3",
- .pmc_mask = AT91_PMC_PCK3,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 3,
-};
-
-static struct clk mAgicV_mem_clk = {
- .name = "mAgicV_mem_clk",
- .pmc_mask = AT91_PMC_PCK4,
- .type = CLK_TYPE_PROGRAMMABLE,
- .id = 4,
-};
-
-/* HClocks */
-static struct clk hck0 = {
- .name = "hck0",
- .pmc_mask = AT91_PMC_HCK0,
- .type = CLK_TYPE_SYSTEM,
- .id = 0,
-};
-static struct clk hck1 = {
- .name = "hck1",
- .pmc_mask = AT91_PMC_HCK1,
- .type = CLK_TYPE_SYSTEM,
- .id = 1,
-};
-
-static void __init at572d940hf_register_clocks(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
- clk_register(periph_clocks[i]);
-
- clk_register(&pck0);
- clk_register(&pck1);
- clk_register(&pck2);
- clk_register(&pck3);
- clk_register(&mAgicV_mem_clk);
-
- clk_register(&hck0);
- clk_register(&hck1);
-}
-
-/* --------------------------------------------------------------------
- * GPIO
- * -------------------------------------------------------------------- */
-
-static struct at91_gpio_bank at572d940hf_gpio[] = {
- {
- .id = AT572D940HF_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
- }, {
- .id = AT572D940HF_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
- }, {
- .id = AT572D940HF_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
- }
-};
-
-static void at572d940hf_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
-
-/* --------------------------------------------------------------------
- * AT572D940HF processor initialization
- * -------------------------------------------------------------------- */
-
-void __init at572d940hf_initialize(unsigned long main_clock)
-{
- /* Map peripherals */
- iotable_init(at572d940hf_io_desc, ARRAY_SIZE(at572d940hf_io_desc));
-
- at91_arch_reset = at572d940hf_reset;
- at91_extern_irq = (1 << AT572D940HF_ID_IRQ0) | (1 << AT572D940HF_ID_IRQ1)
- | (1 << AT572D940HF_ID_IRQ2);
-
- /* Init clock subsystem */
- at91_clock_init(main_clock);
-
- /* Register the processor-specific clocks */
- at572d940hf_register_clocks();
-
- /* Register GPIO subsystem */
- at91_gpio_init(at572d940hf_gpio, 3);
-}
-
-/* --------------------------------------------------------------------
- * Interrupt initialization
- * -------------------------------------------------------------------- */
-
-/*
- * The default interrupt priority levels (0 = lowest, 7 = highest).
- */
-static unsigned int at572d940hf_default_irq_priority[NR_AIC_IRQS] __initdata = {
- 7, /* Advanced Interrupt Controller */
- 7, /* System Peripherals */
- 0, /* Parallel IO Controller A */
- 0, /* Parallel IO Controller B */
- 0, /* Parallel IO Controller C */
- 3, /* Ethernet */
- 6, /* USART 0 */
- 6, /* USART 1 */
- 6, /* USART 2 */
- 0, /* Multimedia Card Interface */
- 4, /* USB Device Port */
- 0, /* Two-Wire Interface 0 */
- 6, /* Serial Peripheral Interface 0 */
- 6, /* Serial Peripheral Interface 1 */
- 5, /* Serial Synchronous Controller 0 */
- 5, /* Serial Synchronous Controller 1 */
- 5, /* Serial Synchronous Controller 2 */
- 0, /* Timer Counter 0 */
- 0, /* Timer Counter 1 */
- 0, /* Timer Counter 2 */
- 3, /* USB Host port */
- 3, /* Serial Synchronous Controller 3 */
- 0, /* Two-Wire Interface 1 */
- 0, /* CAN Controller 0 */
- 0, /* CAN Controller 1 */
- 0, /* mAgicV HALT line */
- 0, /* mAgicV SIRQ0 line */
- 0, /* mAgicV exception line */
- 0, /* mAgicV end of DMA line */
- 0, /* Advanced Interrupt Controller */
- 0, /* Advanced Interrupt Controller */
- 0, /* Advanced Interrupt Controller */
-};
-
-void __init at572d940hf_init_interrupts(unsigned int priority[NR_AIC_IRQS])
-{
- if (!priority)
- priority = at572d940hf_default_irq_priority;
-
- /* Initialize the AIC interrupt controller */
- at91_aic_init(priority);
-
- /* Enable GPIO interrupts */
- at91_gpio_irq_setup();
-}
-
diff --git a/arch/arm/mach-at91/at572d940hf_devices.c b/arch/arm/mach-at91/at572d940hf_devices.c
deleted file mode 100644
index 0fc20a2..0000000
--- a/arch/arm/mach-at91/at572d940hf_devices.c
+++ /dev/null
@@ -1,970 +0,0 @@
-/*
- * arch/arm/mach-at91/at572d940hf_devices.c
- *
- * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
- * Copyright (C) 2005 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <mach/board.h>
-#include <mach/gpio.h>
-#include <mach/at572d940hf.h>
-#include <mach/at572d940hf_matrix.h>
-#include <mach/at91sam9_smc.h>
-
-#include "generic.h"
-#include "sam9_smc.h"
-
-
-/* --------------------------------------------------------------------
- * USB Host
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-static u64 ohci_dmamask = DMA_BIT_MASK(32);
-static struct at91_usbh_data usbh_data;
-
-static struct resource usbh_resources[] = {
- [0] = {
- .start = AT572D940HF_UHP_BASE,
- .end = AT572D940HF_UHP_BASE + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_UHP,
- .end = AT572D940HF_ID_UHP,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_usbh_device = {
- .name = "at91_ohci",
- .id = -1,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &usbh_data,
- },
- .resource = usbh_resources,
- .num_resources = ARRAY_SIZE(usbh_resources),
-};
-
-void __init at91_add_device_usbh(struct at91_usbh_data *data)
-{
- if (!data)
- return;
-
- usbh_data = *data;
- platform_device_register(&at572d940hf_usbh_device);
-
-}
-#else
-void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * USB Device (Gadget)
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_USB_GADGET_AT91
-static struct at91_udc_data udc_data;
-
-static struct resource udc_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_UDP,
- .end = AT572D940HF_BASE_UDP + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_UDP,
- .end = AT572D940HF_ID_UDP,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_udc_device = {
- .name = "at91_udc",
- .id = -1,
- .dev = {
- .platform_data = &udc_data,
- },
- .resource = udc_resources,
- .num_resources = ARRAY_SIZE(udc_resources),
-};
-
-void __init at91_add_device_udc(struct at91_udc_data *data)
-{
- if (!data)
- return;
-
- if (data->vbus_pin) {
- at91_set_gpio_input(data->vbus_pin, 0);
- at91_set_deglitch(data->vbus_pin, 1);
- }
-
- /* Pullup pin is handled internally */
-
- udc_data = *data;
- platform_device_register(&at572d940hf_udc_device);
-}
-#else
-void __init at91_add_device_udc(struct at91_udc_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * Ethernet
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
-static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
-
-static struct resource eth_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_EMAC,
- .end = AT572D940HF_BASE_EMAC + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_EMAC,
- .end = AT572D940HF_ID_EMAC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_eth_device = {
- .name = "macb",
- .id = -1,
- .dev = {
- .dma_mask = &eth_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &eth_data,
- },
- .resource = eth_resources,
- .num_resources = ARRAY_SIZE(eth_resources),
-};
-
-void __init at91_add_device_eth(struct at91_eth_data *data)
-{
- if (!data)
- return;
-
- if (data->phy_irq_pin) {
- at91_set_gpio_input(data->phy_irq_pin, 0);
- at91_set_deglitch(data->phy_irq_pin, 1);
- }
-
- /* Only RMII is supported */
- data->is_rmii = 1;
-
- /* Pins used for RMII */
- at91_set_A_periph(AT91_PIN_PA16, 0); /* ETXCK_EREFCK */
- at91_set_A_periph(AT91_PIN_PA17, 0); /* ERXDV */
- at91_set_A_periph(AT91_PIN_PA18, 0); /* ERX0 */
- at91_set_A_periph(AT91_PIN_PA19, 0); /* ERX1 */
- at91_set_A_periph(AT91_PIN_PA20, 0); /* ERXER */
- at91_set_A_periph(AT91_PIN_PA23, 0); /* ETXEN */
- at91_set_A_periph(AT91_PIN_PA21, 0); /* ETX0 */
- at91_set_A_periph(AT91_PIN_PA22, 0); /* ETX1 */
- at91_set_A_periph(AT91_PIN_PA13, 0); /* EMDIO */
- at91_set_A_periph(AT91_PIN_PA14, 0); /* EMDC */
-
- eth_data = *data;
- platform_device_register(&at572d940hf_eth_device);
-}
-#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * MMC / SD
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
-static u64 mmc_dmamask = DMA_BIT_MASK(32);
-static struct at91_mmc_data mmc_data;
-
-static struct resource mmc_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_MCI,
- .end = AT572D940HF_BASE_MCI + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_MCI,
- .end = AT572D940HF_ID_MCI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_mmc_device = {
- .name = "at91_mci",
- .id = -1,
- .dev = {
- .dma_mask = &mmc_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &mmc_data,
- },
- .resource = mmc_resources,
- .num_resources = ARRAY_SIZE(mmc_resources),
-};
-
-void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
-{
- if (!data)
- return;
-
- /* input/irq */
- if (data->det_pin) {
- at91_set_gpio_input(data->det_pin, 1);
- at91_set_deglitch(data->det_pin, 1);
- }
- if (data->wp_pin)
- at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
- at91_set_gpio_output(data->vcc_pin, 0);
-
- /* CLK */
- at91_set_A_periph(AT91_PIN_PC22, 0);
-
- /* CMD */
- at91_set_A_periph(AT91_PIN_PC23, 1);
-
- /* DAT0, maybe DAT1..DAT3 */
- at91_set_A_periph(AT91_PIN_PC24, 1);
- if (data->wire4) {
- at91_set_A_periph(AT91_PIN_PC25, 1);
- at91_set_A_periph(AT91_PIN_PC26, 1);
- at91_set_A_periph(AT91_PIN_PC27, 1);
- }
-
- mmc_data = *data;
- platform_device_register(&at572d940hf_mmc_device);
-}
-#else
-void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * NAND / SmartMedia
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
-static struct atmel_nand_data nand_data;
-
-#define NAND_BASE AT91_CHIPSELECT_3
-
-static struct resource nand_resources[] = {
- {
- .start = NAND_BASE,
- .end = NAND_BASE + SZ_256M - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device at572d940hf_nand_device = {
- .name = "atmel_nand",
- .id = -1,
- .dev = {
- .platform_data = &nand_data,
- },
- .resource = nand_resources,
- .num_resources = ARRAY_SIZE(nand_resources),
-};
-
-void __init at91_add_device_nand(struct atmel_nand_data *data)
-{
- unsigned long csa;
-
- if (!data)
- return;
-
- csa = at91_sys_read(AT91_MATRIX_EBICSA);
- at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
-
- /* enable pin */
- if (data->enable_pin)
- at91_set_gpio_output(data->enable_pin, 1);
-
- /* ready/busy pin */
- if (data->rdy_pin)
- at91_set_gpio_input(data->rdy_pin, 1);
-
- /* card detect pin */
- if (data->det_pin)
- at91_set_gpio_input(data->det_pin, 1);
-
- at91_set_A_periph(AT91_PIN_PB28, 0); /* A[22] */
- at91_set_B_periph(AT91_PIN_PA28, 0); /* NANDOE */
- at91_set_B_periph(AT91_PIN_PA29, 0); /* NANDWE */
-
- nand_data = *data;
- platform_device_register(&at572d940hf_nand_device);
-}
-
-#else
-void __init at91_add_device_nand(struct atmel_nand_data *data) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * TWI (i2c)
- * -------------------------------------------------------------------- */
-
-/*
- * Prefer the GPIO code since the TWI controller isn't robust
- * (gets overruns and underruns under load) and can only issue
- * repeated STARTs in one scenario (the driver doesn't yet handle them).
- */
-
-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
-
-static struct i2c_gpio_platform_data pdata = {
- .sda_pin = AT91_PIN_PC7,
- .sda_is_open_drain = 1,
- .scl_pin = AT91_PIN_PC8,
- .scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
-};
-
-static struct platform_device at572d940hf_twi_device {
- .name = "i2c-gpio",
- .id = -1,
- .dev.platform_data = &pdata,
-};
-
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
-{
- at91_set_GPIO_periph(AT91_PIN_PC7, 1); /* TWD (SDA) */
- at91_set_multi_drive(AT91_PIN_PC7, 1);
-
- at91_set_GPIO_periph(AT91_PIN_PA8, 1); /* TWCK (SCL) */
- at91_set_multi_drive(AT91_PIN_PC8, 1);
-
- i2c_register_board_info(0, devices, nr_devices);
- platform_device_register(&at572d940hf_twi_device);
-}
-
-#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
-
-static struct resource twi0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TWI0,
- .end = AT572D940HF_BASE_TWI0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TWI0,
- .end = AT572D940HF_ID_TWI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_twi0_device = {
- .name = "at91_i2c",
- .id = 0,
- .resource = twi0_resources,
- .num_resources = ARRAY_SIZE(twi0_resources),
-};
-
-static struct resource twi1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TWI1,
- .end = AT572D940HF_BASE_TWI1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TWI1,
- .end = AT572D940HF_ID_TWI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_twi1_device = {
- .name = "at91_i2c",
- .id = 1,
- .resource = twi1_resources,
- .num_resources = ARRAY_SIZE(twi1_resources),
-};
-
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
-{
- /* pins used for TWI0 interface */
- at91_set_A_periph(AT91_PIN_PC7, 0); /* TWD */
- at91_set_multi_drive(AT91_PIN_PC7, 1);
-
- at91_set_A_periph(AT91_PIN_PC8, 0); /* TWCK */
- at91_set_multi_drive(AT91_PIN_PC8, 1);
-
- /* pins used for TWI1 interface */
- at91_set_A_periph(AT91_PIN_PC20, 0); /* TWD */
- at91_set_multi_drive(AT91_PIN_PC20, 1);
-
- at91_set_A_periph(AT91_PIN_PC21, 0); /* TWCK */
- at91_set_multi_drive(AT91_PIN_PC21, 1);
-
- i2c_register_board_info(0, devices, nr_devices);
- platform_device_register(&at572d940hf_twi0_device);
- platform_device_register(&at572d940hf_twi1_device);
-}
-#else
-void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * SPI
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-static struct resource spi0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_SPI0,
- .end = AT572D940HF_BASE_SPI0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_SPI0,
- .end = AT572D940HF_ID_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_spi0_device = {
- .name = "atmel_spi",
- .id = 0,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .resource = spi0_resources,
- .num_resources = ARRAY_SIZE(spi0_resources),
-};
-
-static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 };
-
-static struct resource spi1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_SPI1,
- .end = AT572D940HF_BASE_SPI1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_SPI1,
- .end = AT572D940HF_ID_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_spi1_device = {
- .name = "atmel_spi",
- .id = 1,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .resource = spi1_resources,
- .num_resources = ARRAY_SIZE(spi1_resources),
-};
-
-static const unsigned spi1_standard_cs[4] = { AT91_PIN_PC3, AT91_PIN_PC4, AT91_PIN_PC5, AT91_PIN_PC6 };
-
-void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
-{
- int i;
- unsigned long cs_pin;
- short enable_spi0 = 0;
- short enable_spi1 = 0;
-
- /* Choose SPI chip-selects */
- for (i = 0; i < nr_devices; i++) {
- if (devices[i].controller_data)
- cs_pin = (unsigned long) devices[i].controller_data;
- else if (devices[i].bus_num == 0)
- cs_pin = spi0_standard_cs[devices[i].chip_select];
- else
- cs_pin = spi1_standard_cs[devices[i].chip_select];
-
- if (devices[i].bus_num == 0)
- enable_spi0 = 1;
- else
- enable_spi1 = 1;
-
- /* enable chip-select pin */
- at91_set_gpio_output(cs_pin, 1);
-
- /* pass chip-select pin to driver */
- devices[i].controller_data = (void *) cs_pin;
- }
-
- spi_register_board_info(devices, nr_devices);
-
- /* Configure SPI bus(es) */
- if (enable_spi0) {
- at91_set_A_periph(AT91_PIN_PA0, 0); /* SPI0_MISO */
- at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
- at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
-
- at91_clock_associate("spi0_clk", &at572d940hf_spi0_device.dev, "spi_clk");
- platform_device_register(&at572d940hf_spi0_device);
- }
- if (enable_spi1) {
- at91_set_A_periph(AT91_PIN_PC0, 0); /* SPI1_MISO */
- at91_set_A_periph(AT91_PIN_PC1, 0); /* SPI1_MOSI */
- at91_set_A_periph(AT91_PIN_PC2, 0); /* SPI1_SPCK */
-
- at91_clock_associate("spi1_clk", &at572d940hf_spi1_device.dev, "spi_clk");
- platform_device_register(&at572d940hf_spi1_device);
- }
-}
-#else
-void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * Timer/Counter blocks
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_ATMEL_TCLIB
-
-static struct resource tcb_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_TCB,
- .end = AT572D940HF_BASE_TCB + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_TC0,
- .end = AT572D940HF_ID_TC0,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = AT572D940HF_ID_TC1,
- .end = AT572D940HF_ID_TC1,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = AT572D940HF_ID_TC2,
- .end = AT572D940HF_ID_TC2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device at572d940hf_tcb_device = {
- .name = "atmel_tcb",
- .id = 0,
- .resource = tcb_resources,
- .num_resources = ARRAY_SIZE(tcb_resources),
-};
-
-static void __init at91_add_device_tc(void)
-{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at572d940hf_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at572d940hf_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at572d940hf_tcb_device.dev, "t2_clk");
- platform_device_register(&at572d940hf_tcb_device);
-}
-#else
-static void __init at91_add_device_tc(void) { }
-#endif
-
-
-/* --------------------------------------------------------------------
- * RTT
- * -------------------------------------------------------------------- */
-
-static struct resource rtt_resources[] = {
- {
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device at572d940hf_rtt_device = {
- .name = "at91_rtt",
- .id = 0,
- .resource = rtt_resources,
- .num_resources = ARRAY_SIZE(rtt_resources),
-};
-
-static void __init at91_add_device_rtt(void)
-{
- platform_device_register(&at572d940hf_rtt_device);
-}
-
-
-/* --------------------------------------------------------------------
- * Watchdog
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
-static struct platform_device at572d940hf_wdt_device = {
- .name = "at91_wdt",
- .id = -1,
- .num_resources = 0,
-};
-
-static void __init at91_add_device_watchdog(void)
-{
- platform_device_register(&at572d940hf_wdt_device);
-}
-#else
-static void __init at91_add_device_watchdog(void) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * UART
- * -------------------------------------------------------------------- */
-
-#if defined(CONFIG_SERIAL_ATMEL)
-static struct resource dbgu_resources[] = {
- [0] = {
- .start = AT91_VA_BASE_SYS + AT91_DBGU,
- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data dbgu_data = {
- .use_dma_tx = 0,
- .use_dma_rx = 0, /* DBGU not capable of receive DMA */
- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
-};
-
-static u64 dbgu_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_dbgu_device = {
- .name = "atmel_usart",
- .id = 0,
- .dev = {
- .dma_mask = &dbgu_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &dbgu_data,
- },
- .resource = dbgu_resources,
- .num_resources = ARRAY_SIZE(dbgu_resources),
-};
-
-static inline void configure_dbgu_pins(void)
-{
- at91_set_A_periph(AT91_PIN_PC31, 1); /* DTXD */
- at91_set_A_periph(AT91_PIN_PC30, 0); /* DRXD */
-}
-
-static struct resource uart0_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US0,
- .end = AT572D940HF_BASE_US0 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US0,
- .end = AT572D940HF_ID_US0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart0_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart0_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart0_device = {
- .name = "atmel_usart",
- .id = 1,
- .dev = {
- .dma_mask = &uart0_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart0_data,
- },
- .resource = uart0_resources,
- .num_resources = ARRAY_SIZE(uart0_resources),
-};
-
-static inline void configure_usart0_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PA8, 1); /* TXD0 */
- at91_set_A_periph(AT91_PIN_PA7, 0); /* RXD0 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PA10, 0); /* RTS0 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PA9, 0); /* CTS0 */
-}
-
-static struct resource uart1_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US1,
- .end = AT572D940HF_BASE_US1 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US1,
- .end = AT572D940HF_ID_US1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart1_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart1_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart1_device = {
- .name = "atmel_usart",
- .id = 2,
- .dev = {
- .dma_mask = &uart1_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart1_data,
- },
- .resource = uart1_resources,
- .num_resources = ARRAY_SIZE(uart1_resources),
-};
-
-static inline void configure_usart1_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PC10, 1); /* TXD1 */
- at91_set_A_periph(AT91_PIN_PC9 , 0); /* RXD1 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PC12, 0); /* RTS1 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PC11, 0); /* CTS1 */
-}
-
-static struct resource uart2_resources[] = {
- [0] = {
- .start = AT572D940HF_BASE_US2,
- .end = AT572D940HF_BASE_US2 + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = AT572D940HF_ID_US2,
- .end = AT572D940HF_ID_US2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct atmel_uart_data uart2_data = {
- .use_dma_tx = 1,
- .use_dma_rx = 1,
-};
-
-static u64 uart2_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device at572d940hf_uart2_device = {
- .name = "atmel_usart",
- .id = 3,
- .dev = {
- .dma_mask = &uart2_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &uart2_data,
- },
- .resource = uart2_resources,
- .num_resources = ARRAY_SIZE(uart2_resources),
-};
-
-static inline void configure_usart2_pins(unsigned pins)
-{
- at91_set_A_periph(AT91_PIN_PC15, 1); /* TXD2 */
- at91_set_A_periph(AT91_PIN_PC14, 0); /* RXD2 */
-
- if (pins & ATMEL_UART_RTS)
- at91_set_A_periph(AT91_PIN_PC17, 0); /* RTS2 */
- if (pins & ATMEL_UART_CTS)
- at91_set_A_periph(AT91_PIN_PC16, 0); /* CTS2 */
-}
-
-static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
-struct platform_device *atmel_default_console_device; /* the serial console device */
-
-void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
-{
- struct platform_device *pdev;
-
- switch (id) {
- case 0: /* DBGU */
- pdev = &at572d940hf_dbgu_device;
- configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US0:
- pdev = &at572d940hf_uart0_device;
- configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US1:
- pdev = &at572d940hf_uart1_device;
- configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
- break;
- case AT572D940HF_ID_US2:
- pdev = &at572d940hf_uart2_device;
- configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
- break;
- default:
- return;
- }
- pdev->id = portnr; /* update to mapped ID */
-
- if (portnr < ATMEL_MAX_UART)
- at91_uarts[portnr] = pdev;
-}
-
-void __init at91_set_serial_console(unsigned portnr)
-{
- if (portnr < ATMEL_MAX_UART)
- atmel_default_console_device = at91_uarts[portnr];
-}
-
-void __init at91_add_device_serial(void)
-{
- int i;
-
- for (i = 0; i < ATMEL_MAX_UART; i++) {
- if (at91_uarts[i])
- platform_device_register(at91_uarts[i]);
- }
-
- if (!atmel_default_console_device)
- printk(KERN_INFO "AT91: No default serial console defined.\n");
-}
-
-#else
-void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
-void __init at91_set_serial_console(unsigned portnr) {}
-void __init at91_add_device_serial(void) {}
-#endif
-
-
-/* --------------------------------------------------------------------
- * mAgic
- * -------------------------------------------------------------------- */
-
-#ifdef CONFIG_MAGICV
-static struct resource mAgic_resources[] = {
- {
- .start = AT91_MAGIC_PM_BASE,
- .end = AT91_MAGIC_PM_BASE + AT91_MAGIC_PM_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_I_BASE,
- .end = AT91_MAGIC_DM_I_BASE + AT91_MAGIC_DM_I_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_F_BASE,
- .end = AT91_MAGIC_DM_F_BASE + AT91_MAGIC_DM_F_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_DM_DB_BASE,
- .end = AT91_MAGIC_DM_DB_BASE + AT91_MAGIC_DM_DB_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_REGS_BASE,
- .end = AT91_MAGIC_REGS_BASE + AT91_MAGIC_REGS_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT91_MAGIC_EXTPAGE_BASE,
- .end = AT91_MAGIC_EXTPAGE_BASE + AT91_MAGIC_EXTPAGE_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = AT572D940HF_ID_MSIRQ0,
- .end = AT572D940HF_ID_MSIRQ0,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MHALT,
- .end = AT572D940HF_ID_MHALT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MEXC,
- .end = AT572D940HF_ID_MEXC,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = AT572D940HF_ID_MEDMA,
- .end = AT572D940HF_ID_MEDMA,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device mAgic_device = {
- .name = "mAgic",
- .id = -1,
- .num_resources = ARRAY_SIZE(mAgic_resources),
- .resource = mAgic_resources,
-};
-
-void __init at91_add_device_mAgic(void)
-{
- platform_device_register(&mAgic_device);
-}
-#else
-void __init at91_add_device_mAgic(void) {}
-#endif
-
-
-/* -------------------------------------------------------------------- */
-
-/*
- * These devices are always present and don't need any board-specific
- * setup.
- */
-static int __init at91_add_standard_devices(void)
-{
- at91_add_device_rtt();
- at91_add_device_watchdog();
- at91_add_device_tc();
- return 0;
-}
-
-arch_initcall(at91_add_standard_devices);
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 7337617..17fae4a 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -222,6 +222,25 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq1
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -258,12 +277,29 @@ static void __init at91cap9_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91cap9_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -303,11 +339,14 @@ static void at91cap9_poweroff(void)
* AT91CAP9 processor initialization
* -------------------------------------------------------------------- */
-void __init at91cap9_initialize(unsigned long main_clock)
+void __init at91cap9_map_io(void)
{
/* Map peripherals */
iotable_init(at91cap9_io_desc, ARRAY_SIZE(at91cap9_io_desc));
+}
+void __init at91cap9_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91cap9_reset;
pm_power_off = at91cap9_poweroff;
at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
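
The lookup tables added above replace the at91_clock_associate() calls that the *_devices.c hunks below delete: instead of the platform code binding a clk to a device at registration time, clkdev resolves the association when the driver asks for it. A minimal consumer-side sketch (hypothetical helper name, error handling trimmed):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* For the "at91_mci.0" device, this clk_get() is satisfied by the
 * CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk) entry above. */
static int at91_mci_grab_clock(struct platform_device *pdev)	/* hypothetical */
{
	struct clk *mci_clk = clk_get(&pdev->dev, "mci_clk");

	if (IS_ERR(mci_clk))
		return PTR_ERR(mci_clk);

	clk_enable(mci_clk);
	return 0;
}
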
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index 21020ce..cd850ed 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -181,10 +181,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -355,7 +351,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91cap9_mmc0_device.dev, "mci_clk");
platform_device_register(&at91cap9_mmc0_device);
} else { /* MCI1 */
/* CLK */
@@ -373,7 +368,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91cap9_mmc1_device.dev, "mci_clk");
platform_device_register(&at91cap9_mmc1_device);
}
}
@@ -614,7 +608,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91cap9_spi0_device.dev, "spi_clk");
platform_device_register(&at91cap9_spi0_device);
}
if (enable_spi1) {
@@ -622,7 +615,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91cap9_spi1_device.dev, "spi_clk");
platform_device_register(&at91cap9_spi1_device);
}
}
@@ -659,8 +651,6 @@ static struct platform_device at91cap9_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all three TC channels */
- at91_clock_associate("tcb_clk", &at91cap9_tcb_device.dev, "t0_clk");
platform_device_register(&at91cap9_tcb_device);
}
#else
@@ -1001,12 +991,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91CAP9_ID_SSC0:
pdev = &at91cap9_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
break;
case AT91CAP9_ID_SSC1:
pdev = &at91cap9_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
break;
default:
return;
@@ -1199,32 +1187,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91cap9_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US0:
pdev = &at91cap9_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US1:
pdev = &at91cap9_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91CAP9_ID_US2:
pdev = &at91cap9_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1232,8 +1218,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91cap9_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 2e9ecad..b228ce9 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -18,6 +18,7 @@
#include <mach/at91rm9200.h>
#include <mach/at91_pmc.h>
#include <mach/at91_st.h>
+#include <mach/cpu.h>
#include "generic.h"
#include "clock.h"
@@ -191,6 +192,26 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq6
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -227,12 +248,29 @@ static void __init at91rm9200_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91rm9200_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -266,15 +304,25 @@ static void at91rm9200_reset(void)
at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
}
+int rm9200_type;
+EXPORT_SYMBOL(rm9200_type);
+
+void __init at91rm9200_set_type(int type)
+{
+ rm9200_type = type;
+}
/* --------------------------------------------------------------------
* AT91RM9200 processor initialization
* -------------------------------------------------------------------- */
-void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks)
+void __init at91rm9200_map_io(void)
{
/* Map peripherals */
iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
+}
+void __init at91rm9200_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91rm9200_reset;
at91_extern_irq = (1 << AT91RM9200_ID_IRQ0) | (1 << AT91RM9200_ID_IRQ1)
| (1 << AT91RM9200_ID_IRQ2) | (1 << AT91RM9200_ID_IRQ3)
@@ -288,7 +336,8 @@ void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks)
at91rm9200_register_clocks();
/* Initialize GPIO subsystem */
- at91_gpio_init(at91rm9200_gpio, banks);
+ at91_gpio_init(at91rm9200_gpio,
+ cpu_is_at91rm9200_bga() ? AT91RM9200_BGA : AT91RM9200_PQFP);
}
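The clk_lookup tables above are what replace the at91_clock_associate() calls being deleted throughout this patch: each CLKDEV_CON_DEV_ID(con_id, dev_id, clk) entry binds a connection name and a device name such as "atmel_usart.1" or "atmel_tcb.0" to a struct clk, and clkdev_add_table() registers the whole array with the common clkdev layer. The con_id-only console_clock_lookup added by at91rm9200_set_console_clock() additionally makes the selected console port's clock reachable under the bare "usart" connection id. A minimal consumer-side sketch (hypothetical driver, standard clk API — not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe for a driver bound to platform device "atmel_tcb.0":
 * the clock is resolved purely from (device name, connection id), matching
 * the CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk) entry above,
 * with no at91_clock_associate() call needed in the SoC code.
 */
static int example_tc_probe(struct platform_device *pdev)
{
	struct clk *t0_clk;

	t0_clk = clk_get(&pdev->dev, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	clk_enable(t0_clk);
	/* ... program the timer/counter block ... */
	return 0;
}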
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 7b53922..a0ba475 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -644,15 +644,7 @@ static struct platform_device at91rm9200_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91rm9200_tcb0_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91rm9200_tcb0_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91rm9200_tcb0_device.dev, "t2_clk");
platform_device_register(&at91rm9200_tcb0_device);
-
- at91_clock_associate("tc3_clk", &at91rm9200_tcb1_device.dev, "t0_clk");
- at91_clock_associate("tc4_clk", &at91rm9200_tcb1_device.dev, "t1_clk");
- at91_clock_associate("tc5_clk", &at91rm9200_tcb1_device.dev, "t2_clk");
platform_device_register(&at91rm9200_tcb1_device);
}
#else
@@ -849,17 +841,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91RM9200_ID_SSC0:
pdev = &at91rm9200_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
break;
case AT91RM9200_ID_SSC1:
pdev = &at91rm9200_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
break;
case AT91RM9200_ID_SSC2:
pdev = &at91rm9200_ssc2_device;
configure_ssc2_pins(pins);
- at91_clock_associate("ssc2_clk", &pdev->dev, "ssc");
break;
default:
return;
@@ -1109,37 +1098,34 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91rm9200_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US0:
pdev = &at91rm9200_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US1:
pdev = &at91rm9200_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US2:
pdev = &at91rm9200_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91RM9200_ID_US3:
pdev = &at91rm9200_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1147,8 +1133,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91rm9200_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
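Note the change at the end of at91_register_uart() in each of these files: pdev->id is no longer overwritten with the board-chosen port number, so the platform device keeps its fixed "atmel_usart.<n>" name — the same name the usart_clocks_lookups tables key on — and the mapping to a tty port now travels in the atmel_uart_data platform data instead. A driver-side sketch of how that field would be consumed (the matching atmel_serial update is a separate patch, not shown here; names and error handling trimmed):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <mach/board.h>		/* struct atmel_uart_data on AT91 */

/* Hypothetical serial probe: the tty line comes from platform data while
 * the clock lookup still matches on the unchanged "atmel_usart.<id>" name.
 */
static int example_uart_probe(struct platform_device *pdev)
{
	struct atmel_uart_data *pdata = pdev->dev.platform_data;
	struct clk *clk;

	clk = clk_get(&pdev->dev, "usart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	dev_info(&pdev->dev, "mapped to port %d\n", pdata->num);
	/* ... register the uart port ... */
	return 0;
}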
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 195208b..7d606b0 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -231,6 +231,28 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq2
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk),
+};
+
/*
* The two programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -255,10 +277,27 @@ static void __init at91sam9260_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9260_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -289,7 +328,7 @@ static void at91sam9260_poweroff(void)
* AT91SAM9260 processor initialization
* -------------------------------------------------------------------- */
-static void __init at91sam9xe_initialize(void)
+static void __init at91sam9xe_map_io(void)
{
unsigned long cidr, sram_size;
@@ -310,18 +349,21 @@ static void __init at91sam9xe_initialize(void)
iotable_init(at91sam9xe_sram_desc, ARRAY_SIZE(at91sam9xe_sram_desc));
}
-void __init at91sam9260_initialize(unsigned long main_clock)
+void __init at91sam9260_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9260_io_desc, ARRAY_SIZE(at91sam9260_io_desc));
if (cpu_is_at91sam9xe())
- at91sam9xe_initialize();
+ at91sam9xe_map_io();
else if (cpu_is_at91sam9g20())
iotable_init(at91sam9g20_sram_desc, ARRAY_SIZE(at91sam9g20_sram_desc));
else
iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc));
+}
+void __init at91sam9260_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9260_poweroff;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
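Alongside the clock rework, each SoC's setup is split in two: the new at91sam9260_map_io() (and its siblings) only installs the static I/O mappings for the .map_io machine hook, while at91sam9260_initialize() keeps the reset/power-off hooks, clock registration and GPIO setup and is now called from the board's early init. The board-file conversions later in this patch all follow the same shape; RM9200 boards additionally call at91rm9200_set_type() there, which is why the package/bank argument disappears from at91rm9200_initialize(). A skeleton of a converted SAM9260 board, with board names and hooks that are illustrative rather than taken from this patch:

#include <linux/init.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/board.h>
#include "generic.h"

static void __init myboard_init_early(void)
{
	/* Initialize processor: 18.432 MHz crystal */
	at91sam9260_initialize(18432000);

	/* DBGU on ttyS0 (Rx & Tx only), used as the console */
	at91_register_uart(0, 0, 0);
	at91_set_serial_console(0);
}

static void __init myboard_init_irq(void)
{
	at91sam9260_init_interrupts(NULL);
}

static void __init myboard_board_init(void)
{
	at91_add_device_serial();
}

MACHINE_START(MYBOARD, "Hypothetical AT91SAM9260 board")
	.timer		= &at91sam926x_timer,
	.map_io		= at91sam9260_map_io,	/* SoC static mappings only */
	.init_early	= myboard_init_early,	/* clocks, console, uarts */
	.init_irq	= myboard_init_irq,
	.init_machine	= myboard_board_init,
MACHINE_END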
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 07eb7b0..1fdeb90 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -609,7 +609,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9260_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9260_spi0_device);
}
if (enable_spi1) {
@@ -617,7 +616,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9260_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9260_spi1_device);
}
}
@@ -694,15 +692,7 @@ static struct platform_device at91sam9260_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9260_tcb0_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9260_tcb0_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9260_tcb0_device.dev, "t2_clk");
platform_device_register(&at91sam9260_tcb0_device);
-
- at91_clock_associate("tc3_clk", &at91sam9260_tcb1_device.dev, "t0_clk");
- at91_clock_associate("tc4_clk", &at91sam9260_tcb1_device.dev, "t1_clk");
- at91_clock_associate("tc5_clk", &at91sam9260_tcb1_device.dev, "t2_clk");
platform_device_register(&at91sam9260_tcb1_device);
}
#else
@@ -820,7 +810,6 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9260_ID_SSC:
pdev = &at91sam9260_ssc_device;
configure_ssc_pins(pins);
- at91_clock_associate("ssc_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1139,47 +1128,42 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9260_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US0:
pdev = &at91sam9260_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US1:
pdev = &at91sam9260_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US2:
pdev = &at91sam9260_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US3:
pdev = &at91sam9260_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US4:
pdev = &at91sam9260_uart4_device;
configure_usart4_pins();
- at91_clock_associate("usart4_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US5:
pdev = &at91sam9260_uart5_device;
configure_usart5_pins();
- at91_clock_associate("usart5_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1187,8 +1171,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9260_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index fcad886..c148316 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -178,6 +178,24 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq2
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -228,6 +246,11 @@ static void __init at91sam9261_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
@@ -237,6 +260,18 @@ static void __init at91sam9261_register_clocks(void)
clk_register(&hck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9261_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -267,7 +302,7 @@ static void at91sam9261_poweroff(void)
* AT91SAM9261 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9261_initialize(unsigned long main_clock)
+void __init at91sam9261_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc));
@@ -276,8 +311,10 @@ void __init at91sam9261_initialize(unsigned long main_clock)
iotable_init(at91sam9g10_sram_desc, ARRAY_SIZE(at91sam9g10_sram_desc));
else
iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
+}
-
+void __init at91sam9261_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9261_poweroff;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 59fc483..3eb4538 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -426,7 +426,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9261_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9261_spi0_device);
}
if (enable_spi1) {
@@ -434,7 +433,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB31, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB29, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9261_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9261_spi1_device);
}
}
@@ -581,10 +579,6 @@ static struct platform_device at91sam9261_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9261_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9261_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9261_tcb_device.dev, "t2_clk");
platform_device_register(&at91sam9261_tcb_device);
}
#else
@@ -786,17 +780,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9261_ID_SSC0:
pdev = &at91sam9261_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9261_ID_SSC1:
pdev = &at91sam9261_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
case AT91SAM9261_ID_SSC2:
pdev = &at91sam9261_ssc2_device;
configure_ssc2_pins(pins);
- at91_clock_associate("ssc2_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -989,32 +980,30 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9261_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US0:
pdev = &at91sam9261_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US1:
pdev = &at91sam9261_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9261_ID_US2:
pdev = &at91sam9261_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1022,8 +1011,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9261_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 249f900..dc28477 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -199,6 +199,23 @@ static struct clk *periph_clocks[] __initdata = {
// irq0 .. irq1
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+};
+
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -235,12 +252,29 @@ static void __init at91sam9263_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9263_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -279,11 +313,14 @@ static void at91sam9263_poweroff(void)
* AT91SAM9263 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9263_initialize(unsigned long main_clock)
+void __init at91sam9263_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc));
+}
+void __init at91sam9263_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9263_poweroff;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index fb5c23a..ffe081b 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -308,7 +308,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91sam9263_mmc0_device.dev, "mci_clk");
platform_device_register(&at91sam9263_mmc0_device);
} else { /* MCI1 */
/* CLK */
@@ -339,7 +338,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91sam9263_mmc1_device.dev, "mci_clk");
platform_device_register(&at91sam9263_mmc1_device);
}
}
@@ -686,7 +684,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9263_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9263_spi0_device);
}
if (enable_spi1) {
@@ -694,7 +691,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9263_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9263_spi1_device);
}
}
@@ -941,8 +937,6 @@ static struct platform_device at91sam9263_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all three TC channels */
- at91_clock_associate("tcb_clk", &at91sam9263_tcb_device.dev, "t0_clk");
platform_device_register(&at91sam9263_tcb_device);
}
#else
@@ -1171,12 +1165,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9263_ID_SSC0:
pdev = &at91sam9263_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9263_ID_SSC1:
pdev = &at91sam9263_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1370,32 +1362,30 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9263_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US0:
pdev = &at91sam9263_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US1:
pdev = &at91sam9263_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9263_ID_US2:
pdev = &at91sam9263_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1403,8 +1393,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9263_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index c67b47f..2bb6ff9 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -184,22 +184,6 @@ static struct clk vdec_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
-/* One additional fake clock for ohci */
-static struct clk ohci_clk = {
- .name = "ohci_clk",
- .pmc_mask = 0,
- .type = CLK_TYPE_PERIPHERAL,
- .parent = &uhphs_clk,
-};
-
-/* One additional fake clock for second TC block */
-static struct clk tcb1_clk = {
- .name = "tcb1_clk",
- .pmc_mask = 0,
- .type = CLK_TYPE_PERIPHERAL,
- .parent = &tcb0_clk,
-};
-
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -228,8 +212,30 @@ static struct clk *periph_clocks[] __initdata = {
&udphs_clk,
&mmc1_clk,
// irq0
- &ohci_clk,
- &tcb1_clk,
+};
+
+static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for ohci */
+ CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
+ CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tcb0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
};
/*
@@ -256,6 +262,11 @@ static void __init at91sam9g45_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11())
clk_register(&vdec_clk);
@@ -263,6 +274,18 @@ static void __init at91sam9g45_register_clocks(void)
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9g45_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -306,11 +329,14 @@ static void at91sam9g45_poweroff(void)
* AT91SAM9G45 processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9g45_initialize(unsigned long main_clock)
+void __init at91sam9g45_map_io(void)
{
/* Map peripherals */
iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc));
+}
+void __init at91sam9g45_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9g45_reset;
pm_power_off = at91sam9g45_poweroff;
at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
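The ohci_clk and tcb1_clk objects removed above were never real clocks (their pmc_mask is 0); they existed only because at91_clock_associate() needed a distinct struct clk per consumer name. With clkdev, several lookup entries may hand out the same physical clock, as the "ohci_clk"/"ehci_clk" entries and the two atmel_tcb entries above do with uhphs_clk and tcb0_clk. Open-coded, and assuming the usual struct clk_lookup fields, the OHCI entry amounts to the following (uhphs_clk being the static clk defined earlier in at91sam9g45.c):

/* A lookup with a NULL .dev_id matches any device that asks for this
 * connection id, so any "ohci_clk" request now gets the real USB host
 * peripheral clock directly — no placeholder clk object required.
 */
static struct clk_lookup ohci_clk_lookup = {
	.dev_id	= NULL,			/* match any device */
	.con_id	= "ohci_clk",		/* name passed to clk_get() */
	.clk	= &uhphs_clk,		/* the real UHP HS clock */
};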
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e9f8a4..0567486 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -180,7 +180,6 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data)
}
usbh_ehci_data = *data;
- at91_clock_associate("uhphs_clk", &at91_usbh_ehci_device.dev, "ehci_clk");
platform_device_register(&at91_usbh_ehci_device);
}
#else
@@ -266,10 +265,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -478,7 +473,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
}
mmc0_data = *data;
- at91_clock_associate("mci0_clk", &at91sam9g45_mmc0_device.dev, "mci_clk");
platform_device_register(&at91sam9g45_mmc0_device);
} else { /* MCI1 */
@@ -504,7 +498,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
}
mmc1_data = *data;
- at91_clock_associate("mci1_clk", &at91sam9g45_mmc1_device.dev, "mci_clk");
platform_device_register(&at91sam9g45_mmc1_device);
}
@@ -801,7 +794,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */
at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */
- at91_clock_associate("spi0_clk", &at91sam9g45_spi0_device.dev, "spi_clk");
platform_device_register(&at91sam9g45_spi0_device);
}
if (enable_spi1) {
@@ -809,7 +801,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */
at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */
- at91_clock_associate("spi1_clk", &at91sam9g45_spi1_device.dev, "spi_clk");
platform_device_register(&at91sam9g45_spi1_device);
}
}
@@ -999,10 +990,7 @@ static struct platform_device at91sam9g45_tcb1_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has one clock and irq for all six TC channels */
- at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb0_device);
- at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb1_device);
}
#else
@@ -1286,12 +1274,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9G45_ID_SSC0:
pdev = &at91sam9g45_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9G45_ID_SSC1:
pdev = &at91sam9g45_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1527,37 +1513,34 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9g45_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US0:
pdev = &at91sam9g45_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US1:
pdev = &at91sam9g45_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US2:
pdev = &at91sam9g45_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9G45_ID_US3:
pdev = &at91sam9g45_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1565,8 +1548,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9g45_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 6a9d24e..1a40f16 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -190,6 +190,24 @@ static struct clk *periph_clocks[] __initdata = {
// irq0
};
+static struct clk_lookup periph_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+};
+
+static struct clk_lookup usart_clocks_lookups[] = {
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
+ CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
+};
+
/*
* The two programmable clocks.
* You must configure pin multiplexing to bring these signals out.
@@ -214,10 +232,27 @@ static void __init at91sam9rl_register_clocks(void)
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
+ clkdev_add_table(periph_clocks_lookups,
+ ARRAY_SIZE(periph_clocks_lookups));
+ clkdev_add_table(usart_clocks_lookups,
+ ARRAY_SIZE(usart_clocks_lookups));
+
clk_register(&pck0);
clk_register(&pck1);
}
+static struct clk_lookup console_clock_lookup;
+
+void __init at91sam9rl_set_console_clock(int id)
+{
+ if (id >= ARRAY_SIZE(usart_clocks_lookups))
+ return;
+
+ console_clock_lookup.con_id = "usart";
+ console_clock_lookup.clk = usart_clocks_lookups[id].clk;
+ clkdev_add(&console_clock_lookup);
+}
+
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
@@ -252,7 +287,7 @@ static void at91sam9rl_poweroff(void)
* AT91SAM9RL processor initialization
* -------------------------------------------------------------------- */
-void __init at91sam9rl_initialize(unsigned long main_clock)
+void __init at91sam9rl_map_io(void)
{
unsigned long cidr, sram_size;
@@ -275,7 +310,10 @@ void __init at91sam9rl_initialize(unsigned long main_clock)
/* Map SRAM */
iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc));
+}
+void __init at91sam9rl_initialize(unsigned long main_clock)
+{
at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9rl_poweroff;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index c49262b..c296045f 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -155,10 +155,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
/* Pullup pin is handled internally by USB device peripheral */
- /* Clocks */
- at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
- at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
-
platform_device_register(&at91_usba_udc_device);
}
#else
@@ -605,10 +601,6 @@ static struct platform_device at91sam9rl_tcb_device = {
static void __init at91_add_device_tc(void)
{
- /* this chip has a separate clock and irq for each TC channel */
- at91_clock_associate("tc0_clk", &at91sam9rl_tcb_device.dev, "t0_clk");
- at91_clock_associate("tc1_clk", &at91sam9rl_tcb_device.dev, "t1_clk");
- at91_clock_associate("tc2_clk", &at91sam9rl_tcb_device.dev, "t2_clk");
platform_device_register(&at91sam9rl_tcb_device);
}
#else
@@ -892,12 +884,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
case AT91SAM9RL_ID_SSC0:
pdev = &at91sam9rl_ssc0_device;
configure_ssc0_pins(pins);
- at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
break;
case AT91SAM9RL_ID_SSC1:
pdev = &at91sam9rl_ssc1_device;
configure_ssc1_pins(pins);
- at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
break;
default:
return;
@@ -1141,37 +1131,34 @@ struct platform_device *atmel_default_console_device; /* the serial console device */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9rl_dbgu_device;
configure_dbgu_pins();
- at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US0:
pdev = &at91sam9rl_uart0_device;
configure_usart0_pins(pins);
- at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US1:
pdev = &at91sam9rl_uart1_device;
configure_usart1_pins(pins);
- at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US2:
pdev = &at91sam9rl_uart2_device;
configure_usart2_pins(pins);
- at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9RL_ID_US3:
pdev = &at91sam9rl_uart3_device;
configure_usart3_pins(pins);
- at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
default:
return;
}
- pdev->id = portnr; /* update to mapped ID */
+ pdata = pdev->dev.platform_data;
+ pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
@@ -1179,8 +1166,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
void __init at91_set_serial_console(unsigned portnr)
{
- if (portnr < ATMEL_MAX_UART)
+ if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
+ at91sam9rl_set_console_clock(portnr);
+ }
}
void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index ad3ec85..56ba3bd 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -37,11 +37,6 @@ unsigned long clk_get_rate(struct clk *clk)
return AT91X40_MASTER_CLOCK;
}
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return NULL;
-}
-
void __init at91x40_initialize(unsigned long main_clock)
{
at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1)
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 8a3fc84..ab1d463 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -35,14 +35,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init onearm_map_io(void)
+static void __init onearm_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -92,9 +96,9 @@ static void __init onearm_board_init(void)
MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = onearm_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = onearm_init_early,
.init_irq = onearm_init_irq,
.init_machine = onearm_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index cba7f77..a4924de 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init afeb9260_map_io(void)
+static void __init afeb9260_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -218,9 +218,9 @@ static void __init afeb9260_board_init(void)
MACHINE_START(AFEB9260, "Custom afeb9260 board")
/* Maintainer: Sergey Lapin <slapin@ossfans.org> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = afeb9260_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = afeb9260_init_early,
.init_irq = afeb9260_init_irq,
.init_machine = afeb9260_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-at572d940hf_ek.c b/arch/arm/mach-at91/board-at572d940hf_ek.c
deleted file mode 100644
index 3929f1c..0000000
--- a/arch/arm/mach-at91/board-at572d940hf_ek.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * linux/arch/arm/mach-at91/board-at572d940hf_ek.c
- *
- * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/ds1305.h>
-#include <linux/irq.h>
-#include <linux/mtd/physmap.h>
-
-#include <mach/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/irq.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <mach/board.h>
-#include <mach/gpio.h>
-#include <mach/at91sam9_smc.h>
-
-#include "sam9_smc.h"
-#include "generic.h"
-
-
-static void __init eb_map_io(void)
-{
- /* Initialize processor: 12.500 MHz crystal */
- at572d940hf_initialize(12000000);
-
- /* DBGU on ttyS0. (Rx & Tx only) */
- at91_register_uart(0, 0, 0);
-
- /* USART0 on ttyS1. (Rx & Tx only) */
- at91_register_uart(AT572D940HF_ID_US0, 1, 0);
-
- /* USART1 on ttyS2. (Rx & Tx only) */
- at91_register_uart(AT572D940HF_ID_US1, 2, 0);
-
- /* USART2 on ttyS3. (Tx & Rx only */
- at91_register_uart(AT572D940HF_ID_US2, 3, 0);
-
- /* set serial console to ttyS0 (ie, DBGU) */
- at91_set_serial_console(0);
-}
-
-static void __init eb_init_irq(void)
-{
- at572d940hf_init_interrupts(NULL);
-}
-
-
-/*
- * USB Host Port
- */
-static struct at91_usbh_data __initdata eb_usbh_data = {
- .ports = 2,
-};
-
-
-/*
- * USB Device Port
- */
-static struct at91_udc_data __initdata eb_udc_data = {
- .vbus_pin = 0, /* no VBUS detection,UDC always on */
- .pullup_pin = 0, /* pull-up driven by UDC */
-};
-
-
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata eb_mmc_data = {
- .wire4 = 1,
-/* .det_pin = ... not connected */
-/* .wp_pin = ... not connected */
-/* .vcc_pin = ... not connected */
-};
-
-
-/*
- * MACB Ethernet device
- */
-static struct at91_eth_data __initdata eb_eth_data = {
- .phy_irq_pin = AT91_PIN_PB25,
- .is_rmii = 1,
-};
-
-/*
- * NOR flash
- */
-
-static struct mtd_partition eb_nor_partitions[] = {
- {
- .name = "Raw Environment",
- .offset = 0,
- .size = SZ_4M,
- .mask_flags = 0,
- },
- {
- .name = "OS FS",
- .offset = MTDPART_OFS_APPEND,
- .size = 3 * SZ_1M,
- .mask_flags = 0,
- },
- {
- .name = "APP FS",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = 0,
- },
-};
-
-static void nor_flash_set_vpp(struct map_info* mi, int i) {
-};
-
-static struct physmap_flash_data nor_flash_data = {
- .width = 4,
- .parts = eb_nor_partitions,
- .nr_parts = ARRAY_SIZE(eb_nor_partitions),
- .set_vpp = nor_flash_set_vpp,
-};
-
-static struct resource nor_flash_resources[] = {
- {
- .start = AT91_CHIPSELECT_0,
- .end = AT91_CHIPSELECT_0 + SZ_16M - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device nor_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &nor_flash_data,
- },
- .resource = nor_flash_resources,
- .num_resources = ARRAY_SIZE(nor_flash_resources),
-};
-
-static struct sam9_smc_config __initdata eb_nor_smc_config = {
- .ncs_read_setup = 1,
- .nrd_setup = 1,
- .ncs_write_setup = 1,
- .nwe_setup = 1,
-
- .ncs_read_pulse = 7,
- .nrd_pulse = 7,
- .ncs_write_pulse = 7,
- .nwe_pulse = 7,
-
- .read_cycle = 9,
- .write_cycle = 9,
-
- .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_32,
- .tdf_cycles = 1,
-};
-
-static void __init eb_add_device_nor(void)
-{
- /* configure chip-select 0 (NOR) */
- sam9_smc_configure(0, &eb_nor_smc_config);
- platform_device_register(&nor_flash);
-}
-
-/*
- * NAND flash
- */
-static struct mtd_partition __initdata eb_nand_partition[] = {
- {
- .name = "Partition 1",
- .offset = 0,
- .size = SZ_16M,
- },
- {
- .name = "Partition 2",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
- }
-};
-
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
- *num_partitions = ARRAY_SIZE(eb_nand_partition);
- return eb_nand_partition;
-}
-
-static struct atmel_nand_data __initdata eb_nand_data = {
- .ale = 22,
- .cle = 21,
-/* .det_pin = ... not connected */
-/* .rdy_pin = AT91_PIN_PC16, */
- .enable_pin = AT91_PIN_PA15,
- .partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
-};
-
-static struct sam9_smc_config __initdata eb_nand_smc_config = {
- .ncs_read_setup = 0,
- .nrd_setup = 0,
- .ncs_write_setup = 1,
- .nwe_setup = 1,
-
- .ncs_read_pulse = 3,
- .nrd_pulse = 3,
- .ncs_write_pulse = 3,
- .nwe_pulse = 3,
-
- .read_cycle = 5,
- .write_cycle = 5,
-
- .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
- .tdf_cycles = 12,
-};
-
-static void __init eb_add_device_nand(void)
-{
- /* setup bus-width (8 or 16) */
- if (eb_nand_data.bus_width_16)
- eb_nand_smc_config.mode |= AT91_SMC_DBW_16;
- else
- eb_nand_smc_config.mode |= AT91_SMC_DBW_8;
-
- /* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &eb_nand_smc_config);
-
- at91_add_device_nand(&eb_nand_data);
-}
-
-
-/*
- * SPI devices
- */
-static struct resource rtc_resources[] = {
- [0] = {
- .start = AT572D940HF_ID_IRQ1,
- .end = AT572D940HF_ID_IRQ1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct ds1305_platform_data ds1306_data = {
- .is_ds1306 = true,
- .en_1hz = false,
-};
-
-static struct spi_board_info eb_spi_devices[] = {
- { /* RTC Dallas DS1306 */
- .modalias = "rtc-ds1305",
- .chip_select = 3,
- .mode = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA,
- .max_speed_hz = 500000,
- .bus_num = 0,
- .irq = AT572D940HF_ID_IRQ1,
- .platform_data = (void *) &ds1306_data,
- },
-#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
- { /* Dataflash card */
- .modalias = "mtd_dataflash",
- .chip_select = 0,
- .max_speed_hz = 15 * 1000 * 1000,
- .bus_num = 0,
- },
-#endif
-};
-
-static void __init eb_board_init(void)
-{
- /* Serial */
- at91_add_device_serial();
- /* USB Host */
- at91_add_device_usbh(&eb_usbh_data);
- /* USB Device */
- at91_add_device_udc(&eb_udc_data);
- /* I2C */
- at91_add_device_i2c(NULL, 0);
- /* NOR */
- eb_add_device_nor();
- /* NAND */
- eb_add_device_nand();
- /* SPI */
- at91_add_device_spi(eb_spi_devices, ARRAY_SIZE(eb_spi_devices));
- /* MMC */
- at91_add_device_mmc(0, &eb_mmc_data);
- /* Ethernet */
- at91_add_device_eth(&eb_eth_data);
- /* mAgic */
- at91_add_device_mAgic();
-}
-
-MACHINE_START(AT572D940HFEB, "Atmel AT91D940HF-EB")
- /* Maintainer: Atmel <costa.antonior@gmail.com> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
- .timer = &at91sam926x_timer,
- .map_io = eb_map_io,
- .init_irq = eb_init_irq,
- .init_machine = eb_board_init,
-MACHINE_END
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index b54e3e6..148fccb 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -45,7 +45,7 @@
#include "generic.h"
-static void __init cam60_map_io(void)
+static void __init cam60_init_early(void)
{
/* Initialize processor: 10 MHz crystal */
at91sam9260_initialize(10000000);
@@ -198,9 +198,9 @@ static void __init cam60_board_init(void)
MACHINE_START(CAM60, "KwikByte CAM60")
/* Maintainer: KwikByte */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cam60_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = cam60_init_early,
.init_irq = cam60_init_irq,
.init_machine = cam60_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index e727444..1904fdf 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -44,12 +44,13 @@
#include <mach/gpio.h>
#include <mach/at91cap9_matrix.h>
#include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init cap9adk_map_io(void)
+static void __init cap9adk_init_early(void)
{
/* Initialize processor: 12 MHz crystal */
at91cap9_initialize(12000000);
@@ -187,11 +188,6 @@ static struct atmel_nand_data __initdata cap9adk_nand_data = {
// .rdy_pin = ... not connected
.enable_pin = AT91_PIN_PD15,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
@@ -219,6 +215,7 @@ static void __init cap9adk_add_device_nand(void)
csa = at91_sys_read(AT91_MATRIX_EBICSA);
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
+ cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (cap9adk_nand_data.bus_width_16)
cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -399,9 +396,9 @@ static void __init cap9adk_board_init(void)
MACHINE_START(AT91CAP9ADK, "Atmel AT91CAP9A-DK")
/* Maintainer: Stelian Pop <stelian.pop@leadtechdesign.com> */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cap9adk_map_io,
+ .map_io = at91cap9_map_io,
+ .init_early = cap9adk_init_early,
.init_irq = cap9adk_init_irq,
.init_machine = cap9adk_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 295e1e7..f36b186 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init carmeva_map_io(void)
+static void __init carmeva_init_early(void)
{
/* Initialize processor: 20.000 MHz crystal */
- at91rm9200_initialize(20000000, AT91RM9200_BGA);
+ at91rm9200_initialize(20000000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -162,9 +162,9 @@ static void __init carmeva_board_init(void)
MACHINE_START(CARMEVA, "Carmeva")
/* Maintainer: Conitec Datasystems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = carmeva_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = carmeva_init_early,
.init_irq = carmeva_init_irq,
.init_machine = carmeva_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index 3838594..9805110 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -47,7 +47,7 @@
#include "sam9_smc.h"
#include "generic.h"
-static void __init cpu9krea_map_io(void)
+static void __init cpu9krea_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -375,9 +375,9 @@ MACHINE_START(CPUAT9260, "Eukrea CPU9260")
MACHINE_START(CPUAT9G20, "Eukrea CPU9G20")
#endif
/* Maintainer: Eric Benard - EUKREA Electromatique */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = cpu9krea_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = cpu9krea_init_early,
.init_irq = cpu9krea_init_irq,
.init_machine = cpu9krea_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index 2f4dd8c..6daabe3 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -38,6 +38,7 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
#include "generic.h"
@@ -50,10 +51,13 @@ static struct gpio_led cpuat91_leds[] = {
},
};
-static void __init cpuat91_map_io(void)
+static void __init cpuat91_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -175,9 +179,9 @@ static void __init cpuat91_board_init(void)
MACHINE_START(CPUAT91, "Eukrea")
/* Maintainer: Eric Benard - EUKREA Electromatique */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = cpuat91_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = cpuat91_init_early,
.init_irq = cpuat91_init_irq,
.init_machine = cpuat91_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index 464839d..d98bcec 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -43,10 +43,10 @@
#include "generic.h"
-static void __init csb337_map_io(void)
+static void __init csb337_init_early(void)
{
/* Initialize processor: 3.6864 MHz crystal */
- at91rm9200_initialize(3686400, AT91RM9200_BGA);
+ at91rm9200_initialize(3686400);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -257,9 +257,9 @@ static void __init csb337_board_init(void)
MACHINE_START(CSB337, "Cogent CSB337")
/* Maintainer: Bill Gatliff */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = csb337_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = csb337_init_early,
.init_irq = csb337_init_irq,
.init_machine = csb337_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 431688c..019aab4 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init csb637_map_io(void)
+static void __init csb637_init_early(void)
{
/* Initialize processor: 3.6864 MHz crystal */
- at91rm9200_initialize(3686400, AT91RM9200_BGA);
+ at91rm9200_initialize(3686400);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -138,9 +138,9 @@ static void __init csb637_board_init(void)
MACHINE_START(CSB637, "Cogent CSB637")
/* Maintainer: Bill Gatliff */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = csb637_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = csb637_init_early,
.init_irq = csb637_init_irq,
.init_machine = csb637_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb01.c b/arch/arm/mach-at91/board-eb01.c
index d8df59a..d2023f2 100644
--- a/arch/arm/mach-at91/board-eb01.c
+++ b/arch/arm/mach-at91/board-eb01.c
@@ -35,7 +35,7 @@ static void __init at91eb01_init_irq(void)
at91x40_init_interrupts(NULL);
}
-static void __init at91eb01_map_io(void)
+static void __init at91eb01_init_early(void)
{
at91x40_initialize(40000000);
}
@@ -43,7 +43,7 @@ static void __init at91eb01_map_io(void)
MACHINE_START(AT91EB01, "Atmel AT91 EB01")
/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
.timer = &at91x40_timer,
+ .init_early = at91eb01_init_early,
.init_irq = at91eb01_init_irq,
- .map_io = at91eb01_map_io,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index 6cf6566..e948453 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -40,10 +40,10 @@
#include "generic.h"
-static void __init eb9200_map_io(void)
+static void __init eb9200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -120,9 +120,9 @@ static void __init eb9200_board_init(void)
}
MACHINE_START(ATEB9200, "Embest ATEB9200")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = eb9200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = eb9200_init_early,
.init_irq = eb9200_init_irq,
.init_machine = eb9200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index de2fd04..a6f57fa 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -38,14 +38,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init ecb_at91map_io(void)
+static void __init ecb_at91init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PC7, AT91_PIN_PC7);
@@ -168,9 +172,9 @@ static void __init ecb_at91board_init(void)
MACHINE_START(ECBAT91, "emQbit's ECB_AT91")
/* Maintainer: emQbit.com */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = ecb_at91map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = ecb_at91init_early,
.init_irq = ecb_at91init_irq,
.init_machine = ecb_at91board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index a158a0c..bfc0062 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -26,11 +26,16 @@
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
+
#include "generic.h"
-static void __init eco920_map_io(void)
+static void __init eco920_init_early(void)
{
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -86,21 +91,6 @@ static struct platform_device eco920_flash = {
.num_resources = 1,
};
-static struct resource at91_beeper_resources[] = {
- [0] = {
- .start = AT91RM9200_BASE_TC3,
- .end = AT91RM9200_BASE_TC3 + 0x39,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device at91_beeper = {
- .name = "at91_beeper",
- .id = 0,
- .resource = at91_beeper_resources,
- .num_resources = ARRAY_SIZE(at91_beeper_resources),
-};
-
static struct spi_board_info eco920_spi_devices[] = {
{ /* CAN controller */
.modalias = "tlv5638",
@@ -139,18 +129,14 @@ static void __init eco920_board_init(void)
AT91_SMC_TDF_(1) /* float time */
);
- at91_clock_associate("tc3_clk", &at91_beeper.dev, "at91_beeper");
- at91_set_B_periph(AT91_PIN_PB6, 0);
- platform_device_register(&at91_beeper);
-
at91_add_device_spi(eco920_spi_devices, ARRAY_SIZE(eco920_spi_devices));
}
MACHINE_START(ECO920, "eco920")
/* Maintainer: Sascha Hauer */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = eco920_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = eco920_init_early,
.init_irq = eco920_init_irq,
.init_machine = eco920_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index c8a62dc..466c063 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -37,7 +37,7 @@
#include "generic.h"
-static void __init flexibity_map_io(void)
+static void __init flexibity_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -154,9 +154,9 @@ static void __init flexibity_board_init(void)
MACHINE_START(FLEXIBITY, "Flexibity Connect")
/* Maintainer: Maxim Osipov */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = flexibity_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = flexibity_init_early,
.init_irq = flexibity_init_irq,
.init_machine = flexibity_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index dfc7dfe..e2d1dc9 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -57,7 +57,7 @@
*/
-static void __init foxg20_map_io(void)
+static void __init foxg20_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -266,9 +266,9 @@ static void __init foxg20_board_init(void)
MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
/* Maintainer: Sergio Tanzilli */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = foxg20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = foxg20_init_early,
.init_irq = foxg20_init_irq,
.init_machine = foxg20_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index bc28136..1d4f36b 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -38,9 +38,9 @@
#include "sam9_smc.h"
#include "generic.h"
-static void __init gsia18s_map_io(void)
+static void __init gsia18s_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/*
* USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
@@ -576,9 +576,9 @@ static void __init gsia18s_board_init(void)
}
MACHINE_START(GSIA18S, "GS_IA18_S")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = gsia18s_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = gsia18s_init_early,
.init_irq = init_irq,
.init_machine = gsia18s_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index d2e1f4e..9b003ff 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -35,14 +35,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init kafa_map_io(void)
+static void __init kafa_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Set up the LEDs */
at91_init_leds(AT91_PIN_PB4, AT91_PIN_PB4);
@@ -94,9 +98,9 @@ static void __init kafa_board_init(void)
MACHINE_START(KAFA, "Sperry-Sun KAFA")
/* Maintainer: Sergei Sharonov */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = kafa_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = kafa_init_early,
.init_irq = kafa_init_irq,
.init_machine = kafa_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index a13d206..a813a74 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -36,16 +36,19 @@
#include <mach/board.h>
#include <mach/gpio.h>
-
+#include <mach/cpu.h>
#include <mach/at91rm9200_mc.h>
#include "generic.h"
-static void __init kb9202_map_io(void)
+static void __init kb9202_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 10 MHz crystal */
- at91rm9200_initialize(10000000, AT91RM9200_PQFP);
+ at91rm9200_initialize(10000000);
/* Set up the LEDs */
at91_init_leds(AT91_PIN_PC19, AT91_PIN_PC18);
@@ -136,9 +139,9 @@ static void __init kb9202_board_init(void)
MACHINE_START(KB9200, "KB920x")
/* Maintainer: KwikByte, Inc. */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = kb9202_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = kb9202_init_early,
.init_irq = kb9202_init_irq,
.init_machine = kb9202_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index fe5f1d4..961e805 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -51,7 +51,7 @@
#include "generic.h"
-static void __init neocore926_map_io(void)
+static void __init neocore926_init_early(void)
{
/* Initialize processor: 20 MHz crystal */
at91sam9263_initialize(20000000);
@@ -387,9 +387,9 @@ static void __init neocore926_board_init(void)
MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926")
/* Maintainer: ADENEO */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = neocore926_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = neocore926_init_early,
.init_irq = neocore926_init_irq,
.init_machine = neocore926_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index feb6578..21a21af 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -37,9 +37,9 @@
#include "generic.h"
-static void __init pcontrol_g20_map_io(void)
+static void __init pcontrol_g20_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
@@ -222,9 +222,9 @@ static void __init pcontrol_g20_board_init(void)
MACHINE_START(PCONTROL_G20, "PControl G20")
/* Maintainer: pgsellmann@portner-elektronik.at */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = pcontrol_g20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = pcontrol_g20_init_early,
.init_irq = init_irq,
.init_machine = pcontrol_g20_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 55dad3a..756cc2a 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -43,10 +43,10 @@
#include "generic.h"
-static void __init picotux200_map_io(void)
+static void __init picotux200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -123,9 +123,9 @@ static void __init picotux200_board_init(void)
MACHINE_START(PICOTUX2XX, "picotux 200")
/* Maintainer: Kleinhenz Elektronik GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = picotux200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = picotux200_init_early,
.init_irq = picotux200_init_irq,
.init_machine = picotux200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 69d15a8..d1a6001 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
@@ -268,9 +268,9 @@ static void __init ek_board_init(void)
MACHINE_START(QIL_A9260, "CALAO QIL_A9260")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index 4c1047c..aef9627 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -45,10 +45,10 @@
#include "generic.h"
-static void __init dk_map_io(void)
+static void __init dk_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
@@ -227,9 +227,9 @@ static void __init dk_board_init(void)
MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
/* Maintainer: SAN People/Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = dk_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = dk_init_early,
.init_irq = dk_init_irq,
.init_machine = dk_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 9df1be8..015a021 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -45,10 +45,10 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_BGA);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PB1, AT91_PIN_PB2);
@@ -193,9 +193,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
/* Maintainer: SAN People/Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = ek_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 25a26be..aaf1bf0 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -44,7 +44,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -212,9 +212,9 @@ static void __init ek_board_init(void)
MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
/* Maintainer: Olimex */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index de1816e..d600dc1 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -44,12 +44,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -191,11 +192,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -218,6 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -356,9 +353,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 14acc90..f897f84 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -48,12 +48,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9261_initialize(18432000);
@@ -197,11 +198,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC15,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -224,6 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -623,9 +620,9 @@ MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK")
MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
#endif
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9261_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index bfe490d..605b26f 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -47,12 +47,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 16.367 MHz crystal */
at91sam9263_initialize(16367660);
@@ -198,11 +199,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PA22,
.enable_pin = AT91_PIN_PD15,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -225,6 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -454,9 +451,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index ca8198b..7624cf0 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -43,6 +43,7 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
@@ -60,7 +61,7 @@ static int inline ek_have_2mmc(void)
}
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -175,11 +176,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -202,6 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -406,18 +403,18 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 6c999db..063c95d 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -41,12 +41,13 @@
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
#include "sam9_smc.h"
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9g45_initialize(12000000);
@@ -155,11 +156,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.rdy_pin = AT91_PIN_PC8,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
- .bus_width_16 = 1,
-#else
- .bus_width_16 = 0,
-#endif
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -182,6 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
+ ek_nand_data.bus_width_16 = !board_have_nand_8bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -424,9 +421,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9g45_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 3bf3408..effb399 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -38,7 +38,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9rl_initialize(12000000);
@@ -329,9 +329,9 @@ static void __init ek_board_init(void)
MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
/* Maintainer: Atmel */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9rl_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 17f7d9b..3eb0a11 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -40,7 +40,7 @@
#define SNAPPER9260_IO_EXP_GPIO(x) (NR_BUILTIN_GPIO + (x))
-static void __init snapper9260_map_io(void)
+static void __init snapper9260_init_early(void)
{
at91sam9260_initialize(18432000);
@@ -178,9 +178,9 @@ static void __init snapper9260_board_init(void)
}
MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module")
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = snapper9260_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = snapper9260_init_early,
.init_irq = snapper9260_init_irq,
.init_machine = snapper9260_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index f8902b11..5e5c856 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -32,7 +32,7 @@
#include "generic.h"
-void __init stamp9g20_map_io(void)
+void __init stamp9g20_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@@ -44,9 +44,9 @@ void __init stamp9g20_map_io(void)
at91_set_serial_console(0);
}
-static void __init stamp9g20evb_map_io(void)
+static void __init stamp9g20evb_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -54,9 +54,9 @@ static void __init stamp9g20evb_map_io(void)
| ATMEL_UART_DCD | ATMEL_UART_RI);
}
-static void __init portuxg20_map_io(void)
+static void __init portuxg20_init_early(void)
{
- stamp9g20_map_io();
+ stamp9g20_init_early();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -298,18 +298,18 @@ static void __init stamp9g20evb_board_init(void)
MACHINE_START(PORTUXG20, "taskit PortuxG20")
/* Maintainer: taskit GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = portuxg20_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = portuxg20_init_early,
.init_irq = init_irq,
.init_machine = portuxg20_board_init,
MACHINE_END
MACHINE_START(STAMP9G20, "taskit Stamp9G20")
/* Maintainer: taskit GmbH */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = stamp9g20evb_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = stamp9g20evb_init_early,
.init_irq = init_irq,
.init_machine = stamp9g20evb_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9260.c b/arch/arm/mach-at91/board-usb-a9260.c
index 07784ba..0e784e6 100644
--- a/arch/arm/mach-at91/board-usb-a9260.c
+++ b/arch/arm/mach-at91/board-usb-a9260.c
@@ -48,7 +48,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
@@ -228,9 +228,9 @@ static void __init ek_board_init(void)
MACHINE_START(USB_A9260, "CALAO USB_A9260")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9260_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9263.c b/arch/arm/mach-at91/board-usb-a9263.c
index b6145089..cf626dd 100644
--- a/arch/arm/mach-at91/board-usb-a9263.c
+++ b/arch/arm/mach-at91/board-usb-a9263.c
@@ -47,7 +47,7 @@
#include "generic.h"
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
{
/* Initialize processor: 12.00 MHz crystal */
at91sam9263_initialize(12000000);
@@ -244,9 +244,9 @@ static void __init ek_board_init(void)
MACHINE_START(USB_A9263, "CALAO USB_A9263")
/* Maintainer: calao-systems */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
- .map_io = ek_map_io,
+ .map_io = at91sam9263_map_io,
+ .init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index e0f0080..c208cc3 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -45,14 +45,18 @@
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
#include "generic.h"
-static void __init yl9200_map_io(void)
+static void __init yl9200_init_early(void)
{
+ /* Set cpu type: PQFP */
+ at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
/* Initialize processor: 18.432 MHz crystal */
- at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+ at91rm9200_initialize(18432000);
/* Setup the LEDs D2=PB17 (timer), D3=PB16 (cpu) */
at91_init_leds(AT91_PIN_PB16, AT91_PIN_PB17);
@@ -594,9 +598,9 @@ static void __init yl9200_board_init(void)
MACHINE_START(YL9200, "uCdragon YL-9200")
/* Maintainer: S.Birtles */
- .boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91rm9200_timer,
- .map_io = yl9200_map_io,
+ .map_io = at91rm9200_map_io,
+ .init_early = yl9200_init_early,
.init_irq = yl9200_init_irq,
.init_machine = yl9200_board_init,
MACHINE_END
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 9113da6..61873f3 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -163,7 +163,7 @@ static struct clk udpck = {
.parent = &pllb,
.mode = pmc_sys_mode,
};
-static struct clk utmi_clk = {
+struct clk utmi_clk = {
.name = "utmi_clk",
.parent = &main_clk,
.pmc_mask = AT91_PMC_UPLLEN, /* in CKGR_UCKR */
@@ -182,7 +182,7 @@ static struct clk uhpck = {
* memory, interfaces to on-chip peripherals, the AIC, and sometimes more
* (e.g baud rate generation). It's sourced from one of the primary clocks.
*/
-static struct clk mck = {
+struct clk mck = {
.name = "mck",
.pmc_mask = AT91_PMC_MCKRDY, /* in PMC_SR */
};
@@ -215,43 +215,6 @@ static struct clk __init *at91_css_to_clk(unsigned long css)
return NULL;
}
-/*
- * Associate a particular clock with a function (eg, "uart") and device.
- * The drivers can then request the same 'function' with several different
- * devices and not care about which clock name to use.
- */
-void __init at91_clock_associate(const char *id, struct device *dev, const char *func)
-{
- struct clk *clk = clk_get(NULL, id);
-
- if (!dev || !clk || !IS_ERR(clk_get(dev, func)))
- return;
-
- clk->function = func;
- clk->dev = dev;
-}
-
-/* clocks cannot be de-registered no refcounting necessary */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *clk;
-
- list_for_each_entry(clk, &clocks, node) {
- if (strcmp(id, clk->name) == 0)
- return clk;
- if (clk->function && (dev == clk->dev) && strcmp(id, clk->function) == 0)
- return clk;
- }
-
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
static void __clk_enable(struct clk *clk)
{
if (clk->parent)
@@ -498,32 +461,38 @@ postcore_initcall(at91_clk_debugfs_init);
/*------------------------------------------------------------------------*/
/* Register a new clock */
+static void __init at91_clk_add(struct clk *clk)
+{
+ list_add_tail(&clk->node, &clocks);
+
+ clk->cl.con_id = clk->name;
+ clk->cl.clk = clk;
+ clkdev_add(&clk->cl);
+}
+
int __init clk_register(struct clk *clk)
{
if (clk_is_peripheral(clk)) {
if (!clk->parent)
clk->parent = &mck;
clk->mode = pmc_periph_mode;
- list_add_tail(&clk->node, &clocks);
}
else if (clk_is_sys(clk)) {
clk->parent = &mck;
clk->mode = pmc_sys_mode;
-
- list_add_tail(&clk->node, &clocks);
}
#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
else if (clk_is_programmable(clk)) {
clk->mode = pmc_sys_mode;
init_programmable_clock(clk);
- list_add_tail(&clk->node, &clocks);
}
#endif
+ at91_clk_add(clk);
+
return 0;
}
-
/*------------------------------------------------------------------------*/
static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg)
@@ -630,7 +599,7 @@ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock)
at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP);
} else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() ||
cpu_is_at91sam9263() || cpu_is_at91sam9g20() ||
- cpu_is_at91sam9g10() || cpu_is_at572d940hf()) {
+ cpu_is_at91sam9g10()) {
uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
udpck.pmc_mask = AT91SAM926x_PMC_UDP;
} else if (cpu_is_at91cap9()) {
@@ -754,19 +723,19 @@ int __init at91_clock_init(unsigned long main_clock)
/* Register the PMC's standard clocks */
for (i = 0; i < ARRAY_SIZE(standard_pmc_clocks); i++)
- list_add_tail(&standard_pmc_clocks[i]->node, &clocks);
+ at91_clk_add(standard_pmc_clocks[i]);
if (cpu_has_pllb())
- list_add_tail(&pllb.node, &clocks);
+ at91_clk_add(&pllb);
if (cpu_has_uhp())
- list_add_tail(&uhpck.node, &clocks);
+ at91_clk_add(&uhpck);
if (cpu_has_udpfs())
- list_add_tail(&udpck.node, &clocks);
+ at91_clk_add(&udpck);
if (cpu_has_utmi())
- list_add_tail(&utmi_clk.node, &clocks);
+ at91_clk_add(&utmi_clk);
/* MCK and CPU clock are "always on" */
clk_enable(&mck);
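With at91_clock_associate() and the private clk_get()/clk_put() gone, lookups now go through the common clkdev layer: at91_clk_add() registers every clock with its own name as the con_id. A hedged usage sketch from the consumer side (the device and clock names are placeholders, not taken from this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* "mck" resolves through the con_id registered by at91_clk_add() */
	clk = clk_get(&pdev->dev, "mck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... program the peripheral ... */
	return 0;
}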
diff --git a/arch/arm/mach-at91/clock.h b/arch/arm/mach-at91/clock.h
index 6cf4b78..c2e63e4 100644
--- a/arch/arm/mach-at91/clock.h
+++ b/arch/arm/mach-at91/clock.h
@@ -6,6 +6,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/clkdev.h>
+
#define CLK_TYPE_PRIMARY 0x1
#define CLK_TYPE_PLL 0x2
#define CLK_TYPE_PROGRAMMABLE 0x4
@@ -16,8 +18,7 @@
struct clk {
struct list_head node;
const char *name; /* unique clock name */
- const char *function; /* function of the clock */
- struct device *dev; /* device associated with function */
+ struct clk_lookup cl;
unsigned long rate_hz;
struct clk *parent;
u32 pmc_mask;
@@ -29,3 +30,18 @@ struct clk {
extern int __init clk_register(struct clk *clk);
+extern struct clk mck;
+extern struct clk utmi_clk;
+
+#define CLKDEV_CON_ID(_id, _clk) \
+ { \
+ .con_id = _id, \
+ .clk = _clk, \
+ }
+
+#define CLKDEV_CON_DEV_ID(_con_id, _dev_id, _clk) \
+ { \
+ .con_id = _con_id, \
+ .dev_id = _dev_id, \
+ .clk = _clk, \
+ }
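The two initializer macros let the SoC files declare additional struct clk_lookup entries statically, for drivers that match on a dev_id/con_id pair rather than on the raw clock name. A sketch of the intended use, with hypothetical table contents:

static struct clk_lookup periph_clocks_lookups[] = {
	CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
	CLKDEV_CON_ID("pioA", &pioA_clk),
};

/* registered once from the SoC setup code */
clkdev_add_table(periph_clocks_lookups, ARRAY_SIZE(periph_clocks_lookups));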
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 0c66deb..8ff3418 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -8,8 +8,21 @@
* published by the Free Software Foundation.
*/
+#include <linux/clkdev.h>
+
+ /* Map io */
+extern void __init at91rm9200_map_io(void);
+extern void __init at91sam9260_map_io(void);
+extern void __init at91sam9261_map_io(void);
+extern void __init at91sam9263_map_io(void);
+extern void __init at91sam9rl_map_io(void);
+extern void __init at91sam9g45_map_io(void);
+extern void __init at91x40_map_io(void);
+extern void __init at91cap9_map_io(void);
+
/* Processors */
-extern void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks);
+extern void __init at91rm9200_set_type(int type);
+extern void __init at91rm9200_initialize(unsigned long main_clock);
extern void __init at91sam9260_initialize(unsigned long main_clock);
extern void __init at91sam9261_initialize(unsigned long main_clock);
extern void __init at91sam9263_initialize(unsigned long main_clock);
@@ -17,7 +30,6 @@ extern void __init at91sam9rl_initialize(unsigned long main_clock);
extern void __init at91sam9g45_initialize(unsigned long main_clock);
extern void __init at91x40_initialize(unsigned long main_clock);
extern void __init at91cap9_initialize(unsigned long main_clock);
-extern void __init at572d940hf_initialize(unsigned long main_clock);
/* Interrupts */
extern void __init at91rm9200_init_interrupts(unsigned int priority[]);
@@ -28,7 +40,6 @@ extern void __init at91sam9rl_init_interrupts(unsigned int priority[]);
extern void __init at91sam9g45_init_interrupts(unsigned int priority[]);
extern void __init at91x40_init_interrupts(unsigned int priority[]);
extern void __init at91cap9_init_interrupts(unsigned int priority[]);
-extern void __init at572d940hf_init_interrupts(unsigned int priority[]);
extern void __init at91_aic_init(unsigned int priority[]);
/* Timer */
@@ -39,8 +50,19 @@ extern struct sys_timer at91x40_timer;
/* Clocks */
extern int __init at91_clock_init(unsigned long main_clock);
+/*
+ * Functions to specify the clock of the default console. As we do not
+ * use the device/driver bus, the dev_name is not initialized, so we need
+ * to link the clock to the specific con_id "usart" only.
+ */
+extern void __init at91rm9200_set_console_clock(int id);
+extern void __init at91sam9260_set_console_clock(int id);
+extern void __init at91sam9261_set_console_clock(int id);
+extern void __init at91sam9263_set_console_clock(int id);
+extern void __init at91sam9rl_set_console_clock(int id);
+extern void __init at91sam9g45_set_console_clock(int id);
+extern void __init at91cap9_set_console_clock(int id);
struct device;
-extern void __init at91_clock_associate(const char *id, struct device *dev, const char *func);
/* Power Management */
extern void at91_irq_suspend(void);
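As the comment above notes, the console clock cannot be found by device name, so each SoC file grows a *_set_console_clock() helper that adds one extra clkdev entry bound to the con_id "usart". A rough sketch of such a helper, assuming the SoC file keeps a per-port lookup table (the usart_clocks_lookups name is an assumption, not a line from this patch):

static struct clk_lookup console_clock_lookup;

void __init at91sam9260_set_console_clock(int id)
{
	if (id >= ARRAY_SIZE(usart_clocks_lookups))
		return;				/* no such port */

	console_clock_lookup.con_id = "usart";
	console_clock_lookup.clk = usart_clocks_lookups[id].clk;
	clkdev_add(&console_clock_lookup);
}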
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf.h b/arch/arm/mach-at91/include/mach/at572d940hf.h
deleted file mode 100644
index be510cf..0000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/mach/at572d940hf.h
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef AT572D940HF_H
-#define AT572D940HF_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
-#define AT572D940HF_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT572D940HF_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT572D940HF_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT572D940HF_ID_EMAC 5 /* MACB ethernet controller */
-#define AT572D940HF_ID_US0 6 /* USART 0 */
-#define AT572D940HF_ID_US1 7 /* USART 1 */
-#define AT572D940HF_ID_US2 8 /* USART 2 */
-#define AT572D940HF_ID_MCI 9 /* Multimedia Card Interface */
-#define AT572D940HF_ID_UDP 10 /* USB Device Port */
-#define AT572D940HF_ID_TWI0 11 /* Two-Wire Interface 0 */
-#define AT572D940HF_ID_SPI0 12 /* Serial Peripheral Interface 0 */
-#define AT572D940HF_ID_SPI1 13 /* Serial Peripheral Interface 1 */
-#define AT572D940HF_ID_SSC0 14 /* Serial Synchronous Controller 0 */
-#define AT572D940HF_ID_SSC1 15 /* Serial Synchronous Controller 1 */
-#define AT572D940HF_ID_SSC2 16 /* Serial Synchronous Controller 2 */
-#define AT572D940HF_ID_TC0 17 /* Timer Counter 0 */
-#define AT572D940HF_ID_TC1 18 /* Timer Counter 1 */
-#define AT572D940HF_ID_TC2 19 /* Timer Counter 2 */
-#define AT572D940HF_ID_UHP 20 /* USB Host port */
-#define AT572D940HF_ID_SSC3 21 /* Serial Synchronous Controller 3 */
-#define AT572D940HF_ID_TWI1 22 /* Two-Wire Interface 1 */
-#define AT572D940HF_ID_CAN0 23 /* CAN Controller 0 */
-#define AT572D940HF_ID_CAN1 24 /* CAN Controller 1 */
-#define AT572D940HF_ID_MHALT 25 /* mAgicV HALT line */
-#define AT572D940HF_ID_MSIRQ0 26 /* mAgicV SIRQ0 line */
-#define AT572D940HF_ID_MEXC 27 /* mAgicV exception line */
-#define AT572D940HF_ID_MEDMA 28 /* mAgicV end of DMA line */
-#define AT572D940HF_ID_IRQ0 29 /* External Interrupt Source (IRQ0) */
-#define AT572D940HF_ID_IRQ1 30 /* External Interrupt Source (IRQ1) */
-#define AT572D940HF_ID_IRQ2 31 /* External Interrupt Source (IRQ2) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT572D940HF_BASE_TCB 0xfffa0000
-#define AT572D940HF_BASE_TC0 0xfffa0000
-#define AT572D940HF_BASE_TC1 0xfffa0040
-#define AT572D940HF_BASE_TC2 0xfffa0080
-#define AT572D940HF_BASE_UDP 0xfffa4000
-#define AT572D940HF_BASE_MCI 0xfffa8000
-#define AT572D940HF_BASE_TWI0 0xfffac000
-#define AT572D940HF_BASE_US0 0xfffb0000
-#define AT572D940HF_BASE_US1 0xfffb4000
-#define AT572D940HF_BASE_US2 0xfffb8000
-#define AT572D940HF_BASE_SSC0 0xfffbc000
-#define AT572D940HF_BASE_SSC1 0xfffc0000
-#define AT572D940HF_BASE_SSC2 0xfffc4000
-#define AT572D940HF_BASE_SPI0 0xfffc8000
-#define AT572D940HF_BASE_SPI1 0xfffcc000
-#define AT572D940HF_BASE_SSC3 0xfffd0000
-#define AT572D940HF_BASE_TWI1 0xfffd4000
-#define AT572D940HF_BASE_EMAC 0xfffd8000
-#define AT572D940HF_BASE_CAN0 0xfffdc000
-#define AT572D940HF_BASE_CAN1 0xfffe0000
-#define AT91_BASE_SYS 0xffffea00
-
-
-/*
- * System Peripherals (offset from AT91_BASE_SYS)
- */
-#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
-#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
-#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
-
-#define AT91_USART0 AT572D940HF_ID_US0
-#define AT91_USART1 AT572D940HF_ID_US1
-#define AT91_USART2 AT572D940HF_ID_US2
-
-
-/*
- * Internal Memory.
- */
-#define AT572D940HF_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT572D940HF_SRAM_SIZE (48 * SZ_1K) /* Internal SRAM size (48Kb) */
-
-#define AT572D940HF_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT572D940HF_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
-
-#define AT572D940HF_UHP_BASE 0x00500000 /* USB Host controller */
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h b/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
deleted file mode 100644
index b6751df..0000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/mach//at572d940hf_matrix.h
- *
- * Antonio R. Costa <costa.antonior@gmail.com>
- * Copyright (C) 2008 Atmel
- *
- * Copyright (C) 2005 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef AT572D940HF_MATRIX_H
-#define AT572D940HF_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 (AT91_MATRIX + 0x00) /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 (AT91_MATRIX + 0x04) /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 (AT91_MATRIX + 0x08) /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 (AT91_MATRIX + 0x0C) /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 (AT91_MATRIX + 0x10) /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 (AT91_MATRIX + 0x14) /* Master Configuration Register 5 */
-
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-
-#define AT91_MATRIX_SCFG0 (AT91_MATRIX + 0x40) /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 (AT91_MATRIX + 0x44) /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 (AT91_MATRIX + 0x48) /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 (AT91_MATRIX + 0x4C) /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 (AT91_MATRIX + 0x50) /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (0x7 << 18) /* Fixed Index of Default Master */
-#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
-#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
-#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
-
-#define AT91_MATRIX_PRAS0 (AT91_MATRIX + 0x80) /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRAS1 (AT91_MATRIX + 0x88) /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRAS2 (AT91_MATRIX + 0x90) /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRAS3 (AT91_MATRIX + 0x98) /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRAS4 (AT91_MATRIX + 0xA0) /* Priority Register A for Slave 4 */
-
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
-
-#define AT91_MATRIX_MRCR (AT91_MATRIX + 0x100) /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-
-#define AT91_MATRIX_SFR0 (AT91_MATRIX + 0x110) /* Special Function Register 0 */
-#define AT91_MATRIX_SFR1 (AT91_MATRIX + 0x114) /* Special Function Register 1 */
-#define AT91_MATRIX_SFR2 (AT91_MATRIX + 0x118) /* Special Function Register 2 */
-#define AT91_MATRIX_SFR3 (AT91_MATRIX + 0x11C) /* Special Function Register 3 */
-#define AT91_MATRIX_SFR4 (AT91_MATRIX + 0x120) /* Special Function Register 4 */
-#define AT91_MATRIX_SFR5 (AT91_MATRIX + 0x124) /* Special Function Register 5 */
-#define AT91_MATRIX_SFR6 (AT91_MATRIX + 0x128) /* Special Function Register 6 */
-#define AT91_MATRIX_SFR7 (AT91_MATRIX + 0x12C) /* Special Function Register 7 */
-#define AT91_MATRIX_SFR8 (AT91_MATRIX + 0x130) /* Special Function Register 8 */
-#define AT91_MATRIX_SFR9 (AT91_MATRIX + 0x134) /* Special Function Register 9 */
-#define AT91_MATRIX_SFR10 (AT91_MATRIX + 0x138) /* Special Function Register 10 */
-#define AT91_MATRIX_SFR11 (AT91_MATRIX + 0x13C) /* Special Function Register 11 */
-#define AT91_MATRIX_SFR12 (AT91_MATRIX + 0x140) /* Special Function Register 12 */
-#define AT91_MATRIX_SFR13 (AT91_MATRIX + 0x144) /* Special Function Register 13 */
-#define AT91_MATRIX_SFR14 (AT91_MATRIX + 0x148) /* Special Function Register 14 */
-#define AT91_MATRIX_SFR15 (AT91_MATRIX + 0x14C) /* Special Function Register 15 */
-
-
-/*
- * The following registers / bits are not defined in the Datasheet (Revision A)
- */
-
-#define AT91_MATRIX_TCR (AT91_MATRIX + 0x100) /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_16 (5 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_ITCM_64 (7 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_16 (5 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-#define AT91_MATRIX_DTCM_64 (7 << 4)
-
-#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x11C) /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91cap9.h b/arch/arm/mach-at91/include/mach/at91cap9.h
index 9c6af97..6659938 100644
--- a/arch/arm/mach-at91/include/mach/at91cap9.h
+++ b/arch/arm/mach-at91/include/mach/at91cap9.h
@@ -20,8 +20,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91CAP9_ID_PIOABCD 2 /* Parallel IO Controller A, B, C and D */
#define AT91CAP9_ID_MPB0 3 /* MP Block Peripheral 0 */
#define AT91CAP9_ID_MPB1 4 /* MP Block Peripheral 1 */
@@ -123,6 +121,4 @@
#define AT91CAP9_UDPHS_FIFO 0x00600000 /* USB High Speed Device Port */
#define AT91CAP9_UHP_BASE 0x00700000 /* USB Host controller */
-#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
-
#endif
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200.h b/arch/arm/mach-at91/include/mach/at91rm9200.h
index 7898315..99e0f8d 100644
--- a/arch/arm/mach-at91/include/mach/at91rm9200.h
+++ b/arch/arm/mach-at91/include/mach/at91rm9200.h
@@ -19,8 +19,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripheral */
#define AT91RM9200_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91RM9200_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91RM9200_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260.h b/arch/arm/mach-at91/include/mach/at91sam9260.h
index 4e79036..8b6bf83 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9260.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9260.h
@@ -20,8 +20,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9260_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9260_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9260_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
index 2b56185..eafbdda 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9261.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9261_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9261_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9261_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263.h b/arch/arm/mach-at91/include/mach/at91sam9263.h
index 2091f1e..e2d3482 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9263.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9263.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
#define AT91SAM9263_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9263_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9263_ID_PIOCDE 4 /* Parallel IO Controller C, D and E */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
index a526869..659304a 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -18,8 +18,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Controller Interrupt */
#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */
#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */
#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */
@@ -131,8 +129,6 @@
#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */
#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */
-#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
-
#define CONSISTENT_DMA_SIZE SZ_4M
/*
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl.h b/arch/arm/mach-at91/include/mach/at91sam9rl.h
index 87ba851..41dbbe6 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9rl.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9rl.h
@@ -17,8 +17,6 @@
/*
* Peripheral identifiers/interrupts.
*/
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Controller */
#define AT91SAM9RL_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91SAM9RL_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91SAM9RL_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91x40.h b/arch/arm/mach-at91/include/mach/at91x40.h
index 063ac44..a152ff8 100644
--- a/arch/arm/mach-at91/include/mach/at91x40.h
+++ b/arch/arm/mach-at91/include/mach/at91x40.h
@@ -15,8 +15,6 @@
/*
* IRQ list.
*/
-#define AT91_ID_FIQ 0 /* FIQ */
-#define AT91_ID_SYS 1 /* System Peripheral */
#define AT91X40_ID_USART0 2 /* USART port 0 */
#define AT91X40_ID_USART1 3 /* USART port 1 */
#define AT91X40_ID_TC0 4 /* Timer/Counter 0 */
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 2b499eb..ed544a0 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -90,7 +90,7 @@ struct at91_eth_data {
extern void __init at91_add_device_eth(struct at91_eth_data *data);
#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
- || defined(CONFIG_ARCH_AT91SAM9G45) || defined(CONFIG_ARCH_AT572D940HF)
+ || defined(CONFIG_ARCH_AT91SAM9G45)
#define eth_platform_data at91_eth_data
#endif
@@ -140,6 +140,7 @@ extern void __init at91_set_serial_console(unsigned portnr);
extern struct platform_device *atmel_default_console_device;
struct atmel_uart_data {
+ int num; /* port num */
short use_dma_tx; /* use transmit DMA? */
short use_dma_rx; /* use receive DMA? */
void __iomem *regs; /* virt. base address, if any */
@@ -203,9 +204,6 @@ extern void __init at91_init_leds(u8 cpu_led, u8 timer_led);
extern void __init at91_gpio_leds(struct gpio_led *leds, int nr);
extern void __init at91_pwm_leds(struct gpio_led *leds, int nr);
- /* AT572D940HF DSP */
-extern void __init at91_add_device_mAgic(void);
-
/* FIXME: this needs a better location, but gets stuff building again */
extern int at91_suspend_entering_slow_clock(void);
diff --git a/arch/arm/mach-at91/include/mach/clkdev.h b/arch/arm/mach-at91/include/mach/clkdev.h
new file mode 100644
index 0000000..04b37a8
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index 0700f21..df966c2 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -34,8 +34,6 @@
#define ARCH_ID_AT91SAM9XE256 0x329a93a0
#define ARCH_ID_AT91SAM9XE512 0x329aa3a0
-#define ARCH_ID_AT572D940HF 0x0e0303e0
-
#define ARCH_ID_AT91M40800 0x14080044
#define ARCH_ID_AT91R40807 0x44080746
#define ARCH_ID_AT91M40807 0x14080745
@@ -90,9 +88,16 @@ static inline unsigned long at91cap9_rev_identify(void)
#endif
#ifdef CONFIG_ARCH_AT91RM9200
+extern int rm9200_type;
+#define ARCH_REVISON_9200_BGA (0 << 0)
+#define ARCH_REVISON_9200_PQFP (1 << 0)
#define cpu_is_at91rm9200() (at91_cpu_identify() == ARCH_ID_AT91RM9200)
+#define cpu_is_at91rm9200_bga() (!cpu_is_at91rm9200_pqfp())
+#define cpu_is_at91rm9200_pqfp() (cpu_is_at91rm9200() && rm9200_type & ARCH_REVISON_9200_PQFP)
#else
#define cpu_is_at91rm9200() (0)
+#define cpu_is_at91rm9200_bga() (0)
+#define cpu_is_at91rm9200_pqfp() (0)
#endif
#ifdef CONFIG_ARCH_AT91SAM9260
@@ -181,12 +186,6 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91cap9_revC() (0)
#endif
-#ifdef CONFIG_ARCH_AT572D940HF
-#define cpu_is_at572d940hf() (at91_cpu_identify() == ARCH_ID_AT572D940HF)
-#else
-#define cpu_is_at572d940hf() (0)
-#endif
-
/*
* Since this is ARM, we will never run on any AVR32 CPU. But these
* definitions may reduce clutter in common drivers.
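
The new rm9200_type flag lets SoC and board code distinguish the BGA and PQFP packages at run time. A hedged sketch of how such a check might be used (example_init_bank_d() is an invented placeholder, not part of this patch):

/* Illustrative only: the PQFP package does not bring out PIO bank D,
 * so setup code can skip it. */
static void __init example_rm9200_pio_init(void)
{
	if (cpu_is_at91rm9200_pqfp())
		return;			/* no PIOD on the PQFP part */
	example_init_bank_d();
}
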
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index 3d64a75..1008b9f 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -32,13 +32,17 @@
#include <mach/at91cap9.h>
#elif defined(CONFIG_ARCH_AT91X40)
#include <mach/at91x40.h>
-#elif defined(CONFIG_ARCH_AT572D940HF)
-#include <mach/at572d940hf.h>
#else
#error "Unsupported AT91 processor"
#endif
+/*
+ * Peripheral identifiers/interrupts.
+ */
+#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
+#define AT91_ID_SYS 1 /* System Peripherals */
+
#ifdef CONFIG_MMU
/*
* Remap the peripherals from address 0xFFF78000 .. 0xFFFFFFFF
@@ -82,13 +86,6 @@
#define AT91_CHIPSELECT_6 0x70000000
#define AT91_CHIPSELECT_7 0x80000000
-/* SDRAM */
-#ifdef CONFIG_DRAM_BASE
-#define AT91_SDRAM_BASE CONFIG_DRAM_BASE
-#else
-#define AT91_SDRAM_BASE AT91_CHIPSELECT_1
-#endif
-
/* Clocks */
#define AT91_SLOW_CLOCK 32768 /* slow clock */
diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h
index c2cfe50..401c207 100644
--- a/arch/arm/mach-at91/include/mach/memory.h
+++ b/arch/arm/mach-at91/include/mach/memory.h
@@ -23,6 +23,4 @@
#include <mach/hardware.h>
-#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
-
#endif
diff --git a/arch/arm/mach-at91/include/mach/stamp9g20.h b/arch/arm/mach-at91/include/mach/stamp9g20.h
index 6120f9c..f62c0ab 100644
--- a/arch/arm/mach-at91/include/mach/stamp9g20.h
+++ b/arch/arm/mach-at91/include/mach/stamp9g20.h
@@ -1,7 +1,7 @@
#ifndef __MACH_STAMP9G20_H
#define __MACH_STAMP9G20_H
-void stamp9g20_map_io(void);
+void stamp9g20_init_early(void);
void stamp9g20_board_init(void);
#endif
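
Boards built on the Stamp9G20 module need the matching rename on the caller side; a minimal, hypothetical sketch (the surrounding board code is illustrative only):

/* Hypothetical caller-side update matching the rename above. */
static void __init example_board_init_early(void)
{
	stamp9g20_init_early();		/* formerly stamp9g20_map_io() */
}
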
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
new file mode 100644
index 0000000..b855ee7
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * Under GPLv2 only
+ */
+
+#ifndef __ARCH_SYSTEM_REV_H__
+#define __ARCH_SYSTEM_REV_H__
+
+/*
+ * Board revision encoding (mach specific);
+ * bits 16-31 are reserved for at91 generic information.
+ *
+ * bit 31:
+ * 0 => nand 16 bit
+ * 1 => nand 8 bit
+ */
+#define BOARD_HAVE_NAND_8BIT (1 << 31)
+static int inline board_have_nand_8bit(void)
+{
+ return system_rev & BOARD_HAVE_NAND_8BIT;
+}
+
+#endif /* __ARCH_SYSTEM_REV_H__ */
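
An illustrative board-file use of the helper above; ek_nand_data and at91_add_device_nand() follow the usual at91 board conventions, but this exact snippet is not part of the patch:

static void __init ek_add_device_nand(void)
{
	/* bit 31 of system_rev set => 8-bit NAND bus, otherwise 16-bit */
	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
	at91_add_device_nand(&ek_nand_data);
}
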
diff --git a/arch/arm/mach-at91/include/mach/timex.h b/arch/arm/mach-at91/include/mach/timex.h
index 05a6e8a..31ac2d9 100644
--- a/arch/arm/mach-at91/include/mach/timex.h
+++ b/arch/arm/mach-at91/include/mach/timex.h
@@ -82,11 +82,6 @@
#define AT91X40_MASTER_CLOCK 40000000
#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK)
-#elif defined(CONFIG_ARCH_AT572D940HF)
-
-#define AT572D940HF_MASTER_CLOCK 80000000
-#define CLOCK_TICK_RATE (AT572D940HF_MASTER_CLOCK/16)
-
#endif
#endif
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index b95b919..133aac4 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1055,7 +1055,7 @@ int da850_register_pm(struct platform_device *pdev)
if (!pdata->cpupll_reg_base)
return -ENOMEM;
- pdata->ddrpll_reg_base = ioremap(DA8XX_PLL1_BASE, SZ_4K);
+ pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K);
if (!pdata->ddrpll_reg_base) {
ret = -ENOMEM;
goto no_ddrpll_mem;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 58a02dc..4e66881 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -24,23 +24,25 @@
#include "clock.h"
#define DA8XX_TPCC_BASE 0x01c00000
-#define DA850_MMCSD1_BASE 0x01e1b000
-#define DA850_TPCC1_BASE 0x01e30000
#define DA8XX_TPTC0_BASE 0x01c08000
#define DA8XX_TPTC1_BASE 0x01c08400
-#define DA850_TPTC2_BASE 0x01e38000
#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */
#define DA8XX_I2C0_BASE 0x01c22000
-#define DA8XX_RTC_BASE 0x01C23000
+#define DA8XX_RTC_BASE 0x01c23000
+#define DA8XX_MMCSD0_BASE 0x01c40000
+#define DA8XX_SPI0_BASE 0x01c41000
+#define DA830_SPI1_BASE 0x01e12000
+#define DA8XX_LCD_CNTRL_BASE 0x01e13000
+#define DA850_MMCSD1_BASE 0x01e1b000
#define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000
#define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000
#define DA8XX_EMAC_CPGMAC_BASE 0x01e23000
#define DA8XX_EMAC_MDIO_BASE 0x01e24000
-#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_I2C1_BASE 0x01e28000
-#define DA8XX_SPI0_BASE 0x01c41000
-#define DA830_SPI1_BASE 0x01e12000
+#define DA850_TPCC1_BASE 0x01e30000
+#define DA850_TPTC2_BASE 0x01e38000
#define DA850_SPI1_BASE 0x01f0e000
+#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 22ebc64..8f4f736 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -33,6 +33,9 @@
#define DM365_MMCSD0_BASE 0x01D11000
#define DM365_MMCSD1_BASE 0x01D00000
+/* System control register offsets */
+#define DM64XX_VDD3P3V_PWDN 0x48
+
static struct resource i2c_resources[] = {
{
.start = DAVINCI_I2C_BASE,
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index e4fc1af..ad64da7 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -64,13 +64,9 @@ extern unsigned int da850_max_speed;
#define DA8XX_TIMER64P1_BASE 0x01c21000
#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_PSC1_BASE 0x01e27000
-#define DA8XX_LCD_CNTRL_BASE 0x01e13000
-#define DA8XX_PLL1_BASE 0x01e1a000
-#define DA8XX_MMCSD0_BASE 0x01c40000
#define DA8XX_AEMIF_CS2_BASE 0x60000000
#define DA8XX_AEMIF_CS3_BASE 0x62000000
#define DA8XX_AEMIF_CTL_BASE 0x68000000
-#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_ARM_RAM_BASE 0xffff0000
void __init da830_init(void);
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index c45ba1f..414e0b9 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -21,9 +21,6 @@
*/
#define DAVINCI_SYSTEM_MODULE_BASE 0x01C40000
-/* System control register offsets */
-#define DM64XX_VDD3P3V_PWDN 0x48
-
/*
* I/O mapping
*/
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 8051962..b92c1e5 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -169,9 +169,11 @@ config MACH_NURI
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select S3C_DEV_I2C1
+ select S3C_DEV_I2C3
select S3C_DEV_I2C5
select S5P_DEV_USB_EHCI
select EXYNOS4_SETUP_I2C1
+ select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C5
select EXYNOS4_SETUP_SDHCI
select SAMSUNG_DEV_PWM
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index 7778975..a9bb94f 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -13,9 +13,10 @@ obj- :=
# Core support for EXYNOS4 system
obj-$(CONFIG_CPU_EXYNOS4210) += cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o gpiolib.o irq-eint.o dma.o
+obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o irq-eint.o dma.o
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-exynos4/cpuidle.c b/arch/arm/mach-exynos4/cpuidle.c
new file mode 100644
index 0000000..bf7e96f
--- /dev/null
+++ b/arch/arm/mach-exynos4/cpuidle.c
@@ -0,0 +1,86 @@
+/* linux/arch/arm/mach-exynos4/cpuidle.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+
+#include <asm/proc-fns.h>
+
+static int exynos4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state);
+
+static struct cpuidle_state exynos4_cpuidle_set[] = {
+ [0] = {
+ .enter = exynos4_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "IDLE",
+ .desc = "ARM clock gating(WFI)",
+ },
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);
+
+static struct cpuidle_driver exynos4_idle_driver = {
+ .name = "exynos4_idle",
+ .owner = THIS_MODULE,
+};
+
+static int exynos4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct timeval before, after;
+ int idle_time;
+
+ local_irq_disable();
+ do_gettimeofday(&before);
+
+ cpu_do_idle();
+
+ do_gettimeofday(&after);
+ local_irq_enable();
+ idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
+ (after.tv_usec - before.tv_usec);
+
+ return idle_time;
+}
+
+static int __init exynos4_init_cpuidle(void)
+{
+ int i, max_cpuidle_state, cpu_id;
+ struct cpuidle_device *device;
+
+ cpuidle_register_driver(&exynos4_idle_driver);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ device = &per_cpu(exynos4_cpuidle_device, cpu_id);
+ device->cpu = cpu_id;
+
+ device->state_count = (sizeof(exynos4_cpuidle_set) /
+ sizeof(struct cpuidle_state));
+
+ max_cpuidle_state = device->state_count;
+
+ for (i = 0; i < max_cpuidle_state; i++) {
+ memcpy(&device->states[i], &exynos4_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ if (cpuidle_register_device(device)) {
+ printk(KERN_ERR "CPUidle register device failed\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+device_initcall(exynos4_init_cpuidle);
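
The state_count computation in exynos4_init_cpuidle() is the open-coded form of the kernel's ARRAY_SIZE() helper; an equivalent, slightly tidier spelling (illustrative, not part of the patch):

	/* same value as the sizeof()/sizeof() division above */
	device->state_count = ARRAY_SIZE(exynos4_cpuidle_set);
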
diff --git a/arch/arm/mach-exynos4/mach-nuri.c b/arch/arm/mach-exynos4/mach-nuri.c
index bb5d12f..642702b 100644
--- a/arch/arm/mach-exynos4/mach-nuri.c
+++ b/arch/arm/mach-exynos4/mach-nuri.c
@@ -12,6 +12,7 @@
#include <linux/serial_core.h>
#include <linux/input.h>
#include <linux/i2c.h>
+#include <linux/i2c/atmel_mxt_ts.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
@@ -32,6 +33,8 @@
#include <plat/sdhci.h>
#include <plat/ehci.h>
#include <plat/clock.h>
+#include <plat/gpio-cfg.h>
+#include <plat/iic.h>
#include <mach/map.h>
@@ -259,6 +262,88 @@ static struct i2c_board_info i2c1_devs[] __initdata = {
/* Gyro, To be updated */
};
+/* TSP */
+static u8 mxt_init_vals[] = {
+ /* MXT_GEN_COMMAND(6) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_GEN_POWER(7) */
+ 0x20, 0xff, 0x32,
+ /* MXT_GEN_ACQUIRE(8) */
+ 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
+ /* MXT_TOUCH_MULTI(9) */
+ 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00,
+ /* MXT_TOUCH_KEYARRAY(15) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00,
+ /* MXT_SPT_GPIOPWM(19) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_GRIPFACE(20) */
+ 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
+ 0x0f, 0x0a,
+ /* MXT_PROCG_NOISE(22) */
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
+ 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
+ /* MXT_TOUCH_PROXIMITY(23) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_ONETOUCH(24) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_SPT_SELFTEST(25) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ /* MXT_PROCI_TWOTOUCH(27) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* MXT_SPT_CTECONFIG(28) */
+ 0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
+};
+
+static struct mxt_platform_data mxt_platform_data = {
+ .config = mxt_init_vals,
+ .config_length = ARRAY_SIZE(mxt_init_vals),
+
+ .x_line = 18,
+ .y_line = 11,
+ .x_size = 1024,
+ .y_size = 600,
+ .blen = 0x1,
+ .threshold = 0x28,
+ .voltage = 2800000, /* 2.8V */
+ .orient = MXT_DIAGONAL_COUNTER,
+ .irqflags = IRQF_TRIGGER_FALLING,
+};
+
+static struct s3c2410_platform_i2c i2c3_data __initdata = {
+ .flags = 0,
+ .bus_num = 3,
+ .slave_addr = 0x10,
+ .frequency = 400 * 1000,
+ .sda_delay = 100,
+};
+
+static struct i2c_board_info i2c3_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("atmel_mxt_ts", 0x4a),
+ .platform_data = &mxt_platform_data,
+ .irq = IRQ_EINT(4),
+ },
+};
+
+static void __init nuri_tsp_init(void)
+{
+ int gpio;
+
+ /* TOUCH_INT: XEINT_4 */
+ gpio = EXYNOS4_GPX0(4);
+ gpio_request(gpio, "TOUCH_INT");
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+}
+
/* GPIO I2C 5 (PMIC) */
static struct i2c_board_info i2c5_devs[] __initdata = {
/* max8997, To be updated */
@@ -283,6 +368,7 @@ static struct platform_device *nuri_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_timer[0],
&s5p_device_ehci,
+ &s3c_device_i2c3,
/* NURI Devices */
&nuri_gpio_keys,
@@ -300,8 +386,11 @@ static void __init nuri_map_io(void)
static void __init nuri_machine_init(void)
{
nuri_sdhci_init();
+ nuri_tsp_init();
i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
+ s3c_i2c3_set_platdata(&i2c3_data);
+ i2c_register_board_info(3, i2c3_devs, ARRAY_SIZE(i2c3_devs));
i2c_register_board_info(5, i2c5_devs, ARRAY_SIZE(i2c5_devs));
nuri_ehci_init();
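
For reference, the touchscreen driver picks up the board data registered above through the usual i2c platform_data path. A simplified consumer-side sketch (the real atmel_mxt_ts probe does considerably more than this):

static int example_mxt_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	const struct mxt_platform_data *pdata = client->dev.platform_data;

	if (!pdata)
		return -EINVAL;
	/* pdata->config / pdata->config_length come from mxt_init_vals above */
	return 0;
}
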
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index af7b68a..88cc422 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd111_timer = {
.init = gemini_timer_init,
};
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition wbd111_partitions[] = {
{
.name = "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd111_partitions[] = {
.mask_flags = MTD_WRITEABLE,
}
};
-#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
-#else
-#define wbd111_partitions NULL
-#define wbd111_num_partitions 0
-#endif /* CONFIG_MTD_PARTITIONS */
+#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
static void __init wbd111_init(void)
{
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 99e5bbe..3a22034 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd222_timer = {
.init = gemini_timer_init,
};
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition wbd222_partitions[] = {
{
.name = "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd222_partitions[] = {
.mask_flags = MTD_WRITEABLE,
}
};
-#define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions)
-#else
-#define wbd222_partitions NULL
-#define wbd222_num_partitions 0
-#endif /* CONFIG_MTD_PARTITIONS */
+#define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions)
static void __init wbd222_init(void)
{
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 1407833..dca4f7f 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -60,7 +60,6 @@ static struct platform_device ixdp425_flash = {
#if defined(CONFIG_MTD_NAND_PLATFORM) || \
defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition ixdp425_partitions[] = {
@@ -74,7 +73,6 @@ static struct mtd_partition ixdp425_partitions[] = {
.size = MTDPART_SIZ_FULL
},
};
-#endif
static void
ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -103,11 +101,9 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
.nr_chips = 1,
.chip_delay = 30,
.options = NAND_NO_AUTOINCR,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = ixdp425_partitions,
.nr_partitions = ARRAY_SIZE(ixdp425_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = ixdp425_flash_nand_cmd_ctrl
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index 5b84bcd..b991323 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -103,7 +103,6 @@ static struct amba_device fb_device = {
.flags = IORESOURCE_MEM,
},
.irq = { NETX_IRQ_LCD, NO_IRQ },
- .periphid = 0x10112400,
};
int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 71f3ea6..3c5e0f5 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -6,7 +6,6 @@ config MACH_NOMADIK_8815NHK
bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
select NOMADIK_8815
select HAS_MTU
- select NOMADIK_GPIO
endmenu
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 2fc9f94..cd19309 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -153,7 +153,6 @@ config MACH_XCEP
bool "Iskratel Electronics XCEP"
select PXA25x
select MTD
- select MTD_PARTITIONS
select MTD_PHYSMAP
select MTD_CFI_INTELEXT
select MTD_CFI
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 44440cb..dabc141 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -58,8 +58,6 @@
#include <plat/cpu.h>
#include <plat/gpio-cfg.h>
-#ifdef CONFIG_MTD_PARTITIONS
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
@@ -113,7 +111,6 @@ static struct platform_device amlm5900_device_nor = {
.num_resources = 1,
.resource = &amlm5900_nor_resource,
};
-#endif
static struct map_desc amlm5900_iodesc[] __initdata = {
};
@@ -158,9 +155,7 @@ static struct platform_device *amlm5900_devices[] __initdata = {
&s3c_device_rtc,
&s3c_device_usbgadget,
&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
&amlm5900_device_nor,
-#endif
};
static void __init amlm5900_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-tct_hammer.c b/arch/arm/mach-s3c2410/mach-tct_hammer.c
index a15d062..43c2b83 100644
--- a/arch/arm/mach-s3c2410/mach-tct_hammer.c
+++ b/arch/arm/mach-s3c2410/mach-tct_hammer.c
@@ -49,8 +49,6 @@
#include <plat/devs.h>
#include <plat/cpu.h>
-#ifdef CONFIG_MTD_PARTITIONS
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
@@ -91,8 +89,6 @@ static struct platform_device tct_hammer_device_nor = {
.resource = &tct_hammer_nor_resource,
};
-#endif
-
static struct map_desc tct_hammer_iodesc[] __initdata = {
};
@@ -133,9 +129,7 @@ static struct platform_device *tct_hammer_devices[] __initdata = {
&s3c_device_rtc,
&s3c_device_usbgadget,
&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
&tct_hammer_device_nor,
-#endif
};
static void __init tct_hammer_map_io(void)
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 405e621..82db072 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -16,7 +16,6 @@
#include <mach/dma.h>
#include <mach/map.h>
-#include <mach/gpio-bank-c.h>
#include <mach/spi-clocks.h>
#include <mach/irqs.h>
@@ -40,23 +39,15 @@ static char *spi_src_clks[] = {
*/
static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S3C64XX_GPC(0), S3C64XX_GPC0_SPI_MISO0);
- s3c_gpio_cfgpin(S3C64XX_GPC(1), S3C64XX_GPC1_SPI_CLKO);
- s3c_gpio_cfgpin(S3C64XX_GPC(2), S3C64XX_GPC2_SPI_MOSIO);
- s3c_gpio_setpull(S3C64XX_GPC(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(2), S3C_GPIO_PULL_UP);
+ base = S3C64XX_GPC(0);
break;
case 1:
- s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C64XX_GPC4_SPI_MISO1);
- s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C64XX_GPC5_SPI_CLK1);
- s3c_gpio_cfgpin(S3C64XX_GPC(6), S3C64XX_GPC6_SPI_MOSI1);
- s3c_gpio_setpull(S3C64XX_GPC(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPC(6), S3C_GPIO_PULL_UP);
+ base = S3C64XX_GPC(4);
break;
default:
@@ -64,6 +55,9 @@ static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgall_range(base, 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
return 0;
}
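
s3c_gpio_cfgall_range() applies one pin function and pull setting across a run of consecutive GPIOs, replacing the per-pin calls removed above. A rough, purely illustrative equivalent of the call in s3c64xx_spi_cfg_gpio():

static void example_cfg_spi_pins(unsigned int base)
{
	int i;

	/* configure three consecutive pins as SFN(2) with pull-up,
	 * matching s3c_gpio_cfgall_range(base, 3, ...) */
	for (i = 0; i < 3; i++) {
		s3c_gpio_cfgpin(base + i, S3C_GPIO_SFN(2));
		s3c_gpio_setpull(base + i, S3C_GPIO_PULL_UP);
	}
}
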
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
deleted file mode 100644
index 34212e1..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank A register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPACON (S3C64XX_GPA_BASE + 0x00)
-#define S3C64XX_GPADAT (S3C64XX_GPA_BASE + 0x04)
-#define S3C64XX_GPAPUD (S3C64XX_GPA_BASE + 0x08)
-#define S3C64XX_GPACONSLP (S3C64XX_GPA_BASE + 0x0c)
-#define S3C64XX_GPAPUDSLP (S3C64XX_GPA_BASE + 0x10)
-
-#define S3C64XX_GPA_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPA_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPA_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPA0_UART_RXD0 (0x02 << 0)
-#define S3C64XX_GPA0_EINT_G1_0 (0x07 << 0)
-
-#define S3C64XX_GPA1_UART_TXD0 (0x02 << 4)
-#define S3C64XX_GPA1_EINT_G1_1 (0x07 << 4)
-
-#define S3C64XX_GPA2_UART_nCTS0 (0x02 << 8)
-#define S3C64XX_GPA2_EINT_G1_2 (0x07 << 8)
-
-#define S3C64XX_GPA3_UART_nRTS0 (0x02 << 12)
-#define S3C64XX_GPA3_EINT_G1_3 (0x07 << 12)
-
-#define S3C64XX_GPA4_UART_RXD1 (0x02 << 16)
-#define S3C64XX_GPA4_EINT_G1_4 (0x07 << 16)
-
-#define S3C64XX_GPA5_UART_TXD1 (0x02 << 20)
-#define S3C64XX_GPA5_EINT_G1_5 (0x07 << 20)
-
-#define S3C64XX_GPA6_UART_nCTS1 (0x02 << 24)
-#define S3C64XX_GPA6_EINT_G1_6 (0x07 << 24)
-
-#define S3C64XX_GPA7_UART_nRTS1 (0x02 << 28)
-#define S3C64XX_GPA7_EINT_G1_7 (0x07 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
deleted file mode 100644
index 7232c03..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank B register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPBCON (S3C64XX_GPB_BASE + 0x00)
-#define S3C64XX_GPBDAT (S3C64XX_GPB_BASE + 0x04)
-#define S3C64XX_GPBPUD (S3C64XX_GPB_BASE + 0x08)
-#define S3C64XX_GPBCONSLP (S3C64XX_GPB_BASE + 0x0c)
-#define S3C64XX_GPBPUDSLP (S3C64XX_GPB_BASE + 0x10)
-
-#define S3C64XX_GPB_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPB_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPB_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPB0_UART_RXD2 (0x02 << 0)
-#define S3C64XX_GPB0_EXTDMA_REQ (0x03 << 0)
-#define S3C64XX_GPB0_IrDA_RXD (0x04 << 0)
-#define S3C64XX_GPB0_ADDR_CF0 (0x05 << 0)
-#define S3C64XX_GPB0_EINT_G1_8 (0x07 << 0)
-
-#define S3C64XX_GPB1_UART_TXD2 (0x02 << 4)
-#define S3C64XX_GPB1_EXTDMA_ACK (0x03 << 4)
-#define S3C64XX_GPB1_IrDA_TXD (0x04 << 4)
-#define S3C64XX_GPB1_ADDR_CF1 (0x05 << 4)
-#define S3C64XX_GPB1_EINT_G1_9 (0x07 << 4)
-
-#define S3C64XX_GPB2_UART_RXD3 (0x02 << 8)
-#define S3C64XX_GPB2_IrDA_RXD (0x03 << 8)
-#define S3C64XX_GPB2_EXTDMA_REQ (0x04 << 8)
-#define S3C64XX_GPB2_ADDR_CF2 (0x05 << 8)
-#define S3C64XX_GPB2_I2C_SCL1 (0x06 << 8)
-#define S3C64XX_GPB2_EINT_G1_10 (0x07 << 8)
-
-#define S3C64XX_GPB3_UART_TXD3 (0x02 << 12)
-#define S3C64XX_GPB3_IrDA_TXD (0x03 << 12)
-#define S3C64XX_GPB3_EXTDMA_ACK (0x04 << 12)
-#define S3C64XX_GPB3_I2C_SDA1 (0x06 << 12)
-#define S3C64XX_GPB3_EINT_G1_11 (0x07 << 12)
-
-#define S3C64XX_GPB4_IrDA_SDBW (0x02 << 16)
-#define S3C64XX_GPB4_CAM_FIELD (0x03 << 16)
-#define S3C64XX_GPB4_CF_DATA_DIR (0x04 << 16)
-#define S3C64XX_GPB4_EINT_G1_12 (0x07 << 16)
-
-#define S3C64XX_GPB5_I2C_SCL0 (0x02 << 20)
-#define S3C64XX_GPB5_EINT_G1_13 (0x07 << 20)
-
-#define S3C64XX_GPB6_I2C_SDA0 (0x02 << 24)
-#define S3C64XX_GPB6_EINT_G1_14 (0x07 << 24)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
deleted file mode 100644
index db189ab..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank C register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPCCON (S3C64XX_GPC_BASE + 0x00)
-#define S3C64XX_GPCDAT (S3C64XX_GPC_BASE + 0x04)
-#define S3C64XX_GPCPUD (S3C64XX_GPC_BASE + 0x08)
-#define S3C64XX_GPCCONSLP (S3C64XX_GPC_BASE + 0x0c)
-#define S3C64XX_GPCPUDSLP (S3C64XX_GPC_BASE + 0x10)
-
-#define S3C64XX_GPC_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPC_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPC_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPC0_SPI_MISO0 (0x02 << 0)
-#define S3C64XX_GPC0_EINT_G2_0 (0x07 << 0)
-
-#define S3C64XX_GPC1_SPI_CLKO (0x02 << 4)
-#define S3C64XX_GPC1_EINT_G2_1 (0x07 << 4)
-
-#define S3C64XX_GPC2_SPI_MOSIO (0x02 << 8)
-#define S3C64XX_GPC2_EINT_G2_2 (0x07 << 8)
-
-#define S3C64XX_GPC3_SPI_nCSO (0x02 << 12)
-#define S3C64XX_GPC3_EINT_G2_3 (0x07 << 12)
-
-#define S3C64XX_GPC4_SPI_MISO1 (0x02 << 16)
-#define S3C64XX_GPC4_MMC2_CMD (0x03 << 16)
-#define S3C64XX_GPC4_I2S_V40_DO0 (0x05 << 16)
-#define S3C64XX_GPC4_EINT_G2_4 (0x07 << 16)
-
-#define S3C64XX_GPC5_SPI_CLK1 (0x02 << 20)
-#define S3C64XX_GPC5_MMC2_CLK (0x03 << 20)
-#define S3C64XX_GPC5_I2S_V40_DO1 (0x05 << 20)
-#define S3C64XX_GPC5_EINT_G2_5 (0x07 << 20)
-
-#define S3C64XX_GPC6_SPI_MOSI1 (0x02 << 24)
-#define S3C64XX_GPC6_EINT_G2_6 (0x07 << 24)
-
-#define S3C64XX_GPC7_SPI_nCS1 (0x02 << 28)
-#define S3C64XX_GPC7_I2S_V40_DO2 (0x05 << 28)
-#define S3C64XX_GPC7_EINT_G2_7 (0x07 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
deleted file mode 100644
index 1a01cee..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank D register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPDCON (S3C64XX_GPD_BASE + 0x00)
-#define S3C64XX_GPDDAT (S3C64XX_GPD_BASE + 0x04)
-#define S3C64XX_GPDPUD (S3C64XX_GPD_BASE + 0x08)
-#define S3C64XX_GPDCONSLP (S3C64XX_GPD_BASE + 0x0c)
-#define S3C64XX_GPDPUDSLP (S3C64XX_GPD_BASE + 0x10)
-
-#define S3C64XX_GPD_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPD_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPD_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPD0_PCM0_SCLK (0x02 << 0)
-#define S3C64XX_GPD0_I2S0_CLK (0x03 << 0)
-#define S3C64XX_GPD0_AC97_BITCLK (0x04 << 0)
-#define S3C64XX_GPD0_EINT_G3_0 (0x07 << 0)
-
-#define S3C64XX_GPD1_PCM0_EXTCLK (0x02 << 4)
-#define S3C64XX_GPD1_I2S0_CDCLK (0x03 << 4)
-#define S3C64XX_GPD1_AC97_nRESET (0x04 << 4)
-#define S3C64XX_GPD1_EINT_G3_1 (0x07 << 4)
-
-#define S3C64XX_GPD2_PCM0_FSYNC (0x02 << 8)
-#define S3C64XX_GPD2_I2S0_LRCLK (0x03 << 8)
-#define S3C64XX_GPD2_AC97_SYNC (0x04 << 8)
-#define S3C64XX_GPD2_EINT_G3_2 (0x07 << 8)
-
-#define S3C64XX_GPD3_PCM0_SIN (0x02 << 12)
-#define S3C64XX_GPD3_I2S0_DI (0x03 << 12)
-#define S3C64XX_GPD3_AC97_SDI (0x04 << 12)
-#define S3C64XX_GPD3_EINT_G3_3 (0x07 << 12)
-
-#define S3C64XX_GPD4_PCM0_SOUT (0x02 << 16)
-#define S3C64XX_GPD4_I2S0_D0 (0x03 << 16)
-#define S3C64XX_GPD4_AC97_SDO (0x04 << 16)
-#define S3C64XX_GPD4_EINT_G3_4 (0x07 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
deleted file mode 100644
index f057adb..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank E register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPECON (S3C64XX_GPE_BASE + 0x00)
-#define S3C64XX_GPEDAT (S3C64XX_GPE_BASE + 0x04)
-#define S3C64XX_GPEPUD (S3C64XX_GPE_BASE + 0x08)
-#define S3C64XX_GPECONSLP (S3C64XX_GPE_BASE + 0x0c)
-#define S3C64XX_GPEPUDSLP (S3C64XX_GPE_BASE + 0x10)
-
-#define S3C64XX_GPE_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPE_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPE_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPE0_PCM1_SCLK (0x02 << 0)
-#define S3C64XX_GPE0_I2S1_CLK (0x03 << 0)
-#define S3C64XX_GPE0_AC97_BITCLK (0x04 << 0)
-
-#define S3C64XX_GPE1_PCM1_EXTCLK (0x02 << 4)
-#define S3C64XX_GPE1_I2S1_CDCLK (0x03 << 4)
-#define S3C64XX_GPE1_AC97_nRESET (0x04 << 4)
-
-#define S3C64XX_GPE2_PCM1_FSYNC (0x02 << 8)
-#define S3C64XX_GPE2_I2S1_LRCLK (0x03 << 8)
-#define S3C64XX_GPE2_AC97_SYNC (0x04 << 8)
-
-#define S3C64XX_GPE3_PCM1_SIN (0x02 << 12)
-#define S3C64XX_GPE3_I2S1_DI (0x03 << 12)
-#define S3C64XX_GPE3_AC97_SDI (0x04 << 12)
-
-#define S3C64XX_GPE4_PCM1_SOUT (0x02 << 16)
-#define S3C64XX_GPE4_I2S1_D0 (0x03 << 16)
-#define S3C64XX_GPE4_AC97_SDO (0x04 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
deleted file mode 100644
index 62ab8f5..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank F register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPFCON (S3C64XX_GPF_BASE + 0x00)
-#define S3C64XX_GPFDAT (S3C64XX_GPF_BASE + 0x04)
-#define S3C64XX_GPFPUD (S3C64XX_GPF_BASE + 0x08)
-#define S3C64XX_GPFCONSLP (S3C64XX_GPF_BASE + 0x0c)
-#define S3C64XX_GPFPUDSLP (S3C64XX_GPF_BASE + 0x10)
-
-#define S3C64XX_GPF_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPF_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPF_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPF0_CAMIF_CLK (0x02 << 0)
-#define S3C64XX_GPF0_EINT_G4_0 (0x03 << 0)
-
-#define S3C64XX_GPF1_CAMIF_HREF (0x02 << 2)
-#define S3C64XX_GPF1_EINT_G4_1 (0x03 << 2)
-
-#define S3C64XX_GPF2_CAMIF_PCLK (0x02 << 4)
-#define S3C64XX_GPF2_EINT_G4_2 (0x03 << 4)
-
-#define S3C64XX_GPF3_CAMIF_nRST (0x02 << 6)
-#define S3C64XX_GPF3_EINT_G4_3 (0x03 << 6)
-
-#define S3C64XX_GPF4_CAMIF_VSYNC (0x02 << 8)
-#define S3C64XX_GPF4_EINT_G4_4 (0x03 << 8)
-
-#define S3C64XX_GPF5_CAMIF_YDATA0 (0x02 << 10)
-#define S3C64XX_GPF5_EINT_G4_5 (0x03 << 10)
-
-#define S3C64XX_GPF6_CAMIF_YDATA1 (0x02 << 12)
-#define S3C64XX_GPF6_EINT_G4_6 (0x03 << 12)
-
-#define S3C64XX_GPF7_CAMIF_YDATA2 (0x02 << 14)
-#define S3C64XX_GPF7_EINT_G4_7 (0x03 << 14)
-
-#define S3C64XX_GPF8_CAMIF_YDATA3 (0x02 << 16)
-#define S3C64XX_GPF8_EINT_G4_8 (0x03 << 16)
-
-#define S3C64XX_GPF9_CAMIF_YDATA4 (0x02 << 18)
-#define S3C64XX_GPF9_EINT_G4_9 (0x03 << 18)
-
-#define S3C64XX_GPF10_CAMIF_YDATA5 (0x02 << 20)
-#define S3C64XX_GPF10_EINT_G4_10 (0x03 << 20)
-
-#define S3C64XX_GPF11_CAMIF_YDATA6 (0x02 << 22)
-#define S3C64XX_GPF11_EINT_G4_11 (0x03 << 22)
-
-#define S3C64XX_GPF12_CAMIF_YDATA7 (0x02 << 24)
-#define S3C64XX_GPF12_EINT_G4_12 (0x03 << 24)
-
-#define S3C64XX_GPF13_PWM_ECLK (0x02 << 26)
-#define S3C64XX_GPF13_EINT_G4_13 (0x03 << 26)
-
-#define S3C64XX_GPF14_PWM_TOUT0 (0x02 << 28)
-#define S3C64XX_GPF14_CLKOUT0 (0x03 << 28)
-
-#define S3C64XX_GPF15_PWM_TOUT1 (0x02 << 30)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
deleted file mode 100644
index b94954a..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank G register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPGCON (S3C64XX_GPG_BASE + 0x00)
-#define S3C64XX_GPGDAT (S3C64XX_GPG_BASE + 0x04)
-#define S3C64XX_GPGPUD (S3C64XX_GPG_BASE + 0x08)
-#define S3C64XX_GPGCONSLP (S3C64XX_GPG_BASE + 0x0c)
-#define S3C64XX_GPGPUDSLP (S3C64XX_GPG_BASE + 0x10)
-
-#define S3C64XX_GPG_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPG_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPG_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPG0_MMC0_CLK (0x02 << 0)
-#define S3C64XX_GPG0_EINT_G5_0 (0x07 << 0)
-
-#define S3C64XX_GPG1_MMC0_CMD (0x02 << 4)
-#define S3C64XX_GPG1_EINT_G5_1 (0x07 << 4)
-
-#define S3C64XX_GPG2_MMC0_DATA0 (0x02 << 8)
-#define S3C64XX_GPG2_EINT_G5_2 (0x07 << 8)
-
-#define S3C64XX_GPG3_MMC0_DATA1 (0x02 << 12)
-#define S3C64XX_GPG3_EINT_G5_3 (0x07 << 12)
-
-#define S3C64XX_GPG4_MMC0_DATA2 (0x02 << 16)
-#define S3C64XX_GPG4_EINT_G5_4 (0x07 << 16)
-
-#define S3C64XX_GPG5_MMC0_DATA3 (0x02 << 20)
-#define S3C64XX_GPG5_EINT_G5_5 (0x07 << 20)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
deleted file mode 100644
index 5d75aaa..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank H register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPHCON0 (S3C64XX_GPH_BASE + 0x00)
-#define S3C64XX_GPHCON1 (S3C64XX_GPH_BASE + 0x04)
-#define S3C64XX_GPHDAT (S3C64XX_GPH_BASE + 0x08)
-#define S3C64XX_GPHPUD (S3C64XX_GPH_BASE + 0x0c)
-#define S3C64XX_GPHCONSLP (S3C64XX_GPH_BASE + 0x10)
-#define S3C64XX_GPHPUDSLP (S3C64XX_GPH_BASE + 0x14)
-
-#define S3C64XX_GPH_CONMASK(__gpio) (0xf << ((__gpio) * 4))
-#define S3C64XX_GPH_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S3C64XX_GPH_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-
-#define S3C64XX_GPH0_MMC1_CLK (0x02 << 0)
-#define S3C64XX_GPH0_KP_COL0 (0x04 << 0)
-#define S3C64XX_GPH0_EINT_G6_0 (0x07 << 0)
-
-#define S3C64XX_GPH1_MMC1_CMD (0x02 << 4)
-#define S3C64XX_GPH1_KP_COL1 (0x04 << 4)
-#define S3C64XX_GPH1_EINT_G6_1 (0x07 << 4)
-
-#define S3C64XX_GPH2_MMC1_DATA0 (0x02 << 8)
-#define S3C64XX_GPH2_KP_COL2 (0x04 << 8)
-#define S3C64XX_GPH2_EINT_G6_2 (0x07 << 8)
-
-#define S3C64XX_GPH3_MMC1_DATA1 (0x02 << 12)
-#define S3C64XX_GPH3_KP_COL3 (0x04 << 12)
-#define S3C64XX_GPH3_EINT_G6_3 (0x07 << 12)
-
-#define S3C64XX_GPH4_MMC1_DATA2 (0x02 << 16)
-#define S3C64XX_GPH4_KP_COL4 (0x04 << 16)
-#define S3C64XX_GPH4_EINT_G6_4 (0x07 << 16)
-
-#define S3C64XX_GPH5_MMC1_DATA3 (0x02 << 20)
-#define S3C64XX_GPH5_KP_COL5 (0x04 << 20)
-#define S3C64XX_GPH5_EINT_G6_5 (0x07 << 20)
-
-#define S3C64XX_GPH6_MMC1_DATA4 (0x02 << 24)
-#define S3C64XX_GPH6_MMC2_DATA0 (0x03 << 24)
-#define S3C64XX_GPH6_KP_COL6 (0x04 << 24)
-#define S3C64XX_GPH6_I2S_V40_BCLK (0x05 << 24)
-#define S3C64XX_GPH6_ADDR_CF0 (0x06 << 24)
-#define S3C64XX_GPH6_EINT_G6_6 (0x07 << 24)
-
-#define S3C64XX_GPH7_MMC1_DATA5 (0x02 << 28)
-#define S3C64XX_GPH7_MMC2_DATA1 (0x03 << 28)
-#define S3C64XX_GPH7_KP_COL7 (0x04 << 28)
-#define S3C64XX_GPH7_I2S_V40_CDCLK (0x05 << 28)
-#define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28)
-#define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28)
-
-#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0)
-#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0)
-#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0)
-#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0)
-#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0)
-
-#define S3C64XX_GPH9_OUTPUT (0x01 << 4)
-#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4)
-#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4)
-#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4)
-#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
deleted file mode 100644
index 4ceaa60..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank I register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPICON (S3C64XX_GPI_BASE + 0x00)
-#define S3C64XX_GPIDAT (S3C64XX_GPI_BASE + 0x04)
-#define S3C64XX_GPIPUD (S3C64XX_GPI_BASE + 0x08)
-#define S3C64XX_GPICONSLP (S3C64XX_GPI_BASE + 0x0c)
-#define S3C64XX_GPIPUDSLP (S3C64XX_GPI_BASE + 0x10)
-
-#define S3C64XX_GPI_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPI_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPI_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPI0_VD0 (0x02 << 0)
-#define S3C64XX_GPI1_VD1 (0x02 << 2)
-#define S3C64XX_GPI2_VD2 (0x02 << 4)
-#define S3C64XX_GPI3_VD3 (0x02 << 6)
-#define S3C64XX_GPI4_VD4 (0x02 << 8)
-#define S3C64XX_GPI5_VD5 (0x02 << 10)
-#define S3C64XX_GPI6_VD6 (0x02 << 12)
-#define S3C64XX_GPI7_VD7 (0x02 << 14)
-#define S3C64XX_GPI8_VD8 (0x02 << 16)
-#define S3C64XX_GPI9_VD9 (0x02 << 18)
-#define S3C64XX_GPI10_VD10 (0x02 << 20)
-#define S3C64XX_GPI11_VD11 (0x02 << 22)
-#define S3C64XX_GPI12_VD12 (0x02 << 24)
-#define S3C64XX_GPI13_VD13 (0x02 << 26)
-#define S3C64XX_GPI14_VD14 (0x02 << 28)
-#define S3C64XX_GPI15_VD15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
deleted file mode 100644
index 6f25cd0..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank J register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPJCON (S3C64XX_GPJ_BASE + 0x00)
-#define S3C64XX_GPJDAT (S3C64XX_GPJ_BASE + 0x04)
-#define S3C64XX_GPJPUD (S3C64XX_GPJ_BASE + 0x08)
-#define S3C64XX_GPJCONSLP (S3C64XX_GPJ_BASE + 0x0c)
-#define S3C64XX_GPJPUDSLP (S3C64XX_GPJ_BASE + 0x10)
-
-#define S3C64XX_GPJ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPJ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPJ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPJ0_VD16 (0x02 << 0)
-#define S3C64XX_GPJ1_VD17 (0x02 << 2)
-#define S3C64XX_GPJ2_VD18 (0x02 << 4)
-#define S3C64XX_GPJ3_VD19 (0x02 << 6)
-#define S3C64XX_GPJ4_VD20 (0x02 << 8)
-#define S3C64XX_GPJ5_VD21 (0x02 << 10)
-#define S3C64XX_GPJ6_VD22 (0x02 << 12)
-#define S3C64XX_GPJ7_VD23 (0x02 << 14)
-#define S3C64XX_GPJ8_LCD_HSYNC (0x02 << 16)
-#define S3C64XX_GPJ9_LCD_VSYNC (0x02 << 18)
-#define S3C64XX_GPJ10_LCD_VDEN (0x02 << 20)
-#define S3C64XX_GPJ11_LCD_VCLK (0x02 << 22)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
deleted file mode 100644
index d0aeda1..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank N register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
-#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
-#define S3C64XX_GPNPUD (S3C64XX_GPN_BASE + 0x08)
-
-#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPN_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPN0_EINT0 (0x02 << 0)
-#define S3C64XX_GPN0_KP_ROW0 (0x03 << 0)
-
-#define S3C64XX_GPN1_EINT1 (0x02 << 2)
-#define S3C64XX_GPN1_KP_ROW1 (0x03 << 2)
-
-#define S3C64XX_GPN2_EINT2 (0x02 << 4)
-#define S3C64XX_GPN2_KP_ROW2 (0x03 << 4)
-
-#define S3C64XX_GPN3_EINT3 (0x02 << 6)
-#define S3C64XX_GPN3_KP_ROW3 (0x03 << 6)
-
-#define S3C64XX_GPN4_EINT4 (0x02 << 8)
-#define S3C64XX_GPN4_KP_ROW4 (0x03 << 8)
-
-#define S3C64XX_GPN5_EINT5 (0x02 << 10)
-#define S3C64XX_GPN5_KP_ROW5 (0x03 << 10)
-
-#define S3C64XX_GPN6_EINT6 (0x02 << 12)
-#define S3C64XX_GPN6_KP_ROW6 (0x03 << 12)
-
-#define S3C64XX_GPN7_EINT7 (0x02 << 14)
-#define S3C64XX_GPN7_KP_ROW7 (0x03 << 14)
-
-#define S3C64XX_GPN8_EINT8 (0x02 << 16)
-#define S3C64XX_GPN9_EINT9 (0x02 << 18)
-#define S3C64XX_GPN10_EINT10 (0x02 << 20)
-#define S3C64XX_GPN11_EINT11 (0x02 << 22)
-#define S3C64XX_GPN12_EINT12 (0x02 << 24)
-#define S3C64XX_GPN13_EINT13 (0x02 << 26)
-#define S3C64XX_GPN14_EINT14 (0x02 << 28)
-#define S3C64XX_GPN15_EINT15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
deleted file mode 100644
index 21868fa..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank O register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPOCON (S3C64XX_GPO_BASE + 0x00)
-#define S3C64XX_GPODAT (S3C64XX_GPO_BASE + 0x04)
-#define S3C64XX_GPOPUD (S3C64XX_GPO_BASE + 0x08)
-#define S3C64XX_GPOCONSLP (S3C64XX_GPO_BASE + 0x0c)
-#define S3C64XX_GPOPUDSLP (S3C64XX_GPO_BASE + 0x10)
-
-#define S3C64XX_GPO_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPO_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPO_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPO0_MEM0_nCS2 (0x02 << 0)
-#define S3C64XX_GPO0_EINT_G7_0 (0x03 << 0)
-
-#define S3C64XX_GPO1_MEM0_nCS3 (0x02 << 2)
-#define S3C64XX_GPO1_EINT_G7_1 (0x03 << 2)
-
-#define S3C64XX_GPO2_MEM0_nCS4 (0x02 << 4)
-#define S3C64XX_GPO2_EINT_G7_2 (0x03 << 4)
-
-#define S3C64XX_GPO3_MEM0_nCS5 (0x02 << 6)
-#define S3C64XX_GPO3_EINT_G7_3 (0x03 << 6)
-
-#define S3C64XX_GPO4_EINT_G7_4 (0x03 << 8)
-
-#define S3C64XX_GPO5_EINT_G7_5 (0x03 << 10)
-
-#define S3C64XX_GPO6_MEM0_ADDR6 (0x02 << 12)
-#define S3C64XX_GPO6_EINT_G7_6 (0x03 << 12)
-
-#define S3C64XX_GPO7_MEM0_ADDR7 (0x02 << 14)
-#define S3C64XX_GPO7_EINT_G7_7 (0x03 << 14)
-
-#define S3C64XX_GPO8_MEM0_ADDR8 (0x02 << 16)
-#define S3C64XX_GPO8_EINT_G7_8 (0x03 << 16)
-
-#define S3C64XX_GPO9_MEM0_ADDR9 (0x02 << 18)
-#define S3C64XX_GPO9_EINT_G7_9 (0x03 << 18)
-
-#define S3C64XX_GPO10_MEM0_ADDR10 (0x02 << 20)
-#define S3C64XX_GPO10_EINT_G7_10 (0x03 << 20)
-
-#define S3C64XX_GPO11_MEM0_ADDR11 (0x02 << 22)
-#define S3C64XX_GPO11_EINT_G7_11 (0x03 << 22)
-
-#define S3C64XX_GPO12_MEM0_ADDR12 (0x02 << 24)
-#define S3C64XX_GPO12_EINT_G7_12 (0x03 << 24)
-
-#define S3C64XX_GPO13_MEM0_ADDR13 (0x02 << 26)
-#define S3C64XX_GPO13_EINT_G7_13 (0x03 << 26)
-
-#define S3C64XX_GPO14_MEM0_ADDR14 (0x02 << 28)
-#define S3C64XX_GPO14_EINT_G7_14 (0x03 << 28)
-
-#define S3C64XX_GPO15_MEM0_ADDR15 (0x02 << 30)
-#define S3C64XX_GPO15_EINT_G7_15 (0x03 << 30)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
deleted file mode 100644
index 46bcfb6..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank P register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPPCON (S3C64XX_GPP_BASE + 0x00)
-#define S3C64XX_GPPDAT (S3C64XX_GPP_BASE + 0x04)
-#define S3C64XX_GPPPUD (S3C64XX_GPP_BASE + 0x08)
-#define S3C64XX_GPPCONSLP (S3C64XX_GPP_BASE + 0x0c)
-#define S3C64XX_GPPPUDSLP (S3C64XX_GPP_BASE + 0x10)
-
-#define S3C64XX_GPP_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPP_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPP_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPP0_MEM0_ADDRV (0x02 << 0)
-#define S3C64XX_GPP0_EINT_G8_0 (0x03 << 0)
-
-#define S3C64XX_GPP1_MEM0_SMCLK (0x02 << 2)
-#define S3C64XX_GPP1_EINT_G8_1 (0x03 << 2)
-
-#define S3C64XX_GPP2_MEM0_nWAIT (0x02 << 4)
-#define S3C64XX_GPP2_EINT_G8_2 (0x03 << 4)
-
-#define S3C64XX_GPP3_MEM0_RDY0_ALE (0x02 << 6)
-#define S3C64XX_GPP3_EINT_G8_3 (0x03 << 6)
-
-#define S3C64XX_GPP4_MEM0_RDY1_CLE (0x02 << 8)
-#define S3C64XX_GPP4_EINT_G8_4 (0x03 << 8)
-
-#define S3C64XX_GPP5_MEM0_INTsm0_FWE (0x02 << 10)
-#define S3C64XX_GPP5_EINT_G8_5 (0x03 << 10)
-
-#define S3C64XX_GPP6_MEM0_(null) (0x02 << 12)
-#define S3C64XX_GPP6_EINT_G8_6 (0x03 << 12)
-
-#define S3C64XX_GPP7_MEM0_INTsm1_FRE (0x02 << 14)
-#define S3C64XX_GPP7_EINT_G8_7 (0x03 << 14)
-
-#define S3C64XX_GPP8_MEM0_RPn_RnB (0x02 << 16)
-#define S3C64XX_GPP8_EINT_G8_8 (0x03 << 16)
-
-#define S3C64XX_GPP9_MEM0_ATA_RESET (0x02 << 18)
-#define S3C64XX_GPP9_EINT_G8_9 (0x03 << 18)
-
-#define S3C64XX_GPP10_MEM0_ATA_INPACK (0x02 << 20)
-#define S3C64XX_GPP10_EINT_G8_10 (0x03 << 20)
-
-#define S3C64XX_GPP11_MEM0_ATA_REG (0x02 << 22)
-#define S3C64XX_GPP11_EINT_G8_11 (0x03 << 22)
-
-#define S3C64XX_GPP12_MEM0_ATA_WE (0x02 << 24)
-#define S3C64XX_GPP12_EINT_G8_12 (0x03 << 24)
-
-#define S3C64XX_GPP13_MEM0_ATA_OE (0x02 << 26)
-#define S3C64XX_GPP13_EINT_G8_13 (0x03 << 26)
-
-#define S3C64XX_GPP14_MEM0_ATA_CD (0x02 << 28)
-#define S3C64XX_GPP14_EINT_G8_14 (0x03 << 28)
-
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
deleted file mode 100644
index 1712223..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * GPIO Bank Q register and configuration definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C64XX_GPQCON (S3C64XX_GPQ_BASE + 0x00)
-#define S3C64XX_GPQDAT (S3C64XX_GPQ_BASE + 0x04)
-#define S3C64XX_GPQPUD (S3C64XX_GPQ_BASE + 0x08)
-#define S3C64XX_GPQCONSLP (S3C64XX_GPQ_BASE + 0x0c)
-#define S3C64XX_GPQPUDSLP (S3C64XX_GPQ_BASE + 0x10)
-
-#define S3C64XX_GPQ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
-#define S3C64XX_GPQ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
-#define S3C64XX_GPQ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
-
-#define S3C64XX_GPQ0_MEM0_ADDR18_RAS (0x02 << 0)
-#define S3C64XX_GPQ0_EINT_G9_0 (0x03 << 0)
-
-#define S3C64XX_GPQ1_MEM0_ADDR19_CAS (0x02 << 2)
-#define S3C64XX_GPQ1_EINT_G9_1 (0x03 << 2)
-
-#define S3C64XX_GPQ2_EINT_G9_2 (0x03 << 4)
-
-#define S3C64XX_GPQ3_EINT_G9_3 (0x03 << 6)
-
-#define S3C64XX_GPQ4_EINT_G9_4 (0x03 << 8)
-
-#define S3C64XX_GPQ5_EINT_G9_5 (0x03 << 10)
-
-#define S3C64XX_GPQ6_EINT_G9_6 (0x03 << 12)
-
-#define S3C64XX_GPQ7_MEM0_ADDR17_WENDMC (0x02 << 14)
-#define S3C64XX_GPQ7_EINT_G9_7 (0x03 << 14)
-
-#define S3C64XX_GPQ8_MEM0_ADDR16_APDMC (0x02 << 16)
-#define S3C64XX_GPQ8_EINT_G9_8 (0x03 << 16)
-
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 686a4f2..2c0353a 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -50,7 +50,6 @@
#include <mach/hardware.h>
#include <mach/regs-fb.h>
#include <mach/map.h>
-#include <mach/gpio-bank-f.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 79412f7..bc1c470 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -30,26 +30,18 @@
#include <mach/regs-gpio-memport.h>
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
-#include <mach/gpio-bank-n.h>
-
void s3c_pm_debug_smdkled(u32 set, u32 clear)
{
unsigned long flags;
- u32 reg;
+ int i;
local_irq_save(flags);
- reg = __raw_readl(S3C64XX_GPNCON);
- reg &= ~(S3C64XX_GPN_CONMASK(12) | S3C64XX_GPN_CONMASK(13) |
- S3C64XX_GPN_CONMASK(14) | S3C64XX_GPN_CONMASK(15));
- reg |= S3C64XX_GPN_OUTPUT(12) | S3C64XX_GPN_OUTPUT(13) |
- S3C64XX_GPN_OUTPUT(14) | S3C64XX_GPN_OUTPUT(15);
- __raw_writel(reg, S3C64XX_GPNCON);
-
- reg = __raw_readl(S3C64XX_GPNDAT);
- reg &= ~(clear << 12);
- reg |= set << 12;
- __raw_writel(reg, S3C64XX_GPNDAT);
-
+ for (i = 0; i < 4; i++) {
+ if (clear & (1 << i))
+ gpio_set_value(S3C64XX_GPN(12 + i), 0);
+ if (set & (1 << i))
+ gpio_set_value(S3C64XX_GPN(12 + i), 1);
+ }
local_irq_restore(flags);
}
#endif
@@ -187,6 +179,18 @@ static int s3c64xx_pm_init(void)
pm_cpu_prep = s3c64xx_pm_prepare;
pm_cpu_sleep = s3c64xx_cpu_suspend;
pm_uart_udivslot = 1;
+
+#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+ gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
+ gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
+ gpio_request(S3C64XX_GPN(14), "DEBUG_LED2");
+ gpio_request(S3C64XX_GPN(15), "DEBUG_LED3");
+ gpio_direction_output(S3C64XX_GPN(12), 0);
+ gpio_direction_output(S3C64XX_GPN(13), 0);
+ gpio_direction_output(S3C64XX_GPN(14), 0);
+ gpio_direction_output(S3C64XX_GPN(15), 0);
+#endif
+
return 0;
}
diff --git a/arch/arm/mach-s3c64xx/setup-i2c0.c b/arch/arm/mach-s3c64xx/setup-i2c0.c
index 406192a..241af94 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c0.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c0.c
@@ -18,14 +18,11 @@
struct platform_device; /* don't need the contents */
-#include <mach/gpio-bank-b.h>
#include <plat/iic.h>
#include <plat/gpio-cfg.h>
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S3C64XX_GPB(5), S3C64XX_GPB5_I2C_SCL0);
- s3c_gpio_cfgpin(S3C64XX_GPB(6), S3C64XX_GPB6_I2C_SDA0);
- s3c_gpio_setpull(S3C64XX_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPB(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S3C64XX_GPB(5), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s3c64xx/setup-i2c1.c b/arch/arm/mach-s3c64xx/setup-i2c1.c
index 1ee62c9..3d13a96 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c1.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c1.c
@@ -18,14 +18,11 @@
struct platform_device; /* don't need the contents */
-#include <mach/gpio-bank-b.h>
#include <plat/iic.h>
#include <plat/gpio-cfg.h>
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S3C64XX_GPB(2), S3C64XX_GPB2_I2C_SCL1);
- s3c_gpio_cfgpin(S3C64XX_GPB(3), S3C64XX_GPB3_I2C_SDA1);
- s3c_gpio_setpull(S3C64XX_GPB(2), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S3C64XX_GPB(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S3C64XX_GPB(2), 2,
+ S3C_GPIO_SFN(6), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index afe5a76..1f87732 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -20,7 +20,6 @@
#define S3C64XX_VA_GPIO (0x0)
#include <mach/regs-gpio.h>
-#include <mach/gpio-bank-n.h>
#define LL_UART (S3C_PA_UART + (0x400 * CONFIG_S3C_LOWLEVEL_UART_PORT))
@@ -68,6 +67,13 @@ ENTRY(s3c_cpu_resume)
ldr r2, =LL_UART /* for debug */
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+
+#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
+#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
+
+#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
+#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
+
/* Initialise the GPIO state if we are debugging via the SMDK LEDs,
* as the uboot version supplied resets these to inputs during the
* resume checks.
diff --git a/arch/arm/mach-s5p6442/Kconfig b/arch/arm/mach-s5p6442/Kconfig
deleted file mode 100644
index 33569e4..0000000
--- a/arch/arm/mach-s5p6442/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
-# arch/arm/mach-s5p6442/Kconfig
-#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-# Configuration options for the S5P6442
-
-if ARCH_S5P6442
-
-config CPU_S5P6442
- bool
- select S3C_PL330_DMA
- help
- Enable S5P6442 CPU support
-
-config MACH_SMDK6442
- bool "SMDK6442"
- select CPU_S5P6442
- select S3C_DEV_WDT
- help
- Machine support for Samsung SMDK6442
-
-endif
diff --git a/arch/arm/mach-s5p6442/Makefile b/arch/arm/mach-s5p6442/Makefile
deleted file mode 100644
index 90a3d83..0000000
--- a/arch/arm/mach-s5p6442/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# arch/arm/mach-s5p6442/Makefile
-#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n :=
-obj- :=
-
-# Core support for S5P6442 system
-
-obj-$(CONFIG_CPU_S5P6442) += cpu.o init.o clock.o dma.o
-obj-$(CONFIG_CPU_S5P6442) += setup-i2c0.o
-
-# machine support
-
-obj-$(CONFIG_MACH_SMDK6442) += mach-smdk6442.o
-
-# device support
-obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
diff --git a/arch/arm/mach-s5p6442/Makefile.boot b/arch/arm/mach-s5p6442/Makefile.boot
deleted file mode 100644
index ff90aa1..0000000
--- a/arch/arm/mach-s5p6442/Makefile.boot
+++ /dev/null
@@ -1,2 +0,0 @@
- zreladdr-y := 0x20008000
-params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
deleted file mode 100644
index fbbc7be..0000000
--- a/arch/arm/mach-s5p6442/clock.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/clock.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Clock support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/map.h>
-
-#include <plat/cpu-freq.h>
-#include <mach/regs-clock.h>
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-#include <plat/s5p-clock.h>
-#include <plat/clock-clksrc.h>
-#include <plat/s5p6442.h>
-
-static struct clksrc_clk clk_mout_apll = {
- .clk = {
- .name = "mout_apll",
- .id = -1,
- },
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
-};
-
-static struct clksrc_clk clk_mout_mpll = {
- .clk = {
- .name = "mout_mpll",
- .id = -1,
- },
- .sources = &clk_src_mpll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
-};
-
-static struct clksrc_clk clk_mout_epll = {
- .clk = {
- .name = "mout_epll",
- .id = -1,
- },
- .sources = &clk_src_epll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
-};
-
-/* Possible clock sources for ARM Mux */
-static struct clk *clk_src_arm_list[] = {
- [1] = &clk_mout_apll.clk,
- [2] = &clk_mout_mpll.clk,
-};
-
-static struct clksrc_sources clk_src_arm = {
- .sources = clk_src_arm_list,
- .nr_sources = ARRAY_SIZE(clk_src_arm_list),
-};
-
-static struct clksrc_clk clk_mout_arm = {
- .clk = {
- .name = "mout_arm",
- .id = -1,
- },
- .sources = &clk_src_arm,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
-};
-
-static struct clk clk_dout_a2m = {
- .name = "dout_a2m",
- .id = -1,
- .parent = &clk_mout_apll.clk,
-};
-
-/* Possible clock sources for D0 Mux */
-static struct clk *clk_src_d0_list[] = {
- [1] = &clk_mout_mpll.clk,
- [2] = &clk_dout_a2m,
-};
-
-static struct clksrc_sources clk_src_d0 = {
- .sources = clk_src_d0_list,
- .nr_sources = ARRAY_SIZE(clk_src_d0_list),
-};
-
-static struct clksrc_clk clk_mout_d0 = {
- .clk = {
- .name = "mout_d0",
- .id = -1,
- },
- .sources = &clk_src_d0,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 20, .size = 3 },
-};
-
-static struct clk clk_dout_apll = {
- .name = "dout_apll",
- .id = -1,
- .parent = &clk_mout_arm.clk,
-};
-
-/* Possible clock sources for D0SYNC Mux */
-static struct clk *clk_src_d0sync_list[] = {
- [1] = &clk_mout_d0.clk,
- [2] = &clk_dout_apll,
-};
-
-static struct clksrc_sources clk_src_d0sync = {
- .sources = clk_src_d0sync_list,
- .nr_sources = ARRAY_SIZE(clk_src_d0sync_list),
-};
-
-static struct clksrc_clk clk_mout_d0sync = {
- .clk = {
- .name = "mout_d0sync",
- .id = -1,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
-};
-
-/* Possible clock sources for D1 Mux */
-static struct clk *clk_src_d1_list[] = {
- [1] = &clk_mout_mpll.clk,
- [2] = &clk_dout_a2m,
-};
-
-static struct clksrc_sources clk_src_d1 = {
- .sources = clk_src_d1_list,
- .nr_sources = ARRAY_SIZE(clk_src_d1_list),
-};
-
-static struct clksrc_clk clk_mout_d1 = {
- .clk = {
- .name = "mout_d1",
- .id = -1,
- },
- .sources = &clk_src_d1,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 24, .size = 3 },
-};
-
-/* Possible clock sources for D1SYNC Mux */
-static struct clk *clk_src_d1sync_list[] = {
- [1] = &clk_mout_d1.clk,
- [2] = &clk_dout_apll,
-};
-
-static struct clksrc_sources clk_src_d1sync = {
- .sources = clk_src_d1sync_list,
- .nr_sources = ARRAY_SIZE(clk_src_d1sync_list),
-};
-
-static struct clksrc_clk clk_mout_d1sync = {
- .clk = {
- .name = "mout_d1sync",
- .id = -1,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
-};
-
-static struct clk clk_hclkd0 = {
- .name = "hclkd0",
- .id = -1,
- .parent = &clk_mout_d0sync.clk,
-};
-
-static struct clk clk_hclkd1 = {
- .name = "hclkd1",
- .id = -1,
- .parent = &clk_mout_d1sync.clk,
-};
-
-static struct clk clk_pclkd0 = {
- .name = "pclkd0",
- .id = -1,
- .parent = &clk_hclkd0,
-};
-
-static struct clk clk_pclkd1 = {
- .name = "pclkd1",
- .id = -1,
- .parent = &clk_hclkd1,
-};
-
-int s5p6442_clk_ip0_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
-}
-
-int s5p6442_clk_ip3_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
-}
-
-static struct clksrc_clk clksrcs[] = {
- {
- .clk = {
- .name = "dout_a2m",
- .id = -1,
- .parent = &clk_mout_apll.clk,
- },
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
- }, {
- .clk = {
- .name = "dout_apll",
- .id = -1,
- .parent = &clk_mout_arm.clk,
- },
- .sources = &clk_src_arm,
- .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 },
- }, {
- .clk = {
- .name = "hclkd1",
- .id = -1,
- .parent = &clk_mout_d1sync.clk,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 },
- }, {
- .clk = {
- .name = "hclkd0",
- .id = -1,
- .parent = &clk_mout_d0sync.clk,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "pclkd0",
- .id = -1,
- .parent = &clk_hclkd0,
- },
- .sources = &clk_src_d0sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
- }, {
- .clk = {
- .name = "pclkd1",
- .id = -1,
- .parent = &clk_hclkd1,
- },
- .sources = &clk_src_d1sync,
- .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
- }
-};
-
-/* Clock initialisation code */
-static struct clksrc_clk *init_parents[] = {
- &clk_mout_apll,
- &clk_mout_mpll,
- &clk_mout_epll,
- &clk_mout_arm,
- &clk_mout_d0,
- &clk_mout_d0sync,
- &clk_mout_d1,
- &clk_mout_d1sync,
-};
-
-void __init_or_cpufreq s5p6442_setup_clocks(void)
-{
- struct clk *pclkd0_clk;
- struct clk *pclkd1_clk;
-
- unsigned long xtal;
- unsigned long arm;
- unsigned long hclkd0 = 0;
- unsigned long hclkd1 = 0;
- unsigned long pclkd0 = 0;
- unsigned long pclkd1 = 0;
-
- unsigned long apll;
- unsigned long mpll;
- unsigned long epll;
- unsigned int ptr;
-
- printk(KERN_DEBUG "%s: registering clocks\n", __func__);
-
- xtal = clk_get_rate(&clk_xtal);
-
- printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
-
- apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
- mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
- epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
-
- printk(KERN_INFO "S5P6442: PLL settings, A=%ld, M=%ld, E=%ld",
- apll, mpll, epll);
-
- clk_fout_apll.rate = apll;
- clk_fout_mpll.rate = mpll;
- clk_fout_epll.rate = epll;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
-
- for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
- s3c_set_clksrc(&clksrcs[ptr], true);
-
- arm = clk_get_rate(&clk_dout_apll);
- hclkd0 = clk_get_rate(&clk_hclkd0);
- hclkd1 = clk_get_rate(&clk_hclkd1);
-
- pclkd0_clk = clk_get(NULL, "pclkd0");
- BUG_ON(IS_ERR(pclkd0_clk));
-
- pclkd0 = clk_get_rate(pclkd0_clk);
- clk_put(pclkd0_clk);
-
- pclkd1_clk = clk_get(NULL, "pclkd1");
- BUG_ON(IS_ERR(pclkd1_clk));
-
- pclkd1 = clk_get_rate(pclkd1_clk);
- clk_put(pclkd1_clk);
-
- printk(KERN_INFO "S5P6442: HCLKD0=%ld, HCLKD1=%ld, PCLKD0=%ld, PCLKD1=%ld\n",
- hclkd0, hclkd1, pclkd0, pclkd1);
-
- /* For backward compatibility */
- clk_f.rate = arm;
- clk_h.rate = hclkd1;
- clk_p.rate = pclkd1;
-
- clk_pclkd0.rate = pclkd0;
- clk_pclkd1.rate = pclkd1;
-}
-
-static struct clk init_clocks_off[] = {
- {
- .name = "pdma",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip0_ctrl,
- .ctrlbit = (1 << 3),
- },
-};
-
-static struct clk init_clocks[] = {
- {
- .name = "systimer",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<16),
- }, {
- .name = "uart",
- .id = 0,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<17),
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<18),
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<19),
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1 << 22),
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_pclkd1,
- .enable = s5p6442_clk_ip3_ctrl,
- .ctrlbit = (1<<23),
- },
-};
-
-static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_mout_apll.clk,
- &clk_mout_mpll.clk,
- &clk_mout_epll.clk,
- &clk_mout_d0.clk,
- &clk_mout_d0sync.clk,
- &clk_mout_d1.clk,
- &clk_mout_d1sync.clk,
- &clk_hclkd0,
- &clk_pclkd0,
- &clk_hclkd1,
- &clk_pclkd1,
-};
-
-void __init s5p6442_register_clocks(void)
-{
- s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-
- s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
- s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
-
- s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
- s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
-
- s3c_pwmclk_init();
-}
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
deleted file mode 100644
index 842af86..0000000
--- a/arch/arm/mach-s5p6442/cpu.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <asm/proc-fns.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <asm/irq.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-#include <plat/s5p6442.h>
-
-/* Initial IO mappings */
-
-static struct map_desc s5p6442_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_SYSTIMER,
- .pfn = __phys_to_pfn(S5P6442_PA_SYSTIMER),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GPIO,
- .pfn = __phys_to_pfn(S5P6442_PA_GPIO),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC0,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC0),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC1,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC1),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC2,
- .pfn = __phys_to_pfn(S5P6442_PA_VIC2),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(S3C_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }
-};
-
-static void s5p6442_idle(void)
-{
- if (!need_resched())
- cpu_do_idle();
-
- local_irq_enable();
-}
-
-/*
- * s5p6442_map_io
- *
- * register the standard cpu IO areas
- */
-
-void __init s5p6442_map_io(void)
-{
- iotable_init(s5p6442_iodesc, ARRAY_SIZE(s5p6442_iodesc));
-}
-
-void __init s5p6442_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
- s3c24xx_register_baseclocks(xtal);
- s5p_register_clocks(xtal);
- s5p6442_register_clocks();
- s5p6442_setup_clocks();
-}
-
-void __init s5p6442_init_irq(void)
-{
- /* S5P6442 supports 3 VIC */
- u32 vic[3];
-
- /* VIC0, VIC1, and VIC2: some interrupt reserved */
- vic[0] = 0x7fefffff;
- vic[1] = 0X7f389c81;
- vic[2] = 0X1bbbcfff;
-
- s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-struct sysdev_class s5p6442_sysclass = {
- .name = "s5p6442-core",
-};
-
-static struct sys_device s5p6442_sysdev = {
- .cls = &s5p6442_sysclass,
-};
-
-static int __init s5p6442_core_init(void)
-{
- return sysdev_class_register(&s5p6442_sysclass);
-}
-
-core_initcall(s5p6442_core_init);
-
-int __init s5p6442_init(void)
-{
- printk(KERN_INFO "S5P6442: Initializing architecture\n");
-
- /* set idle function */
- pm_idle = s5p6442_idle;
-
- return sysdev_register(&s5p6442_sysdev);
-}
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
deleted file mode 100644
index 8719dc4..0000000
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/dev-audio.c
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <plat/gpio-cfg.h>
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-#include <mach/irqs.h>
-
-static int s5p6442_cfg_i2s(struct platform_device *pdev)
-{
- unsigned int base;
-
- /* configure GPIO for i2s port */
- switch (pdev->id) {
- case 1:
- base = S5P6442_GPC1(0);
- break;
-
- case 0:
- base = S5P6442_GPC0(0);
- break;
-
- default:
- printk(KERN_ERR "Invalid Device %d\n", pdev->id);
- return -EINVAL;
- }
-
- s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));
- return 0;
-}
-
-static const char *rclksrc_v35[] = {
- [0] = "busclk",
- [1] = "i2sclk",
-};
-
-static struct s3c_audio_pdata i2sv35_pdata = {
- .cfg_gpio = s5p6442_cfg_i2s,
- .type = {
- .i2s = {
- .quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
- .src_clk = rclksrc_v35,
- },
- },
-};
-
-static struct resource s5p6442_iis0_resource[] = {
- [0] = {
- .start = S5P6442_PA_I2S0,
- .end = S5P6442_PA_I2S0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_I2S0_TX,
- .end = DMACH_I2S0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_I2S0_RX,
- .end = DMACH_I2S0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = DMACH_I2S0S_TX,
- .end = DMACH_I2S0S_TX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_iis0 = {
- .name = "samsung-i2s",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_iis0_resource),
- .resource = s5p6442_iis0_resource,
- .dev = {
- .platform_data = &i2sv35_pdata,
- },
-};
-
-static const char *rclksrc_v3[] = {
- [0] = "iis",
- [1] = "sclk_audio",
-};
-
-static struct s3c_audio_pdata i2sv3_pdata = {
- .cfg_gpio = s5p6442_cfg_i2s,
- .type = {
- .i2s = {
- .src_clk = rclksrc_v3,
- },
- },
-};
-
-static struct resource s5p6442_iis1_resource[] = {
- [0] = {
- .start = S5P6442_PA_I2S1,
- .end = S5P6442_PA_I2S1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_I2S1_TX,
- .end = DMACH_I2S1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_I2S1_RX,
- .end = DMACH_I2S1_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_iis1 = {
- .name = "samsung-i2s",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5p6442_iis1_resource),
- .resource = s5p6442_iis1_resource,
- .dev = {
- .platform_data = &i2sv3_pdata,
- },
-};
-
-/* PCM Controller platform_devices */
-
-static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S5P6442_GPC0(0);
- break;
-
- case 1:
- base = S5P6442_GPC1(0);
- break;
-
- default:
- printk(KERN_DEBUG "Invalid PCM Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));
- return 0;
-}
-
-static struct s3c_audio_pdata s3c_pcm_pdata = {
- .cfg_gpio = s5p6442_pcm_cfg_gpio,
-};
-
-static struct resource s5p6442_pcm0_resource[] = {
- [0] = {
- .start = S5P6442_PA_PCM0,
- .end = S5P6442_PA_PCM0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_PCM0_TX,
- .end = DMACH_PCM0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_PCM0_RX,
- .end = DMACH_PCM0_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_pcm0 = {
- .name = "samsung-pcm",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_pcm0_resource),
- .resource = s5p6442_pcm0_resource,
- .dev = {
- .platform_data = &s3c_pcm_pdata,
- },
-};
-
-static struct resource s5p6442_pcm1_resource[] = {
- [0] = {
- .start = S5P6442_PA_PCM1,
- .end = S5P6442_PA_PCM1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_PCM1_TX,
- .end = DMACH_PCM1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_PCM1_RX,
- .end = DMACH_PCM1_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device s5p6442_device_pcm1 = {
- .name = "samsung-pcm",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5p6442_pcm1_resource),
- .resource = s5p6442_pcm1_resource,
- .dev = {
- .platform_data = &s3c_pcm_pdata,
- },
-};
diff --git a/arch/arm/mach-s5p6442/dev-spi.c b/arch/arm/mach-s5p6442/dev-spi.c
deleted file mode 100644
index cce8c24..0000000
--- a/arch/arm/mach-s5p6442/dev-spi.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/dev-spi.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/irqs.h>
-#include <mach/spi-clocks.h>
-
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-
-static char *spi_src_clks[] = {
- [S5P6442_SPI_SRCCLK_PCLK] = "pclk",
- [S5P6442_SPI_SRCCLK_SCLK] = "spi_epll",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the CS.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s5p6442_spi_cfg_gpio(struct platform_device *pdev)
-{
- switch (pdev->id) {
- case 0:
- s3c_gpio_cfgpin(S5P6442_GPB(0), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6442_GPB(0), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgall_range(S5P6442_GPB(2), 2,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct resource s5p6442_spi0_resource[] = {
- [0] = {
- .start = S5P6442_PA_SPI,
- .end = S5P6442_PA_SPI + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5p6442_spi0_pdata = {
- .cfg_gpio = s5p6442_spi_cfg_gpio,
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s5p6442_device_spi = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p6442_spi0_resource),
- .resource = s5p6442_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5p6442_spi0_pdata,
- },
-};
-
-void __init s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S5P6442_SPI_SRCCLK_SCLK) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- pd = &s5p6442_spi0_pdata;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = spi_src_clks[src_clk_nr];
-}
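The setter above is the hook a board file would use before registering the SPI controller; its checks accept only controller 0, a clock index no higher than S5P6442_SPI_SRCCLK_SCLK, and a non-zero chip-select count. A hypothetical board-side call sketched from that validation logic (the function name is made up for illustration):

static void __init board_spi_init_sketch(void)
{
	/* one emulated chip-select, clocked from PCLK */
	s5p6442_spi_set_info(0, S5P6442_SPI_SRCCLK_PCLK, 1);
	platform_device_register(&s5p6442_device_spi);
}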
diff --git a/arch/arm/mach-s5p6442/dma.c b/arch/arm/mach-s5p6442/dma.c
deleted file mode 100644
index 7dfb136..0000000
--- a/arch/arm/mach-s5p6442/dma.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-
-#include <plat/devs.h>
-#include <plat/irqs.h>
-
-#include <mach/map.h>
-#include <mach/irqs.h>
-
-#include <plat/s3c-pl330-pdata.h>
-
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
-static struct resource s5p6442_pdma_resource[] = {
- [0] = {
- .start = S5P6442_PA_PDMA,
- .end = S5P6442_PA_PDMA + SZ_4K,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PDMA,
- .end = IRQ_PDMA,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c_pl330_platdata s5p6442_pdma_pdata = {
- .peri = {
- [0] = DMACH_UART0_RX,
- [1] = DMACH_UART0_TX,
- [2] = DMACH_UART1_RX,
- [3] = DMACH_UART1_TX,
- [4] = DMACH_UART2_RX,
- [5] = DMACH_UART2_TX,
- [6] = DMACH_MAX,
- [7] = DMACH_MAX,
- [8] = DMACH_MAX,
- [9] = DMACH_I2S0_RX,
- [10] = DMACH_I2S0_TX,
- [11] = DMACH_I2S0S_TX,
- [12] = DMACH_I2S1_RX,
- [13] = DMACH_I2S1_TX,
- [14] = DMACH_MAX,
- [15] = DMACH_MAX,
- [16] = DMACH_SPI0_RX,
- [17] = DMACH_SPI0_TX,
- [18] = DMACH_MAX,
- [19] = DMACH_MAX,
- [20] = DMACH_PCM0_RX,
- [21] = DMACH_PCM0_TX,
- [22] = DMACH_PCM1_RX,
- [23] = DMACH_PCM1_TX,
- [24] = DMACH_MAX,
- [25] = DMACH_MAX,
- [26] = DMACH_MAX,
- [27] = DMACH_MSM_REQ0,
- [28] = DMACH_MSM_REQ1,
- [29] = DMACH_MSM_REQ2,
- [30] = DMACH_MSM_REQ3,
- [31] = DMACH_MAX,
- },
-};
-
-static struct platform_device s5p6442_device_pdma = {
- .name = "s3c-pl330",
- .id = -1,
- .num_resources = ARRAY_SIZE(s5p6442_pdma_resource),
- .resource = s5p6442_pdma_resource,
- .dev = {
- .dma_mask = &dma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5p6442_pdma_pdata,
- },
-};
-
-static struct platform_device *s5p6442_dmacs[] __initdata = {
- &s5p6442_device_pdma,
-};
-
-static int __init s5p6442_dma_init(void)
-{
- platform_add_devices(s5p6442_dmacs, ARRAY_SIZE(s5p6442_dmacs));
-
- return 0;
-}
-arch_initcall(s5p6442_dma_init);
diff --git a/arch/arm/mach-s5p6442/include/mach/debug-macro.S b/arch/arm/mach-s5p6442/include/mach/debug-macro.S
deleted file mode 100644
index e2213205..0000000
--- a/arch/arm/mach-s5p6442/include/mach/debug-macro.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/debug-macro.S
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s3c6400/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* pull in the relevant register and map files. */
-
-#include <mach/map.h>
-#include <plat/regs-serial.h>
-
- .macro addruart, rp, rv
- ldr \rp, = S3C_PA_UART
- ldr \rv, = S3C_VA_UART
-#if CONFIG_DEBUG_S3C_UART != 0
- add \rp, \rp, #(0x400 * CONFIG_DEBUG_S3C_UART)
- add \rv, \rv, #(0x400 * CONFIG_DEBUG_S3C_UART)
-#endif
- .endm
-
-#define fifo_full fifo_full_s5pv210
-#define fifo_level fifo_level_s5pv210
-
-/* include the rest of the code which will do the work, we're only
- * compiling for a single cpu processor type so the default of s3c2440
- * will be fine with us.
- */
-
-#include <plat/debug-macro.S>
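As a worked example of the addruart offsets above, assuming CONFIG_DEBUG_S3C_UART is set to 2:

/*	rp = S3C_PA_UART + 0x800	@ physical base of the third UART
 *	rv = S3C_VA_UART + 0x800	@ virtual base of the third UART
 * i.e. the same 0x400-per-port spacing used for LL_UART in the sleep.S
 * hunk earlier in this diff.
 */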
diff --git a/arch/arm/mach-s5p6442/include/mach/dma.h b/arch/arm/mach-s5p6442/include/mach/dma.h
deleted file mode 100644
index 81209eb..0000000
--- a/arch/arm/mach-s5p6442/include/mach/dma.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MACH_DMA_H
-#define __MACH_DMA_H
-
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
-
-#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/entry-macro.S b/arch/arm/mach-s5p6442/include/mach/entry-macro.S
deleted file mode 100644
index 6d574ed..0000000
--- a/arch/arm/mach-s5p6442/include/mach/entry-macro.S
+++ /dev/null
@@ -1,48 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/entry-macro.S
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Low-level IRQ helper macros for the Samsung S5P6442
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =VA_VIC0
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, # S5P_IRQ_OFFSET + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- @ otherwise try vic2
- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
- .endm
diff --git a/arch/arm/mach-s5p6442/include/mach/gpio.h b/arch/arm/mach-s5p6442/include/mach/gpio.h
deleted file mode 100644
index b8715df..0000000
--- a/arch/arm/mach-s5p6442/include/mach/gpio.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/gpio.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - GPIO lib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_GPIO_H
-#define __ASM_ARCH_GPIO_H __FILE__
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-/* GPIO bank sizes */
-#define S5P6442_GPIO_A0_NR (8)
-#define S5P6442_GPIO_A1_NR (2)
-#define S5P6442_GPIO_B_NR (4)
-#define S5P6442_GPIO_C0_NR (5)
-#define S5P6442_GPIO_C1_NR (5)
-#define S5P6442_GPIO_D0_NR (2)
-#define S5P6442_GPIO_D1_NR (6)
-#define S5P6442_GPIO_E0_NR (8)
-#define S5P6442_GPIO_E1_NR (5)
-#define S5P6442_GPIO_F0_NR (8)
-#define S5P6442_GPIO_F1_NR (8)
-#define S5P6442_GPIO_F2_NR (8)
-#define S5P6442_GPIO_F3_NR (6)
-#define S5P6442_GPIO_G0_NR (7)
-#define S5P6442_GPIO_G1_NR (7)
-#define S5P6442_GPIO_G2_NR (7)
-#define S5P6442_GPIO_H0_NR (8)
-#define S5P6442_GPIO_H1_NR (8)
-#define S5P6442_GPIO_H2_NR (8)
-#define S5P6442_GPIO_H3_NR (8)
-#define S5P6442_GPIO_J0_NR (8)
-#define S5P6442_GPIO_J1_NR (6)
-#define S5P6442_GPIO_J2_NR (8)
-#define S5P6442_GPIO_J3_NR (8)
-#define S5P6442_GPIO_J4_NR (5)
-
-/* GPIO bank numbers */
-
-/* CONFIG_S3C_GPIO_SPACE allows the user to select extra
- * space for debugging purposes so that any accidental
- * change from one gpio bank to another can be caught.
-*/
-
-#define S5P6442_GPIO_NEXT(__gpio) \
- ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
-
-enum s5p_gpio_number {
- S5P6442_GPIO_A0_START = 0,
- S5P6442_GPIO_A1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A0),
- S5P6442_GPIO_B_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A1),
- S5P6442_GPIO_C0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_B),
- S5P6442_GPIO_C1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C0),
- S5P6442_GPIO_D0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C1),
- S5P6442_GPIO_D1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D0),
- S5P6442_GPIO_E0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D1),
- S5P6442_GPIO_E1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E0),
- S5P6442_GPIO_F0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E1),
- S5P6442_GPIO_F1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F0),
- S5P6442_GPIO_F2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F1),
- S5P6442_GPIO_F3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F2),
- S5P6442_GPIO_G0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F3),
- S5P6442_GPIO_G1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G0),
- S5P6442_GPIO_G2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G1),
- S5P6442_GPIO_H0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G2),
- S5P6442_GPIO_H1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H0),
- S5P6442_GPIO_H2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H1),
- S5P6442_GPIO_H3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H2),
- S5P6442_GPIO_J0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H3),
- S5P6442_GPIO_J1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J0),
- S5P6442_GPIO_J2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J1),
- S5P6442_GPIO_J3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J2),
- S5P6442_GPIO_J4_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J3),
-};
-
-/* S5P6442 GPIO number definitions. */
-#define S5P6442_GPA0(_nr) (S5P6442_GPIO_A0_START + (_nr))
-#define S5P6442_GPA1(_nr) (S5P6442_GPIO_A1_START + (_nr))
-#define S5P6442_GPB(_nr) (S5P6442_GPIO_B_START + (_nr))
-#define S5P6442_GPC0(_nr) (S5P6442_GPIO_C0_START + (_nr))
-#define S5P6442_GPC1(_nr) (S5P6442_GPIO_C1_START + (_nr))
-#define S5P6442_GPD0(_nr) (S5P6442_GPIO_D0_START + (_nr))
-#define S5P6442_GPD1(_nr) (S5P6442_GPIO_D1_START + (_nr))
-#define S5P6442_GPE0(_nr) (S5P6442_GPIO_E0_START + (_nr))
-#define S5P6442_GPE1(_nr) (S5P6442_GPIO_E1_START + (_nr))
-#define S5P6442_GPF0(_nr) (S5P6442_GPIO_F0_START + (_nr))
-#define S5P6442_GPF1(_nr) (S5P6442_GPIO_F1_START + (_nr))
-#define S5P6442_GPF2(_nr) (S5P6442_GPIO_F2_START + (_nr))
-#define S5P6442_GPF3(_nr) (S5P6442_GPIO_F3_START + (_nr))
-#define S5P6442_GPG0(_nr) (S5P6442_GPIO_G0_START + (_nr))
-#define S5P6442_GPG1(_nr) (S5P6442_GPIO_G1_START + (_nr))
-#define S5P6442_GPG2(_nr) (S5P6442_GPIO_G2_START + (_nr))
-#define S5P6442_GPH0(_nr) (S5P6442_GPIO_H0_START + (_nr))
-#define S5P6442_GPH1(_nr) (S5P6442_GPIO_H1_START + (_nr))
-#define S5P6442_GPH2(_nr) (S5P6442_GPIO_H2_START + (_nr))
-#define S5P6442_GPH3(_nr) (S5P6442_GPIO_H3_START + (_nr))
-#define S5P6442_GPJ0(_nr) (S5P6442_GPIO_J0_START + (_nr))
-#define S5P6442_GPJ1(_nr) (S5P6442_GPIO_J1_START + (_nr))
-#define S5P6442_GPJ2(_nr) (S5P6442_GPIO_J2_START + (_nr))
-#define S5P6442_GPJ3(_nr) (S5P6442_GPIO_J3_START + (_nr))
-#define S5P6442_GPJ4(_nr) (S5P6442_GPIO_J4_START + (_nr))
-
-/* the end of the S5P6442 specific gpios */
-#define S5P6442_GPIO_END (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + 1)
-#define S3C_GPIO_END S5P6442_GPIO_END
-
-/* define the number of gpios we need to the one after the GPJ4() range */
-#define ARCH_NR_GPIOS (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + \
- CONFIG_SAMSUNG_GPIO_EXTRA + 1)
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_ARCH_GPIO_H */
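To make the numbering scheme above concrete, here is what it produces for the first few banks; CONFIG_S3C_GPIO_SPACE = 0 is an assumption (it is the usual default):

/*	S5P6442_GPIO_A0_START = 0
 *	S5P6442_GPIO_A1_START = 0  + 8 (A0_NR) + 0 + 1 = 9
 *	S5P6442_GPIO_B_START  = 9  + 2 (A1_NR) + 0 + 1 = 12
 *	S5P6442_GPIO_C0_START = 12 + 4 (B_NR)  + 0 + 1 = 17
 *
 * so S5P6442_GPB(3) evaluates to gpio number 15, and the extra "+1" per
 * bank leaves one unused number between banks so an accidental overrun
 * into the next bank can be caught, as the comment above describes.
 */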
diff --git a/arch/arm/mach-s5p6442/include/mach/hardware.h b/arch/arm/mach-s5p6442/include/mach/hardware.h
deleted file mode 100644
index 8cd7b67..0000000
--- a/arch/arm/mach-s5p6442/include/mach/hardware.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/hardware.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Hardware support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H __FILE__
-
-/* currently nothing here, placeholder */
-
-#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/io.h b/arch/arm/mach-s5p6442/include/mach/io.h
deleted file mode 100644
index 5d2195a..0000000
--- a/arch/arm/mach-s5p6442/include/mach/io.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/io.h
- *
- * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S5P6442
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h
deleted file mode 100644
index 3fbc6c3..0000000
--- a/arch/arm/mach-s5p6442/include/mach/irqs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/irqs.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - IRQ definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H __FILE__
-
-#include <plat/irqs.h>
-
-/* VIC0 */
-#define IRQ_EINT16_31 S5P_IRQ_VIC0(16)
-#define IRQ_BATF S5P_IRQ_VIC0(17)
-#define IRQ_MDMA S5P_IRQ_VIC0(18)
-#define IRQ_PDMA S5P_IRQ_VIC0(19)
-#define IRQ_TIMER0_VIC S5P_IRQ_VIC0(21)
-#define IRQ_TIMER1_VIC S5P_IRQ_VIC0(22)
-#define IRQ_TIMER2_VIC S5P_IRQ_VIC0(23)
-#define IRQ_TIMER3_VIC S5P_IRQ_VIC0(24)
-#define IRQ_TIMER4_VIC S5P_IRQ_VIC0(25)
-#define IRQ_SYSTIMER S5P_IRQ_VIC0(26)
-#define IRQ_WDT S5P_IRQ_VIC0(27)
-#define IRQ_RTC_ALARM S5P_IRQ_VIC0(28)
-#define IRQ_RTC_TIC S5P_IRQ_VIC0(29)
-#define IRQ_GPIOINT S5P_IRQ_VIC0(30)
-
-/* VIC1 */
-#define IRQ_PMU S5P_IRQ_VIC1(0)
-#define IRQ_ONENAND S5P_IRQ_VIC1(7)
-#define IRQ_UART0 S5P_IRQ_VIC1(10)
-#define IRQ_UART1 S5P_IRQ_VIC1(11)
-#define IRQ_UART2 S5P_IRQ_VIC1(12)
-#define IRQ_SPI0 S5P_IRQ_VIC1(15)
-#define IRQ_IIC S5P_IRQ_VIC1(19)
-#define IRQ_IIC1 S5P_IRQ_VIC1(20)
-#define IRQ_IIC2 S5P_IRQ_VIC1(21)
-#define IRQ_OTG S5P_IRQ_VIC1(24)
-#define IRQ_MSM S5P_IRQ_VIC1(25)
-#define IRQ_HSMMC0 S5P_IRQ_VIC1(26)
-#define IRQ_HSMMC1 S5P_IRQ_VIC1(27)
-#define IRQ_HSMMC2 S5P_IRQ_VIC1(28)
-#define IRQ_COMMRX S5P_IRQ_VIC1(29)
-#define IRQ_COMMTX S5P_IRQ_VIC1(30)
-
-/* VIC2 */
-#define IRQ_LCD0 S5P_IRQ_VIC2(0)
-#define IRQ_LCD1 S5P_IRQ_VIC2(1)
-#define IRQ_LCD2 S5P_IRQ_VIC2(2)
-#define IRQ_LCD3 S5P_IRQ_VIC2(3)
-#define IRQ_ROTATOR S5P_IRQ_VIC2(4)
-#define IRQ_FIMC0 S5P_IRQ_VIC2(5)
-#define IRQ_FIMC1 S5P_IRQ_VIC2(6)
-#define IRQ_FIMC2 S5P_IRQ_VIC2(7)
-#define IRQ_JPEG S5P_IRQ_VIC2(8)
-#define IRQ_3D S5P_IRQ_VIC2(10)
-#define IRQ_Mixer S5P_IRQ_VIC2(11)
-#define IRQ_MFC S5P_IRQ_VIC2(14)
-#define IRQ_TVENC S5P_IRQ_VIC2(15)
-#define IRQ_I2S0 S5P_IRQ_VIC2(16)
-#define IRQ_I2S1 S5P_IRQ_VIC2(17)
-#define IRQ_RP S5P_IRQ_VIC2(19)
-#define IRQ_PCM0 S5P_IRQ_VIC2(20)
-#define IRQ_PCM1 S5P_IRQ_VIC2(21)
-#define IRQ_ADC S5P_IRQ_VIC2(23)
-#define IRQ_PENDN S5P_IRQ_VIC2(24)
-#define IRQ_KEYPAD S5P_IRQ_VIC2(25)
-#define IRQ_SSS_INT S5P_IRQ_VIC2(27)
-#define IRQ_SSS_HASH S5P_IRQ_VIC2(28)
-#define IRQ_VIC_END S5P_IRQ_VIC2(31)
-
-#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
-
-#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
-#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE)
-
-/* Set the default NR_IRQS */
-
-#define NR_IRQS (IRQ_EINT(31) + 1)
-
-#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
deleted file mode 100644
index 058dab4..0000000
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/map.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Memory map definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MAP_H
-#define __ASM_ARCH_MAP_H __FILE__
-
-#include <plat/map-base.h>
-#include <plat/map-s5p.h>
-
-#define S5P6442_PA_SDRAM 0x20000000
-
-#define S5P6442_PA_I2S0 0xC0B00000
-#define S5P6442_PA_I2S1 0xF2200000
-
-#define S5P6442_PA_CHIPID 0xE0000000
-
-#define S5P6442_PA_SYSCON 0xE0100000
-
-#define S5P6442_PA_GPIO 0xE0200000
-
-#define S5P6442_PA_VIC0 0xE4000000
-#define S5P6442_PA_VIC1 0xE4100000
-#define S5P6442_PA_VIC2 0xE4200000
-
-#define S5P6442_PA_SROMC 0xE7000000
-
-#define S5P6442_PA_MDMA 0xE8000000
-#define S5P6442_PA_PDMA 0xE9000000
-
-#define S5P6442_PA_TIMER 0xEA000000
-
-#define S5P6442_PA_SYSTIMER 0xEA100000
-
-#define S5P6442_PA_WATCHDOG 0xEA200000
-
-#define S5P6442_PA_UART 0xEC000000
-
-#define S5P6442_PA_IIC0 0xEC100000
-
-#define S5P6442_PA_SPI 0xEC300000
-
-#define S5P6442_PA_PCM0 0xF2400000
-#define S5P6442_PA_PCM1 0xF2500000
-
-/* Compatibility Defines */
-
-#define S3C_PA_IIC S5P6442_PA_IIC0
-#define S3C_PA_WDT S5P6442_PA_WATCHDOG
-
-#define S5P_PA_CHIPID S5P6442_PA_CHIPID
-#define S5P_PA_SDRAM S5P6442_PA_SDRAM
-#define S5P_PA_SROMC S5P6442_PA_SROMC
-#define S5P_PA_SYSCON S5P6442_PA_SYSCON
-#define S5P_PA_TIMER S5P6442_PA_TIMER
-
-/* UART */
-
-#define S3C_PA_UART S5P6442_PA_UART
-
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-
-#define S5P_SZ_UART SZ_256
-
-#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h
deleted file mode 100644
index cfe259d..0000000
--- a/arch/arm/mach-s5p6442/include/mach/memory.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/memory.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Memory definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#define PLAT_PHYS_OFFSET UL(0x20000000)
-#define CONSISTENT_DMA_SIZE SZ_8M
-
-#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
deleted file mode 100644
index 2724b37..0000000
--- a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
- *
- * S5P6442 - pwm clock and timer support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_PWMCLK_H
-#define __ASM_ARCH_PWMCLK_H __FILE__
-
-/**
- * pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @tcfg: The timer TCFG1 register bits shifted down to 0.
- *
- * Return true if the given configuration from TCFG1 is a TCLK instead of
- * any of the TDIV clocks.
- */
-static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
-{
- return tcfg == S3C64XX_TCFG1_MUX_TCLK;
-}
-
-/**
- * tcfg_to_divisor() - convert tcfg1 setting to a divisor
- * @tcfg1: The tcfg1 setting, shifted down.
- *
- * Get the divisor value for the given tcfg1 setting. We assume the
- * caller has already checked to see if this is not a TCLK source.
- */
-static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
-{
- return 1 << tcfg1;
-}
-
-/**
- * pwm_tdiv_has_div1() - does the tdiv setting have a /1
- *
- * Return true if we have a /1 in the tdiv setting.
- */
-static inline unsigned int pwm_tdiv_has_div1(void)
-{
- return 1;
-}
-
-/**
- * pwm_tdiv_div_bits() - calculate TCFG1 divisor value.
- * @div: The divisor to calculate the bit information for.
- *
- * Turn a divisor into the necessary bit field for TCFG1.
- */
-static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
-{
- return ilog2(div);
-}
-
-#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
-
-#endif /* __ASM_ARCH_PWMCLK_H */
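The two inline helpers above are inverses of each other, which a quick worked example shows:

/*	tcfg_to_divisor(2)   == 1 << 2   == 4	(TCFG1 field 2 -> divide-by-4)
 *	pwm_tdiv_div_bits(4) == ilog2(4) == 2	(divide-by-4  -> TCFG1 field 2)
 */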
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-clock.h b/arch/arm/mach-s5p6442/include/mach/regs-clock.h
deleted file mode 100644
index 00828a3..0000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-clock.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/regs-clock.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - Clock register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_CLOCK_H
-#define __ASM_ARCH_REGS_CLOCK_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
-
-#define S5P_APLL_LOCK S5P_CLKREG(0x00)
-#define S5P_MPLL_LOCK S5P_CLKREG(0x08)
-#define S5P_EPLL_LOCK S5P_CLKREG(0x10)
-#define S5P_VPLL_LOCK S5P_CLKREG(0x20)
-
-#define S5P_APLL_CON S5P_CLKREG(0x100)
-#define S5P_MPLL_CON S5P_CLKREG(0x108)
-#define S5P_EPLL_CON S5P_CLKREG(0x110)
-#define S5P_VPLL_CON S5P_CLKREG(0x120)
-
-#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
-#define S5P_CLK_SRC1 S5P_CLKREG(0x204)
-#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
-#define S5P_CLK_SRC3 S5P_CLKREG(0x20C)
-#define S5P_CLK_SRC4 S5P_CLKREG(0x210)
-#define S5P_CLK_SRC5 S5P_CLKREG(0x214)
-#define S5P_CLK_SRC6 S5P_CLKREG(0x218)
-
-#define S5P_CLK_SRC_MASK0 S5P_CLKREG(0x280)
-#define S5P_CLK_SRC_MASK1 S5P_CLKREG(0x284)
-
-#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
-#define S5P_CLK_DIV1 S5P_CLKREG(0x304)
-#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
-#define S5P_CLK_DIV3 S5P_CLKREG(0x30C)
-#define S5P_CLK_DIV4 S5P_CLKREG(0x310)
-#define S5P_CLK_DIV5 S5P_CLKREG(0x314)
-#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
-
-#define S5P_CLKGATE_IP0 S5P_CLKREG(0x460)
-#define S5P_CLKGATE_IP3 S5P_CLKREG(0x46C)
-
-/* CLK_OUT */
-#define S5P_CLK_OUT_SHIFT (12)
-#define S5P_CLK_OUT_MASK (0x1F << S5P_CLK_OUT_SHIFT)
-#define S5P_CLK_OUT S5P_CLKREG(0x500)
-
-#define S5P_CLK_DIV_STAT0 S5P_CLKREG(0x1000)
-#define S5P_CLK_DIV_STAT1 S5P_CLKREG(0x1004)
-
-#define S5P_CLK_MUX_STAT0 S5P_CLKREG(0x1100)
-#define S5P_CLK_MUX_STAT1 S5P_CLKREG(0x1104)
-
-#define S5P_MDNIE_SEL S5P_CLKREG(0x7008)
-
-/* Register Bit definition */
-#define S5P_EPLL_EN (1<<31)
-#define S5P_EPLL_MASK 0xffffffff
-#define S5P_EPLLVAL(_m, _p, _s) ((_m) << 16 | ((_p) << 8) | ((_s)))
-
-/* CLKDIV0 */
-#define S5P_CLKDIV0_APLL_SHIFT (0)
-#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
-#define S5P_CLKDIV0_A2M_SHIFT (4)
-#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
-#define S5P_CLKDIV0_D0CLK_SHIFT (16)
-#define S5P_CLKDIV0_D0CLK_MASK (0xF << S5P_CLKDIV0_D0CLK_SHIFT)
-#define S5P_CLKDIV0_P0CLK_SHIFT (20)
-#define S5P_CLKDIV0_P0CLK_MASK (0x7 << S5P_CLKDIV0_P0CLK_SHIFT)
-#define S5P_CLKDIV0_D1CLK_SHIFT (24)
-#define S5P_CLKDIV0_D1CLK_MASK (0xF << S5P_CLKDIV0_D1CLK_SHIFT)
-#define S5P_CLKDIV0_P1CLK_SHIFT (28)
-#define S5P_CLKDIV0_P1CLK_MASK (0x7 << S5P_CLKDIV0_P1CLK_SHIFT)
-
-/* Clock MUX status Registers */
-#define S5P_CLK_MUX_STAT0_APLL_SHIFT (0)
-#define S5P_CLK_MUX_STAT0_APLL_MASK (0x7 << S5P_CLK_MUX_STAT0_APLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_MPLL_SHIFT (4)
-#define S5P_CLK_MUX_STAT0_MPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_MPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_EPLL_SHIFT (8)
-#define S5P_CLK_MUX_STAT0_EPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_EPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_VPLL_SHIFT (12)
-#define S5P_CLK_MUX_STAT0_VPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_VPLL_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXARM_SHIFT (16)
-#define S5P_CLK_MUX_STAT0_MUXARM_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXARM_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXD0_SHIFT (20)
-#define S5P_CLK_MUX_STAT0_MUXD0_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD0_SHIFT)
-#define S5P_CLK_MUX_STAT0_MUXD1_SHIFT (24)
-#define S5P_CLK_MUX_STAT0_MUXD1_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD1_SHIFT)
-#define S5P_CLK_MUX_STAT1_D1SYNC_SHIFT (24)
-#define S5P_CLK_MUX_STAT1_D1SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D1SYNC_SHIFT)
-#define S5P_CLK_MUX_STAT1_D0SYNC_SHIFT (28)
-#define S5P_CLK_MUX_STAT1_D0SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D0SYNC_SHIFT)
-
-#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-irq.h b/arch/arm/mach-s5p6442/include/mach/regs-irq.h
deleted file mode 100644
index 73782b5..0000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-irq.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/regs-irq.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - IRQ register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_IRQ_H
-#define __ASM_ARCH_REGS_IRQ_H __FILE__
-
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-
-#endif /* __ASM_ARCH_REGS_IRQ_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h b/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
deleted file mode 100644
index 7fd8820..0000000
--- a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S5P6442_PLAT_SPI_CLKS_H
-#define __S5P6442_PLAT_SPI_CLKS_H __FILE__
-
-#define S5P6442_SPI_SRCCLK_PCLK 0
-#define S5P6442_SPI_SRCCLK_SCLK 1
-
-#endif /* __S5P6442_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/system.h b/arch/arm/mach-s5p6442/include/mach/system.h
deleted file mode 100644
index c30c1cc..0000000
--- a/arch/arm/mach-s5p6442/include/mach/system.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/system.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - system support header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H __FILE__
-
-#include <plat/system-reset.h>
-
-static void arch_idle(void)
-{
- /* nothing here yet */
-}
-
-#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/tick.h b/arch/arm/mach-s5p6442/include/mach/tick.h
deleted file mode 100644
index e1d4cab..0000000
--- a/arch/arm/mach-s5p6442/include/mach/tick.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/tick.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s3c6400/include/mach/tick.h
- *
- * S5P6442 - Timer tick support definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TICK_H
-#define __ASM_ARCH_TICK_H __FILE__
-
-static inline u32 s3c24xx_ostimer_pending(void)
-{
- u32 pend = __raw_readl(VA_VIC0 + VIC_RAW_STATUS);
- return pend & (1 << (IRQ_TIMER4_VIC - S5P_IRQ_VIC0(0)));
-}
-
-#define TICK_MAX (0xffffffff)
-
-#endif /* __ASM_ARCH_TICK_H */
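Given the IRQ map removed earlier in this diff (IRQ_TIMER4_VIC is defined as S5P_IRQ_VIC0(25)), and assuming S5P_IRQ_VIC0() is the usual linear offset macro, the pending check above reduces to a single bit test:

/*	pend & (1 << (IRQ_TIMER4_VIC - S5P_IRQ_VIC0(0)))
 *	    == pend & (1 << (25 - 0))
 *	    == pend & (1 << 25)		bit 25 of the VIC0 raw status
 */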
diff --git a/arch/arm/mach-s5p6442/include/mach/timex.h b/arch/arm/mach-s5p6442/include/mach/timex.h
deleted file mode 100644
index ff8f2fc..0000000
--- a/arch/arm/mach-s5p6442/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/timex.h
- *
- * Copyright (c) 2003-2010 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S5P6442 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/uncompress.h b/arch/arm/mach-s5p6442/include/mach/uncompress.h
deleted file mode 100644
index 5ac7cbe..0000000
--- a/arch/arm/mach-s5p6442/include/mach/uncompress.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/uncompress.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5P6442 - uncompress code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_UNCOMPRESS_H
-#define __ASM_ARCH_UNCOMPRESS_H
-
-#include <mach/map.h>
-#include <plat/uncompress.h>
-
-static void arch_detect_cpu(void)
-{
- /* we do not need to do any cpu detection here at the moment. */
-}
-
-#endif /* __ASM_ARCH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
deleted file mode 100644
index 4aa55e5..0000000
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S5P6442 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6442/init.c b/arch/arm/mach-s5p6442/init.c
deleted file mode 100644
index 1874bdb..0000000
--- a/arch/arm/mach-s5p6442/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/s5p6442-init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5p6442.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5p6442_serial_clocks[] = {
- [0] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
-/* uart registration process */
-void __init s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- struct s3c2410_uartcfg *tcfg = cfg;
- u32 ucnt;
-
- for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
- if (!tcfg->clocks) {
- tcfg->clocks = s5p6442_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(s5p6442_serial_clocks);
- }
- }
-
- s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
deleted file mode 100644
index eaf6b9c..0000000
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/mach-smdk6442.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/i2c.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/regs-serial.h>
-#include <plat/s5p6442.h>
-#include <plat/devs.h>
-#include <plat/cpu.h>
-#include <plat/iic.h>
-
-/* Following are default values for UCON, ULCON and UFCON UART registers */
-#define SMDK6442_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
- S3C2410_UCON_RXILEVEL | \
- S3C2410_UCON_TXIRQMODE | \
- S3C2410_UCON_RXIRQMODE | \
- S3C2410_UCON_RXFIFO_TOI | \
- S3C2443_UCON_RXERR_IRQEN)
-
-#define SMDK6442_ULCON_DEFAULT S3C2410_LCON_CS8
-
-#define SMDK6442_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
- S5PV210_UFCON_TXTRIG4 | \
- S5PV210_UFCON_RXTRIG4)
-
-static struct s3c2410_uartcfg smdk6442_uartcfgs[] __initdata = {
- [0] = {
- .hwport = 0,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
- [1] = {
- .hwport = 1,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
- [2] = {
- .hwport = 2,
- .flags = 0,
- .ucon = SMDK6442_UCON_DEFAULT,
- .ulcon = SMDK6442_ULCON_DEFAULT,
- .ufcon = SMDK6442_UFCON_DEFAULT,
- },
-};
-
-static struct platform_device *smdk6442_devices[] __initdata = {
- &s3c_device_i2c0,
- &samsung_asoc_dma,
- &s5p6442_device_iis0,
- &s3c_device_wdt,
-};
-
-static struct i2c_board_info smdk6442_i2c_devs0[] __initdata = {
- { I2C_BOARD_INFO("wm8580", 0x1b), },
-};
-
-static void __init smdk6442_map_io(void)
-{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
- s3c24xx_init_clocks(12000000);
- s3c24xx_init_uarts(smdk6442_uartcfgs, ARRAY_SIZE(smdk6442_uartcfgs));
-}
-
-static void __init smdk6442_machine_init(void)
-{
- s3c_i2c0_set_platdata(NULL);
- i2c_register_board_info(0, smdk6442_i2c_devs0,
- ARRAY_SIZE(smdk6442_i2c_devs0));
- platform_add_devices(smdk6442_devices, ARRAY_SIZE(smdk6442_devices));
-}
-
-MACHINE_START(SMDK6442, "SMDK6442")
- /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
- .boot_params = S5P_PA_SDRAM + 0x100,
- .init_irq = s5p6442_init_irq,
- .map_io = smdk6442_map_io,
- .init_machine = smdk6442_machine_init,
- .timer = &s3c24xx_timer,
-MACHINE_END
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
deleted file mode 100644
index aad8565..0000000
--- a/arch/arm/mach-s5p6442/setup-i2c0.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/setup-i2c0.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * I2C0 GPIO configuration.
- *
- * Based on plat-s3c64xx/setup-i2c0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/gpio.h>
-
-struct platform_device; /* don't need the contents */
-
-#include <plat/gpio-cfg.h>
-#include <plat/iic.h>
-
-void s3c_i2c0_cfg_gpio(struct platform_device *dev)
-{
- s3c_gpio_cfgall_range(S5P6442_GPD1(0), 2,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-}
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index eecab57..a5e6e60 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -11,7 +11,7 @@ obj- :=
# Core support for S5PC100 system
-obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o gpiolib.o
+obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o
obj-$(CONFIG_CPU_S5PC100) += setup-i2c0.o
obj-$(CONFIG_CPU_S5PC100) += dma.o
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 11f1790..50907ac 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -12,7 +12,7 @@ obj- :=
# Core support for S5PV210 system
-obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o gpiolib.o
+obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o
obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
obj-$(CONFIG_S5PV210_PM) += pm.o sleep.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
index fab46fe..8fd354a 100644
--- a/arch/arm/mach-u300/Makefile
+++ b/arch/arm/mach-u300/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel, U300 machine.
#
-obj-y := core.o clock.o timer.o gpio.o padmux.o
+obj-y := core.o clock.o timer.o padmux.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 54429d0..f8b9392 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -5,7 +5,6 @@ config UX500_SOC_COMMON
default y
select ARM_GIC
select HAS_MTU
- select NOMADIK_GPIO
select ARM_ERRATA_753970
menu "Ux500 SoC"
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index bf0b024..7c6cb4fa 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -99,8 +99,11 @@ static void sdi0_configure(void)
gpio_direction_output(sdi0_vsel, 0);
gpio_direction_output(sdi0_en, 1);
- /* Add the device */
- db8500_add_sdi0(&mop500_sdi0_data);
+ /* Add the device, force v2 to subrevision 1 */
+ if (cpu_is_u8500v2())
+ db8500_add_sdi0(&mop500_sdi0_data, 0x10480180);
+ else
+ db8500_add_sdi0(&mop500_sdi0_data, 0);
}
void mop500_sdi_tc35892_init(void)
@@ -188,13 +191,18 @@ static struct mmci_platform_data mop500_sdi4_data = {
void __init mop500_sdi_init(void)
{
+ u32 periphid = 0;
+
+	/* v2 has a new version of this block that needs to be forced */
+ if (cpu_is_u8500v2())
+ periphid = 0x10480180;
/* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
if (!cpu_is_u8500v10())
mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
- db8500_add_sdi2(&mop500_sdi2_data);
+ db8500_add_sdi2(&mop500_sdi2_data, periphid);
/* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data);
+ db8500_add_sdi4(&mop500_sdi4_data, periphid);
if (machine_is_hrefv60()) {
mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index c719b5a1..7825705 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -28,18 +28,20 @@ dbx500_add_msp_spi(const char *name, resource_size_t base, int irq,
static inline struct amba_device *
dbx500_add_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata)
+ struct spi_master_cntlr *pdata,
+ u32 periphid)
{
- return dbx500_add_amba_device(name, base, irq, pdata, 0);
+ return dbx500_add_amba_device(name, base, irq, pdata, periphid);
}
struct mmci_platform_data;
static inline struct amba_device *
dbx500_add_sdi(const char *name, resource_size_t base, int irq,
- struct mmci_platform_data *pdata)
+ struct mmci_platform_data *pdata,
+ u32 periphid)
{
- return dbx500_add_amba_device(name, base, irq, pdata, 0);
+ return dbx500_add_amba_device(name, base, irq, pdata, periphid);
}
struct amba_pl011_data;
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index 94627f7..0c4bccd 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -38,24 +38,34 @@
ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
#define db5500_add_sdi0(pdata) \
- dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata)
+ dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata, \
+ 0x10480180)
#define db5500_add_sdi1(pdata) \
- dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata)
+ dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \
+ 0x10480180)
#define db5500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata)
+	dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata, \
+ 0x10480180)
#define db5500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata)
+	dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata, \
+ 0x10480180)
#define db5500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata)
+	dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata, \
+ 0x10480180)
+/* This one has a bad peripheral ID in the U5500 silicon */
#define db5500_add_spi0(pdata) \
- dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata)
+ dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata, \
+ 0x10080023)
#define db5500_add_spi1(pdata) \
- dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata)
+ dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \
+ 0x10080023)
#define db5500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata)
+	dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata, \
+ 0x10080023)
#define db5500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata)
+	dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata, \
+ 0x10080023)
#define db5500_add_uart0(plat) \
dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0, plat)
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 9cc6f8f..cbd4a9a 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -25,7 +25,7 @@ static inline struct amba_device *
db8500_add_ssp(const char *name, resource_size_t base, int irq,
struct pl022_ssp_controller *pdata)
{
- return dbx500_add_amba_device(name, base, irq, pdata, SSP_PER_ID);
+ return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
@@ -64,18 +64,18 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_usb(rx_cfg, tx_cfg) \
ux500_add_usb(U8500_USBOTG_BASE, IRQ_DB8500_USBOTG, rx_cfg, tx_cfg)
-#define db8500_add_sdi0(pdata) \
- dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata)
-#define db8500_add_sdi1(pdata) \
- dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata)
-#define db8500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata)
-#define db8500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata)
-#define db8500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata)
-#define db8500_add_sdi5(pdata) \
- dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata)
+#define db8500_add_sdi0(pdata, pid) \
+ dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata, pid)
+#define db8500_add_sdi1(pdata, pid) \
+ dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata, pid)
+#define db8500_add_sdi2(pdata, pid) \
+ dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata, pid)
+#define db8500_add_sdi3(pdata, pid) \
+ dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata, pid)
+#define db8500_add_sdi4(pdata, pid) \
+ dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata, pid)
+#define db8500_add_sdi5(pdata, pid) \
+ dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata, pid)
#define db8500_add_ssp0(pdata) \
db8500_add_ssp("ssp0", U8500_SSP0_BASE, IRQ_DB8500_SSP0, pdata)
@@ -83,13 +83,13 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
db8500_add_ssp("ssp1", U8500_SSP1_BASE, IRQ_DB8500_SSP1, pdata)
#define db8500_add_spi0(pdata) \
- dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata)
+ dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata, 0)
#define db8500_add_spi1(pdata) \
- dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata)
+ dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata, 0)
#define db8500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata)
+ dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata, 0)
#define db8500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata)
+ dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata, 0)
#define db8500_add_uart0(pdata) \
dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata)
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 2c6f710..470ac52 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -29,9 +29,6 @@
#include <mach/db8500-regs.h>
#include <mach/db5500-regs.h>
-/* ST-Ericsson modified pl022 id */
-#define SSP_PER_ID 0x01080022
-
#ifndef __ASSEMBLY__
#include <mach/id.h>
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b..73b4a8b 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
*/
ENTRY(v6_flush_kern_dcache_area)
add r1, r0, r1
+ bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81..d32f02b 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
ENTRY(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
+ sub r3, r2, #1
+ bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba..8bfae96 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
/*
* We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
- /* set the reserved ASID before flushing the TLB */
- asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+ u32 ttb;
+ /* Copy TTBR1 into TTBR0 */
+ asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
+ "mcr p15, 0, %0, c2, c0, 0"
+ : "=r" (ttb));
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@@ -93,7 +94,7 @@ static void reset_context(void *info)
return;
smp_rmb();
- asid = cpu_last_asid + cpu + 1;
+ asid = cpu_last_asid + cpu;
flush_context();
set_mm_context(mm, asid);
@@ -143,13 +144,13 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
- asid = cpu_last_asid + smp_processor_id() + 1;
+ asid = cpu_last_asid + smp_processor_id();
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
#endif
- cpu_last_asid += NR_CPUS;
+ cpu_last_asid += NR_CPUS - 1;
}
set_mm_context(mm, asid);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea1..2c2cce9 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,12 +15,14 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
+#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/mach-types.h>
+#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -71,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+ phys_initrd_start = start;
+ phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
/*
* This keeps memory configuration data used by a couple memory
* initialization functions, as well as show_mem() for the skipping
@@ -273,13 +283,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
free_area_init_node(0, zone_size, min, zhole_size);
}
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
+#endif
+#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
@@ -334,6 +346,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
#endif
arm_mm_memblock_reserve();
+ arm_dt_memblock_reserve();
/* reserve any platform specific memblock areas */
if (mdesc->reserve)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d238410..5b3d7d5 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -5,14 +5,9 @@ extern pmd_t *top_pmd;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
-{
- return pmd_offset(pud_offset(pgd, virt), virt);
-}
-
static inline pmd_t *pmd_off_k(unsigned long virt)
{
- return pmd_off(pgd_offset_k(virt), virt);
+ return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
struct mem_type {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 08a9236..9d9e736 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -763,15 +763,12 @@ static void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
- lowmem_limit = __pa(vmalloc_min - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
-
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
- if (__va(bank->start) > vmalloc_min ||
+ if (__va(bank->start) >= vmalloc_min ||
__va(bank->start) < (void *)PAGE_OFFSET)
highmem = 1;
@@ -829,6 +826,9 @@ static void __init sanity_check_meminfo(void)
bank->size = newsize;
}
#endif
+ if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+ lowmem_limit = bank->start + bank->size;
+
j++;
}
#ifdef CONFIG_HIGHMEM
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
+ memblock_set_current_limit(lowmem_limit);
}
static inline void prepare_page_table(void)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ab17cc0..1d2b845 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -213,7 +213,9 @@ __v6_setup:
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
- mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+ ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba09..b3b566e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
- mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
- isb
-1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
+ mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
+ mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
@@ -368,7 +366,9 @@ __v7_setup:
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
- mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+ ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
ldr r5, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index 18296ee..ce65901 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -21,9 +21,4 @@ config HAS_MTU
to multiple interrupt generating programmable
32-bit free running decrementing counters.
-config NOMADIK_GPIO
- bool
- help
- Support for the Nomadik GPIO controller.
-
endif
diff --git a/arch/arm/plat-nomadik/Makefile b/arch/arm/plat-nomadik/Makefile
index c335473..37c7cdd 100644
--- a/arch/arm/plat-nomadik/Makefile
+++ b/arch/arm/plat-nomadik/Makefile
@@ -3,4 +3,3 @@
# Licensed under GPLv2
obj-$(CONFIG_HAS_MTU) += timer.o
-obj-$(CONFIG_NOMADIK_GPIO) += gpio.o
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index 1b9f6f0..ea19a5b 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -78,6 +78,8 @@ extern int nmk_gpio_get_mode(int gpio);
extern void nmk_gpio_wakeups_suspend(void);
extern void nmk_gpio_wakeups_resume(void);
+extern void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up);
+
/*
* Platform data to register a block: only the initial gpio/irq number.
*/
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index a4a1285..f0233e6 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := common.o sram.o clock.o devices.o dma.o mux.o gpio.o \
+obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
usb.o fb.o io.o counter_32k.o
obj-m :=
obj-n :=
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index cac2e8a..ec97e00 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -52,6 +52,109 @@
#define OMAP34XX_NR_GPIOS 6
+/*
+ * OMAP1510 GPIO registers
+ */
+#define OMAP1510_GPIO_DATA_INPUT 0x00
+#define OMAP1510_GPIO_DATA_OUTPUT 0x04
+#define OMAP1510_GPIO_DIR_CONTROL 0x08
+#define OMAP1510_GPIO_INT_CONTROL 0x0c
+#define OMAP1510_GPIO_INT_MASK 0x10
+#define OMAP1510_GPIO_INT_STATUS 0x14
+#define OMAP1510_GPIO_PIN_CONTROL 0x18
+
+#define OMAP1510_IH_GPIO_BASE 64
+
+/*
+ * OMAP1610 specific GPIO registers
+ */
+#define OMAP1610_GPIO_REVISION 0x0000
+#define OMAP1610_GPIO_SYSCONFIG 0x0010
+#define OMAP1610_GPIO_SYSSTATUS 0x0014
+#define OMAP1610_GPIO_IRQSTATUS1 0x0018
+#define OMAP1610_GPIO_IRQENABLE1 0x001c
+#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
+#define OMAP1610_GPIO_DATAIN 0x002c
+#define OMAP1610_GPIO_DATAOUT 0x0030
+#define OMAP1610_GPIO_DIRECTION 0x0034
+#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
+#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
+#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
+#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
+#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
+#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
+#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
+#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
+
+/*
+ * OMAP7XX specific GPIO registers
+ */
+#define OMAP7XX_GPIO_DATA_INPUT 0x00
+#define OMAP7XX_GPIO_DATA_OUTPUT 0x04
+#define OMAP7XX_GPIO_DIR_CONTROL 0x08
+#define OMAP7XX_GPIO_INT_CONTROL 0x0c
+#define OMAP7XX_GPIO_INT_MASK 0x10
+#define OMAP7XX_GPIO_INT_STATUS 0x14
+
+/*
+ * omap2+ specific GPIO registers
+ */
+#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
+#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
+#define OMAP24XX_GPIO_IRQENABLE2 0x002c
+#define OMAP24XX_GPIO_IRQENABLE1 0x001c
+#define OMAP24XX_GPIO_WAKE_EN 0x0020
+#define OMAP24XX_GPIO_CTRL 0x0030
+#define OMAP24XX_GPIO_OE 0x0034
+#define OMAP24XX_GPIO_DATAIN 0x0038
+#define OMAP24XX_GPIO_DATAOUT 0x003c
+#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
+#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
+#define OMAP24XX_GPIO_RISINGDETECT 0x0048
+#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
+#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050
+#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054
+#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
+#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
+#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
+#define OMAP24XX_GPIO_SETWKUENA 0x0084
+#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
+#define OMAP24XX_GPIO_SETDATAOUT 0x0094
+
+#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_EOI 0x0020
+#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+#define OMAP4_GPIO_IRQSTATUS0 0x002c
+#define OMAP4_GPIO_IRQSTATUS1 0x0030
+#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
+#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
+#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
+#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
+#define OMAP4_GPIO_IRQWAKEN0 0x0044
+#define OMAP4_GPIO_IRQWAKEN1 0x0048
+#define OMAP4_GPIO_IRQENABLE1 0x011c
+#define OMAP4_GPIO_WAKE_EN 0x0120
+#define OMAP4_GPIO_IRQSTATUS2 0x0128
+#define OMAP4_GPIO_IRQENABLE2 0x012c
+#define OMAP4_GPIO_CTRL 0x0130
+#define OMAP4_GPIO_OE 0x0134
+#define OMAP4_GPIO_DATAIN 0x0138
+#define OMAP4_GPIO_DATAOUT 0x013c
+#define OMAP4_GPIO_LEVELDETECT0 0x0140
+#define OMAP4_GPIO_LEVELDETECT1 0x0144
+#define OMAP4_GPIO_RISINGDETECT 0x0148
+#define OMAP4_GPIO_FALLINGDETECT 0x014c
+#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
+#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
+#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
+#define OMAP4_GPIO_SETIRQENABLE1 0x0164
+#define OMAP4_GPIO_CLEARWKUENA 0x0180
+#define OMAP4_GPIO_SETWKUENA 0x0184
+#define OMAP4_GPIO_CLEARDATAOUT 0x0190
+#define OMAP4_GPIO_SETDATAOUT 0x0194
+
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 6751bcf..e98f5c5 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -7,7 +7,7 @@
config PLAT_S5P
bool
- depends on (ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
default y
select ARM_VIC if !ARCH_EXYNOS4
select ARM_GIC if ARCH_EXYNOS4
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 5cf5e72..bbc2aa7 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -21,7 +21,6 @@
#include <plat/cpu.h>
#include <plat/s5p6440.h>
-#include <plat/s5p6442.h>
#include <plat/s5p6450.h>
#include <plat/s5pc100.h>
#include <plat/s5pv210.h>
@@ -30,7 +29,6 @@
/* table of supported CPUs */
static const char name_s5p6440[] = "S5P6440";
-static const char name_s5p6442[] = "S5P6442";
static const char name_s5p6450[] = "S5P6450";
static const char name_s5pc100[] = "S5PC100";
static const char name_s5pv210[] = "S5PV210/S5PC110";
@@ -46,14 +44,6 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s5p64x0_init,
.name = name_s5p6440,
}, {
- .idcode = 0x36442000,
- .idmask = 0xfffff000,
- .map_io = s5p6442_map_io,
- .init_clocks = s5p6442_init_clocks,
- .init_uarts = s5p6442_init_uarts,
- .init = s5p6442_init,
- .name = name_s5p6442,
- }, {
.idcode = 0x36450000,
.idmask = 0xfffff000,
.map_io = s5p6450_map_io,
diff --git a/arch/arm/plat-s5p/include/plat/s5p6442.h b/arch/arm/plat-s5p/include/plat/s5p6442.h
deleted file mode 100644
index 7b88013..0000000
--- a/arch/arm/plat-s5p/include/plat/s5p6442.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* arch/arm/plat-s5p/include/plat/s5p6442.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5p6442 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5P6442 related SoCs */
-
-extern void s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5p6442_register_clocks(void);
-extern void s5p6442_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6442
-
-extern int s5p6442_init(void);
-extern void s5p6442_init_irq(void);
-extern void s5p6442_map_io(void);
-extern void s5p6442_init_clocks(int xtal);
-
-#define s5p6442_init_uarts s5p6442_common_init_uarts
-
-#else
-#define s5p6442_init_clocks NULL
-#define s5p6442_init_uarts NULL
-#define s5p6442_map_io NULL
-#define s5p6442_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index e9de58a..53eb15b 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -19,7 +19,6 @@ obj-y += gpio.o
obj-y += gpio-config.o
obj-y += dev-asocdma.o
-obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT) += gpiolib.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
obj-$(CONFIG_SAMSUNG_IRQ_UART) += irq-uart.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 3aedac0..c0a5741 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -86,7 +86,6 @@ extern struct sysdev_class s3c2443_sysclass;
extern struct sysdev_class s3c6410_sysclass;
extern struct sysdev_class s3c64xx_sysclass;
extern struct sysdev_class s5p64x0_sysclass;
-extern struct sysdev_class s5p6442_sysclass;
extern struct sysdev_class s5pv210_sysclass;
extern struct sysdev_class exynos4_sysclass;
diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
index dc6efd9..207e275 100644
--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
+++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
@@ -11,7 +11,7 @@
#include <plat/regs-serial.h>
-/* The S5PV210/S5PC110 and S5P6442 implementations are as belows. */
+/* The S5PV210/S5PC110 implementations are as below. */
.macro fifo_level_s5pv210 rd, rx
ldr \rd, [ \rx, # S3C2410_UFSTAT ]
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 39818d8..b61b8ee 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -111,12 +111,6 @@ extern struct platform_device exynos4_device_spdif;
extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_ahci;
-extern struct platform_device s5p6442_device_pcm0;
-extern struct platform_device s5p6442_device_pcm1;
-extern struct platform_device s5p6442_device_iis0;
-extern struct platform_device s5p6442_device_iis1;
-extern struct platform_device s5p6442_device_spi;
-
extern struct platform_device s5p6440_device_pcm;
extern struct platform_device s5p6440_device_iis;
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 788837e9..c151c5f 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -194,7 +194,7 @@
#define S3C64XX_UINTSP 0x34
#define S3C64XX_UINTM 0x38
-/* Following are specific to S5PV210 and S5P6442 */
+/* Following are specific to S5PV210 */
#define S5PV210_UCON_CLKMASK (1<<10)
#define S5PV210_UCON_PCLK (0<<10)
#define S5PV210_UCON_UCLK (1<<10)
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index ff1a561..0ffe34a 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -69,6 +69,5 @@ extern void s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
extern void s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
-extern void s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
#endif /* __S3C64XX_PLAT_SPI_H */
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index 89861a2..f714544 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -299,9 +299,10 @@
#define __NR_signalfd 279
/* 280 was __NR_timerfd */
#define __NR_eventfd 281
+#define __NR_setns 283
#ifdef __KERNEL__
-#define NR_syscalls 282
+#define NR_syscalls 284
/* Old stuff */
#define __IGNORE_uselib
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index e76bad1..c7fd394 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -296,4 +296,5 @@ sys_call_table:
.long sys_ni_syscall /* 280, was sys_timerfd */
.long sys_eventfd
.long sys_recvmmsg
+ .long sys_setns
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bfc9d07..aa677e2 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1014,6 +1014,7 @@ static struct platform_device *__initdata at32_usarts[4];
void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
{
struct platform_device *pdev;
+ struct atmel_uart_data *pdata;
switch (hw_id) {
case 0:
@@ -1042,7 +1043,8 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
data->regs = (void __iomem *)pdev->resource[0].start;
}
- pdev->id = line;
+ pdata = pdev->dev.platform_data;
+	pdata->num = line;
at32_usarts[line] = pdev;
}
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 6174020..679458d 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -33,6 +33,7 @@ extern struct platform_device *atmel_default_console_device;
#define ATMEL_USART_CLK 0x04
struct atmel_uart_data {
+ int num; /* port num */
short use_dma_tx; /* use transmit DMA? */
short use_dma_rx; /* use receive DMA? */
void __iomem *regs; /* virtual base address, if any */
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 7dbc664..7fd0ec7 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -184,7 +184,7 @@ struct bfin_uart_regs {
#undef __BFP
#ifndef port_membase
-# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+# define port_membase(p) 0
#endif
#define UART_GET_CHAR(p) bfin_read16(port_membase(p) + OFFSET_RBR)
@@ -235,10 +235,10 @@ struct bfin_uart_regs {
#define UART_SET_DLAB(p) do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
#ifndef put_lsr_cache
-# define put_lsr_cache(p, v) (((struct bfin_serial_port *)(p))->lsr = (v))
+# define put_lsr_cache(p, v)
#endif
#ifndef get_lsr_cache
-# define get_lsr_cache(p) (((struct bfin_serial_port *)(p))->lsr)
+# define get_lsr_cache(p) 0
#endif
/* The hardware clears the LSR bits upon read, so we need to cache
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index c722acd..38657da 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -193,4 +193,22 @@ uint16_t get_enabled_gptimers(void);
uint32_t get_gptimer_status(unsigned int group);
void set_gptimer_status(unsigned int group, uint32_t value);
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits. So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin timer registers layout
+ */
+struct bfin_gptimer_regs {
+ __BFP(config);
+ u32 counter;
+ u32 period;
+ u32 width;
+};
+
+#undef __BFP
+
#endif
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index ff9a9f3..0ccba60 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -397,8 +397,10 @@
#define __NR_open_by_handle_at 376
#define __NR_clock_adjtime 377
#define __NR_syncfs 378
+#define __NR_setns 379
+#define __NR_sendmmsg 380
-#define __NR_syscall 379
+#define __NR_syscall 381
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index 94b1d8a..fce4807 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -13,6 +13,7 @@
#include <asm/blackfin.h>
#include <asm/gpio.h>
+#include <asm/gptimers.h>
#include <asm/bfin_can.h>
#include <asm/bfin_dma.h>
#include <asm/bfin_ppi.h>
@@ -230,8 +231,8 @@ bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdm
#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
#define _MDMA(num, x) \
do { \
- _DMA(num, x##DMA_D##num##_CONFIG, 'D', #x); \
- _DMA(num, x##DMA_S##num##_CONFIG, 'S', #x); \
+ _DMA(num, x##DMA_D##num##_NEXT_DESC_PTR, 'D', #x); \
+ _DMA(num, x##DMA_S##num##_NEXT_DESC_PTR, 'S', #x); \
} while (0)
#define MDMA(num) _MDMA(num, M)
#define IMDMA(num) _MDMA(num, IM)
@@ -264,20 +265,15 @@ bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
/*
* General Purpose Timers
*/
-#define GPTIMER_OFF(mmr) (TIMER0_##mmr - TIMER0_CONFIG)
-#define __GPTIMER(name) \
- do { \
- strcpy(_buf, #name); \
- debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, (u16 *)(base + GPTIMER_OFF(name))); \
- } while (0)
+#define __GPTIMER(uname, lname) __REGS(gptimer, #uname, lname)
static void __init __maybe_unused
bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
{
char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
- __GPTIMER(CONFIG);
- __GPTIMER(COUNTER);
- __GPTIMER(PERIOD);
- __GPTIMER(WIDTH);
+ __GPTIMER(CONFIG, config);
+ __GPTIMER(COUNTER, counter);
+ __GPTIMER(PERIOD, period);
+ __GPTIMER(WIDTH, width);
}
#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
@@ -355,7 +351,7 @@ bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
__PPI(DELAY, delay);
__PPI(FRAME, frame);
}
-#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_STATUS, num)
+#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_CONTROL, num)
/*
* SPI
@@ -1288,15 +1284,15 @@ static int __init bfin_debug_mmrs_init(void)
D16(VR_CTL);
D32(CHIPID); /* it's part of this hardware block */
-#if defined(PPI_STATUS) || defined(PPI0_STATUS) || defined(PPI1_STATUS)
+#if defined(PPI_CONTROL) || defined(PPI0_CONTROL) || defined(PPI1_CONTROL)
parent = debugfs_create_dir("ppi", top);
-# ifdef PPI_STATUS
- bfin_debug_mmrs_ppi(parent, PPI_STATUS, -1);
+# ifdef PPI_CONTROL
+ bfin_debug_mmrs_ppi(parent, PPI_CONTROL, -1);
# endif
-# ifdef PPI0_STATUS
+# ifdef PPI0_CONTROL
PPI(0);
# endif
-# ifdef PPI1_STATUS
+# ifdef PPI1_CONTROL
PPI(1);
# endif
#endif
@@ -1341,6 +1337,10 @@ static int __init bfin_debug_mmrs_init(void)
D16(RSI_PID1);
D16(RSI_PID2);
D16(RSI_PID3);
+ D16(RSI_PID4);
+ D16(RSI_PID5);
+ D16(RSI_PID6);
+ D16(RSI_PID7);
D16(RSI_PWR_CONTROL);
D16(RSI_RD_WAIT_EN);
D32(RSI_RESPONSE0);
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index f6d924a..0000000
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF514.h b/arch/blackfin/mach-bf518/include/mach/defBF514.h
index 98a51c4..cfab428 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF514.h
@@ -36,13 +36,13 @@
#define RSI_EMASK 0xFFC038C4 /* RSI Exception Mask Register */
#define RSI_CONFIG 0xFFC038C8 /* RSI Configuration Register */
#define RSI_RD_WAIT_EN 0xFFC038CC /* RSI Read Wait Enable Register */
-#define RSI_PID0 0xFFC03FE0 /* RSI Peripheral ID Register 0 */
-#define RSI_PID1 0xFFC03FE4 /* RSI Peripheral ID Register 1 */
-#define RSI_PID2 0xFFC03FE8 /* RSI Peripheral ID Register 2 */
-#define RSI_PID3 0xFFC03FEC /* RSI Peripheral ID Register 3 */
-#define RSI_PID4 0xFFC03FF0 /* RSI Peripheral ID Register 4 */
-#define RSI_PID5 0xFFC03FF4 /* RSI Peripheral ID Register 5 */
-#define RSI_PID6 0xFFC03FF8 /* RSI Peripheral ID Register 6 */
-#define RSI_PID7 0xFFC03FFC /* RSI Peripheral ID Register 7 */
+#define RSI_PID0 0xFFC038D0 /* RSI Peripheral ID Register 0 */
+#define RSI_PID1 0xFFC038D4 /* RSI Peripheral ID Register 1 */
+#define RSI_PID2 0xFFC038D8 /* RSI Peripheral ID Register 2 */
+#define RSI_PID3 0xFFC038DC /* RSI Peripheral ID Register 3 */
+#define RSI_PID4               0xFFC038E0         /* RSI Peripheral ID Register 4 */
+#define RSI_PID5               0xFFC038E4         /* RSI Peripheral ID Register 5 */
+#define RSI_PID6               0xFFC038E8         /* RSI Peripheral ID Register 6 */
+#define RSI_PID7               0xFFC038EC         /* RSI Peripheral ID Register 7 */
#endif /* _DEF_BF514_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 960e089..0000000
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2007-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index cc383ad..aab80bb 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -185,8 +185,8 @@
#define USB_EP_NI7_TXTYPE 0xffc03bd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
#define USB_EP_NI7_TXINTERVAL 0xffc03bd8 /* Sets the NAK response timeout on Endpoint7 */
#define USB_EP_NI7_RXTYPE 0xffc03bdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03bf0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03bf8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+#define USB_EP_NI7_RXINTERVAL 0xffc03be0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
+#define USB_EP_NI7_TXCOUNT 0xffc03be8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
#define USB_DMA_INTERRUPT 0xffc03c00 /* Indicates pending interrupts for the DMA channels */
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 45dcaa4..0000000
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#ifdef CONFIG_BFIN_UART0_CTSRTS
-# define CONFIG_SERIAL_BFIN_CTSRTS
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- }
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index e16dc45..76db1d4 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -382,7 +382,6 @@ static struct platform_device net2272_bfin_device = {
#endif
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -396,7 +395,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
.offset = MTDPART_OFS_APPEND,
},
};
-#endif
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 1
@@ -423,11 +421,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = bfin_plat_nand_partitions,
.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 3e955db..0000000
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index beb502e..0000000
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
-# define CONFIG_SERIAL_BFIN_CTSRTS
-
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_CTS_PIN
-# define CONFIG_UART1_CTS_PIN -1
-# endif
-
-# ifndef CONFIG_UART1_RTS_PIN
-# define CONFIG_UART1_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART1_CTS_PIN,
- CONFIG_UART1_RTS_PIN,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART2
- {
- 0xFFC02100,
- IRQ_UART2_RX,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART2_TX,
- CH_UART2_RX,
-#endif
-#ifdef CONFIG_BFIN_UART2_CTSRTS
- CONFIG_UART2_CTS_PIN,
- CONFIG_UART2_RTS_PIN,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 0d94eda..0000000
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2007-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
- defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
-# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
-#ifdef CONFIG_SERIAL_BFIN_UART0
- {
- 0xFFC00400,
- IRQ_UART0_RX,
- IRQ_UART0_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART0_TX,
- CH_UART0_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- 0,
- 0,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART1
- {
- 0xFFC02000,
- IRQ_UART1_RX,
- IRQ_UART1_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART1_TX,
- CH_UART1_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- GPIO_PE10,
- GPIO_PE9,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART2
- {
- 0xFFC02100,
- IRQ_UART2_RX,
- IRQ_UART2_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART2_TX,
- CH_UART2_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- 0,
- 0,
-#endif
- },
-#endif
-#ifdef CONFIG_SERIAL_BFIN_UART3
- {
- 0xFFC03100,
- IRQ_UART3_RX,
- IRQ_UART3_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART3_TX,
- CH_UART3_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- GPIO_PB3,
- GPIO_PB2,
-#endif
- },
-#endif
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index 1cbba11..1fa41ec 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -271,10 +271,10 @@
#define USB_EP_NI0_TXINTERVAL 0xffc03e18 /* Sets the NAK response timeout on Endpoint 0 */
#define USB_EP_NI0_RXTYPE 0xffc03e1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint0 */
#define USB_EP_NI0_RXINTERVAL 0xffc03e20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint0 */
+#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
/* USB Endpoint 1 Control Registers */
-#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
#define USB_EP_NI1_TXMAXP 0xffc03e40 /* Maximum packet size for Host Tx endpoint1 */
#define USB_EP_NI1_TXCSR 0xffc03e44 /* Control Status register for endpoint1 */
#define USB_EP_NI1_RXMAXP 0xffc03e48 /* Maximum packet size for Host Rx endpoint1 */
@@ -284,10 +284,10 @@
#define USB_EP_NI1_TXINTERVAL 0xffc03e58 /* Sets the NAK response timeout on Endpoint1 */
#define USB_EP_NI1_RXTYPE 0xffc03e5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint1 */
#define USB_EP_NI1_RXINTERVAL 0xffc03e60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint1 */
+#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the+H102 endpoint1 Tx FIFO */
/* USB Endpoint 2 Control Registers */
-#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the+H102 endpoint1 Tx FIFO */
#define USB_EP_NI2_TXMAXP 0xffc03e80 /* Maximum packet size for Host Tx endpoint2 */
#define USB_EP_NI2_TXCSR 0xffc03e84 /* Control Status register for endpoint2 */
#define USB_EP_NI2_RXMAXP 0xffc03e88 /* Maximum packet size for Host Rx endpoint2 */
@@ -297,10 +297,10 @@
#define USB_EP_NI2_TXINTERVAL 0xffc03e98 /* Sets the NAK response timeout on Endpoint2 */
#define USB_EP_NI2_RXTYPE 0xffc03e9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint2 */
#define USB_EP_NI2_RXINTERVAL 0xffc03ea0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint2 */
+#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
/* USB Endpoint 3 Control Registers */
-#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
#define USB_EP_NI3_TXMAXP 0xffc03ec0 /* Maximum packet size for Host Tx endpoint3 */
#define USB_EP_NI3_TXCSR 0xffc03ec4 /* Control Status register for endpoint3 */
#define USB_EP_NI3_RXMAXP 0xffc03ec8 /* Maximum packet size for Host Rx endpoint3 */
@@ -310,10 +310,10 @@
#define USB_EP_NI3_TXINTERVAL 0xffc03ed8 /* Sets the NAK response timeout on Endpoint3 */
#define USB_EP_NI3_RXTYPE 0xffc03edc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint3 */
#define USB_EP_NI3_RXINTERVAL 0xffc03ee0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint3 */
+#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the H124endpoint3 Tx FIFO */
/* USB Endpoint 4 Control Registers */
-#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the H124endpoint3 Tx FIFO */
#define USB_EP_NI4_TXMAXP 0xffc03f00 /* Maximum packet size for Host Tx endpoint4 */
#define USB_EP_NI4_TXCSR 0xffc03f04 /* Control Status register for endpoint4 */
#define USB_EP_NI4_RXMAXP 0xffc03f08 /* Maximum packet size for Host Rx endpoint4 */
@@ -323,10 +323,10 @@
#define USB_EP_NI4_TXINTERVAL 0xffc03f18 /* Sets the NAK response timeout on Endpoint4 */
#define USB_EP_NI4_RXTYPE 0xffc03f1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint4 */
#define USB_EP_NI4_RXINTERVAL 0xffc03f20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint4 */
+#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
/* USB Endpoint 5 Control Registers */
-#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
#define USB_EP_NI5_TXMAXP 0xffc03f40 /* Maximum packet size for Host Tx endpoint5 */
#define USB_EP_NI5_TXCSR 0xffc03f44 /* Control Status register for endpoint5 */
#define USB_EP_NI5_RXMAXP 0xffc03f48 /* Maximum packet size for Host Rx endpoint5 */
@@ -336,10 +336,10 @@
#define USB_EP_NI5_TXINTERVAL 0xffc03f58 /* Sets the NAK response timeout on Endpoint5 */
#define USB_EP_NI5_RXTYPE 0xffc03f5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint5 */
#define USB_EP_NI5_RXINTERVAL 0xffc03f60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint5 */
+#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the H145endpoint5 Tx FIFO */
/* USB Endpoint 6 Control Registers */
-#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the H145endpoint5 Tx FIFO */
#define USB_EP_NI6_TXMAXP 0xffc03f80 /* Maximum packet size for Host Tx endpoint6 */
#define USB_EP_NI6_TXCSR 0xffc03f84 /* Control Status register for endpoint6 */
#define USB_EP_NI6_RXMAXP 0xffc03f88 /* Maximum packet size for Host Rx endpoint6 */
@@ -349,10 +349,10 @@
#define USB_EP_NI6_TXINTERVAL 0xffc03f98 /* Sets the NAK response timeout on Endpoint6 */
#define USB_EP_NI6_RXTYPE 0xffc03f9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint6 */
#define USB_EP_NI6_RXINTERVAL 0xffc03fa0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint6 */
+#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
/* USB Endpoint 7 Control Registers */
-#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
#define USB_EP_NI7_TXMAXP 0xffc03fc0 /* Maximum packet size for Host Tx endpoint7 */
#define USB_EP_NI7_TXCSR 0xffc03fc4 /* Control Status register for endpoint7 */
#define USB_EP_NI7_RXMAXP 0xffc03fc8 /* Maximum packet size for Host Rx endpoint7 */
@@ -361,8 +361,9 @@
#define USB_EP_NI7_TXTYPE 0xffc03fd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
#define USB_EP_NI7_TXINTERVAL 0xffc03fd8 /* Sets the NAK response timeout on Endpoint7 */
#define USB_EP_NI7_RXTYPE 0xffc03fdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03ff0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03ff8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+#define USB_EP_NI7_RXINTERVAL 0xffc03fe0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
+#define USB_EP_NI7_TXCOUNT 0xffc03fe8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
+
#define USB_DMA_INTERRUPT 0xffc04000 /* Indicates pending interrupts for the DMA channels */
/* USB Channel 0 Config Registers */
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 3926cd9..9231a94 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -243,7 +243,6 @@ static struct platform_device bfin_uart0_device = {
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -257,7 +256,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
.offset = MTDPART_OFS_APPEND,
},
};
-#endif
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 3
@@ -286,11 +284,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
-#ifdef CONFIG_MTD_PARTITIONS
.part_probe_types = part_probes,
.partitions = bfin_plat_nand_partitions,
.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
-#endif
},
.ctrl = {
.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
deleted file mode 100644
index 3a69474..0000000
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#ifdef CONFIG_BFIN_UART0_CTSRTS
-# define CONFIG_SERIAL_BFIN_CTSRTS
-# ifndef CONFIG_UART0_CTS_PIN
-# define CONFIG_UART0_CTS_PIN -1
-# endif
-# ifndef CONFIG_UART0_RTS_PIN
-# define CONFIG_UART0_RTS_PIN -1
-# endif
-#endif
-
-struct bfin_serial_res {
- unsigned long uart_base_addr;
- int uart_irq;
- int uart_status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- unsigned int uart_tx_dma_channel;
- unsigned int uart_rx_dma_channel;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int uart_cts_pin;
- int uart_rts_pin;
-#endif
-};
-
-struct bfin_serial_res bfin_serial_resource[] = {
- {
- 0xFFC00400,
- IRQ_UART_RX,
- IRQ_UART_ERROR,
-#ifdef CONFIG_SERIAL_BFIN_DMA
- CH_UART_TX,
- CH_UART_RX,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- CONFIG_UART0_CTS_PIN,
- CONFIG_UART0_RTS_PIN,
-#endif
- }
-};
-
-#define DRIVER_NAME "bfin-uart"
-
-#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index f96933f..225d311 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1753,6 +1753,8 @@ ENTRY(_sys_call_table)
.long _sys_open_by_handle_at
.long _sys_clock_adjtime
.long _sys_syncfs
+ .long _sys_setns
+ .long _sys_sendmmsg /* 380 */
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c
index b71cebc..e253211 100644
--- a/arch/blackfin/mm/maccess.c
+++ b/arch/blackfin/mm/maccess.c
@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
return bfin_mem_access_type(addr, size);
}
-long probe_kernel_read(void *dst, void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
{
unsigned long lsrc = (unsigned long)src;
int mem_type;
@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
return -EFAULT;
}
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
unsigned long ldst = (unsigned long)dst;
int mem_type;
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index b6b94a2..17addac 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -270,7 +270,6 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE if ETRAX_ARCH_V32
select MTD_CHAR
select MTD_BLOCK
- select MTD_PARTITIONS
select MTD_COMPLEX_MAPPINGS
help
This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index ed708e1..a4bbdfd 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -372,7 +372,7 @@ static int __init init_axis_flash(void)
#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
if (mymtd) {
main_partition.size = mymtd->size;
- err = add_mtd_partitions(mymtd, &main_partition, 1);
+ err = mtd_device_register(mymtd, &main_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"partition for whole main mtd device!\n");
@@ -382,10 +382,12 @@ static int __init init_axis_flash(void)
if (mymtd) {
if (use_default_ptable) {
printk(KERN_INFO " Using default partition table.\n");
- err = add_mtd_partitions(mymtd, axis_default_partitions,
- NUM_DEFAULT_PARTITIONS);
+ err = mtd_device_register(mymtd,
+ axis_default_partitions,
+ NUM_DEFAULT_PARTITIONS);
} else {
- err = add_mtd_partitions(mymtd, axis_partitions, pidx);
+ err = mtd_device_register(mymtd, axis_partitions,
+ pidx);
}
if (err)
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 0d6420d..1161883 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -937,6 +937,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_setns /* 335 */
/*
* NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 1633b12..41a2732 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -405,7 +405,6 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE
select MTD_CHAR
select MTD_BLOCK
- select MTD_PARTITIONS
select MTD_COMPLEX_MAPPINGS
help
This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 7b155f8..a2bde37 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -561,7 +561,7 @@ static int __init init_axis_flash(void)
#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
if (main_mtd) {
main_partition.size = main_mtd->size;
- err = add_mtd_partitions(main_mtd, &main_partition, 1);
+ err = mtd_device_register(main_mtd, &main_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"partition for whole main mtd device!\n");
@@ -597,7 +597,8 @@ static int __init init_axis_flash(void)
mtd_ram->erasesize = (main_mtd ? main_mtd->erasesize :
CONFIG_ETRAX_PTABLE_SECTOR);
} else {
- err = add_mtd_partitions(main_mtd, &partition[part], 1);
+ err = mtd_device_register(main_mtd, &partition[part],
+ 1);
if (err)
panic("axisflashmap: Could not add mtd "
"partition %d\n", part);
@@ -633,7 +634,7 @@ static int __init init_axis_flash(void)
#ifndef CONFIG_ETRAX_VCS_SIM
if (aux_mtd) {
aux_partition.size = aux_mtd->size;
- err = add_mtd_partitions(aux_mtd, &aux_partition, 1);
+ err = mtd_device_register(aux_mtd, &aux_partition, 1);
if (err)
panic("axisflashmap: Could not initialize "
"aux mtd device!\n");
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 3abf12c..84fed7e 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -880,6 +880,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_setns /* 335 */
/*
* NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index f6fad83..f921b8b 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -339,10 +339,11 @@
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
+#define __NR_setns 335
#ifdef __KERNEL__
-#define NR_syscalls 335
+#define NR_syscalls 336
#include <arch/unistd.h>
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index b28da49..a569dff 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -343,10 +343,11 @@
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
+#define __NR_setns 337
#ifdef __KERNEL__
-#define NR_syscalls 337
+#define NR_syscalls 338
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 63d579b..017d6d7 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1526,5 +1526,6 @@ sys_call_table:
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
+ .long sys_setns
syscall_table_size = (. - sys_call_table)
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index 50f2c5a3..2c3f8e6 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -325,10 +325,11 @@
#define __NR_move_pages 317
#define __NR_getcpu 318
#define __NR_epoll_pwait 319
+#define __NR_setns 320
#ifdef __KERNEL__
-#define NR_syscalls 320
+#define NR_syscalls 321
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index faefaff..f4b2e67 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -333,6 +333,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* sys_move_pages */
.long SYMBOL_NAME(sys_getcpu)
.long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_pwait */
+ .long SYMBOL_NAME(sys_setns) /* 320 */
.macro call_sp addr
mov.l #SYMBOL_NAME(\addr),er6
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 404d037..7c928da 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -319,11 +319,13 @@
#define __NR_open_by_handle_at 1327
#define __NR_clock_adjtime 1328
#define __NR_syncfs 1329
+#define __NR_setns 1330
+#define __NR_sendmmsg 1331
#ifdef __KERNEL__
-#define NR_syscalls 306 /* length of syscall table */
+#define NR_syscalls 308 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6de2e23..97dd2ab 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1775,6 +1775,8 @@ sys_call_table:
data8 sys_open_by_handle_at
data8 sys_clock_adjtime
data8 sys_syncfs
+ data8 sys_setns // 1330
+ data8 sys_sendmmsg
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index c705456..3e1db56 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -330,10 +330,11 @@
/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
+#define __NR_setns 325
#ifdef __KERNEL__
-#define NR_syscalls 325
+#define NR_syscalls 326
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 60536e2..528f2e6 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -324,3 +324,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
+ .long sys_setns /* 325 */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f3b649d..43f984e 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -349,10 +349,11 @@
#define __NR_open_by_handle_at 341
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
+#define __NR_setns 344
#ifdef __KERNEL__
-#define NR_syscalls 344
+#define NR_syscalls 345
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 6f7b091..00d1452 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -364,4 +364,5 @@ ENTRY(sys_call_table)
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
+ .long sys_setns
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 30edd61..7d7092b 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -390,8 +390,9 @@
#define __NR_open_by_handle_at 372
#define __NR_clock_adjtime 373
#define __NR_syncfs 374
+#define __NR_setns 375
-#define __NR_syscalls 375
+#define __NR_syscalls 376
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 00ee90f..b15cc21 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -130,7 +130,7 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 85cea81..d915a12 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -379,3 +379,4 @@ ENTRY(sys_call_table)
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
+ .long sys_setns /* 375 */
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 008f657..0ee02f5 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -16,7 +16,6 @@
static struct map_info flash_map;
static struct mtd_info *mymtd;
-#ifdef CONFIG_MTD_PARTITIONS
static int nr_parts;
static struct mtd_partition *parts;
static const char *part_probe_types[] = {
@@ -26,7 +25,6 @@ static const char *part_probe_types[] = {
#endif
NULL
};
-#endif
/**
* Module/ driver initialization.
@@ -63,17 +61,10 @@ static int __init flash_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mymtd,
part_probe_types,
&parts, 0);
- if (nr_parts > 0)
- add_mtd_partitions(mymtd, parts, nr_parts);
- else
- add_mtd_device(mymtd);
-#else
- add_mtd_device(mymtd);
-#endif
+ mtd_device_register(mymtd, parts, nr_parts);
} else {
pr_err("Failed to register MTD device for flash\n");
}
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index f29b862..857d9b7 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -14,9 +14,6 @@
#ifdef CONFIG_OF
#include <asm/bootinfo.h>
-/* which is compatible with the flattened device tree (FDT) */
-#define cmd_line arcs_cmdline
-
extern int early_init_dt_scan_memory_arch(unsigned long node,
const char *uname, int depth, void *data);
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index fa2e37e..6fcfc48 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -363,16 +363,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 340)
#define __NR_clock_adjtime (__NR_Linux + 341)
#define __NR_syncfs (__NR_Linux + 342)
+#define __NR_setns (__NR_Linux + 343)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 342
+#define __NR_Linux_syscalls 343
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 342
+#define __NR_O32_Linux_syscalls 343
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -682,16 +683,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 299)
#define __NR_clock_adjtime (__NR_Linux + 300)
#define __NR_syncfs (__NR_Linux + 301)
+#define __NR_setns (__NR_Linux + 302)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 301
+#define __NR_Linux_syscalls 302
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 301
+#define __NR_64_Linux_syscalls 302
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1006,16 +1008,17 @@
#define __NR_open_by_handle_at (__NR_Linux + 304)
#define __NR_clock_adjtime (__NR_Linux + 305)
#define __NR_syncfs (__NR_Linux + 306)
+#define __NR_setns (__NR_Linux + 307)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 306
+#define __NR_Linux_syscalls 307
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 306
+#define __NR_N32_Linux_syscalls 307
#ifdef __KERNEL__
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index a19811e9..5b7eade4 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -83,7 +83,8 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
+
/* Scan memory nodes */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7a8e1dd..99e656e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -589,6 +589,7 @@ einval: li v0, -ENOSYS
sys sys_open_by_handle_at 3 /* 4340 */
sys sys_clock_adjtime 2
sys sys_syncfs 1
+ sys sys_setns 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2d31c83..fb0575f 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -428,4 +428,5 @@ sys_call_table:
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
+ PTR sys_setns
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 38a0503..4de0c55 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
PTR sys_open_by_handle_at
PTR compat_sys_clock_adjtime /* 6305 */
PTR sys_syncfs
+ PTR sys_setns
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 91ea5e4..4a387de 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -546,4 +546,5 @@ sys_call_table:
PTR compat_sys_open_by_handle_at /* 4340 */
PTR compat_sys_clock_adjtime
PTR sys_syncfs
+ PTR sys_setns
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 812816c..ec38e00 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -639,7 +639,6 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
.flags = IORESOURCE_MEM,
};
struct platform_device *pdev;
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition parts[2];
struct physmap_flash_data pdata_part;
@@ -658,7 +657,7 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
pdata_part.parts = parts;
pdata = &pdata_part;
}
-#endif
+
pdev = platform_device_alloc("physmap-flash", no);
if (!pdev ||
platform_device_add_resources(pdev, &res, 1) ||
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 9d056f5..9051f92 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -349,10 +349,11 @@
#define __NR_rt_tgsigqueueinfo 336
#define __NR_perf_event_open 337
#define __NR_recvmmsg 338
+#define __NR_setns 339
#ifdef __KERNEL__
-#define NR_syscalls 339
+#define NR_syscalls 340
/*
* specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index fb93ad7..ae435e1 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -759,6 +759,7 @@ ENTRY(sys_call_table)
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg
+ .long sys_setns
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 9cbc2c3..3392de3 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -820,8 +820,9 @@
#define __NR_name_to_handle_at (__NR_Linux + 325)
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
+#define __NR_setns (__NR_Linux + 328)
-#define __NR_Linux_syscalls (__NR_syncfs + 1)
+#define __NR_Linux_syscalls (__NR_setns + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index a5b02ce..34a4f5a 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -426,6 +426,7 @@
ENTRY_SAME(name_to_handle_at) /* 325 */
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
+ ENTRY_SAME(setns)
/* Nothing yet */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 8489d37..f6736b7 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -353,3 +353,4 @@ COMPAT_SYS_SPU(open_by_handle_at)
COMPAT_SYS_SPU(clock_adjtime)
SYSCALL_SPU(syncfs)
COMPAT_SYS_SPU(sendmmsg)
+SYSCALL_SPU(setns)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d23c81..b8b3f59 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -372,10 +372,11 @@
#define __NR_clock_adjtime 347
#define __NR_syncfs 348
#define __NR_sendmmsg 349
+#define __NR_setns 350
#ifdef __KERNEL__
-#define __NR_syscalls 350
+#define __NR_syscalls 351
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 48aeb55..f2c906b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -694,7 +694,7 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 9089b04..7667db4 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -715,7 +715,8 @@ static struct syscore_ops pmacpic_syscore_ops = {
static int __init init_pmacpic_syscore(void)
{
- register_syscore_ops(&pmacpic_syscore_ops);
+ if (pmac_irq_hw[0])
+ register_syscore_ops(&pmacpic_syscore_ops);
return 0;
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c4773a2..e4efacf 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -577,16 +577,16 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- unsigned long pfn, bits;
+ unsigned long address, bits;
unsigned char skey;
- pfn = pte_val(*ptep) >> PAGE_SHIFT;
- skey = page_get_storage_key(pfn);
+ address = pte_val(*ptep) & PAGE_MASK;
+ skey = page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
if (bits) {
skey ^= bits;
- page_set_storage_key(pfn, skey, 1);
+ page_set_storage_key(address, skey, 1);
}
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
@@ -628,16 +628,16 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- unsigned long pfn;
+ unsigned long address;
unsigned long okey, nkey;
- pfn = pte_val(*ptep) >> PAGE_SHIFT;
- okey = nkey = page_get_storage_key(pfn);
+ address = pte_val(*ptep) & PAGE_MASK;
+ okey = nkey = page_get_storage_key(address);
nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set page access key and fetch protection bit from pgste */
nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
if (okey != nkey)
- page_set_storage_key(pfn, nkey, 1);
+ page_set_storage_key(address, nkey, 1);
#endif
}
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 9208e69..404bdb96 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -276,7 +276,8 @@
#define __NR_open_by_handle_at 336
#define __NR_clock_adjtime 337
#define __NR_syncfs 338
-#define NR_syscalls 339
+#define __NR_setns 339
+#define NR_syscalls 340
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1dc96ea..1f5eb78 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1904,3 +1904,9 @@ compat_sys_clock_adjtime_wrapper:
sys_syncfs_wrapper:
lgfr %r2,%r2 # int
jg sys_syncfs
+
+ .globl sys_setns_wrapper
+sys_setns_wrapper:
+ lgfr %r2,%r2 # int
+ lgfr %r3,%r3 # int
+ jg sys_setns
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9c65fd4..6ee39ef 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -347,3 +347,4 @@ SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrappe
SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
+SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 71a4b0d..51e5cd9 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -19,7 +19,7 @@
* using the stura instruction.
* Returns the number of bytes copied or -EFAULT.
*/
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long count, aligned;
int offset, mask;
@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
return rc ? rc : count;
}
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
long copied = 0;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 14c6fae..b09763f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -71,12 +71,15 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
void rcu_table_freelist_finish(void)
{
- struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+ struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
+ struct rcu_table_freelist *batch = *batchp;
if (!batch)
- return;
+ goto out;
call_rcu(&batch->rcu, rcu_table_freelist_callback);
- __get_cpu_var(rcu_table_freelist) = NULL;
+ *batchp = NULL;
+out:
+ put_cpu_var(rcu_table_freelist);
}
static void smp_sync(void *arg)
@@ -141,20 +144,23 @@ void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
struct rcu_table_freelist *batch;
+ preempt_disable();
if (atomic_read(&mm->mm_users) < 2 &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
crst_table_free(mm, table);
- return;
+ goto out;
}
batch = rcu_table_freelist_get(mm);
if (!batch) {
smp_call_function(smp_sync, NULL, 1);
crst_table_free(mm, table);
- return;
+ goto out;
}
batch->table[--batch->crst_index] = table;
if (batch->pgt_index >= batch->crst_index)
rcu_table_freelist_finish();
+out:
+ preempt_enable();
}
#ifdef CONFIG_64BIT
@@ -323,16 +329,17 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned long bits;
+ preempt_disable();
if (atomic_read(&mm->mm_users) < 2 &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
page_table_free(mm, table);
- return;
+ goto out;
}
batch = rcu_table_freelist_get(mm);
if (!batch) {
smp_call_function(smp_sync, NULL, 1);
page_table_free(mm, table);
- return;
+ goto out;
}
bits = (mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
@@ -345,6 +352,8 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
batch->table[batch->pgt_index++] = table;
if (batch->pgt_index >= batch->crst_index)
rcu_table_freelist_finish();
+out:
+ preempt_enable();
}
/*
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index bb7d270..3432008 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -374,8 +374,9 @@
#define __NR_clock_adjtime 361
#define __NR_syncfs 362
#define __NR_sendmmsg 363
+#define __NR_setns 364
-#define NR_syscalls 364
+#define NR_syscalls 365
#ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 46327ce..ec98986 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -395,10 +395,11 @@
#define __NR_clock_adjtime 372
#define __NR_syncfs 373
#define __NR_sendmmsg 374
+#define __NR_setns 375
#ifdef __KERNEL__
-#define NR_syscalls 375
+#define NR_syscalls 376
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 7c486f3..39b051d 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -381,3 +381,4 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
+ .long sys_setns
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index ba1a737a..089c4d8 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -401,3 +401,4 @@ sys_call_table:
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
+ .long sys_setns /* 375 */
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index c5387ed..6260d5d 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -405,8 +405,9 @@
#define __NR_clock_adjtime 334
#define __NR_syncfs 335
#define __NR_sendmmsg 336
+#define __NR_setns 337
-#define NR_syscalls 337
+#define NR_syscalls 338
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 332c83f..6e492d5 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-/*335*/ .long sys_syncfs, sys_sendmmsg
+/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 43887ca..f566518 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
- .word sys_syncfs, compat_sys_sendmmsg
+ .word sys_syncfs, compat_sys_sendmmsg, sys_setns
#endif /* CONFIG_COMPAT */
@@ -162,4 +162,4 @@ sys_call_table:
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
- .word sys_syncfs, sys_sendmmsg
+ .word sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e1e5010..0249b8b 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -11,6 +11,7 @@ config TILE
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
+ select SYS_HYPERVISOR
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 0bed3ec..2ac4228 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -40,6 +40,10 @@
#define HARDWALL_DEACTIVATE \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE)
+#define _HARDWALL_GET_ID 4
+#define HARDWALL_GET_ID \
+ _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
+
#ifndef __KERNEL__
/* This is the canonical name expected by userspace. */
@@ -47,9 +51,14 @@
#else
-/* Hook for /proc/tile/hardwall. */
-struct seq_file;
-int proc_tile_hardwall_show(struct seq_file *sf, void *v);
+/* /proc hooks for hardwall. */
+struct proc_dir_entry;
+#ifdef CONFIG_HARDWALL
+void proc_tile_hardwall_init(struct proc_dir_entry *root);
+int proc_pid_hardwall(struct task_struct *task, char *buffer);
+#else
+static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
+#endif
#endif
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index b4c8e8e..b4dbc05 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -5,7 +5,7 @@
extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \
pci-dma.o proc.o process.o ptrace.o reboot.o \
- setup.o signal.o single_step.o stack.o sys.o time.o traps.o \
+ setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \
intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
obj-$(CONFIG_HARDWALL) += hardwall.o
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 3bddef7..8c41891 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -40,16 +40,25 @@
struct hardwall_info {
struct list_head list; /* "rectangles" list */
struct list_head task_head; /* head of tasks in this hardwall */
+ struct cpumask cpumask; /* cpus in the rectangle */
int ulhc_x; /* upper left hand corner x coord */
int ulhc_y; /* upper left hand corner y coord */
int width; /* rectangle width */
int height; /* rectangle height */
+ int id; /* integer id for this hardwall */
int teardown_in_progress; /* are we tearing this one down? */
};
/* Currently allocated hardwall rectangles */
static LIST_HEAD(rectangles);
+/* /proc/tile/hardwall */
+static struct proc_dir_entry *hardwall_proc_dir;
+
+/* Functions to manage files in /proc/tile/hardwall. */
+static void hardwall_add_proc(struct hardwall_info *rect);
+static void hardwall_remove_proc(struct hardwall_info *rect);
+
/*
* Guard changes to the hardwall data structures.
* This could be finer grained (e.g. one lock for the list of hardwall
@@ -105,6 +114,8 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
r->ulhc_y = cpu_y(ulhc);
r->width = cpu_x(lrhc) - r->ulhc_x + 1;
r->height = cpu_y(lrhc) - r->ulhc_y + 1;
+ cpumask_copy(&r->cpumask, mask);
+ r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
/* Width and height must be positive */
if (r->width <= 0 || r->height <= 0)
@@ -388,6 +399,9 @@ static struct hardwall_info *hardwall_create(
/* Set up appropriate hardwalling on all affected cpus. */
hardwall_setup(rect);
+ /* Create a /proc/tile/hardwall entry. */
+ hardwall_add_proc(rect);
+
return rect;
}
@@ -645,6 +659,9 @@ static void hardwall_destroy(struct hardwall_info *rect)
/* Restart switch and disable firewall. */
on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+ /* Remove the /proc/tile/hardwall entry. */
+ hardwall_remove_proc(rect);
+
/* Now free the rectangle from the list. */
spin_lock_irqsave(&hardwall_lock, flags);
BUG_ON(!list_empty(&rect->task_head));
@@ -654,35 +671,57 @@ static void hardwall_destroy(struct hardwall_info *rect)
}
-/*
- * Dump hardwall state via /proc; initialized in arch/tile/sys/proc.c.
- */
-int proc_tile_hardwall_show(struct seq_file *sf, void *v)
+static int hardwall_proc_show(struct seq_file *sf, void *v)
{
- struct hardwall_info *r;
+ struct hardwall_info *rect = sf->private;
+ char buf[256];
- if (udn_disabled) {
- seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height);
- return 0;
- }
-
- spin_lock_irq(&hardwall_lock);
- list_for_each_entry(r, &rectangles, list) {
- struct task_struct *p;
- seq_printf(sf, "%dx%d %d,%d pids:",
- r->width, r->height, r->ulhc_x, r->ulhc_y);
- list_for_each_entry(p, &r->task_head, thread.hardwall_list) {
- unsigned int cpu = cpumask_first(&p->cpus_allowed);
- unsigned int x = cpu % smp_width;
- unsigned int y = cpu / smp_width;
- seq_printf(sf, " %d@%d,%d", p->pid, x, y);
- }
- seq_printf(sf, "\n");
- }
- spin_unlock_irq(&hardwall_lock);
+ int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+ buf[rc++] = '\n';
+ seq_write(sf, buf, rc);
return 0;
}
+static int hardwall_proc_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, hardwall_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations hardwall_proc_fops = {
+ .open = hardwall_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void hardwall_add_proc(struct hardwall_info *rect)
+{
+ char buf[64];
+ snprintf(buf, sizeof(buf), "%d", rect->id);
+ proc_create_data(buf, 0444, hardwall_proc_dir,
+ &hardwall_proc_fops, rect);
+}
+
+static void hardwall_remove_proc(struct hardwall_info *rect)
+{
+ char buf[64];
+ snprintf(buf, sizeof(buf), "%d", rect->id);
+ remove_proc_entry(buf, hardwall_proc_dir);
+}
+
+int proc_pid_hardwall(struct task_struct *task, char *buffer)
+{
+ struct hardwall_info *rect = task->thread.hardwall;
+ return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+}
+
+void proc_tile_hardwall_init(struct proc_dir_entry *root)
+{
+ if (!udn_disabled)
+ hardwall_proc_dir = proc_mkdir("hardwall", root);
+}
+
/*
* Character device support via ioctl/close.
@@ -716,6 +755,9 @@ static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
return -EINVAL;
return hardwall_deactivate(current);
+ case _HARDWALL_GET_ID:
+ return rect ? rect->id : -EINVAL;
+
default:
return -EINVAL;
}
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 2e02c41..62d8208 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -27,6 +27,7 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/homecache.h>
+#include <asm/hardwall.h>
#include <arch/chip.h>
@@ -88,3 +89,75 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+/*
+ * Support /proc/tile directory
+ */
+
+static int __init proc_tile_init(void)
+{
+ struct proc_dir_entry *root = proc_mkdir("tile", NULL);
+ if (root == NULL)
+ return 0;
+
+ proc_tile_hardwall_init(root);
+
+ return 0;
+}
+
+arch_initcall(proc_tile_init);
+
+/*
+ * Support /proc/sys/tile directory
+ */
+
+#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
+static ctl_table unaligned_subtable[] = {
+ {
+ .procname = "enabled",
+ .data = &unaligned_fixup,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "printk",
+ .data = &unaligned_printk,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "count",
+ .data = &unaligned_fixup_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {}
+};
+
+static ctl_table unaligned_table[] = {
+ {
+ .procname = "unaligned_fixup",
+ .mode = 0555,
+ .child = unaligned_subtable
+ },
+ {}
+};
+#endif
+
+static struct ctl_path tile_path[] = {
+ { .procname = "tile" },
+ { }
+};
+
+static int __init proc_sys_tile_init(void)
+{
+#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
+ register_sysctl_paths(tile_path, unaligned_table);
+#endif
+ return 0;
+}
+
+arch_initcall(proc_sys_tile_init);
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
new file mode 100644
index 0000000..b671a86
--- /dev/null
+++ b/arch/tile/kernel/sysfs.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * /sys entry support.
+ */
+
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <hv/hypervisor.h>
+
+/* Return a string queried from the hypervisor, truncated to page size. */
+static ssize_t get_hv_confstr(char *page, int query)
+{
+ ssize_t n = hv_confstr(query, (unsigned long)page, PAGE_SIZE - 1);
+ n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1) - 1;
+ if (n)
+ page[n++] = '\n';
+ page[n] = '\0';
+ return n;
+}
+
+static ssize_t chip_width_show(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *page)
+{
+ return sprintf(page, "%u\n", smp_width);
+}
+static SYSDEV_CLASS_ATTR(chip_width, 0444, chip_width_show, NULL);
+
+static ssize_t chip_height_show(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *page)
+{
+ return sprintf(page, "%u\n", smp_height);
+}
+static SYSDEV_CLASS_ATTR(chip_height, 0444, chip_height_show, NULL);
+
+static ssize_t chip_serial_show(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *page)
+{
+ return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
+}
+static SYSDEV_CLASS_ATTR(chip_serial, 0444, chip_serial_show, NULL);
+
+static ssize_t chip_revision_show(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *page)
+{
+ return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
+}
+static SYSDEV_CLASS_ATTR(chip_revision, 0444, chip_revision_show, NULL);
+
+
+static ssize_t type_show(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *page)
+{
+ return sprintf(page, "tilera\n");
+}
+static SYSDEV_CLASS_ATTR(type, 0444, type_show, NULL);
+
+#define HV_CONF_ATTR(name, conf) \
+ static ssize_t name ## _show(struct sysdev_class *dev, \
+ struct sysdev_class_attribute *attr, \
+ char *page) \
+ { \
+ return get_hv_confstr(page, conf); \
+ } \
+ static SYSDEV_CLASS_ATTR(name, 0444, name ## _show, NULL);
+
+HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER)
+HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER)
+
+HV_CONF_ATTR(board_part, HV_CONFSTR_BOARD_PART_NUM)
+HV_CONF_ATTR(board_serial, HV_CONFSTR_BOARD_SERIAL_NUM)
+HV_CONF_ATTR(board_revision, HV_CONFSTR_BOARD_REV)
+HV_CONF_ATTR(board_description, HV_CONFSTR_BOARD_DESC)
+HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
+HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
+HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
+HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
+
+static struct attribute *board_attrs[] = {
+ &attr_board_part.attr,
+ &attr_board_serial.attr,
+ &attr_board_revision.attr,
+ &attr_board_description.attr,
+ &attr_mezz_part.attr,
+ &attr_mezz_serial.attr,
+ &attr_mezz_revision.attr,
+ &attr_mezz_description.attr,
+ &attr_switch_control.attr,
+ NULL
+};
+
+static struct attribute_group board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+};
+
+
+static struct bin_attribute hvconfig_bin;
+
+static ssize_t
+hvconfig_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ static size_t size;
+
+ /* Lazily learn the true size (minus the trailing NUL). */
+ if (size == 0)
+ size = hv_confstr(HV_CONFSTR_HV_CONFIG, 0, 0) - 1;
+
+ /* Check and adjust input parameters. */
+ if (off > size)
+ return -EINVAL;
+ if (count > size - off)
+ count = size - off;
+
+ if (count) {
+ /* Get a copy of the hvc and copy out the relevant portion. */
+ char *hvc;
+
+ size = off + count;
+ hvc = kmalloc(size, GFP_KERNEL);
+ if (hvc == NULL)
+ return -ENOMEM;
+ hv_confstr(HV_CONFSTR_HV_CONFIG, (unsigned long)hvc, size);
+ memcpy(buf, hvc + off, count);
+ kfree(hvc);
+ }
+
+ return count;
+}
+
+static int __init create_sysfs_entries(void)
+{
+ struct sysdev_class *cls = &cpu_sysdev_class;
+ int err = 0;
+
+#define create_cpu_attr(name) \
+ if (!err) \
+ err = sysfs_create_file(&cls->kset.kobj, &attr_##name.attr);
+ create_cpu_attr(chip_width);
+ create_cpu_attr(chip_height);
+ create_cpu_attr(chip_serial);
+ create_cpu_attr(chip_revision);
+
+#define create_hv_attr(name) \
+ if (!err) \
+ err = sysfs_create_file(hypervisor_kobj, &attr_##name.attr);
+ create_hv_attr(type);
+ create_hv_attr(version);
+ create_hv_attr(config_version);
+
+ if (!err)
+ err = sysfs_create_group(hypervisor_kobj, &board_attr_group);
+
+ if (!err) {
+ sysfs_bin_attr_init(&hvconfig_bin);
+ hvconfig_bin.attr.name = "hvconfig";
+ hvconfig_bin.attr.mode = S_IRUGO;
+ hvconfig_bin.read = hvconfig_bin_read;
+ hvconfig_bin.size = PAGE_SIZE;
+ err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
+ }
+
+ return err;
+}
+subsys_initcall(create_sysfs_entries);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 95f5826..c1870dd 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -849,4 +849,5 @@ ia32_sys_call_table:
.quad compat_sys_clock_adjtime
.quad sys_syncfs
.quad compat_sys_sendmmsg /* 345 */
+ .quad sys_setns
ia32_syscall_end:
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 416d865..610001d 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -139,7 +139,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
- else if (c1e_detected)
+ else if (amd_e400_c1e_detected)
return 1;
else
return max_cstate;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 5dc6acc..71cc380 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -125,7 +125,7 @@
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 617bd56..7b439d9 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -4,30 +4,33 @@
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
+
#include <linux/smp.h>
-static inline void fill_ldt(struct desc_struct *desc,
- const struct user_desc *info)
-{
- desc->limit0 = info->limit & 0x0ffff;
- desc->base0 = info->base_addr & 0x0000ffff;
-
- desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
- desc->type = (info->read_exec_only ^ 1) << 1;
- desc->type |= info->contents << 2;
- desc->s = 1;
- desc->dpl = 0x3;
- desc->p = info->seg_not_present ^ 1;
- desc->limit = (info->limit & 0xf0000) >> 16;
- desc->avl = info->useable;
- desc->d = info->seg_32bit;
- desc->g = info->limit_in_pages;
- desc->base2 = (info->base_addr & 0xff000000) >> 24;
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
+{
+ desc->limit0 = info->limit & 0x0ffff;
+
+ desc->base0 = (info->base_addr & 0x0000ffff);
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+ desc->limit = (info->limit & 0xf0000) >> 16;
+ desc->avl = info->useable;
+ desc->d = info->seg_32bit;
+ desc->g = info->limit_in_pages;
+
+ desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It is useless anyway
* because 64bit system calls require __USER_CS:
*/
- desc->l = 0;
+ desc->l = 0;
}
extern struct desc_ptr idt_descr;
@@ -36,6 +39,7 @@ extern gate_desc idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
+
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
@@ -48,16 +52,16 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
unsigned dpl, unsigned ist, unsigned seg)
{
- gate->offset_low = PTR_LOW(func);
- gate->segment = __KERNEL_CS;
- gate->ist = ist;
- gate->p = 1;
- gate->dpl = dpl;
- gate->zero0 = 0;
- gate->zero1 = 0;
- gate->type = type;
- gate->offset_middle = PTR_MIDDLE(func);
- gate->offset_high = PTR_HIGH(func);
+ gate->offset_low = PTR_LOW(func);
+ gate->segment = __KERNEL_CS;
+ gate->ist = ist;
+ gate->p = 1;
+ gate->dpl = dpl;
+ gate->zero0 = 0;
+ gate->zero1 = 0;
+ gate->type = type;
+ gate->offset_middle = PTR_MIDDLE(func);
+ gate->offset_high = PTR_HIGH(func);
}
#else
@@ -66,8 +70,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
- gate->b = (base & 0xffff0000) |
- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+ gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}
#endif
@@ -75,31 +78,29 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
static inline int desc_empty(const void *ptr)
{
const u32 *desc = ptr;
+
return !(desc[0] | desc[1]);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define load_TR_desc() native_load_tr_desc()
-#define load_gdt(dtr) native_load_gdt(dtr)
-#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
-
-#define store_gdt(dtr) native_store_gdt(dtr)
-#define store_idt(dtr) native_store_idt(dtr)
-#define store_tr(tr) (tr = native_store_tr())
-
-#define load_TLS(t, cpu) native_load_tls(t, cpu)
-#define set_ldt native_set_ldt
-
-#define write_ldt_entry(dt, entry, desc) \
- native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
- native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) \
- native_write_idt_entry(dt, entry, g)
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
+
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
+
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
+
+#define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
@@ -112,33 +113,27 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
-static inline void native_write_idt_entry(gate_desc *idt, int entry,
- const gate_desc *gate)
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
memcpy(&idt[entry], gate, sizeof(*gate));
}
-static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
- const void *desc)
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
memcpy(&ldt[entry], desc, 8);
}
-static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
- const void *desc, int type)
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
unsigned int size;
+
switch (type) {
- case DESC_TSS:
- size = sizeof(tss_desc);
- break;
- case DESC_LDT:
- size = sizeof(ldt_desc);
- break;
- default:
- size = sizeof(struct desc_struct);
- break;
+ case DESC_TSS: size = sizeof(tss_desc); break;
+ case DESC_LDT: size = sizeof(ldt_desc); break;
+ default: size = sizeof(*gdt); break;
}
+
memcpy(&gdt[entry], desc, size);
}
@@ -154,20 +149,21 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
}
-static inline void set_tssldt_descriptor(void *d, unsigned long addr,
- unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
struct ldttss_desc64 *desc = d;
+
memset(desc, 0, sizeof(*desc));
- desc->limit0 = size & 0xFFFF;
- desc->base0 = PTR_LOW(addr);
- desc->base1 = PTR_MIDDLE(addr) & 0xFF;
- desc->type = type;
- desc->p = 1;
- desc->limit1 = (size >> 16) & 0xF;
- desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
- desc->base3 = PTR_HIGH(addr);
+
+ desc->limit0 = size & 0xFFFF;
+ desc->base0 = PTR_LOW(addr);
+ desc->base1 = PTR_MIDDLE(addr) & 0xFF;
+ desc->type = type;
+ desc->p = 1;
+ desc->limit1 = (size >> 16) & 0xF;
+ desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
+ desc->base3 = PTR_HIGH(addr);
#else
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
@@ -237,14 +233,16 @@ static inline void native_store_idt(struct desc_ptr *dtr)
static inline unsigned long native_store_tr(void)
{
unsigned long tr;
+
asm volatile("str %0":"=r" (tr));
+
return tr;
}
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
- unsigned int i;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
@@ -313,6 +311,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
+
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
@@ -343,8 +342,9 @@ static inline void alloc_system_vector(int vector)
set_bit(vector, used_vectors);
if (first_system_vector > vector)
first_system_vector = vector;
- } else
+ } else {
BUG();
+ }
}
static inline void alloc_intr_gate(unsigned int n, void *addr)
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 38d8737..f49253d7 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -16,6 +16,6 @@ static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
#endif /* CONFIG_X86_64 */
-void c1e_remove_cpu(int cpu);
+void amd_e400_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index aeff3e8..5f55e69 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -11,14 +11,14 @@
typedef struct {
void *ldt;
int size;
- struct mutex lock;
- void *vdso;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
unsigned short ia32_compat;
#endif
+ struct mutex lock;
+ void *vdso;
} mm_context_t;
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4c25ab4..2193715 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -754,10 +754,10 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_c1e_mask(void);
+extern void init_amd_e400_c1e_mask(void);
extern unsigned long boot_option_idle_override;
-extern bool c1e_detected;
+extern bool amd_e400_c1e_detected;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
IDLE_POLL, IDLE_FORCE_MWAIT};
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index fb6a625..593485b3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -351,10 +351,11 @@
#define __NR_clock_adjtime 343
#define __NR_syncfs 344
#define __NR_sendmmsg 345
+#define __NR_setns 346
#ifdef __KERNEL__
-#define NR_syscalls 346
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 79f90eb..705bf13 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -679,6 +679,8 @@ __SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_sendmmsg 307
__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
+#define __NR_setns 308
+__SYSCALL(__NR_setns, sys_setns)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
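As a quick illustration of what these two hunks wire up, here is a minimal user-space sketch (hypothetical demo code, not part of the patch) that joins another process's namespace by invoking the new syscall number directly; it assumes an x86_64 kernel carrying this change and a libc that does not yet export a setns() wrapper:

/* setns_demo.c - hypothetical example; build with: gcc -o setns_demo setns_demo.c
 * usage: ./setns_demo /proc/<pid>/ns/<type>   (needs CAP_SYS_ADMIN)
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_setns
#define __NR_setns 308			/* x86_64 number added above */
#endif

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /proc/<pid>/ns/<type>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* nstype 0 means "accept whatever namespace type the fd refers to" */
	if (syscall(__NR_setns, fd, 0) < 0) {
		perror("setns");
		return 1;
	}
	close(fd);
	return 0;	/* the calling task now runs inside the target namespace */
}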
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 130f1ee..a291c40 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -5,7 +5,7 @@
*
* SGI UV Broadcast Assist Unit definitions
*
- * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_BAU_H
@@ -35,17 +35,20 @@
#define MAX_CPUS_PER_UVHUB 64
#define MAX_CPUS_PER_SOCKET 32
-#define UV_ADP_SIZE 64 /* hardware-provided max. */
-#define UV_CPUS_PER_ACT_STATUS 32 /* hardware-provided max. */
-#define UV_ITEMS_PER_DESCRIPTOR 8
+#define ADP_SZ 64 /* hardware-provided max. */
+#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
+#define ITEMS_PER_DESC 8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT 3
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
-#define UV_NET_ENDPOINT_INTD 0x38
-#define UV_DESC_BASE_PNODE_SHIFT 49
+#define UV1_NET_ENDPOINT_INTD 0x38
+#define UV2_NET_ENDPOINT_INTD 0x28
+#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
+ UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
+#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
@@ -53,29 +56,64 @@
#define UV_BAU_TUNABLES_FILE "bau_tunables"
#define WHITESPACE " \t\n"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
+#define cpubit_isset(cpu, bau_local_cpumask) \
+ test_bit((cpu), (bau_local_cpumask).bits)
+
/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
-#define BAU_MISC_CONTROL_MULT_MASK 3
+/*
+ * UV2: Bit 19 selects between
+ * (0): 10 microsecond timebase and
+ * (1): 80 microseconds
+ * we're using 650us, similar to UV1: 65 units of 10us
+ */
+#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL)
+
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
+ UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
+ UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
-#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
+#define BAU_MISC_CONTROL_MULT_MASK 3
+
+#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
/* [30:28] URGENCY_7 an index into a table of times */
-#define BAU_URGENCY_7_SHIFT 28
-#define BAU_URGENCY_7_MASK 7
+#define BAU_URGENCY_7_SHIFT 28
+#define BAU_URGENCY_7_MASK 7
-#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
+#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
/* [45:40] BAU - BAU transaction timeout select - a multiplier */
-#define BAU_TRANS_SHIFT 40
-#define BAU_TRANS_MASK 0x3f
+#define BAU_TRANS_SHIFT 40
+#define BAU_TRANS_MASK 0x3f
+
+/*
+ * shorten some awkward names
+ */
+#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
+#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
+#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
+#define write_gmmr uv_write_global_mmr64
+#define write_lmmr uv_write_local_mmr
+#define read_lmmr uv_read_local_mmr
+#define read_gmmr uv_read_global_mmr64
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
*/
-#define DESC_STATUS_IDLE 0
-#define DESC_STATUS_ACTIVE 1
-#define DESC_STATUS_DESTINATION_TIMEOUT 2
-#define DESC_STATUS_SOURCE_TIMEOUT 3
+#define DS_IDLE 0
+#define DS_ACTIVE 1
+#define DS_DESTINATION_TIMEOUT 2
+#define DS_SOURCE_TIMEOUT 3
+/*
+ * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
+ * values 1 and 5 will not occur
+ */
+#define UV2H_DESC_IDLE 0
+#define UV2H_DESC_DEST_TIMEOUT 2
+#define UV2H_DESC_DEST_STRONG_NACK 3
+#define UV2H_DESC_BUSY 4
+#define UV2H_DESC_SOURCE_TIMEOUT 6
+#define UV2H_DESC_DEST_PUT_ERR 7
/*
* delay for 'plugged' timeout retries, in microseconds
@@ -86,15 +124,24 @@
 * thresholds at which to use IPI to free resources
*/
/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
-#define PLUGSB4RESET 100
+#define PLUGSB4RESET 100
/* after this many consecutive timeouts, use IPI to release resources */
-#define TIMEOUTSB4RESET 1
+#define TIMEOUTSB4RESET 1
/* after this many uses of IPI to release resources, give up the request */
-#define IPI_RESET_LIMIT 1
+#define IPI_RESET_LIMIT 1
/* after this # consecutive successes, bump up the throttle if it was lowered */
-#define COMPLETE_THRESHOLD 5
+#define COMPLETE_THRESHOLD 5
+
+#define UV_LB_SUBNODEID 0x10
-#define UV_LB_SUBNODEID 0x10
+/* these two are the same for UV1 and UV2: */
+#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
+/* 4 bits of software ack period */
+#define UV2_ACK_MASK 0x7UL
+#define UV2_ACK_UNITS_SHFT 3
+#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
+#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
* number of entries in the destination side payload queue
@@ -115,9 +162,16 @@
/*
* tuning the action when the numalink network is extremely delayed
*/
-#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
-#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
-#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
+ microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over
+ this many broadcasts */
+#define CONGESTED_PERIOD 30 /* time for the bau to be
+ disabled, in seconds */
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -129,8 +183,8 @@
* 'base_dest_nasid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_uvhubmask {
- unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
+struct bau_targ_hubmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
/*
@@ -139,7 +193,7 @@ struct bau_target_uvhubmask {
* enough bits for max. cpu's per uvhub)
*/
struct bau_local_cpumask {
- unsigned long bits;
+ unsigned long bits;
};
/*
@@ -160,14 +214,14 @@ struct bau_local_cpumask {
* The payload is software-defined for INTD transactions
*/
struct bau_msg_payload {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
+ unsigned long address; /* signifies a page or all
+ TLB's of the cpu */
/* 64 bits */
- unsigned short sending_cpu; /* filled in by sender */
+ unsigned short sending_cpu; /* filled in by sender */
/* 16 bits */
- unsigned short acknowledge_count;/* filled in by destination */
+ unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits */
- unsigned int reserved1:32; /* not usable */
+ unsigned int reserved1:32; /* not usable */
};
@@ -176,93 +230,96 @@ struct bau_msg_payload {
 * see table 4.2.3.0.1 in broadcast_assist spec.
*/
struct bau_msg_header {
- unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
+ unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nasid:15; /* nasid of the */
- /* bits 20:6 */ /* first bit in uvhub map */
- unsigned int command:8; /* message type */
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+ /* bits 20:6 */ /* in uvhub map */
+ unsigned int command:8; /* message type */
/* bits 28:21 */
- /* 0x38: SN3net EndPoint Message */
- unsigned int rsvd_1:3; /* must be zero */
+ /* 0x38: SN3net EndPoint Message */
+ unsigned int rsvd_1:3; /* must be zero */
/* bits 31:29 */
- /* int will align on 32 bits */
- unsigned int rsvd_2:9; /* must be zero */
+ /* int will align on 32 bits */
+ unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
- /* Suppl_A is 56-41 */
- unsigned int sequence:16;/* message sequence number */
- /* bits 56:41 */ /* becomes bytes 16-17 of msg */
- /* Address field (96:57) is never used as an
- address (these are address bits 42:3) */
-
- unsigned int rsvd_3:1; /* must be zero */
+ /* Suppl_A is 56-41 */
+ unsigned int sequence:16; /* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
+ /* Address field (96:57) is
+ never used as an address
+ (these are address bits
+ 42:3) */
+
+ unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
- /* address bits 27:4 are payload */
+ /* address bits 27:4 are payload */
/* these next 24 (58-81) bits become bytes 12-14 of msg */
-
/* bits 65:58 land in byte 12 */
- unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
+ unsigned int replied_to:1; /* sent as 0 by the source to
+ byte 12 */
/* bit 58 */
- unsigned int msg_type:3; /* software type of the message*/
+ unsigned int msg_type:3; /* software type of the
+ message */
/* bits 61:59 */
- unsigned int canceled:1; /* message canceled, resource to be freed*/
+ unsigned int canceled:1; /* message canceled, resource
+ is to be freed*/
/* bit 62 */
- unsigned int payload_1a:1;/* not currently used */
+ unsigned int payload_1a:1; /* not currently used */
/* bit 63 */
- unsigned int payload_1b:2;/* not currently used */
+ unsigned int payload_1b:2; /* not currently used */
/* bits 65:64 */
/* bits 73:66 land in byte 13 */
- unsigned int payload_1ca:6;/* not currently used */
+ unsigned int payload_1ca:6; /* not currently used */
/* bits 71:66 */
- unsigned int payload_1c:2;/* not currently used */
+ unsigned int payload_1c:2; /* not currently used */
/* bits 73:72 */
/* bits 81:74 land in byte 14 */
- unsigned int payload_1d:6;/* not currently used */
+ unsigned int payload_1d:6; /* not currently used */
/* bits 79:74 */
- unsigned int payload_1e:2;/* not currently used */
+ unsigned int payload_1e:2; /* not currently used */
/* bits 81:80 */
- unsigned int rsvd_4:7; /* must be zero */
+ unsigned int rsvd_4:7; /* must be zero */
/* bits 88:82 */
- unsigned int sw_ack_flag:1;/* software acknowledge flag */
+ unsigned int swack_flag:1; /* software acknowledge flag */
/* bit 89 */
- /* INTD trasactions at destination are to
- wait for software acknowledge */
- unsigned int rsvd_5:6; /* must be zero */
+					/* INTD transactions at
+ destination are to wait for
+ software acknowledge */
+ unsigned int rsvd_5:6; /* must be zero */
/* bits 95:90 */
- unsigned int rsvd_6:5; /* must be zero */
+ unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
- unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
+ unsigned int int_both:1; /* if 1, interrupt both sockets
+ on the uvhub */
/* bit 101*/
- unsigned int fairness:3;/* usually zero */
+ unsigned int fairness:3; /* usually zero */
/* bits 104:102 */
- unsigned int multilevel:1; /* multi-level multicast format */
+ unsigned int multilevel:1; /* multi-level multicast
+ format */
/* bit 105 */
- /* 0 for TLB: endpoint multi-unicast messages */
- unsigned int chaining:1;/* next descriptor is part of this activation*/
+ /* 0 for TLB: endpoint multi-unicast messages */
+ unsigned int chaining:1; /* next descriptor is part of
+ this activation*/
/* bit 106 */
- unsigned int rsvd_7:21; /* must be zero */
+ unsigned int rsvd_7:21; /* must be zero */
/* bits 127:107 */
};
-/* see msg_type: */
-#define MSG_NOOP 0
-#define MSG_REGULAR 1
-#define MSG_RETRY 2
-
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_target_uvhubmask distribution;
+ struct bau_targ_hubmask distribution;
/*
* message template, consisting of header and payload:
*/
- struct bau_msg_header header;
- struct bau_msg_payload payload;
+ struct bau_msg_header header;
+ struct bau_msg_payload payload;
};
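The comment above pins down the intended layout: a 32-byte distribution mask followed by the header and payload bit-fields (16 bytes each, going by the bit numbering in their comments), 64 bytes in all. A stand-alone sketch (hypothetical, simplified field layout; only the sizes mirror the description) shows how such invariants can be checked at compile time with C11 _Static_assert, much as the kernel would with BUILD_BUG_ON():

/* layout_check.c - hypothetical sketch; the real structs live in uv_bau.h */
struct demo_hubmask {				/* 256 distribution bits */
	unsigned long bits[256 / (8 * sizeof(unsigned long))];
};

struct demo_desc {				/* stand-in for struct bau_desc */
	struct demo_hubmask	distribution;	/* bytes 0-0x1f */
	unsigned char		header[16];	/* simplified 128-bit header */
	unsigned char		payload[16];	/* simplified 128-bit payload */
};

_Static_assert(sizeof(struct demo_hubmask) == 32,
	       "distribution mask must be 32 bytes");
_Static_assert(sizeof(struct demo_desc) == 64,
	       "activation descriptor must be 64 bytes");

int main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}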
/*
* -payload-- ---------header------
@@ -281,59 +338,51 @@ struct bau_desc {
* are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
* bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
* (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
- * sw_ack_vector and payload_2)
+ * swack_vec and payload_2)
* "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
* Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
* operation."
*/
-struct bau_payload_queue_entry {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
+struct bau_pq_entry {
+ unsigned long address; /* signifies a page or all TLB's
+ of the cpu */
/* 64 bits, bytes 0-7 */
-
- unsigned short sending_cpu; /* cpu that sent the message */
+ unsigned short sending_cpu; /* cpu that sent the message */
/* 16 bits, bytes 8-9 */
-
- unsigned short acknowledge_count; /* filled in by destination */
+ unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */
-
/* these next 3 bytes come from bits 58-81 of the message header */
- unsigned short replied_to:1; /* sent as 0 by the source */
- unsigned short msg_type:3; /* software message type */
- unsigned short canceled:1; /* sent as 0 by the source */
- unsigned short unused1:3; /* not currently using */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
/* byte 12 */
-
- unsigned char unused2a; /* not currently using */
+ unsigned char unused2a; /* not currently using */
/* byte 13 */
- unsigned char unused2; /* not currently using */
+ unsigned char unused2; /* not currently using */
/* byte 14 */
-
- unsigned char sw_ack_vector; /* filled in by the hardware */
+ unsigned char swack_vec; /* filled in by the hardware */
/* byte 15 (bits 127:120) */
-
- unsigned short sequence; /* message sequence number */
+ unsigned short sequence; /* message sequence number */
/* bytes 16-17 */
- unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
/* bytes 18-19 */
-
- int number_of_cpus; /* filled in at destination */
+ int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */
-
- unsigned char unused5[8]; /* not using */
+ unsigned char unused5[8]; /* not using */
/* bytes 24-31 */
};
struct msg_desc {
- struct bau_payload_queue_entry *msg;
- int msg_slot;
- int sw_ack_slot;
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
+ struct bau_pq_entry *msg;
+ int msg_slot;
+ int swack_slot;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
};
struct reset_args {
- int sender;
+ int sender;
};
/*
@@ -341,112 +390,226 @@ struct reset_args {
*/
struct ptc_stats {
/* sender statistics */
- unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
- unsigned long s_requestor; /* number of shootdown requests */
- unsigned long s_stimeout; /* source side timeouts */
- unsigned long s_dtimeout; /* destination side timeouts */
- unsigned long s_time; /* time spent in sending side */
- unsigned long s_retriesok; /* successful retries */
- unsigned long s_ntargcpu; /* total number of cpu's targeted */
- unsigned long s_ntargself; /* times the sending cpu was targeted */
- unsigned long s_ntarglocals; /* targets of cpus on the local blade */
- unsigned long s_ntargremotes; /* targets of cpus on remote blades */
- unsigned long s_ntarglocaluvhub; /* targets of the local hub */
- unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
- unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
- unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
- unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
- unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
- unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
- unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
- unsigned long s_resets_plug; /* ipi-style resets from plug state */
- unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
- unsigned long s_busy; /* status stayed busy past s/w timer */
- unsigned long s_throttles; /* waits in throttle */
- unsigned long s_retry_messages; /* retry broadcasts */
- unsigned long s_bau_reenabled; /* for bau enable/disable */
- unsigned long s_bau_disabled; /* for bau enable/disable */
+ unsigned long s_giveup; /* number of fall backs to
+ IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown
+ requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* total number of cpu's
+ targeted */
+ unsigned long s_ntargself; /* times the sending cpu was
+ targeted */
+ unsigned long s_ntarglocals; /* targets of cpus on the local
+ blade */
+ unsigned long s_ntargremotes; /* targets of cpus on remote
+ blades */
+ unsigned long s_ntarglocaluvhub; /* targets of the local hub */
+	unsigned long	s_ntargremoteuvhub; /* remote hubs targeted */
+ unsigned long s_ntarguvhub; /* total number of uvhubs
+ targeted */
+ unsigned long s_ntarguvhub16; /* number of times target
+ hubs >= 16*/
+ unsigned long s_ntarguvhub8; /* number of times target
+ hubs >= 8 */
+ unsigned long s_ntarguvhub4; /* number of times target
+ hubs >= 4 */
+ unsigned long s_ntarguvhub2; /* number of times target
+ hubs >= 2 */
+ unsigned long s_ntarguvhub1; /* number of times target
+ hubs == 1 */
+ unsigned long s_resets_plug; /* ipi-style resets from plug
+ state */
+ unsigned long s_resets_timeout; /* ipi-style resets from
+ timeouts */
+ unsigned long s_busy; /* status stayed busy past
+ s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ unsigned long s_bau_reenabled; /* for bau enable/disable */
+ unsigned long s_bau_disabled; /* for bau enable/disable */
/* destination statistics */
- unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
- unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
- unsigned long d_multmsg; /* interrupts with multiple messages */
- unsigned long d_nomsg; /* interrupts with no message */
- unsigned long d_time; /* time spent on destination side */
- unsigned long d_requestee; /* number of messages processed */
- unsigned long d_retries; /* number of retry messages processed */
- unsigned long d_canceled; /* number of messages canceled by retries */
- unsigned long d_nocanceled; /* retries that found nothing to cancel */
- unsigned long d_resets; /* number of ipi-style requests processed */
- unsigned long d_rcanceled; /* number of messages canceled by resets */
+ unsigned long d_alltlb; /* times all tlb's on this
+ cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this
+ cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple
+ messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination
+ side */
+ unsigned long d_requestee; /* number of messages
+ processed */
+ unsigned long d_retries; /* number of retry messages
+ processed */
+ unsigned long d_canceled; /* number of messages canceled
+ by retries */
+ unsigned long d_nocanceled; /* retries that found nothing
+ to cancel */
+ unsigned long d_resets; /* number of ipi-style requests
+ processed */
+ unsigned long d_rcanceled; /* number of messages canceled
+ by resets */
+};
+
+struct tunables {
+ int *tunp;
+ int deflt;
};
struct hub_and_pnode {
- short uvhub;
- short pnode;
+ short uvhub;
+ short pnode;
};
+
+struct socket_desc {
+ short num_cpus;
+ short cpu_number[MAX_CPUS_PER_SOCKET];
+};
+
+struct uvhub_desc {
+ unsigned short socket_mask;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+};
+
/*
* one per-cpu; to locate the software tables
*/
struct bau_control {
- struct bau_desc *descriptor_base;
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *bau_msg_head;
- struct bau_control *uvhub_master;
- struct bau_control *socket_master;
- struct ptc_stats *statp;
- unsigned long timeout_interval;
- unsigned long set_bau_on_time;
- atomic_t active_descriptor_count;
- int plugged_tries;
- int timeout_tries;
- int ipi_attempts;
- int conseccompletes;
- int baudisabled;
- int set_bau_off;
- short cpu;
- short osnode;
- short uvhub_cpu;
- short uvhub;
- short cpus_in_socket;
- short cpus_in_uvhub;
- short partition_base_pnode;
- unsigned short message_number;
- unsigned short uvhub_quiesce;
- short socket_acknowledge_count[DEST_Q_SIZE];
- cycles_t send_message;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
+ struct bau_desc *descriptor_base;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
+ struct bau_pq_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ struct ptc_stats *statp;
+ unsigned long timeout_interval;
+ unsigned long set_bau_on_time;
+ atomic_t active_descriptor_count;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ int baudisabled;
+ int set_bau_off;
+ short cpu;
+ short osnode;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ short partition_base_pnode;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
/* tunables */
- int max_bau_concurrent;
- int max_bau_concurrent_constant;
- int plugged_delay;
- int plugsb4reset;
- int timeoutsb4reset;
- int ipi_reset_limit;
- int complete_threshold;
- int congested_response_us;
- int congested_reps;
- int congested_period;
- cycles_t period_time;
- long period_requests;
- struct hub_and_pnode *target_hub_and_pnode;
+ int max_concurr;
+ int max_concurr_const;
+ int plugged_delay;
+ int plugsb4reset;
+ int timeoutsb4reset;
+ int ipi_reset_limit;
+ int complete_threshold;
+ int cong_response_us;
+ int cong_reps;
+ int cong_period;
+ cycles_t period_time;
+ long period_requests;
+ struct hub_and_pnode *thp;
};
-static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
+static unsigned long read_mmr_uv2_status(void)
+{
+ return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
+}
+
+static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
+}
+
+static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
+}
+
+static void write_mmr_activation(unsigned long index)
+{
+ write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+}
+
+static void write_gmmr_activation(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
+}
+
+static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
+}
+
+static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
+}
+
+static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
+}
+
+static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+}
+
+static unsigned long read_mmr_misc_control(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
+}
+
+static void write_mmr_sw_ack(unsigned long mr)
+{
+ uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
+static unsigned long read_mmr_sw_ack(void)
+{
+ return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static unsigned long read_gmmr_sw_ack(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static void write_mmr_data_config(int pnode, unsigned long mr)
+{
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
+}
+
+static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
{
return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
{
__set_bit(pnode, &dstp->bits[0]);
}
-static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
-static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
{
return bitmap_weight((unsigned long *)&dstp->bits[0],
UV_DISTRIBUTION_SIZE);
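These helpers treat the distribution field as an ordinary bitmap of target uvhubs: bau_uvhub_set() marks a hub, bau_uvhub_isset() tests it, and bau_uvhub_weight() counts how many hubs a broadcast will reach. A user-space sketch of the same idea (hypothetical code, using compiler builtins in place of the kernel bitmap API):

/* hubmask_demo.c - hypothetical illustration of the distribution bitmap */
#include <stdio.h>

#define DIST_SIZE	256	/* mirrors UV_DISTRIBUTION_SIZE */
#define BITS_PER_LONG_	(8 * (int)sizeof(unsigned long))
#define LONGS		(DIST_SIZE / BITS_PER_LONG_)

struct hubmask { unsigned long bits[LONGS]; };

static void hub_set(struct hubmask *m, int hub)
{
	m->bits[hub / BITS_PER_LONG_] |= 1UL << (hub % BITS_PER_LONG_);
}

static int hub_isset(const struct hubmask *m, int hub)
{
	return (m->bits[hub / BITS_PER_LONG_] >> (hub % BITS_PER_LONG_)) & 1;
}

static int hub_weight(const struct hubmask *m)
{
	int i, w = 0;

	for (i = 0; i < LONGS; i++)
		w += __builtin_popcountl(m->bits[i]);
	return w;
}

int main(void)
{
	struct hubmask m = { { 0 } };

	hub_set(&m, 3);
	hub_set(&m, 117);
	printf("hub 3: %d  hub 5: %d  weight: %d\n",
	       hub_isset(&m, 3), hub_isset(&m, 5), hub_weight(&m));
	return 0;	/* prints: hub 3: 1  hub 5: 0  weight: 2 */
}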
@@ -457,9 +620,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
bitmap_zero(&dstp->bits, nbits);
}
-#define cpubit_isset(cpu, bau_local_cpumask) \
- test_bit((cpu), (bau_local_cpumask).bits)
-
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
@@ -467,7 +627,7 @@ struct atomic_short {
short counter;
};
-/**
+/*
* atomic_read_short - read a short atomic variable
* @v: pointer of type atomic_short
*
@@ -478,14 +638,14 @@ static inline int atomic_read_short(const struct atomic_short *v)
return v->counter;
}
-/**
- * atomic_add_short_return - add and return a short int
+/*
+ * atom_asr - add and return a short int
* @i: short value to add
* @v: pointer of type atomic_short
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline int atomic_add_short_return(short i, struct atomic_short *v)
+static inline int atom_asr(short i, struct atomic_short *v)
{
short __i = i;
asm volatile(LOCK_PREFIX "xaddw %0, %1"
@@ -494,4 +654,26 @@ static inline int atomic_add_short_return(short i, struct atomic_short *v)
return i + __i;
}
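atom_asr() is a lock-prefixed xaddw: it atomically adds i to the 16-bit counter and hands back the post-add value. Outside the kernel the same semantics can be sketched with the GCC/Clang __atomic builtins (hypothetical demo, not the patch's code):

/* asr_demo.c - hypothetical user-space analogue of atom_asr() */
#include <stdio.h>

struct atomic_short { short counter; };

static int atom_asr_demo(short i, struct atomic_short *v)
{
	/* atomic fetch-and-add, then return the new value, like xaddw + add */
	return __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST) + i;
}

int main(void)
{
	struct atomic_short v = { 40 };

	printf("%d\n", atom_asr_demo(2, &v));	/* prints 42 */
	printf("%d\n", v.counter);		/* counter is now 42 */
	return 0;
}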
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
+
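The helper above is a bounded increment: the spinlock makes the read-compare-increment sequence atomic, and unlike atomic_add_unless() the bound u may later be lowered below the current value and still be honoured; presumably the BAU driver uses it to cap descriptors in flight (cf. active_descriptor_count and max_concurr in struct bau_control). A self-contained user-space analogue (hypothetical sketch, pthreads plus C11 atomics; build with -pthread):

/* inc_unless_ge_demo.c - hypothetical analogue of atomic_inc_unless_ge() */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static atomic_int active;

/* same contract as the kernel helper: add 1 only while *v < u */
static int inc_unless_ge(atomic_int *v, int u)
{
	int ok = 0;

	pthread_spin_lock(&lock);
	if (atomic_load(v) < u) {
		atomic_fetch_add(v, 1);
		ok = 1;
	}
	pthread_spin_unlock(&lock);
	return ok;
}

int main(void)
{
	int i;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < 5; i++)
		printf("attempt %d: %s (active=%d)\n", i,
		       inc_unless_ge(&active, 3) ? "admitted" : "throttled",
		       atomic_load(&active));
	pthread_spin_destroy(&lock);
	return 0;	/* attempts 0-2 are admitted, 3 and 4 are throttled */
}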
#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 4298002..f26544a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -77,8 +77,9 @@
*
* 1111110000000000
* 5432109876543210
- * pppppppppplc0cch Nehalem-EX
- * ppppppppplcc0cch Westmere-EX
+ * pppppppppplc0cch Nehalem-EX (12 bits in hdw reg)
+ * ppppppppplcc0cch Westmere-EX (12 bits in hdw reg)
+ * pppppppppppcccch SandyBridge (15 bits in hdw reg)
* sssssssssss
*
* p = pnode bits
@@ -87,7 +88,7 @@
* h = hyperthread
* s = bits that are in the SOCKET_ID CSR
*
- * Note: Processor only supports 12 bits in the APICID register. The ACPI
+ * Note: Processor may support fewer bits in the APICID register. The ACPI
* tables hold all 16 bits. Software needs to be aware of this.
*
* Unless otherwise specified, all references to APICID refer to
@@ -138,6 +139,8 @@ struct uv_hub_info_s {
unsigned long global_mmr_base;
unsigned long gpa_mask;
unsigned int gnode_extra;
+ unsigned char hub_revision;
+ unsigned char apic_pnode_shift;
unsigned long gnode_upper;
unsigned long lowmem_remap_top;
unsigned long lowmem_remap_base;
@@ -149,13 +152,31 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
struct uv_scir_s scir;
- unsigned char apic_pnode_shift;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+/*
+ * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
+ * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+ * This is a software convention - NOT the hardware revision numbers in
+ * the hub chip.
+ */
+#define UV1_HUB_REVISION_BASE 1
+#define UV2_HUB_REVISION_BASE 3
+
+static inline int is_uv1_hub(void)
+{
+ return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+}
+
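is_uv1_hub()/is_uv2_hub() turn the scattered UV1-vs-UV2 differences into one runtime test on hub_revision, which conditional #defines elsewhere in these headers (UV_NET_ENDPOINT_INTD earlier in uv_bau.h, UV_LOCAL_MMR_BASE below) then build on. A tiny stand-alone sketch of the same dispatch pattern (hypothetical demo; only the two base addresses are taken from this patch):

/* hub_dispatch_demo.c - hypothetical illustration of revision-based dispatch */
#include <stdio.h>

#define UV1_HUB_REVISION_BASE	1
#define UV2_HUB_REVISION_BASE	3

static int hub_revision = 3;		/* pretend we booted on a UV2 hub */

static int is_uv1_hub(void)
{
	return hub_revision < UV2_HUB_REVISION_BASE;
}

/* same shape as the UV_LOCAL_MMR_BASE selection further down in uv_hub.h */
#define DEMO_UV1_MMR_BASE	0xf4000000UL
#define DEMO_UV2_MMR_BASE	0xfa000000UL
#define DEMO_MMR_BASE		(is_uv1_hub() ? DEMO_UV1_MMR_BASE \
					      : DEMO_UV2_MMR_BASE)

int main(void)
{
	printf("local MMR base: %#lx\n", DEMO_MMR_BASE);  /* 0xfa000000 */
	return 0;
}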
union uvh_apicid {
unsigned long v;
struct uvh_apicid_s {
@@ -180,11 +201,25 @@ union uvh_apicid {
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
-#define UV_LOCAL_MMR_BASE 0xf4000000UL
-#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_BASE 0xf4000000UL
+#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
+#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+
+#define UV2_LOCAL_MMR_BASE 0xfa000000UL
+#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
+ : UV2_LOCAL_MMR_BASE)
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
+ : UV2_GLOBAL_MMR32_BASE)
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+ UV2_LOCAL_MMR_SIZE)
+#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
+ UV2_GLOBAL_MMR32_SIZE)
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
-#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
-#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -301,6 +336,17 @@ static inline int uv_apicid_to_pnode(int apicid)
}
/*
+ * Convert an apicid to the socket number on the blade
+ */
+static inline int uv_apicid_to_socket(int apicid)
+{
+ if (is_uv1_hub())
+ return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
+ else
+ return 0;
+}
+
+/*
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
*/
@@ -519,14 +565,13 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
/*
* Get the minimum revision number of the hub chips within the partition.
- * 1 - initial rev 1.0 silicon
- * 2 - rev 2.0 production silicon
+ * 1 - UV1 rev 1.0 initial silicon
+ * 2 - UV1 rev 2.0 production silicon
+ * 3 - UV2 rev 1.0 initial silicon
*/
static inline int uv_get_min_hub_revision_id(void)
{
- extern int uv_min_hub_revision_id;
-
- return uv_min_hub_revision_id;
+ return uv_hub_info->hub_revision;
}
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index f5bb64a..4be52c8 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -11,13 +11,64 @@
#ifndef _ASM_X86_UV_UV_MMRS_H
#define _ASM_X86_UV_UV_MMRS_H
+/*
+ * This file contains MMR definitions for both UV1 & UV2 hubs.
+ *
+ * In general, MMR addresses and structures are identical on both hubs.
+ * These MMRs are identified as:
+ * #define UVH_xxx <address>
+ * union uvh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } s;
+ * };
+ *
+ * If the MMR exists on both hub types but has different addresses or
+ * contents, the MMR definition is similar to:
+ * #define UV1H_xxx <uv1 address>
+ *	#define UV2H_xxx	<uv2 address>
+ * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
+ * union uvh_xxx {
+ * unsigned long v;
+ *	struct uvh_int_cmpd_s {		(Common fields only)
+ * } s;
+ * struct uv1h_int_cmpd_s { (Full UV1 definition)
+ * } s1;
+ * struct uv2h_int_cmpd_s { (Full UV2 definition)
+ * } s2;
+ * };
+ *
+ * Only essential differences are enumerated. For example, if the address is
+ * the same for both UV1 & UV2, only a single #define is generated. Likewise,
+ * if the contents are the same for both hubs, only the "s" structure is
+ * generated.
+ *
+ * If the MMR exists on ONLY 1 type of hub, no generic definition is
+ * generated:
+ * #define UVnH_xxx <uvn address>
+ * union uvnh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } sn;
+ * };
+ */
+
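The convention spelled out above lets common code read and write shared fields through the "s" view while hub-specific code uses "s1" or "s2", all over the same 64-bit register image. A self-contained sketch of that pattern (hypothetical register and field names, chosen only to illustrate the union trick):

/* mmr_union_demo.c - hypothetical sketch of the UVH_xxx union convention */
#include <stdio.h>

union demo_mmr {
	unsigned long v;			/* raw 64-bit MMR image */
	struct demo_common_s {			/* fields shared by both hubs */
		unsigned long enable	: 1;
		unsigned long rsvd	: 63;
	} s;
	struct demo_uv1_s {			/* full UV1 layout */
		unsigned long enable	: 1;
		unsigned long period	: 8;
		unsigned long rsvd	: 55;
	} s1;
	struct demo_uv2_s {			/* full UV2 layout */
		unsigned long enable	: 1;
		unsigned long period	: 4;
		unsigned long units	: 2;
		unsigned long rsvd	: 57;
	} s2;
};

int main(void)
{
	union demo_mmr m = { .v = 0 };

	m.s.enable = 1;			/* common field, same on UV1 and UV2 */
	m.s2.period = 5;		/* hub-specific field via the s2 view */
	printf("raw image: %#lx\n", m.v);
	return 0;
}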
#define UV_MMR_ENABLE (1UL << 63)
+#define UV1_HUB_PART_NUMBER 0x88a5
+#define UV2_HUB_PART_NUMBER 0x8eb8
+
+/* Compat: if this #define is present, UV headers support UV2 */
+#define UV2_HUB_IS_SUPPORTED 1
+
+/* KABI compat: if this #define is present, KABI hacks are present */
+#define UV2_HUB_KABI_HACKS 1
+
/* ========================================================================= */
/* UVH_BAU_DATA_BROADCAST */
/* ========================================================================= */
#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x0440
+#define UVH_BAU_DATA_BROADCAST_32 0x440
#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -34,7 +85,7 @@ union uvh_bau_data_broadcast_u {
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x0438
+#define UVH_BAU_DATA_CONFIG_32 0x438
#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
@@ -73,125 +124,245 @@ union uvh_bau_data_config_u {
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x005e8
-
-#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
-#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
-#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
-#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
-#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
-#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
-#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
-#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
-#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
-#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
-#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
-#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
-#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
-#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
-#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
-#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
-#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
-#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
-#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
-#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
-#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
-#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
-#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
-#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
-#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
-#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
-#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
-#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
-#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
-#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
-#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
-#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
-#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
-#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
-#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
-#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
-#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
-#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
-#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
-#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
-#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
union uvh_event_occurred0_u {
unsigned long v;
- struct uvh_event_occurred0_s {
+ struct uv1h_event_occurred0_s {
unsigned long lb_hcerr : 1; /* RW, W1C */
unsigned long gr0_hcerr : 1; /* RW, W1C */
unsigned long gr1_hcerr : 1; /* RW, W1C */
@@ -250,14 +421,76 @@ union uvh_event_occurred0_u {
unsigned long bau_data : 1; /* RW, W1C */
unsigned long power_management_req : 1; /* RW, W1C */
unsigned long rsvd_57_63 : 7; /* */
- } s;
+ } s1;
+ struct uv2h_event_occurred0_s {
+ unsigned long lb_hcerr : 1; /* RW */
+ unsigned long qp_hcerr : 1; /* RW */
+ unsigned long rh_hcerr : 1; /* RW */
+ unsigned long lh0_hcerr : 1; /* RW */
+ unsigned long lh1_hcerr : 1; /* RW */
+ unsigned long gr0_hcerr : 1; /* RW */
+ unsigned long gr1_hcerr : 1; /* RW */
+ unsigned long ni0_hcerr : 1; /* RW */
+ unsigned long ni1_hcerr : 1; /* RW */
+ unsigned long lb_aoerr0 : 1; /* RW */
+ unsigned long qp_aoerr0 : 1; /* RW */
+ unsigned long rh_aoerr0 : 1; /* RW */
+ unsigned long lh0_aoerr0 : 1; /* RW */
+ unsigned long lh1_aoerr0 : 1; /* RW */
+ unsigned long gr0_aoerr0 : 1; /* RW */
+ unsigned long gr1_aoerr0 : 1; /* RW */
+ unsigned long xb_aoerr0 : 1; /* RW */
+ unsigned long rt_aoerr0 : 1; /* RW */
+ unsigned long ni0_aoerr0 : 1; /* RW */
+ unsigned long ni1_aoerr0 : 1; /* RW */
+ unsigned long lb_aoerr1 : 1; /* RW */
+ unsigned long qp_aoerr1 : 1; /* RW */
+ unsigned long rh_aoerr1 : 1; /* RW */
+ unsigned long lh0_aoerr1 : 1; /* RW */
+ unsigned long lh1_aoerr1 : 1; /* RW */
+ unsigned long gr0_aoerr1 : 1; /* RW */
+ unsigned long gr1_aoerr1 : 1; /* RW */
+ unsigned long xb_aoerr1 : 1; /* RW */
+ unsigned long rt_aoerr1 : 1; /* RW */
+ unsigned long ni0_aoerr1 : 1; /* RW */
+ unsigned long ni1_aoerr1 : 1; /* RW */
+ unsigned long system_shutdown_int : 1; /* RW */
+ unsigned long lb_irq_int_0 : 1; /* RW */
+ unsigned long lb_irq_int_1 : 1; /* RW */
+ unsigned long lb_irq_int_2 : 1; /* RW */
+ unsigned long lb_irq_int_3 : 1; /* RW */
+ unsigned long lb_irq_int_4 : 1; /* RW */
+ unsigned long lb_irq_int_5 : 1; /* RW */
+ unsigned long lb_irq_int_6 : 1; /* RW */
+ unsigned long lb_irq_int_7 : 1; /* RW */
+ unsigned long lb_irq_int_8 : 1; /* RW */
+ unsigned long lb_irq_int_9 : 1; /* RW */
+ unsigned long lb_irq_int_10 : 1; /* RW */
+ unsigned long lb_irq_int_11 : 1; /* RW */
+ unsigned long lb_irq_int_12 : 1; /* RW */
+ unsigned long lb_irq_int_13 : 1; /* RW */
+ unsigned long lb_irq_int_14 : 1; /* RW */
+ unsigned long lb_irq_int_15 : 1; /* RW */
+ unsigned long l1_nmi_int : 1; /* RW */
+ unsigned long stop_clock : 1; /* RW */
+ unsigned long asic_to_l1 : 1; /* RW */
+ unsigned long l1_to_asic : 1; /* RW */
+ unsigned long la_seq_trigger : 1; /* RW */
+ unsigned long ipi_int : 1; /* RW */
+ unsigned long extio_int0 : 1; /* RW */
+ unsigned long extio_int1 : 1; /* RW */
+ unsigned long extio_int2 : 1; /* RW */
+ unsigned long extio_int3 : 1; /* RW */
+ unsigned long profile_int : 1; /* RW */
+ unsigned long rsvd_59_63 : 5; /* */
+ } s2;
};
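
The union above keeps a generic `s` view plus hub-specific `s1` (UV1) and `s2` (UV2) views over the same 64-bit register value; callers read the MMR once into `v` and then decode through the view that matches the running hub. A minimal user-space sketch of that access pattern, not part of the patch, with `is_uv1_hub()` stubbed out and only two made-up bitfields standing in for the real register layout:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the kernel helper; the real one checks hub_revision. */
static bool is_uv1_hub(void) { return false; }

union event_occurred0_demo_u {
	unsigned long v;
	struct {	/* illustrative UV1 layout, not the real one */
		unsigned long lb_hcerr : 1;
		unsigned long gr0_hcerr : 1;
		unsigned long rest : 62;
	} s1;
	struct {	/* illustrative UV2 layout, not the real one */
		unsigned long lb_hcerr : 1;
		unsigned long qp_hcerr : 1;
		unsigned long rest : 62;
	} s2;
};

int main(void)
{
	union event_occurred0_demo_u ev;

	ev.v = 0x2UL;	/* pretend this came from an MMR read */

	/* Pick the view that matches the hub generation. */
	if (is_uv1_hub())
		printf("UV1 gr0_hcerr=%lu\n", (unsigned long)ev.s1.gr0_hcerr);
	else
		printf("UV2 qp_hcerr=%lu\n", (unsigned long)ev.s2.qp_hcerr);
	return 0;
}
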
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
@@ -432,8 +665,16 @@ union uvh_int_cmpb_u {
/* ========================================================================= */
#define UVH_INT_CMPC 0x22100UL
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT (is_uv1_hub() ? \
+ UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT : \
+ UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT)
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ? \
+ UV1H_INT_CMPC_REAL_TIME_CMPC_MASK : \
+ UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)
union uvh_int_cmpc_u {
unsigned long v;
@@ -448,8 +689,16 @@ union uvh_int_cmpc_u {
/* ========================================================================= */
#define UVH_INT_CMPD 0x22180UL
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT (is_uv1_hub() ? \
+ UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT : \
+ UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT)
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK (is_uv1_hub() ? \
+ UV1H_INT_CMPD_REAL_TIME_CMPD_MASK : \
+ UV2H_INT_CMPD_REAL_TIME_CMPD_MASK)
union uvh_int_cmpd_u {
unsigned long v;
@@ -463,7 +712,7 @@ union uvh_int_cmpd_u {
/* UVH_IPI_INT */
/* ========================================================================= */
#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x0348
+#define UVH_IPI_INT_32 0x348
#define UVH_IPI_INT_VECTOR_SHFT 0
#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
@@ -493,7 +742,7 @@ union uvh_ipi_int_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -515,7 +764,7 @@ union uvh_lb_bau_intd_payload_queue_first_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -533,7 +782,7 @@ union uvh_lb_bau_intd_payload_queue_last_u {
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -551,7 +800,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -585,6 +834,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
union uvh_lb_bau_intd_software_acknowledge_u {
unsigned long v;
struct uvh_lb_bau_intd_software_acknowledge_s {
@@ -612,13 +862,13 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
@@ -628,8 +878,8 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
@@ -650,8 +900,86 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
union uvh_lb_bau_misc_control_u {
unsigned long v;
@@ -660,7 +988,25 @@ union uvh_lb_bau_misc_control_u {
unsigned long apic_mode : 1; /* RW */
unsigned long force_broadcast : 1; /* RW */
unsigned long force_lock_nop : 1; /* RW */
- unsigned long csi_agent_presence_vector : 3; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long rsvd_29_63 : 35;
+ } s;
+ struct uv1h_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
unsigned long descriptor_fetch_mode : 1; /* RW */
unsigned long enable_intd_soft_ack_mode : 1; /* RW */
unsigned long intd_soft_ack_timeout_period : 4; /* RW */
@@ -673,14 +1019,40 @@ union uvh_lb_bau_misc_control_u {
unsigned long enable_programmed_initial_priority : 1; /* RW */
unsigned long rsvd_29_47 : 19; /* */
unsigned long fun : 16; /* RW */
- } s;
+ } s1;
+ struct uv2h_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long qpi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long enable_automatic_apic_mode_selection : 1; /* RW */
+ unsigned long apic_mode_status : 1; /* RO */
+ unsigned long suppress_interrupts_to_self : 1; /* RW */
+ unsigned long enable_lock_based_system_flush : 1; /* RW */
+ unsigned long enable_extended_sb_status : 1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self : 1; /* RW */
+ unsigned long use_legacy_descriptor_formats : 1; /* RW */
+ unsigned long rsvd_36_47 : 12; /* */
+ unsigned long fun : 16; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
@@ -703,7 +1075,7 @@ union uvh_lb_bau_sb_activation_control_u {
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -719,7 +1091,7 @@ union uvh_lb_bau_sb_activation_status_0_u {
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -735,7 +1107,7 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
@@ -754,23 +1126,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
-/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
-/* ========================================================================= */
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
-
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
-#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
-
-union uvh_lb_target_physical_apic_id_mask_u {
- unsigned long v;
- struct uvh_lb_target_physical_apic_id_mask_s {
- unsigned long bit_enables : 32; /* RW */
- unsigned long rsvd_32_63 : 32; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -785,10 +1140,36 @@ union uvh_lb_target_physical_apic_id_mask_u {
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
#define UVH_NODE_ID_NODE_ID_SHFT 32
#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
-#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
-#define UVH_NODE_ID_NI_PORT_SHFT 56
-#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV1H_NODE_ID_FORCE1_SHFT 0
+#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV1H_NODE_ID_REVISION_SHFT 28
+#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV1H_NODE_ID_NODE_ID_SHFT 32
+#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UV1H_NODE_ID_NI_PORT_SHFT 56
+#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV2H_NODE_ID_FORCE1_SHFT 0
+#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV2H_NODE_ID_REVISION_SHFT 28
+#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV2H_NODE_ID_NODE_ID_SHFT 32
+#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV2H_NODE_ID_NI_PORT_SHFT 57
+#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
union uvh_node_id_u {
unsigned long v;
@@ -798,12 +1179,31 @@ union uvh_node_id_u {
unsigned long part_number : 16; /* RO */
unsigned long revision : 4; /* RO */
unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47_63 : 17;
+ } s;
+ struct uv1h_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
unsigned long rsvd_47 : 1; /* */
unsigned long nodes_per_bit : 7; /* RW */
unsigned long rsvd_55 : 1; /* */
unsigned long ni_port : 4; /* RO */
unsigned long rsvd_60_63 : 4; /* */
- } s;
+ } s1;
+ struct uv2h_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47_49 : 3; /* */
+ unsigned long nodes_per_bit : 7; /* RO */
+ unsigned long ni_port : 5; /* RO */
+ unsigned long rsvd_62_63 : 2; /* */
+ } s2;
};
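
Note that the same logical NODE_ID field can sit at different bit positions per generation: NODES_PER_BIT occupies bits 48..54 on UV1 but bits 50..56 on UV2, so code must use the matching UV1H_/UV2H_ shift/mask pair (or the s1/s2 bitfield view). A small mask-and-shift sketch, not part of the patch, reusing the values defined above on an arbitrary test value:

#include <stdio.h>

#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL

static unsigned long nodes_per_bit(unsigned long node_id_mmr, int uv1)
{
	if (uv1)
		return (node_id_mmr & UV1H_NODE_ID_NODES_PER_BIT_MASK)
			>> UV1H_NODE_ID_NODES_PER_BIT_SHFT;
	return (node_id_mmr & UV2H_NODE_ID_NODES_PER_BIT_MASK)
		>> UV2H_NODE_ID_NODES_PER_BIT_SHFT;
}

int main(void)
{
	unsigned long mmr = 0x0004000000000000UL;	/* arbitrary demo value, bit 50 set */

	/* The same bits decode differently depending on the hub generation. */
	printf("as UV1: %lu, as UV2: %lu\n",
	       nodes_per_bit(mmr, 1), nodes_per_bit(mmr, 0));
	return 0;
}
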
/* ========================================================================= */
@@ -954,18 +1354,38 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
-#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
union uvh_rh_gam_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_config_mmr_s {
unsigned long m_skt : 6; /* RW */
unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_63 : 54;
+ } s;
+ struct uv1h_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
unsigned long rsvd_10_11: 2; /* */
unsigned long mmiol_cfg : 1; /* RW */
unsigned long rsvd_13_63: 51; /* */
- } s;
+ } s1;
+ struct uv2h_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_63: 54; /* */
+ } s2;
};
/* ========================================================================= */
@@ -975,25 +1395,49 @@ union uvh_rh_gam_config_mmr_u {
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27: 28; /* */
unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_62 : 17;
+ unsigned long enable : 1; /* RW */
+ } s;
+ struct uv1h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
unsigned long rsvd_46_47: 2; /* */
unsigned long gr4 : 1; /* RW */
unsigned long rsvd_49_51: 3; /* */
unsigned long n_gru : 4; /* RW */
unsigned long rsvd_56_62: 7; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_51: 6; /* */
+ unsigned long n_gru : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1001,25 +1445,42 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
/* ========================================================================= */
#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmioh_overlay_config_mmr_u {
unsigned long v;
- struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
+ struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
unsigned long rsvd_0_29: 30; /* */
unsigned long base : 16; /* RW */
unsigned long m_io : 6; /* RW */
unsigned long n_io : 4; /* RW */
unsigned long rsvd_56_62: 7; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_26: 27; /* */
+ unsigned long base : 19; /* RW */
+ unsigned long m_io : 6; /* RW */
+ unsigned long n_io : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1029,20 +1490,40 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_mmr_overlay_config_mmr_s {
unsigned long rsvd_0_25: 26; /* */
unsigned long base : 20; /* RW */
+ unsigned long rsvd_46_62 : 17;
+ unsigned long enable : 1; /* RW */
+ } s;
+ struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
unsigned long dual_hub : 1; /* RW */
unsigned long rsvd_47_62: 16; /* */
unsigned long enable : 1; /* RW */
- } s;
+ } s1;
+ struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
+ unsigned long rsvd_46_62: 17; /* */
+ unsigned long enable : 1; /* RW */
+ } s2;
};
/* ========================================================================= */
@@ -1103,10 +1584,11 @@ union uvh_rtc1_int_config_u {
/* UVH_SCRATCH5 */
/* ========================================================================= */
#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x00778
+#define UVH_SCRATCH5_32 0x778
#define UVH_SCRATCH5_SCRATCH5_SHFT 0
#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+
union uvh_scratch5_u {
unsigned long v;
struct uvh_scratch5_s {
@@ -1114,4 +1596,154 @@ union uvh_scratch5_u {
} s;
};
+/* ========================================================================= */
+/* UV2H_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UV2H_EVENT_OCCURRED2 0x70100UL
+#define UV2H_EVENT_OCCURRED2_32 0xb68
+
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+union uv2h_event_occurred2_u {
+ unsigned long v;
+ struct uv2h_event_occurred2_s {
+ unsigned long rtc_0 : 1; /* RW */
+ unsigned long rtc_1 : 1; /* RW */
+ unsigned long rtc_2 : 1; /* RW */
+ unsigned long rtc_3 : 1; /* RW */
+ unsigned long rtc_4 : 1; /* RW */
+ unsigned long rtc_5 : 1; /* RW */
+ unsigned long rtc_6 : 1; /* RW */
+ unsigned long rtc_7 : 1; /* RW */
+ unsigned long rtc_8 : 1; /* RW */
+ unsigned long rtc_9 : 1; /* RW */
+ unsigned long rtc_10 : 1; /* RW */
+ unsigned long rtc_11 : 1; /* RW */
+ unsigned long rtc_12 : 1; /* RW */
+ unsigned long rtc_13 : 1; /* RW */
+ unsigned long rtc_14 : 1; /* RW */
+ unsigned long rtc_15 : 1; /* RW */
+ unsigned long rtc_16 : 1; /* RW */
+ unsigned long rtc_17 : 1; /* RW */
+ unsigned long rtc_18 : 1; /* RW */
+ unsigned long rtc_19 : 1; /* RW */
+ unsigned long rtc_20 : 1; /* RW */
+ unsigned long rtc_21 : 1; /* RW */
+ unsigned long rtc_22 : 1; /* RW */
+ unsigned long rtc_23 : 1; /* RW */
+ unsigned long rtc_24 : 1; /* RW */
+ unsigned long rtc_25 : 1; /* RW */
+ unsigned long rtc_26 : 1; /* RW */
+ unsigned long rtc_27 : 1; /* RW */
+ unsigned long rtc_28 : 1; /* RW */
+ unsigned long rtc_29 : 1; /* RW */
+ unsigned long rtc_30 : 1; /* RW */
+ unsigned long rtc_31 : 1; /* RW */
+ unsigned long rsvd_32_63: 32; /* */
+ } s1;
+};
+
+/* ========================================================================= */
+/* UV2H_EVENT_OCCURRED2_ALIAS */
+/* ========================================================================= */
+#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+
+/* ========================================================================= */
+/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
+/* ========================================================================= */
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uv2h_lb_bau_sb_activation_status_2_u {
+ unsigned long v;
+ struct uv2h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error : 64; /* RW */
+ } s1;
+};
+
+/* ========================================================================= */
+/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
+/* ========================================================================= */
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
+
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
+
+union uv1h_lb_target_physical_apic_id_mask_u {
+ unsigned long v;
+ struct uv1h_lb_target_physical_apic_id_mask_s {
+ unsigned long bit_enables : 32; /* RW */
+ unsigned long rsvd_32_63 : 32; /* */
+ } s1;
+};
+
+
#endif /* __ASM_UV_MMRS_X86_H__ */
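
One consequence of the header changes worth noting: generic names such as UVH_INT_CMPC_REAL_TIME_CMPC_SHFT now expand to an is_uv1_hub() ternary rather than a literal, so they are no longer compile-time constants and cannot be used in case labels, array sizes or static initializers; the UV1H_/UV2H_ variants remain plain constants. A hedged sketch of the same dispatch idiom outside the kernel, not part of the patch:

#include <stdio.h>
#include <stdbool.h>

static bool is_uv1_hub(void) { return true; }	/* stand-in for the kernel helper */

#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ?		\
		UV1H_INT_CMPC_REAL_TIME_CMPC_MASK :			\
		UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)

int main(void)
{
	/* Evaluated at run time, once per use: fine in ordinary expressions. */
	printf("mask = %#lx\n", UVH_INT_CMPC_REAL_TIME_CMPC_MASK);

	/*
	 * This, however, would not compile, because the generic macro is
	 * no longer a constant expression:
	 *
	 *	static unsigned long m = UVH_INT_CMPC_REAL_TIME_CMPC_MASK;
	 */
	return 0;
}
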
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f450b68..b511a01 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -91,6 +91,10 @@ static int __init early_get_pnodeid(void)
m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
uv_min_hub_revision_id = node_id.s.revision;
+ if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
+ uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+ uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
return pnode;
}
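
early_get_pnodeid() now folds the hub generation into uv_min_hub_revision_id: UV1 hubs keep their raw silicon revision, while UV2 hubs get UV2_HUB_REVISION_BASE - 1 added so a single integer can distinguish generations. A sketch of that numbering, not part of the patch; it assumes UV1_HUB_REVISION_BASE is 1 and UV2_HUB_REVISION_BASE is 3 (those constants are defined elsewhere in this series, not in this hunk), and uses made-up part numbers:

#include <stdio.h>

/* Assumed values, defined in uv_hub.h by this series rather than shown here. */
#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3

static int hub_revision(int part_number, int silicon_rev, int uv2_part)
{
	int rev = silicon_rev;

	/* UV2 parts are pushed past the UV1 revision range. */
	if (part_number == uv2_part)
		rev += UV2_HUB_REVISION_BASE - 1;
	return rev;
}

int main(void)
{
	/* 0x1234 and 0x5678 are placeholder part numbers for the demo. */
	printf("UV1 silicon rev 2 -> %d (UV1 range)\n", hub_revision(0x1234, 2, 0x5678));
	printf("UV2 silicon rev 1 -> %d (>= UV2 base)\n", hub_revision(0x5678, 1, 0x5678));
	return 0;
}
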
@@ -112,17 +116,25 @@ static void __init early_get_apic_pnode_shift(void)
*/
static void __init uv_set_apicid_hibit(void)
{
- union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+ union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
- apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
- uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+ if (is_uv1_hub()) {
+ apicid_mask.v =
+ uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
+ uv_apicid_hibits =
+ apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
+ }
}
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- int pnodeid;
+ int pnodeid, is_uv1, is_uv2;
- if (!strcmp(oem_id, "SGI")) {
+ is_uv1 = !strcmp(oem_id, "SGI");
+ is_uv2 = !strcmp(oem_id, "SGI2");
+ if (is_uv1 || is_uv2) {
+ uv_hub_info->hub_revision =
+ is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
pnodeid = early_get_pnodeid();
early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -484,12 +496,19 @@ static __init void map_mmr_high(int max_pnode)
static __init void map_mmioh_high(int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
- int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ int shift;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- if (mmioh.s.enable)
- map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+ if (is_uv1_hub() && mmioh.s1.enable) {
+ shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
+ max_pnode, map_uc);
+ }
+ if (is_uv2_hub() && mmioh.s2.enable) {
+ shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
max_pnode, map_uc);
+ }
}
static __init void map_low_mmrs(void)
@@ -736,13 +755,14 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask, pnode_io_mask;
+ printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
map_low_mmrs();
m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- n_io = mmioh.s.n_io;
+ n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE;
@@ -811,6 +831,8 @@ void __init uv_system_init(void)
*/
uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
+ uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
+
pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 3bfa022..965a766 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -361,6 +361,7 @@ struct apm_user {
* idle percentage above which bios idle calls are done
*/
#ifdef CONFIG_APM_CPU_IDLE
+#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
#define DEFAULT_IDLE_THRESHOLD 95
#else
#define DEFAULT_IDLE_THRESHOLD 100
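
The APM change pairs a compile-time #warning, which fires whenever the config option is built in, with a runtime WARN_ONCE in apm_cpu_idle(), which fires the first time the deprecated path actually runs. A minimal user-space sketch of that pairing, not part of the patch; the kernel's WARN_ONCE is replaced by a tiny stand-in macro:

#include <stdio.h>

#define CONFIG_DEMO_DEPRECATED 1

#ifdef CONFIG_DEMO_DEPRECATED
#warning deprecated CONFIG_DEMO_DEPRECATED will be deleted in a future release
#endif

/* Crude stand-in for the kernel's WARN_ONCE(): print a message only once. */
#define WARN_ONCE_DEMO(msg)					\
	do {							\
		static int warned;				\
		if (!warned) {					\
			warned = 1;				\
			fprintf(stderr, "WARNING: %s\n", msg);	\
		}						\
	} while (0)

static void deprecated_idle(void)
{
	WARN_ONCE_DEMO("deprecated idle path used");
	/* ... idle work would go here ... */
}

int main(void)
{
	deprecated_idle();
	deprecated_idle();	/* second call stays silent */
	return 0;
}
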
@@ -904,6 +905,7 @@ static void apm_cpu_idle(void)
unsigned int jiffies_since_last_check = jiffies - last_jiffies;
unsigned int bucket;
+ WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
recalc:
if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
use_apm_idle = 0;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8f5cabb3..b13ed39 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
#endif
- /* As a rule processors have APIC timer running in deep C states */
- if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+ /*
+ * Family 0x12 and above processors have APIC timer
+ * running in deep C states.
+ */
+ if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
/*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c39576c..525514c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -19,6 +19,7 @@
static int __init no_halt(char *s)
{
+ WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
boot_cpu_data.hlt_works_ok = 0;
return 1;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c8b4162..22a073d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -477,13 +477,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings <= 1)
goto out;
- if (smp_num_siblings > nr_cpu_ids) {
- pr_warning("CPU: Unsupported number of siblings %d",
- smp_num_siblings);
- smp_num_siblings = 1;
- return;
- }
-
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -909,7 +902,7 @@ static void vgetcpu_set_mode(void)
void __init identify_boot_cpu(void)
{
identify_cpu(&boot_cpu_data);
- init_c1e_mask();
+ init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0ba15a6..c9a281f 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -123,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static void *mod_code_ip; /* holds the IP to write to */
-static void *mod_code_newcode; /* holds the text to write to the IP */
+static const void *mod_code_newcode; /* holds the text to write to the IP */
static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -225,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
}
static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -266,8 +266,8 @@ static const unsigned char *ftrace_nop_replace(void)
}
static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ unsigned const char *new_code)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -301,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned char *new, *old;
+ unsigned const char *new, *old;
unsigned long ip = rec->ip;
old = ftrace_call_replace(ip, addr);
@@ -312,7 +312,7 @@ int ftrace_make_nop(struct module *mod,
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned char *new, *old;
+ unsigned const char *new, *old;
unsigned long ip = rec->ip;
old = ftrace_nop_replace();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 88a90a9..2e4928d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,9 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
+#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
EXPORT_SYMBOL(pm_idle);
+#endif
#ifdef CONFIG_X86_32
/*
@@ -397,7 +399,7 @@ void default_idle(void)
cpu_relax();
}
}
-#ifdef CONFIG_APM_MODULE
+#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
EXPORT_SYMBOL(default_idle);
#endif
@@ -535,45 +537,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
return (edx & MWAIT_EDX_C1);
}
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_c1e_detected;
+EXPORT_SYMBOL(amd_e400_c1e_detected);
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_c1e_mask;
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
{
- if (c1e_mask != NULL)
- cpumask_clear_cpu(cpu, c1e_mask);
+ if (amd_e400_c1e_mask != NULL)
+ cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}
/*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
* pending message MSR. If we detect C1E, then we handle it the same
* way as C3 power states (local apic timer and TSC stop)
*/
-static void c1e_idle(void)
+static void amd_e400_idle(void)
{
if (need_resched())
return;
- if (!c1e_detected) {
+ if (!amd_e400_c1e_detected) {
u32 lo, hi;
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (lo & K8_INTP_C1E_ACTIVE_MASK) {
- c1e_detected = true;
+ amd_e400_c1e_detected = true;
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
printk(KERN_INFO "System has AMD C1E enabled\n");
}
}
- if (c1e_detected) {
+ if (amd_e400_c1e_detected) {
int cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, c1e_mask)) {
- cpumask_set_cpu(cpu, c1e_mask);
+ if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
+ cpumask_set_cpu(cpu, amd_e400_c1e_mask);
/*
* Force broadcast so ACPI can not interfere.
*/
@@ -616,17 +618,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
pm_idle = mwait_idle;
} else if (cpu_has_amd_erratum(amd_erratum_400)) {
/* E400: APIC timer interrupt does not wake up CPU from C1e */
- printk(KERN_INFO "using C1E aware idle routine\n");
- pm_idle = c1e_idle;
+ printk(KERN_INFO "using AMD E400 aware idle routine\n");
+ pm_idle = amd_e400_idle;
} else
pm_idle = default_idle;
}
-void __init init_c1e_mask(void)
+void __init init_amd_e400_c1e_mask(void)
{
- /* If we're using c1e_idle, we need to allocate c1e_mask. */
- if (pm_idle == c1e_idle)
- zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+ /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
+ if (pm_idle == amd_e400_idle)
+ zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
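
init_amd_e400_c1e_mask() only allocates the cpumask when select_idle_routine() actually installed amd_e400_idle, so machines without the erratum pay nothing for it. A sketch of that "allocate only if this handler was chosen" pattern, not part of the patch, with the kernel cpumask API replaced by a plain calloc'd bitmap:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static void default_idle(void) { }
static void amd_e400_idle(void) { }

static void (*pm_idle)(void);
static unsigned char *amd_e400_c1e_mask;	/* stand-in for cpumask_var_t */

static void select_idle_routine(bool has_erratum_400)
{
	pm_idle = has_erratum_400 ? amd_e400_idle : default_idle;
}

static void init_amd_e400_c1e_mask(int nr_cpus)
{
	/* Only pay for the mask if the E400-aware routine is in use. */
	if (pm_idle == amd_e400_idle)
		amd_e400_c1e_mask = calloc((nr_cpus + 7) / 8, 1);
}

int main(void)
{
	select_idle_routine(true);
	init_amd_e400_c1e_mask(16);
	printf("mask %sallocated\n", amd_e400_c1e_mask ? "" : "not ");
	free(amd_e400_c1e_mask);
	return 0;
}
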
static int __init idle_setup(char *str)
@@ -640,6 +642,7 @@ static int __init idle_setup(char *str)
boot_option_idle_override = IDLE_POLL;
} else if (!strcmp(str, "mwait")) {
boot_option_idle_override = IDLE_FORCE_MWAIT;
+ WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a3e5948..afaf384 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -910,6 +910,13 @@ void __init setup_arch(char **cmdline_p)
memblock.current_limit = get_max_mapped();
memblock_x86_fill();
+ /*
+ * The EFI specification says that boot service code won't be called
+ * after ExitBootServices(). This is, in fact, a lie.
+ */
+ if (efi_enabled)
+ efi_reserve_boot_services();
+
/* preallocate 4k for mptable mpc */
early_reserve_e820_mpc_new();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a3c430b..33a0c11 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1307,7 +1307,7 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- c1e_remove_cpu(raw_smp_processor_id());
+ amd_e400_remove_cpu(raw_smp_processor_id());
mb();
/* Ack it */
@@ -1332,7 +1332,7 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
- if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))
+ if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
return;
if (!this_cpu_has(X86_FEATURE_CLFLSH))
return;
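
The mwait_play_dead() hunk is a pure precedence fix: the old test negated only the first operand, so the early return almost never triggered, while the patched condition reads "bail out unless MWAIT is both present and usable". A small truth-table sketch, not part of the patch, comparing the two forms for all inputs:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	for (int has_mwait = 0; has_mwait <= 1; has_mwait++) {
		for (int usable = 0; usable <= 1; usable++) {
			bool old_bail = !has_mwait && usable;	/* pre-patch  */
			bool new_bail = !(has_mwait && usable);	/* patched    */
			printf("has_mwait=%d usable=%d  old bail=%d  new bail=%d\n",
			       has_mwait, usable, old_bail, new_bail);
		}
	}
	return 0;
}
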
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 32cbffb..fbb0a04 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -345,3 +345,4 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg /* 345 */
+ .long sys_setns
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e191c09..db832fd 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -993,6 +993,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
static void lguest_time_init(void)
{
/* Set up the timer interrupt (0) to go to our simple timer routine */
+ lguest_setup_irq(0);
irq_set_handler(0, lguest_time_irq);
clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f7a2a05..2dbf6bf 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -823,16 +823,30 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
-static noinline void
+static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
+ /*
+ * Pagefault was interrupted by SIGKILL. We have no reason to
+ * continue pagefault.
+ */
+ if (fatal_signal_pending(current)) {
+ if (!(fault & VM_FAULT_RETRY))
+ up_read(&current->mm->mmap_sem);
+ if (!(error_code & PF_USER))
+ no_context(regs, error_code, address);
+ return 1;
+ }
+ if (!(fault & VM_FAULT_ERROR))
+ return 0;
+
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
- return;
+ return 1;
}
out_of_memory(regs, error_code, address);
@@ -843,6 +857,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
else
BUG();
}
+ return 1;
}
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
@@ -1133,19 +1148,9 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, fault);
- return;
- }
-
- /*
- * Pagefault was interrupted by SIGKILL. We have no reason to
- * continue pagefault.
- */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
- if (!(error_code & PF_USER))
- no_context(regs, error_code, address);
- return;
+ if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (mm_fault_error(regs, error_code, address, fault))
+ return;
}
/*
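
The fault.c rework moves the fatal-signal check into mm_fault_error() and has it return nonzero when it consumed the fault, so the caller can test VM_FAULT_RETRY and VM_FAULT_ERROR with one combined mask and fall through when nothing special happened. A simplified control-flow sketch, not part of the patch, with the VM_FAULT_* bits reduced to two demo flags:

#include <stdio.h>

#define VM_FAULT_ERROR 0x1
#define VM_FAULT_RETRY 0x2

/* Returns nonzero if the fault was fully handled here. */
static int mm_fault_error(unsigned int fault, int fatal_signal_pending)
{
	if (fatal_signal_pending)
		return 1;		/* no reason to continue the fault */
	if (!(fault & VM_FAULT_ERROR))
		return 0;		/* e.g. plain VM_FAULT_RETRY */
	/* ... OOM / SIGBUS handling would go here ... */
	return 1;
}

static void handle_fault(unsigned int fault, int fatal)
{
	if (fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)) {
		if (mm_fault_error(fault, fatal)) {
			printf("fault 0x%x: handled by mm_fault_error\n", fault);
			return;
		}
	}
	printf("fault 0x%x: continue normal completion\n", fault);
}

int main(void)
{
	handle_fault(0, 0);			/* common fast path       */
	handle_fault(VM_FAULT_RETRY, 0);	/* retry, no fatal signal */
	handle_fault(VM_FAULT_ERROR, 0);	/* real error             */
	return 0;
}
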
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index c3b8e24..9fd8a56 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
-static inline int eilvt_is_available(int offset)
+static inline int get_eilvt(int offset)
{
- /* check if we may assign a vector */
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}
+static inline int put_eilvt(int offset)
+{
+ return !setup_APIC_eilvt(offset, 0, 0, 1);
+}
+
static inline int ibs_eilvt_valid(void)
{
int offset;
u64 val;
+ int valid = 0;
+
+ preempt_disable();
rdmsrl(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK;
@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- return 0;
+ goto out;
}
- if (!eilvt_is_available(offset)) {
+ if (!get_eilvt(offset)) {
pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- return 0;
+ goto out;
}
- return 1;
+ valid = 1;
+out:
+ preempt_enable();
+
+ return valid;
}
static inline int get_ibs_offset(void)
@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
static int force_ibs_eilvt_setup(void)
{
- int i;
+ int offset;
int ret;
- /* find the next free available EILVT entry */
- for (i = 1; i < 4; i++) {
- if (!eilvt_is_available(i))
- continue;
- ret = setup_ibs_ctl(i);
- if (ret)
- return ret;
- pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
- return 0;
+ /*
+ * find the next free available EILVT entry, skip offset 0,
+ * pin search to this cpu
+ */
+ preempt_disable();
+ for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
+ if (get_eilvt(offset))
+ break;
}
+ preempt_enable();
- printk(KERN_DEBUG "No EILVT entry available\n");
-
- return -EBUSY;
-}
-
-static int __init_ibs_nmi(void)
-{
- int ret;
-
- if (ibs_eilvt_valid())
- return 0;
+ if (offset == APIC_EILVT_NR_MAX) {
+ printk(KERN_DEBUG "No EILVT entry available\n");
+ return -EBUSY;
+ }
- ret = force_ibs_eilvt_setup();
+ ret = setup_ibs_ctl(offset);
if (ret)
- return ret;
+ goto out;
- if (!ibs_eilvt_valid())
- return -EFAULT;
+ if (!ibs_eilvt_valid()) {
+ ret = -EFAULT;
+ goto out;
+ }
+ pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
return 0;
+out:
+ preempt_disable();
+ put_eilvt(offset);
+ preempt_enable();
+ return ret;
}
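
force_ibs_eilvt_setup() now scans for a free EILVT offset with get_eilvt(), and every failure path after a successful reservation releases it again through put_eilvt() before returning, an acquire-then-rollback shape. A compact user-space sketch of that shape, not part of the patch, with the APIC calls replaced by a boolean slot array and a programming step that is made to fail:

#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS 4

static bool slot_taken[NR_SLOTS] = { true, true, false, false };

static bool get_slot(int i)
{
	if (slot_taken[i])
		return false;
	slot_taken[i] = true;
	return true;
}

static void put_slot(int i) { slot_taken[i] = false; }

/* Stand-in for the hardware programming step, which may fail. */
static int program_slot(int i) { return i == 2 ? -1 : 0; }

static int setup_with_rollback(void)
{
	int i, ret;

	for (i = 1; i < NR_SLOTS; i++)		/* skip slot 0, like offset 0 */
		if (get_slot(i))
			break;
	if (i == NR_SLOTS)
		return -1;			/* nothing free */

	ret = program_slot(i);
	if (ret)
		goto out;			/* release what we reserved */

	printf("using slot %d\n", i);
	return 0;
out:
	put_slot(i);
	return ret;
}

int main(void)
{
	if (setup_with_rollback())
		printf("setup failed, reserved slot released for a later retry\n");
	return 0;
}
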
/*
* check and reserve APIC extended interrupt LVT offset for IBS if
* available
- *
- * init_ibs() preforms implicitly cpu-local operations, so pin this
- * thread to its current CPU
*/
static void init_ibs(void)
{
- preempt_disable();
-
ibs_caps = get_ibs_caps();
+
if (!ibs_caps)
+ return;
+
+ if (ibs_eilvt_valid())
goto out;
- if (__init_ibs_nmi() < 0)
- ibs_caps = 0;
- else
- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+ if (!force_ibs_eilvt_setup())
+ goto out;
+
+ /* Failed to setup ibs */
+ ibs_caps = 0;
+ return;
out:
- preempt_enable();
+ printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index b30aa26..0d3a4fa 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -304,6 +304,40 @@ static void __init print_efi_memmap(void)
}
#endif /* EFI_DEBUG */
+void __init efi_reserve_boot_services(void)
+{
+ void *p;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ efi_memory_desc_t *md = p;
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ memblock_x86_reserve_range(start, start + size, "EFI Boot");
+ }
+}
+
+static void __init efi_free_boot_services(void)
+{
+ void *p;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ efi_memory_desc_t *md = p;
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ free_bootmem_late(start, size);
+ }
+}
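
Both new helpers walk the memory map with p += memmap.desc_size rather than stepping by sizeof(efi_memory_desc_t), because the firmware reports its own descriptor stride, which may be larger than the C struct. A sketch of that stride-based walk over a fake map, not part of the patch; the struct layout, field names and type ids below are simplified stand-ins, not the real efi_memory_desc_t:

#include <stdio.h>
#include <string.h>

struct fake_desc {
	unsigned int type;
	unsigned long long phys_addr;
	unsigned long long num_pages;
};

#define BOOT_SERVICES_CODE 3	/* made-up type ids for the demo */
#define CONVENTIONAL_MEMORY 7
#define EFI_PAGE_SHIFT 12

int main(void)
{
	/* Firmware may pad each descriptor, so honour a runtime stride. */
	unsigned long desc_size = sizeof(struct fake_desc) + 8;
	unsigned char map[3 * (sizeof(struct fake_desc) + 8)];
	unsigned char *map_end = map + sizeof(map);
	struct fake_desc d[3] = {
		{ BOOT_SERVICES_CODE,  0x100000, 16 },
		{ CONVENTIONAL_MEMORY, 0x200000, 256 },
		{ BOOT_SERVICES_CODE,  0x400000, 4 },
	};

	memset(map, 0, sizeof(map));
	for (int i = 0; i < 3; i++)
		memcpy(map + i * desc_size, &d[i], sizeof(d[i]));

	for (unsigned char *p = map; p < map_end; p += desc_size) {
		struct fake_desc *md = (struct fake_desc *)p;

		if (md->type != BOOT_SERVICES_CODE)
			continue;
		printf("reserve %#llx, %llu bytes\n", md->phys_addr,
		       md->num_pages << EFI_PAGE_SHIFT);
	}
	return 0;
}
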
+
void __init efi_init(void)
{
efi_config_table_t *config_tables;
@@ -536,7 +570,9 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
continue;
size = md->num_pages << EFI_PAGE_SHIFT;
@@ -593,6 +629,13 @@ void __init efi_enter_virtual_mode(void)
}
/*
+ * Thankfully, it does seem that no runtime services other than
+ * SetVirtualAddressMap() will touch boot services code, so we can
+ * get rid of it all at this point
+ */
+ efi_free_boot_services();
+
+ /*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
*
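efi_reserve_boot_services(), efi_free_boot_services() and the widened runtime-mapping test above all walk the EFI memory map with the same stride: p advances by memmap.desc_size rather than by sizeof(efi_memory_desc_t), since firmware may report descriptors larger than the structure the kernel defines. A minimal sketch of that traversal, with the per-region action left as a callback; the helper name is illustrative and not part of the patch.

    /*
     * Sketch only: visit every EFI boot-services region the way the new
     * helpers above do.  The stride must be memmap.desc_size because the
     * firmware's descriptor may be larger than efi_memory_desc_t, and
     * sizes are counted in 4 KiB EFI pages (hence EFI_PAGE_SHIFT).
     */
    static void __init for_each_boot_services_region(
    		void (*fn)(unsigned long long start, unsigned long long size))
    {
    	void *p;

    	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
    		efi_memory_desc_t *md = p;

    		if (md->type != EFI_BOOT_SERVICES_CODE &&
    		    md->type != EFI_BOOT_SERVICES_DATA)
    			continue;

    		fn(md->phys_addr, md->num_pages << EFI_PAGE_SHIFT);
    	}
    }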
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 2649426..ac3aa54 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -49,10 +49,11 @@ static void __init early_code_mapping_set_exec(int executable)
if (!(__supported_pte_mask & _PAGE_NX))
return;
- /* Make EFI runtime service code area executable */
+ /* Make EFI service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c58e0ea..68e467f 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -35,6 +35,7 @@ static int timeout_base_ns[] = {
5242880,
167772160
};
+
static int timeout_us;
static int nobau;
static int baudisabled;
@@ -42,20 +43,70 @@ static spinlock_t disable_lock;
static cycles_t congested_cycles;
/* tunables: */
-static int max_bau_concurrent = MAX_BAU_CONCURRENT;
-static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
-static int plugged_delay = PLUGGED_DELAY;
-static int plugsb4reset = PLUGSB4RESET;
-static int timeoutsb4reset = TIMEOUTSB4RESET;
-static int ipi_reset_limit = IPI_RESET_LIMIT;
-static int complete_threshold = COMPLETE_THRESHOLD;
-static int congested_response_us = CONGESTED_RESPONSE_US;
-static int congested_reps = CONGESTED_REPS;
-static int congested_period = CONGESTED_PERIOD;
+static int max_concurr = MAX_BAU_CONCURRENT;
+static int max_concurr_const = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_respns_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+
+static struct tunables tunables[] = {
+ {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
+ {&plugged_delay, PLUGGED_DELAY},
+ {&plugsb4reset, PLUGSB4RESET},
+ {&timeoutsb4reset, TIMEOUTSB4RESET},
+ {&ipi_reset_limit, IPI_RESET_LIMIT},
+ {&complete_threshold, COMPLETE_THRESHOLD},
+ {&congested_respns_us, CONGESTED_RESPONSE_US},
+ {&congested_reps, CONGESTED_REPS},
+ {&congested_period, CONGESTED_PERIOD}
+};
+
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
-static int __init setup_nobau(char *arg)
+/* these correspond to the statistics printed by ptc_seq_show() */
+static char *stat_description[] = {
+ "sent: number of shootdown messages sent",
+ "stime: time spent sending messages",
+ "numuvhubs: number of hubs targeted with shootdown",
+ "numuvhubs16: number times 16 or more hubs targeted",
+ "numuvhubs8: number times 8 or more hubs targeted",
+ "numuvhubs4: number times 4 or more hubs targeted",
+ "numuvhubs2: number times 2 or more hubs targeted",
+ "numuvhubs1: number times 1 hub targeted",
+ "numcpus: number of cpus targeted with shootdown",
+ "dto: number of destination timeouts",
+ "retries: destination timeout retries sent",
+ "rok: destination timeouts successfully retried",
+ "resetp: ipi-style resource resets for plugs",
+ "resett: ipi-style resource resets for timeouts",
+ "giveup: fall-backs to ipi-style shootdowns",
+ "sto: number of source timeouts",
+ "bz: number of stay-busy's",
+ "throt: number times spun in throttle",
+ "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
+ "recv: shootdown messages received",
+ "rtime: time spent processing messages",
+ "all: shootdown all-tlb messages",
+ "one: shootdown one-tlb messages",
+ "mult: interrupts that found multiple messages",
+ "none: interrupts that found no messages",
+ "retry: number of retry messages processed",
+ "canc: number messages canceled by retries",
+ "nocan: number retries that found nothing to cancel",
+ "reset: number of ipi-style reset requests processed",
+ "rcan: number messages canceled by reset requests",
+ "disable: number times use of the BAU was disabled",
+ "enable: number times use of the BAU was re-enabled"
+};
+
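The stat_description[] strings above are printed by ptc_proc_write() later in this patch, and the tunables[] table pairs each tunable's storage with its compile-time default so parse_tunables_write() can assign or reset them generically. struct tunables itself is declared in the BAU header rather than in this diff; a sketch consistent with the two members the table and parse_tunables_write() actually use:

    /*
     * Assumed layout of struct tunables (defined in the uv_bau header,
     * not in this diff); only the members dereferenced by the code in
     * this patch are shown.
     */
    struct tunables {
    	int	*tunp;	/* address of the tunable, e.g. &plugged_delay */
    	int	deflt;	/* default restored when a 0 is written */
    };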
+static int __init
+setup_nobau(char *arg)
{
nobau = 1;
return 0;
@@ -63,7 +114,7 @@ static int __init setup_nobau(char *arg)
early_param("nobau", setup_nobau);
/* base pnode in this partition */
-static int uv_partition_base_pnode __read_mostly;
+static int uv_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;
@@ -109,60 +160,52 @@ static int __init uvhub_to_first_apicid(int uvhub)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static inline void uv_reply_to_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
{
unsigned long dw;
- struct bau_payload_queue_entry *msg;
+ struct bau_pq_entry *msg;
msg = mdp->msg;
if (!msg->canceled) {
- dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
- msg->sw_ack_vector;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
+ write_mmr_sw_ack(dw);
}
msg->replied_to = 1;
- msg->sw_ack_vector = 0;
+ msg->swack_vec = 0;
}
/*
* Process the receipt of a RETRY message
*/
-static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
int i;
int cancel_count = 0;
- int slot2;
unsigned long msg_res;
unsigned long mmr = 0;
- struct bau_payload_queue_entry *msg;
- struct bau_payload_queue_entry *msg2;
- struct ptc_stats *stat;
+ struct bau_pq_entry *msg = mdp->msg;
+ struct bau_pq_entry *msg2;
+ struct ptc_stats *stat = bcp->statp;
- msg = mdp->msg;
- stat = bcp->statp;
stat->d_retries++;
/*
* cancel any message from msg+1 to the retry itself
*/
for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
- if (msg2 > mdp->va_queue_last)
- msg2 = mdp->va_queue_first;
+ if (msg2 > mdp->queue_last)
+ msg2 = mdp->queue_first;
if (msg2 == msg)
break;
- /* same conditions for cancellation as uv_do_reset */
+ /* same conditions for cancellation as do_reset */
if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
- (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
- msg->sw_ack_vector) == 0) &&
+ (msg2->swack_vec) && ((msg2->swack_vec &
+ msg->swack_vec) == 0) &&
(msg2->sending_cpu == msg->sending_cpu) &&
(msg2->msg_type != MSG_NOOP)) {
- slot2 = msg2 - mdp->va_queue_first;
- mmr = uv_read_local_mmr
- (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = msg2->sw_ack_vector;
+ mmr = read_mmr_sw_ack();
+ msg_res = msg2->swack_vec;
/*
* This is a message retry; clear the resources held
* by the previous message only if they timed out.
@@ -170,6 +213,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
* situation to report.
*/
if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
+ unsigned long mr;
/*
* is the resource timed out?
* make everyone ignore the cancelled message.
@@ -177,10 +221,8 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
msg2->canceled = 1;
stat->d_canceled++;
cancel_count++;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- (msg_res << UV_SW_ACK_NPENDING) |
- msg_res);
+ mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
+ write_mmr_sw_ack(mr);
}
}
}
@@ -192,20 +234,19 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
* Do all the things a cpu should do for a TLB shootdown message.
* Other cpu's may come here at the same time for this message.
*/
-static void uv_bau_process_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
- int msg_ack_count;
short socket_ack_count = 0;
- struct ptc_stats *stat;
- struct bau_payload_queue_entry *msg;
+ short *sp;
+ struct atomic_short *asp;
+ struct ptc_stats *stat = bcp->statp;
+ struct bau_pq_entry *msg = mdp->msg;
struct bau_control *smaster = bcp->socket_master;
/*
* This must be a normal message, or retry of a normal message
*/
- msg = mdp->msg;
- stat = bcp->statp;
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
stat->d_alltlb++;
@@ -222,30 +263,32 @@ static void uv_bau_process_message(struct msg_desc *mdp,
* cpu number.
*/
if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
- uv_bau_process_retry_msg(mdp, bcp);
+ bau_process_retry_msg(mdp, bcp);
/*
- * This is a sw_ack message, so we have to reply to it.
+ * This is a swack message, so we have to reply to it.
* Count each responding cpu on the socket. This avoids
* pinging the count's cache line back and forth between
* the sockets.
*/
- socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
- &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
+ asp = (struct atomic_short *)sp;
+ socket_ack_count = atom_asr(1, asp);
if (socket_ack_count == bcp->cpus_in_socket) {
+ int msg_ack_count;
/*
* Both sockets dump their completed count total into
* the message's count.
*/
smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
- msg_ack_count = atomic_add_short_return(socket_ack_count,
- (struct atomic_short *)&msg->acknowledge_count);
+ asp = (struct atomic_short *)&msg->acknowledge_count;
+ msg_ack_count = atom_asr(socket_ack_count, asp);
if (msg_ack_count == bcp->cpus_in_uvhub) {
/*
* All cpus in uvhub saw it; reply
*/
- uv_reply_to_message(mdp, bcp);
+ reply_to_message(mdp, bcp);
}
}
@@ -268,62 +311,51 @@ static int uvhub_to_first_cpu(int uvhub)
* Last resort when we get a large number of destination timeouts is
* to clear resources held by a given cpu.
* Do this with IPI so that all messages in the BAU message queue
- * can be identified by their nonzero sw_ack_vector field.
+ * can be identified by their nonzero swack_vec field.
*
* This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
- * sw_ack resources.
+ * swack resources.
*/
-static void
-uv_do_reset(void *ptr)
+static void do_reset(void *ptr)
{
int i;
- int slot;
- int count = 0;
- unsigned long mmr;
- unsigned long msg_res;
- struct bau_control *bcp;
- struct reset_args *rap;
- struct bau_payload_queue_entry *msg;
- struct ptc_stats *stat;
+ struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
+ struct reset_args *rap = (struct reset_args *)ptr;
+ struct bau_pq_entry *msg;
+ struct ptc_stats *stat = bcp->statp;
- bcp = &per_cpu(bau_control, smp_processor_id());
- rap = (struct reset_args *)ptr;
- stat = bcp->statp;
stat->d_resets++;
-
/*
* We're looking for the given sender, and
- * will free its sw_ack resource.
+ * will free its swack resource.
* If all cpu's finally responded after the timeout, its
* message 'replied_to' was set.
*/
- for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
- /* uv_do_reset: same conditions for cancellation as
- uv_bau_process_retry_msg() */
+ for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ unsigned long msg_res;
+ /* do_reset: same conditions for cancellation as
+ bau_process_retry_msg() */
if ((msg->replied_to == 0) &&
(msg->canceled == 0) &&
(msg->sending_cpu == rap->sender) &&
- (msg->sw_ack_vector) &&
+ (msg->swack_vec) &&
(msg->msg_type != MSG_NOOP)) {
+ unsigned long mmr;
+ unsigned long mr;
/*
* make everyone else ignore this message
*/
msg->canceled = 1;
- slot = msg - bcp->va_queue_first;
- count++;
/*
* only reset the resource if it is still pending
*/
- mmr = uv_read_local_mmr
- (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = msg->sw_ack_vector;
+ mmr = read_mmr_sw_ack();
+ msg_res = msg->swack_vec;
+ mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
if (mmr & msg_res) {
stat->d_rcanceled++;
- uv_write_local_mmr(
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- (msg_res << UV_SW_ACK_NPENDING) |
- msg_res);
+ write_mmr_sw_ack(mr);
}
}
}
@@ -334,39 +366,38 @@ uv_do_reset(void *ptr)
* Use IPI to get all target uvhubs to release resources held by
* a given sending cpu number.
*/
-static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
- int sender)
+static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
{
int uvhub;
- int cpu;
+ int maskbits;
cpumask_t mask;
struct reset_args reset_args;
reset_args.sender = sender;
-
cpus_clear(mask);
/* find a single cpu for each uvhub in this distribution mask */
- for (uvhub = 0;
- uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
- uvhub++) {
+ maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
+ for (uvhub = 0; uvhub < maskbits; uvhub++) {
+ int cpu;
if (!bau_uvhub_isset(uvhub, distribution))
continue;
/* find a cpu for this uvhub */
cpu = uvhub_to_first_cpu(uvhub);
cpu_set(cpu, mask);
}
- /* IPI all cpus; Preemption is already disabled */
- smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+
+ /* IPI all cpus; preemption is already disabled */
+ smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
return;
}
-static inline unsigned long
-cycles_2_us(unsigned long long cyc)
+static inline unsigned long cycles_2_us(unsigned long long cyc)
{
unsigned long long ns;
unsigned long us;
- ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
- >> CYC2NS_SCALE_FACTOR;
+ int cpu = smp_processor_id();
+
+ ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
us = ns / 1000;
return us;
}
@@ -376,56 +407,56 @@ cycles_2_us(unsigned long long cyc)
* leaves uvhub_quiesce set so that no new broadcasts are started by
* bau_flush_send_and_wait()
*/
-static inline void
-quiesce_local_uvhub(struct bau_control *hmaster)
+static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
- atomic_add_short_return(1, (struct atomic_short *)
- &hmaster->uvhub_quiesce);
+ atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
/*
* mark this quiet-requestor as done
*/
-static inline void
-end_uvhub_quiesce(struct bau_control *hmaster)
+static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
- atomic_add_short_return(-1, (struct atomic_short *)
- &hmaster->uvhub_quiesce);
+ atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
+}
+
+static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
+{
+ unsigned long descriptor_status;
+
+ descriptor_status = uv_read_local_mmr(mmr_offset);
+ descriptor_status >>= right_shift;
+ descriptor_status &= UV_ACT_STATUS_MASK;
+ return descriptor_status;
}
/*
* Wait for completion of a broadcast software ack message
* return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
*/
-static int uv_wait_completion(struct bau_desc *bau_desc,
- unsigned long mmr_offset, int right_shift, int this_cpu,
- struct bau_control *bcp, struct bau_control *smaster, long try)
+static int uv1_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift,
+ struct bau_control *bcp, long try)
{
unsigned long descriptor_status;
- cycles_t ttime;
+ cycles_t ttm;
struct ptc_stats *stat = bcp->statp;
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
+ descriptor_status = uv1_read_status(mmr_offset, right_shift);
/* spin on the status MMR, waiting for it to go idle */
- while ((descriptor_status = (((unsigned long)
- uv_read_local_mmr(mmr_offset) >>
- right_shift) & UV_ACT_STATUS_MASK)) !=
- DESC_STATUS_IDLE) {
+ while ((descriptor_status != DS_IDLE)) {
/*
- * Our software ack messages may be blocked because there are
- * no swack resources available. As long as none of them
- * has timed out hardware will NACK our message and its
- * state will stay IDLE.
+ * Our software ack messages may be blocked because
+ * there are no swack resources available. As long
+ * as none of them has timed out hardware will NACK
+ * our message and its state will stay IDLE.
*/
- if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ if (descriptor_status == DS_SOURCE_TIMEOUT) {
stat->s_stimeout++;
return FLUSH_GIVEUP;
- } else if (descriptor_status ==
- DESC_STATUS_DESTINATION_TIMEOUT) {
+ } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
stat->s_dtimeout++;
- ttime = get_cycles();
+ ttm = get_cycles();
/*
* Our retries may be blocked by all destination
@@ -433,8 +464,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
* pending. In that case hardware returns the
* ERROR that looks like a destination timeout.
*/
- if (cycles_2_us(ttime - bcp->send_message) <
- timeout_us) {
+ if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
bcp->conseccompletes = 0;
return FLUSH_RETRY_PLUGGED;
}
@@ -447,80 +477,160 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
*/
cpu_relax();
}
+ descriptor_status = uv1_read_status(mmr_offset, right_shift);
}
bcp->conseccompletes++;
return FLUSH_COMPLETE;
}
-static inline cycles_t
-sec_2_cycles(unsigned long sec)
+/*
+ * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+ */
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
{
- unsigned long ns;
- cycles_t cyc;
+ unsigned long descriptor_status;
+ unsigned long descriptor_status2;
- ns = sec * 1000000000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
+ descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
+ descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+ descriptor_status = (descriptor_status << 1) | descriptor_status2;
+ return descriptor_status;
+}
+
+static int uv2_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift,
+ struct bau_control *bcp, long try)
+{
+ unsigned long descriptor_stat;
+ cycles_t ttm;
+ int cpu = bcp->uvhub_cpu;
+ struct ptc_stats *stat = bcp->statp;
+
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+
+ /* spin on the status MMR, waiting for it to go idle */
+ while (descriptor_stat != UV2H_DESC_IDLE) {
+ /*
+ * Our software ack messages may be blocked because
+ * there are no swack resources available. As long
+ * as none of them has timed out hardware will NACK
+ * our message and its state will stay IDLE.
+ */
+ if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
+ (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
+ (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttm = get_cycles();
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_stat is still BUSY
+ */
+ cpu_relax();
+ }
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ }
+ bcp->conseccompletes++;
+ return FLUSH_COMPLETE;
}
/*
- * conditionally add 1 to *v, unless *v is >= u
- * return 0 if we cannot add 1 to *v because it is >= u
- * return 1 if we can add 1 to *v because it is < u
- * the add is atomic
- *
- * This is close to atomic_add_unless(), but this allows the 'u' value
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
+ * There are 2 status registers; each an array[32] of 2 bits. Set up for
+ * which register to read and position in that register based on cpu in
+ * current hub.
*/
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static int wait_completion(struct bau_desc *bau_desc,
+ struct bau_control *bcp, long try)
{
- spin_lock(lock);
- if (atomic_read(v) >= u) {
- spin_unlock(lock);
- return 0;
+ int right_shift;
+ unsigned long mmr_offset;
+ int cpu = bcp->uvhub_cpu;
+
+ if (cpu < UV_CPUS_PER_AS) {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+ right_shift = cpu * UV_ACT_STATUS_SIZE;
+ } else {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+ right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
}
- atomic_inc(v);
- spin_unlock(lock);
- return 1;
+
+ if (is_uv1_hub())
+ return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
+ bcp, try);
+ else
+ return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
+ bcp, try);
+}
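As the comment above notes, each ACTIVATION_STATUS MMR is an array of 32 two-bit entries, so wait_completion() derives both the register and the bit position from the hub-relative cpu number before dispatching to the UV1 or UV2 spin loop. A worked example of that selection, assuming UV_CPUS_PER_AS == 32 and UV_ACT_STATUS_SIZE == 2 to match the comment (the actual macro values live in the header, not in this diff):

    /*
     * Worked example of the register/shift choice in wait_completion(),
     * under the assumption UV_CPUS_PER_AS == 32, UV_ACT_STATUS_SIZE == 2:
     *
     *   cpu  5  ->  UVH_LB_BAU_SB_ACTIVATION_STATUS_0, right_shift =  5 * 2 = 10
     *   cpu 40  ->  UVH_LB_BAU_SB_ACTIVATION_STATUS_1, right_shift = (40 - 32) * 2 = 16
     */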
+
+static inline cycles_t sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
}
/*
- * Our retries are blocked by all destination swack resources being
+ * Our retries are blocked by all destination sw ack resources being
* in use, and a timeout is pending. In that case hardware immediately
* returns the ERROR that looks like a destination timeout.
*/
-static void
-destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+static void destination_plugged(struct bau_desc *bau_desc,
+ struct bau_control *bcp,
struct bau_control *hmaster, struct ptc_stats *stat)
{
udelay(bcp->plugged_delay);
bcp->plugged_tries++;
+
if (bcp->plugged_tries >= bcp->plugsb4reset) {
bcp->plugged_tries = 0;
+
quiesce_local_uvhub(hmaster);
+
spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp->cpu);
spin_unlock(&hmaster->queue_lock);
+
end_uvhub_quiesce(hmaster);
+
bcp->ipi_attempts++;
stat->s_resets_plug++;
}
}
-static void
-destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
- struct bau_control *hmaster, struct ptc_stats *stat)
+static void destination_timeout(struct bau_desc *bau_desc,
+ struct bau_control *bcp, struct bau_control *hmaster,
+ struct ptc_stats *stat)
{
- hmaster->max_bau_concurrent = 1;
+ hmaster->max_concurr = 1;
bcp->timeout_tries++;
if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
bcp->timeout_tries = 0;
+
quiesce_local_uvhub(hmaster);
+
spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp->cpu);
spin_unlock(&hmaster->queue_lock);
+
end_uvhub_quiesce(hmaster);
+
bcp->ipi_attempts++;
stat->s_resets_timeout++;
}
@@ -530,34 +640,104 @@ destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
* Completions are taking a very long time due to a congested numalink
* network.
*/
-static void
-disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
+static void disable_for_congestion(struct bau_control *bcp,
+ struct ptc_stats *stat)
{
- int tcpu;
- struct bau_control *tbcp;
-
/* let only one cpu do this disabling */
spin_lock(&disable_lock);
+
if (!baudisabled && bcp->period_requests &&
((bcp->period_time / bcp->period_requests) > congested_cycles)) {
+ int tcpu;
+ struct bau_control *tbcp;
/* it becomes this cpu's job to turn on the use of the
BAU again */
baudisabled = 1;
bcp->set_bau_off = 1;
- bcp->set_bau_on_time = get_cycles() +
- sec_2_cycles(bcp->congested_period);
+ bcp->set_bau_on_time = get_cycles();
+ bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
stat->s_bau_disabled++;
for_each_present_cpu(tcpu) {
tbcp = &per_cpu(bau_control, tcpu);
- tbcp->baudisabled = 1;
+ tbcp->baudisabled = 1;
}
}
+
spin_unlock(&disable_lock);
}
-/**
- * uv_flush_send_and_wait
- *
+static void count_max_concurr(int stat, struct bau_control *bcp,
+ struct bau_control *hmaster)
+{
+ bcp->plugged_tries = 0;
+ bcp->timeout_tries = 0;
+ if (stat != FLUSH_COMPLETE)
+ return;
+ if (bcp->conseccompletes <= bcp->complete_threshold)
+ return;
+ if (hmaster->max_concurr >= hmaster->max_concurr_const)
+ return;
+ hmaster->max_concurr++;
+}
+
+static void record_send_stats(cycles_t time1, cycles_t time2,
+ struct bau_control *bcp, struct ptc_stats *stat,
+ int completion_status, int try)
+{
+ cycles_t elapsed;
+
+ if (time2 > time1) {
+ elapsed = time2 - time1;
+ stat->s_time += elapsed;
+
+ if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
+ bcp->period_requests++;
+ bcp->period_time += elapsed;
+ if ((elapsed > congested_cycles) &&
+ (bcp->period_requests > bcp->cong_reps))
+ disable_for_congestion(bcp, stat);
+ }
+ } else
+ stat->s_requestor--;
+
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP)
+ stat->s_giveup++;
+}
+
+/*
+ * Because of a uv1 hardware bug only a limited number of concurrent
+ * requests can be made.
+ */
+static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ spinlock_t *lock = &hmaster->uvhub_lock;
+ atomic_t *v;
+
+ v = &hmaster->active_descriptor_count;
+ if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
+ }
+}
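uv1_throttle() spins on atomic_inc_unless_ge(), whose open-coded copy this patch removes from the .c file (see the deleted hunk above); the helper is presumably picked up from a header from now on. For reference, a sketch that matches the removed implementation and the semantics its old comment described: atomically add 1 to *v only while *v < u, and report whether the increment happened.

    /*
     * Sketch of atomic_inc_unless_ge() as it looked before this patch
     * dropped the local copy.  Returns 1 and increments *v only if
     * *v < u; unlike atomic_add_unless(), u may be lowered below the
     * current value of *v.
     */
    static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
    {
    	spin_lock(lock);
    	if (atomic_read(v) >= u) {
    		spin_unlock(lock);
    		return 0;
    	}
    	atomic_inc(v);
    	spin_unlock(lock);
    	return 1;
    }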
+
+/*
+ * Handle the completion status of a message send.
+ */
+static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
+ struct bau_control *bcp, struct bau_control *hmaster,
+ struct ptc_stats *stat)
+{
+ if (completion_status == FLUSH_RETRY_PLUGGED)
+ destination_plugged(bau_desc, bcp, hmaster, stat);
+ else if (completion_status == FLUSH_RETRY_TIMEOUT)
+ destination_timeout(bau_desc, bcp, hmaster, stat);
+}
+
+/*
* Send a broadcast and wait for it to complete.
*
* The flush_mask contains the cpus the broadcast is to be sent to including
@@ -568,44 +748,23 @@ disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
* returned to the kernel.
*/
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
- struct cpumask *flush_mask, struct bau_control *bcp)
+ struct cpumask *flush_mask, struct bau_control *bcp)
{
- int right_shift;
- int completion_status = 0;
int seq_number = 0;
+ int completion_stat = 0;
long try = 0;
- int cpu = bcp->uvhub_cpu;
- int this_cpu = bcp->cpu;
- unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
- cycles_t elapsed;
struct ptc_stats *stat = bcp->statp;
- struct bau_control *smaster = bcp->socket_master;
struct bau_control *hmaster = bcp->uvhub_master;
- if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
- &hmaster->active_descriptor_count,
- hmaster->max_bau_concurrent)) {
- stat->s_throttles++;
- do {
- cpu_relax();
- } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
- &hmaster->active_descriptor_count,
- hmaster->max_bau_concurrent));
- }
+ if (is_uv1_hub())
+ uv1_throttle(hmaster, stat);
+
while (hmaster->uvhub_quiesce)
cpu_relax();
- if (cpu < UV_CPUS_PER_ACT_STATUS) {
- mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- right_shift = cpu * UV_ACT_STATUS_SIZE;
- } else {
- mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- right_shift =
- ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
- }
time1 = get_cycles();
do {
if (try == 0) {
@@ -615,64 +774,134 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
bau_desc->header.msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
+
bau_desc->header.sequence = seq_number;
- index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
- bcp->uvhub_cpu;
+ index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
- uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+ write_mmr_activation(index);
+
try++;
- completion_status = uv_wait_completion(bau_desc, mmr_offset,
- right_shift, this_cpu, bcp, smaster, try);
+ completion_stat = wait_completion(bau_desc, bcp, try);
+
+ handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
- if (completion_status == FLUSH_RETRY_PLUGGED) {
- destination_plugged(bau_desc, bcp, hmaster, stat);
- } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
- destination_timeout(bau_desc, bcp, hmaster, stat);
- }
if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
bcp->ipi_attempts = 0;
- completion_status = FLUSH_GIVEUP;
+ completion_stat = FLUSH_GIVEUP;
break;
}
cpu_relax();
- } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
- (completion_status == FLUSH_RETRY_TIMEOUT));
+ } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+ (completion_stat == FLUSH_RETRY_TIMEOUT));
+
time2 = get_cycles();
- bcp->plugged_tries = 0;
- bcp->timeout_tries = 0;
- if ((completion_status == FLUSH_COMPLETE) &&
- (bcp->conseccompletes > bcp->complete_threshold) &&
- (hmaster->max_bau_concurrent <
- hmaster->max_bau_concurrent_constant))
- hmaster->max_bau_concurrent++;
+
+ count_max_concurr(completion_stat, bcp, hmaster);
+
while (hmaster->uvhub_quiesce)
cpu_relax();
+
atomic_dec(&hmaster->active_descriptor_count);
- if (time2 > time1) {
- elapsed = time2 - time1;
- stat->s_time += elapsed;
- if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
- bcp->period_requests++;
- bcp->period_time += elapsed;
- if ((elapsed > congested_cycles) &&
- (bcp->period_requests > bcp->congested_reps)) {
- disable_for_congestion(bcp, stat);
+
+ record_send_stats(time1, time2, bcp, stat, completion_stat, try);
+
+ if (completion_stat == FLUSH_GIVEUP)
+ return 1;
+ return 0;
+}
+
+/*
+ * The BAU is disabled. When the disabled time period has expired, the cpu
+ * that disabled it must re-enable it.
+ * Return 0 if it is re-enabled for all cpus.
+ */
+static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+{
+ int tcpu;
+ struct bau_control *tbcp;
+
+ if (bcp->set_bau_off) {
+ if (get_cycles() >= bcp->set_bau_on_time) {
+ stat->s_bau_reenabled++;
+ baudisabled = 0;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 0;
+ tbcp->period_requests = 0;
+ tbcp->period_time = 0;
}
+ return 0;
}
+ }
+ return -1;
+}
+
+static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
+ int remotes, struct bau_desc *bau_desc)
+{
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes + locals;
+ stat->s_ntargremotes += remotes;
+ stat->s_ntarglocals += locals;
+
+ /* uvhub statistics */
+ hubs = bau_uvhub_weight(&bau_desc->distribution);
+ if (locals) {
+ stat->s_ntarglocaluvhub++;
+ stat->s_ntargremoteuvhub += (hubs - 1);
} else
- stat->s_requestor--;
- if (completion_status == FLUSH_COMPLETE && try > 1)
- stat->s_retriesok++;
- else if (completion_status == FLUSH_GIVEUP) {
- stat->s_giveup++;
- return 1;
+ stat->s_ntargremoteuvhub += hubs;
+
+ stat->s_ntarguvhub += hubs;
+
+ if (hubs >= 16)
+ stat->s_ntarguvhub16++;
+ else if (hubs >= 8)
+ stat->s_ntarguvhub8++;
+ else if (hubs >= 4)
+ stat->s_ntarguvhub4++;
+ else if (hubs >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
+}
+
+/*
+ * Translate a cpu mask to the uvhub distribution mask in the BAU
+ * activation descriptor.
+ */
+static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
+ struct bau_desc *bau_desc, int *localsp, int *remotesp)
+{
+ int cpu;
+ int pnode;
+ int cnt = 0;
+ struct hub_and_pnode *hpp;
+
+ for_each_cpu(cpu, flush_mask) {
+ /*
+ * The distribution vector is a bit map of pnodes, relative
+ * to the partition base pnode (and the partition base nasid
+ * in the header).
+ * Translate cpu to pnode and hub using a local memory array.
+ */
+ hpp = &bcp->socket_master->thp[cpu];
+ pnode = hpp->pnode - bcp->partition_base_pnode;
+ bau_uvhub_set(pnode, &bau_desc->distribution);
+ cnt++;
+ if (hpp->uvhub == bcp->uvhub)
+ (*localsp)++;
+ else
+ (*remotesp)++;
}
+ if (!cnt)
+ return 1;
return 0;
}
-/**
- * uv_flush_tlb_others - globally purge translation cache of a virtual
- * address or all TLB's
+/*
+ * globally purge translation cache of a virtual address or all TLB's
* @cpumask: mask of all cpu's in which the address is to be removed
* @mm: mm_struct containing virtual address range
* @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
@@ -696,20 +925,16 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
* done. The returned pointer is valid till preemption is re-enabled.
*/
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long va, unsigned int cpu)
+ struct mm_struct *mm, unsigned long va,
+ unsigned int cpu)
{
int locals = 0;
int remotes = 0;
int hubs = 0;
- int tcpu;
- int tpnode;
struct bau_desc *bau_desc;
struct cpumask *flush_mask;
struct ptc_stats *stat;
struct bau_control *bcp;
- struct bau_control *tbcp;
- struct hub_and_pnode *hpp;
/* kernel was booted 'nobau' */
if (nobau)
@@ -720,20 +945,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
/* bau was disabled due to slow response */
if (bcp->baudisabled) {
- /* the cpu that disabled it must re-enable it */
- if (bcp->set_bau_off) {
- if (get_cycles() >= bcp->set_bau_on_time) {
- stat->s_bau_reenabled++;
- baudisabled = 0;
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- tbcp->baudisabled = 0;
- tbcp->period_requests = 0;
- tbcp->period_time = 0;
- }
- }
- }
- return cpumask;
+ if (check_enable(bcp, stat))
+ return cpumask;
}
/*
@@ -744,59 +957,20 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
/* don't actually do a shootdown of the local cpu */
cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+
if (cpu_isset(cpu, *cpumask))
stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
- bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
+ bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
-
- for_each_cpu(tcpu, flush_mask) {
- /*
- * The distribution vector is a bit map of pnodes, relative
- * to the partition base pnode (and the partition base nasid
- * in the header).
- * Translate cpu to pnode and hub using an array stored
- * in local memory.
- */
- hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
- tpnode = hpp->pnode - bcp->partition_base_pnode;
- bau_uvhub_set(tpnode, &bau_desc->distribution);
- if (hpp->uvhub == bcp->uvhub)
- locals++;
- else
- remotes++;
- }
- if ((locals + remotes) == 0)
+ if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
return NULL;
- stat->s_requestor++;
- stat->s_ntargcpu += remotes + locals;
- stat->s_ntargremotes += remotes;
- stat->s_ntarglocals += locals;
- remotes = bau_uvhub_weight(&bau_desc->distribution);
- /* uvhub statistics */
- hubs = bau_uvhub_weight(&bau_desc->distribution);
- if (locals) {
- stat->s_ntarglocaluvhub++;
- stat->s_ntargremoteuvhub += (hubs - 1);
- } else
- stat->s_ntargremoteuvhub += hubs;
- stat->s_ntarguvhub += hubs;
- if (hubs >= 16)
- stat->s_ntarguvhub16++;
- else if (hubs >= 8)
- stat->s_ntarguvhub8++;
- else if (hubs >= 4)
- stat->s_ntarguvhub4++;
- else if (hubs >= 2)
- stat->s_ntarguvhub2++;
- else
- stat->s_ntarguvhub1++;
+ record_send_statistics(stat, locals, hubs, remotes, bau_desc);
bau_desc->payload.address = va;
bau_desc->payload.sending_cpu = cpu;
-
/*
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
* or 1 if it gave up and the original cpumask should be returned.
@@ -825,26 +999,31 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
{
int count = 0;
cycles_t time_start;
- struct bau_payload_queue_entry *msg;
+ struct bau_pq_entry *msg;
struct bau_control *bcp;
struct ptc_stats *stat;
struct msg_desc msgdesc;
time_start = get_cycles();
+
bcp = &per_cpu(bau_control, smp_processor_id());
stat = bcp->statp;
- msgdesc.va_queue_first = bcp->va_queue_first;
- msgdesc.va_queue_last = bcp->va_queue_last;
+
+ msgdesc.queue_first = bcp->queue_first;
+ msgdesc.queue_last = bcp->queue_last;
+
msg = bcp->bau_msg_head;
- while (msg->sw_ack_vector) {
+ while (msg->swack_vec) {
count++;
- msgdesc.msg_slot = msg - msgdesc.va_queue_first;
- msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+
+ msgdesc.msg_slot = msg - msgdesc.queue_first;
+ msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
msgdesc.msg = msg;
- uv_bau_process_message(&msgdesc, bcp);
+ bau_process_message(&msgdesc, bcp);
+
msg++;
- if (msg > msgdesc.va_queue_last)
- msg = msgdesc.va_queue_first;
+ if (msg > msgdesc.queue_last)
+ msg = msgdesc.queue_first;
bcp->bau_msg_head = msg;
}
stat->d_time += (get_cycles() - time_start);
@@ -852,18 +1031,17 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
stat->d_nomsg++;
else if (count > 1)
stat->d_multmsg++;
+
ack_APIC_irq();
}
/*
- * uv_enable_timeouts
- *
- * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
+ * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
* shootdown message timeouts enabled. The timeout does not cause
* an interrupt, but causes an error message to be returned to
* the sender.
*/
-static void __init uv_enable_timeouts(void)
+static void __init enable_timeouts(void)
{
int uvhub;
int nuvhubs;
@@ -877,47 +1055,44 @@ static void __init uv_enable_timeouts(void)
continue;
pnode = uv_blade_to_pnode(uvhub);
- mmr_image =
- uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = read_mmr_misc_control(pnode);
/*
* Set the timeout period and then lock it in, in three
* steps; captures and locks in the period.
*
* To program the period, the SOFT_ACK_MODE must be off.
*/
- mmr_image &= ~((unsigned long)1 <<
- UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image &= ~(1L << SOFTACK_MSHIFT);
+ write_mmr_misc_control(pnode, mmr_image);
/*
* Set the 4-bit period.
*/
- mmr_image &= ~((unsigned long)0xf <<
- UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
- mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
- UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
+ mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
+ write_mmr_misc_control(pnode, mmr_image);
/*
+ * UV1:
* Subsequent reversals of the timebase bit (3) cause an
* immediate timeout of one or all INTD resources as
* indicated in bits 2:0 (7 causes all of them to timeout).
*/
- mmr_image |= ((unsigned long)1 <<
- UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
- uv_write_global_mmr64
- (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ mmr_image |= (1L << SOFTACK_MSHIFT);
+ if (is_uv2_hub()) {
+ mmr_image |= (1L << UV2_LEG_SHFT);
+ mmr_image |= (1L << UV2_EXT_SHFT);
+ }
+ write_mmr_misc_control(pnode, mmr_image);
}
}
-static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
+static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
if (*offset < num_possible_cpus())
return offset;
return NULL;
}
-static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
+static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
(*offset)++;
if (*offset < num_possible_cpus())
@@ -925,12 +1100,11 @@ static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
return NULL;
}
-static void uv_ptc_seq_stop(struct seq_file *file, void *data)
+static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
-static inline unsigned long long
-microsec_2_cycles(unsigned long microsec)
+static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
unsigned long ns;
unsigned long long cyc;
@@ -941,29 +1115,27 @@ microsec_2_cycles(unsigned long microsec)
}
/*
- * Display the statistics thru /proc.
+ * Display the statistics thru /proc/sgi_uv/ptc_statistics
* 'data' points to the cpu number
+ * Note: see the descriptions in stat_description[].
*/
-static int uv_ptc_seq_show(struct seq_file *file, void *data)
+static int ptc_seq_show(struct seq_file *file, void *data)
{
struct ptc_stats *stat;
int cpu;
cpu = *(loff_t *)data;
-
if (!cpu) {
seq_printf(file,
"# cpu sent stime self locals remotes ncpus localhub ");
seq_printf(file,
"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
- seq_printf(file,
- "retries rok resetp resett giveup sto bz throt ");
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
seq_printf(file,
- "sw_ack recv rtime all ");
+ "resetp resett giveup sto bz throt swack recv rtime ");
seq_printf(file,
- "one mult none retry canc nocan reset rcan ");
+ "all one mult none retry canc nocan reset rcan ");
seq_printf(file,
"disable enable\n");
}
@@ -990,8 +1162,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
/* destination side statistics */
seq_printf(file,
"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
- UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
+ read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
stat->d_requestee, cycles_2_us(stat->d_time),
stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
stat->d_nomsg, stat->d_retries, stat->d_canceled,
@@ -1000,7 +1171,6 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "%ld %ld\n",
stat->s_bau_disabled, stat->s_bau_reenabled);
}
-
return 0;
}
@@ -1008,18 +1178,18 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
* Display the tunables thru debugfs
*/
static ssize_t tunables_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
char *buf;
int ret;
buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
- "max_bau_concurrent plugged_delay plugsb4reset",
+ "max_concur plugged_delay plugsb4reset",
"timeoutsb4reset ipi_reset_limit complete_threshold",
"congested_response_us congested_reps congested_period",
- max_bau_concurrent, plugged_delay, plugsb4reset,
+ max_concurr, plugged_delay, plugsb4reset,
timeoutsb4reset, ipi_reset_limit, complete_threshold,
- congested_response_us, congested_reps, congested_period);
+ congested_respns_us, congested_reps, congested_period);
if (!buf)
return -ENOMEM;
@@ -1030,13 +1200,16 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
}
/*
- * -1: resetf the statistics
+ * handle a write to /proc/sgi_uv/ptc_statistics
+ * -1: reset the statistics
* 0: display meaning of the statistics
*/
-static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
+static ssize_t ptc_proc_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
{
int cpu;
+ int i;
+ int elements;
long input_arg;
char optstr[64];
struct ptc_stats *stat;
@@ -1046,79 +1219,18 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
+
if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
}
if (input_arg == 0) {
+ elements = sizeof(stat_description)/sizeof(*stat_description);
printk(KERN_DEBUG "# cpu: cpu number\n");
printk(KERN_DEBUG "Sender statistics:\n");
- printk(KERN_DEBUG
- "sent: number of shootdown messages sent\n");
- printk(KERN_DEBUG
- "stime: time spent sending messages\n");
- printk(KERN_DEBUG
- "numuvhubs: number of hubs targeted with shootdown\n");
- printk(KERN_DEBUG
- "numuvhubs16: number times 16 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs8: number times 8 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs4: number times 4 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs2: number times 2 or more hubs targeted\n");
- printk(KERN_DEBUG
- "numuvhubs1: number times 1 hub targeted\n");
- printk(KERN_DEBUG
- "numcpus: number of cpus targeted with shootdown\n");
- printk(KERN_DEBUG
- "dto: number of destination timeouts\n");
- printk(KERN_DEBUG
- "retries: destination timeout retries sent\n");
- printk(KERN_DEBUG
- "rok: : destination timeouts successfully retried\n");
- printk(KERN_DEBUG
- "resetp: ipi-style resource resets for plugs\n");
- printk(KERN_DEBUG
- "resett: ipi-style resource resets for timeouts\n");
- printk(KERN_DEBUG
- "giveup: fall-backs to ipi-style shootdowns\n");
- printk(KERN_DEBUG
- "sto: number of source timeouts\n");
- printk(KERN_DEBUG
- "bz: number of stay-busy's\n");
- printk(KERN_DEBUG
- "throt: number times spun in throttle\n");
- printk(KERN_DEBUG "Destination side statistics:\n");
- printk(KERN_DEBUG
- "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
- printk(KERN_DEBUG
- "recv: shootdown messages received\n");
- printk(KERN_DEBUG
- "rtime: time spent processing messages\n");
- printk(KERN_DEBUG
- "all: shootdown all-tlb messages\n");
- printk(KERN_DEBUG
- "one: shootdown one-tlb messages\n");
- printk(KERN_DEBUG
- "mult: interrupts that found multiple messages\n");
- printk(KERN_DEBUG
- "none: interrupts that found no messages\n");
- printk(KERN_DEBUG
- "retry: number of retry messages processed\n");
- printk(KERN_DEBUG
- "canc: number messages canceled by retries\n");
- printk(KERN_DEBUG
- "nocan: number retries that found nothing to cancel\n");
- printk(KERN_DEBUG
- "reset: number of ipi-style reset requests processed\n");
- printk(KERN_DEBUG
- "rcan: number messages canceled by reset requests\n");
- printk(KERN_DEBUG
- "disable: number times use of the BAU was disabled\n");
- printk(KERN_DEBUG
- "enable: number times use of the BAU was re-enabled\n");
+ for (i = 0; i < elements; i++)
+ printk(KERN_DEBUG "%s\n", stat_description[i]);
} else if (input_arg == -1) {
for_each_present_cpu(cpu) {
stat = &per_cpu(ptcstats, cpu);
@@ -1145,27 +1257,18 @@ static int local_atoi(const char *name)
}
/*
- * set the tunables
- * 0 values reset them to defaults
+ * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
+ * Zero values reset them to defaults.
*/
-static ssize_t tunables_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
+static int parse_tunables_write(struct bau_control *bcp, char *instr,
+ int count)
{
- int cpu;
- int cnt = 0;
- int val;
char *p;
char *q;
- char instr[64];
- struct bau_control *bcp;
-
- if (count == 0 || count > sizeof(instr)-1)
- return -EINVAL;
- if (copy_from_user(instr, user, count))
- return -EFAULT;
+ int cnt = 0;
+ int val;
+ int e = sizeof(tunables) / sizeof(*tunables);
- instr[count] = '\0';
- /* count the fields */
p = instr + strspn(instr, WHITESPACE);
q = p;
for (; *p; p = q + strspn(q, WHITESPACE)) {
@@ -1174,8 +1277,8 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
if (q == p)
break;
}
- if (cnt != 9) {
- printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+ if (cnt != e) {
+ printk(KERN_INFO "bau tunable error: should be %d values\n", e);
return -EINVAL;
}
@@ -1187,97 +1290,80 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
switch (cnt) {
case 0:
if (val == 0) {
- max_bau_concurrent = MAX_BAU_CONCURRENT;
- max_bau_concurrent_constant =
- MAX_BAU_CONCURRENT;
+ max_concurr = MAX_BAU_CONCURRENT;
+ max_concurr_const = MAX_BAU_CONCURRENT;
continue;
}
- bcp = &per_cpu(bau_control, smp_processor_id());
if (val < 1 || val > bcp->cpus_in_uvhub) {
printk(KERN_DEBUG
"Error: BAU max concurrent %d is invalid\n",
val);
return -EINVAL;
}
- max_bau_concurrent = val;
- max_bau_concurrent_constant = val;
- continue;
- case 1:
- if (val == 0)
- plugged_delay = PLUGGED_DELAY;
- else
- plugged_delay = val;
- continue;
- case 2:
- if (val == 0)
- plugsb4reset = PLUGSB4RESET;
- else
- plugsb4reset = val;
- continue;
- case 3:
- if (val == 0)
- timeoutsb4reset = TIMEOUTSB4RESET;
- else
- timeoutsb4reset = val;
- continue;
- case 4:
- if (val == 0)
- ipi_reset_limit = IPI_RESET_LIMIT;
- else
- ipi_reset_limit = val;
- continue;
- case 5:
- if (val == 0)
- complete_threshold = COMPLETE_THRESHOLD;
- else
- complete_threshold = val;
- continue;
- case 6:
- if (val == 0)
- congested_response_us = CONGESTED_RESPONSE_US;
- else
- congested_response_us = val;
- continue;
- case 7:
- if (val == 0)
- congested_reps = CONGESTED_REPS;
- else
- congested_reps = val;
+ max_concurr = val;
+ max_concurr_const = val;
continue;
- case 8:
+ default:
if (val == 0)
- congested_period = CONGESTED_PERIOD;
+ *tunables[cnt].tunp = tunables[cnt].deflt;
else
- congested_period = val;
+ *tunables[cnt].tunp = val;
continue;
}
if (q == p)
break;
}
+ return 0;
+}
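parse_tunables_write() insists on exactly one field per entry of tunables[] (nine, whitespace separated, max_concurr first) and treats a zero in any field as a request to restore that tunable's default. An illustrative input string a user could write to the bau_tunables debugfs file to reset every tunable; the variable name exists only for this example.

    /* nine whitespace-separated fields, in tunables[] order; zeros restore
       the compile-time defaults when written to bau_tunables */
    static const char reset_all_tunables[] = "0 0 0 0 0 0 0 0 0";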
+
+/*
+ * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
+{
+ int cpu;
+ int ret;
+ char instr[100];
+ struct bau_control *bcp;
+
+ if (count == 0 || count > sizeof(instr)-1)
+ return -EINVAL;
+ if (copy_from_user(instr, user, count))
+ return -EFAULT;
+
+ instr[count] = '\0';
+
+ bcp = &per_cpu(bau_control, smp_processor_id());
+
+ ret = parse_tunables_write(bcp, instr, count);
+ if (ret)
+ return ret;
+
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
- bcp->max_bau_concurrent = max_bau_concurrent;
- bcp->max_bau_concurrent_constant = max_bau_concurrent;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->congested_response_us = congested_response_us;
- bcp->congested_reps = congested_reps;
- bcp->congested_period = congested_period;
+ bcp->max_concurr = max_concurr;
+ bcp->max_concurr_const = max_concurr;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->cong_response_us = congested_respns_us;
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
}
return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
- .start = uv_ptc_seq_start,
- .next = uv_ptc_seq_next,
- .stop = uv_ptc_seq_stop,
- .show = uv_ptc_seq_show
+ .start = ptc_seq_start,
+ .next = ptc_seq_next,
+ .stop = ptc_seq_stop,
+ .show = ptc_seq_show
};
-static int uv_ptc_proc_open(struct inode *inode, struct file *file)
+static int ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &uv_ptc_seq_ops);
}
@@ -1288,9 +1374,9 @@ static int tunables_open(struct inode *inode, struct file *file)
}
static const struct file_operations proc_uv_ptc_operations = {
- .open = uv_ptc_proc_open,
+ .open = ptc_proc_open,
.read = seq_read,
- .write = uv_ptc_proc_write,
+ .write = ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
@@ -1324,7 +1410,7 @@ static int __init uv_ptc_init(void)
return -EINVAL;
}
tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
- tunables_dir, NULL, &tunables_fops);
+ tunables_dir, NULL, &tunables_fops);
if (!tunables_file) {
printk(KERN_ERR "unable to create debugfs file %s\n",
UV_BAU_TUNABLES_FILE);
@@ -1336,24 +1422,24 @@ static int __init uv_ptc_init(void)
/*
* Initialize the sending side's sending buffers.
*/
-static void
-uv_activation_descriptor_init(int node, int pnode, int base_pnode)
+static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
unsigned long pa;
unsigned long m;
unsigned long n;
+ size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
struct bau_control *bcp;
/*
- * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
- * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
+ * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
+ * per cpu; and one per cpu on the uvhub (ADP_SZ)
*/
- bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
- * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
+ bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
BUG_ON(!bau_desc);
pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1361,27 +1447,25 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode)
m = pa & uv_mmask;
/* the 14-bit pnode */
- uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
- (n << UV_DESC_BASE_PNODE_SHIFT | m));
+ write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
/*
- * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+ * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
* cpu even though we only use the first one; one descriptor can
* describe a broadcast to 256 uv hubs.
*/
- for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
- i++, bd2++) {
+ for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- bd2->header.sw_ack_flag = 1;
+ bd2->header.swack_flag = 1;
/*
* The base_dest_nasid set in the message header is the nasid
* of the first uvhub in the partition. The bit map will
* indicate destination pnode numbers relative to that base.
* They may not be consecutive if nasid striding is being used.
*/
- bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
- bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
- bd2->header.command = UV_NET_ENDPOINT_INTD;
- bd2->header.int_both = 1;
+ bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+ bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
@@ -1401,57 +1485,55 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode)
* - node is first node (kernel memory notion) on the uvhub
* - pnode is the uvhub's physical identifier
*/
-static void
-uv_payload_queue_init(int node, int pnode)
+static void pq_init(int node, int pnode)
{
- int pn;
int cpu;
+ size_t plsize;
char *cp;
- unsigned long pa;
- struct bau_payload_queue_entry *pqp;
- struct bau_payload_queue_entry *pqp_malloc;
+ void *vp;
+ unsigned long pn;
+ unsigned long first;
+ unsigned long pn_first;
+ unsigned long last;
+ struct bau_pq_entry *pqp;
struct bau_control *bcp;
- pqp = kmalloc_node((DEST_Q_SIZE + 1)
- * sizeof(struct bau_payload_queue_entry),
- GFP_KERNEL, node);
+ plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
+ vp = kmalloc_node(plsize, GFP_KERNEL, node);
+ pqp = (struct bau_pq_entry *)vp;
BUG_ON(!pqp);
- pqp_malloc = pqp;
cp = (char *)pqp + 31;
- pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
+ pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
for_each_present_cpu(cpu) {
if (pnode != uv_cpu_to_pnode(cpu))
continue;
/* for every cpu on this pnode: */
bcp = &per_cpu(bau_control, cpu);
- bcp->va_queue_first = pqp;
- bcp->bau_msg_head = pqp;
- bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ bcp->queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
}
/*
* need the pnode of where the memory was really allocated
*/
- pa = uv_gpa(pqp);
- pn = pa >> uv_nshift;
- uv_write_global_mmr64(pnode,
- UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
- ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
- uv_physnodeaddr(pqp));
- uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
- uv_physnodeaddr(pqp));
- uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
- (unsigned long)
- uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ pn = uv_gpa(pqp) >> uv_nshift;
+ first = uv_physnodeaddr(pqp);
+ pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
+ last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
+ write_mmr_payload_first(pnode, pn_first);
+ write_mmr_payload_tail(pnode, first);
+ write_mmr_payload_last(pnode, last);
+
/* in effect, all msg_type's are set to MSG_NOOP */
- memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
+ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
/*
* Initialization of each UV hub's structures
*/
-static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
+static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
int node;
int pnode;
@@ -1459,24 +1541,24 @@ static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
node = uvhub_to_first_node(uvhub);
pnode = uv_blade_to_pnode(uvhub);
- uv_activation_descriptor_init(node, pnode, base_pnode);
- uv_payload_queue_init(node, pnode);
+
+ activation_descriptor_init(node, pnode, base_pnode);
+
+ pq_init(node, pnode);
/*
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
- ((apicid << 32) | vector));
+ write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
/*
* We will set BAU_MISC_CONTROL with a timeout period.
* But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
- * So the destination timeout period has be be calculated from them.
+ * So the destination timeout period has to be calculated from them.
*/
-static int
-calculate_destination_timeout(void)
+static int calculate_destination_timeout(void)
{
unsigned long mmr_image;
int mult1;
@@ -1486,73 +1568,92 @@ calculate_destination_timeout(void)
int ret;
unsigned long ts_ns;
- mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
- index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
- mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
- mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- base = timeout_base_ns[index];
- ts_ns = base * mult1 * mult2;
- ret = ts_ns / 1000;
+ if (is_uv1_hub()) {
+ mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+ mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+ mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+ base = timeout_base_ns[index];
+ ts_ns = base * mult1 * mult2;
+ ret = ts_ns / 1000;
+ } else {
+ /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+ mult1 = 80;
+ else
+ mult1 = 10;
+ base = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+ }
return ret;
}
+static void __init init_per_cpu_tunables(void)
+{
+ int cpu;
+ struct bau_control *bcp;
+
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->baudisabled = 0;
+ bcp->statp = &per_cpu(ptcstats, cpu);
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = usec_2_cycles(2*timeout_us);
+ bcp->max_concurr = max_concurr;
+ bcp->max_concurr_const = max_concurr;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->cong_response_us = congested_respns_us;
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
+ }
+}
+
/*
- * initialize the bau_control structure for each cpu
+ * Scan all cpus to collect blade and socket summaries.
*/
-static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
+static int __init get_cpu_topology(int base_pnode,
+ struct uvhub_desc *uvhub_descs,
+ unsigned char *uvhub_mask)
{
- int i;
int cpu;
- int tcpu;
int pnode;
int uvhub;
- int have_hmaster;
- short socket = 0;
- unsigned short socket_mask;
- unsigned char *uvhub_mask;
+ int socket;
struct bau_control *bcp;
struct uvhub_desc *bdp;
struct socket_desc *sdp;
- struct bau_control *hmaster = NULL;
- struct bau_control *smaster = NULL;
- struct socket_desc {
- short num_cpus;
- short cpu_number[MAX_CPUS_PER_SOCKET];
- };
- struct uvhub_desc {
- unsigned short socket_mask;
- short num_cpus;
- short uvhub;
- short pnode;
- struct socket_desc socket[2];
- };
- struct uvhub_desc *uvhub_descs;
-
- timeout_us = calculate_destination_timeout();
- uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
- memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
- uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
+
memset(bcp, 0, sizeof(struct bau_control));
+
pnode = uv_cpu_hub_info(cpu)->pnode;
- if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+ if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
printk(KERN_EMERG
"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
- cpu, pnode, base_part_pnode,
- UV_DISTRIBUTION_SIZE);
+ cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
return 1;
}
+
bcp->osnode = cpu_to_node(cpu);
- bcp->partition_base_pnode = uv_partition_base_pnode;
+ bcp->partition_base_pnode = base_pnode;
+
uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
bdp = &uvhub_descs[uvhub];
+
bdp->num_cpus++;
bdp->uvhub = uvhub;
bdp->pnode = pnode;
+
/* kludge: 'assuming' one node per socket, and assuming that
disabling a socket just leaves a gap in node numbers */
socket = bcp->osnode & 1;
@@ -1561,84 +1662,129 @@ static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
sdp->cpu_number[sdp->num_cpus] = cpu;
sdp->num_cpus++;
if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
- printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
+ printk(KERN_EMERG "%d cpus per socket invalid\n",
+ sdp->num_cpus);
return 1;
}
}
+ return 0;
+}
+
+/*
+ * Each socket is to get a local array of pnodes/hubs.
+ */
+static void make_per_cpu_thp(struct bau_control *smaster)
+{
+ int cpu;
+ size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
+
+ smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
+ memset(smaster->thp, 0, hpsz);
+ for_each_present_cpu(cpu) {
+ smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
+ smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ }
+}
+
+/*
+ * Initialize all the per_cpu information for the cpus on a given socket,
+ * given what has been gathered into the socket_desc struct,
+ * and report the chosen hub and socket masters back to the caller.
+ */
+static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
+ struct bau_control **smasterp,
+ struct bau_control **hmasterp)
+{
+ int i;
+ int cpu;
+ struct bau_control *bcp;
+
+ for (i = 0; i < sdp->num_cpus; i++) {
+ cpu = sdp->cpu_number[i];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (i == 0) {
+ *smasterp = bcp;
+ if (!(*hmasterp))
+ *hmasterp = bcp;
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = *smasterp;
+ bcp->uvhub = bdp->uvhub;
+ bcp->uvhub_master = *hmasterp;
+ bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
+ printk(KERN_EMERG "%d cpus per uvhub invalid\n",
+ bcp->uvhub_cpu);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Summarize the blade and socket topology into the per_cpu structures.
+ */
+static int __init summarize_uvhub_sockets(int nuvhubs,
+ struct uvhub_desc *uvhub_descs,
+ unsigned char *uvhub_mask)
+{
+ int socket;
+ int uvhub;
+ unsigned short socket_mask;
+
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ struct uvhub_desc *bdp;
+ struct bau_control *smaster = NULL;
+ struct bau_control *hmaster = NULL;
+
if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
continue;
- have_hmaster = 0;
+
bdp = &uvhub_descs[uvhub];
socket_mask = bdp->socket_mask;
socket = 0;
while (socket_mask) {
- if (!(socket_mask & 1))
- goto nextsocket;
- sdp = &bdp->socket[socket];
- for (i = 0; i < sdp->num_cpus; i++) {
- cpu = sdp->cpu_number[i];
- bcp = &per_cpu(bau_control, cpu);
- bcp->cpu = cpu;
- if (i == 0) {
- smaster = bcp;
- if (!have_hmaster) {
- have_hmaster++;
- hmaster = bcp;
- }
- }
- bcp->cpus_in_uvhub = bdp->num_cpus;
- bcp->cpus_in_socket = sdp->num_cpus;
- bcp->socket_master = smaster;
- bcp->uvhub = bdp->uvhub;
- bcp->uvhub_master = hmaster;
- bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
- blade_processor_id;
- if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
- printk(KERN_EMERG
- "%d cpus per uvhub invalid\n",
- bcp->uvhub_cpu);
+ struct socket_desc *sdp;
+ if ((socket_mask & 1)) {
+ sdp = &bdp->socket[socket];
+ if (scan_sock(sdp, bdp, &smaster, &hmaster))
return 1;
- }
}
-nextsocket:
socket++;
socket_mask = (socket_mask >> 1);
- /* each socket gets a local array of pnodes/hubs */
- bcp = smaster;
- bcp->target_hub_and_pnode = kmalloc_node(
- sizeof(struct hub_and_pnode) *
- num_possible_cpus(), GFP_KERNEL, bcp->osnode);
- memset(bcp->target_hub_and_pnode, 0,
- sizeof(struct hub_and_pnode) *
- num_possible_cpus());
- for_each_present_cpu(tcpu) {
- bcp->target_hub_and_pnode[tcpu].pnode =
- uv_cpu_hub_info(tcpu)->pnode;
- bcp->target_hub_and_pnode[tcpu].uvhub =
- uv_cpu_hub_info(tcpu)->numa_blade_id;
- }
+ make_per_cpu_thp(smaster);
}
}
+ return 0;
+}
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
+{
+ unsigned char *uvhub_mask;
+ void *vp;
+ struct uvhub_desc *uvhub_descs;
+
+ timeout_us = calculate_destination_timeout();
+
+ vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ uvhub_descs = (struct uvhub_desc *)vp;
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+
+ if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
+ return 1;
+
+ if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
+ return 1;
+
kfree(uvhub_descs);
kfree(uvhub_mask);
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->baudisabled = 0;
- bcp->statp = &per_cpu(ptcstats, cpu);
- /* time interval to catch a hardware stay-busy bug */
- bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
- bcp->max_bau_concurrent = max_bau_concurrent;
- bcp->max_bau_concurrent_constant = max_bau_concurrent;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->congested_response_us = congested_response_us;
- bcp->congested_reps = congested_reps;
- bcp->congested_period = congested_period;
- }
+ init_per_cpu_tunables();
return 0;
}
@@ -1651,8 +1797,9 @@ static int __init uv_bau_init(void)
int pnode;
int nuvhubs;
int cur_cpu;
+ int cpus;
int vector;
- unsigned long mmr;
+ cpumask_var_t *mask;
if (!is_uv_system())
return 0;
@@ -1660,24 +1807,25 @@ static int __init uv_bau_init(void)
if (nobau)
return 0;
- for_each_possible_cpu(cur_cpu)
- zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
- GFP_KERNEL, cpu_to_node(cur_cpu));
+ for_each_possible_cpu(cur_cpu) {
+ mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
+ zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
+ }
uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
nuvhubs = uv_num_possible_blades();
spin_lock_init(&disable_lock);
- congested_cycles = microsec_2_cycles(congested_response_us);
+ congested_cycles = usec_2_cycles(congested_respns_us);
- uv_partition_base_pnode = 0x7fffffff;
+ uv_base_pnode = 0x7fffffff;
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- if (uv_blade_nr_possible_cpus(uvhub) &&
- (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
- uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+ cpus = uv_blade_nr_possible_cpus(uvhub);
+ if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
+ uv_base_pnode = uv_blade_to_pnode(uvhub);
}
- if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
+ if (init_per_cpu(nuvhubs, uv_base_pnode)) {
nobau = 1;
return 0;
}
@@ -1685,21 +1833,21 @@ static int __init uv_bau_init(void)
vector = UV_BAU_MESSAGE;
for_each_possible_blade(uvhub)
if (uv_blade_nr_possible_cpus(uvhub))
- uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
+ init_uvhub(uvhub, vector, uv_base_pnode);
- uv_enable_timeouts();
+ enable_timeouts();
alloc_intr_gate(vector, uv_bau_message_intr1);
for_each_possible_blade(uvhub) {
if (uv_blade_nr_possible_cpus(uvhub)) {
+ unsigned long val;
+ unsigned long mmr;
pnode = uv_blade_to_pnode(uvhub);
/* INIT the bau */
- uv_write_global_mmr64(pnode,
- UVH_LB_BAU_SB_ACTIVATION_CONTROL,
- ((unsigned long)1 << 63));
+ val = 1L << 63;
+ write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
- mmr);
+ write_mmr_data_broadcast(pnode, mmr);
}
}
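
For reference, the UV2 branch of calculate_destination_timeout() above reduces to a units-times-multiplier product read out of UVH_AGING_PRESCALE_SEL. A minimal sketch of just that branch, using the mask and shift macros the patch references, with a purely hypothetical worked value in the comments:

	/*
	 * Sketch of the UV2-only branch of calculate_destination_timeout():
	 * the softack field carries a units bit (10us or 80us) above a small
	 * multiplier, so the destination timeout is units * multiplier, in
	 * microseconds. Hypothetical example: units = 80us, multiplier = 3
	 * gives a 240 microsecond timeout.
	 */
	static int uv2_destination_timeout_us(unsigned long mmr_image)
	{
		unsigned long softack = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		int units = (softack & (1L << UV2_ACK_UNITS_SHFT)) ? 80 : 10;

		return units * (int)(softack & UV2_ACK_MASK);
	}
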
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 0eb9018..9f29a01 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -99,8 +99,12 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
- UVH_EVENT_OCCURRED0_RTC1_MASK;
+ if (is_uv1_hub())
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+ UV1H_EVENT_OCCURRED0_RTC1_MASK;
+ else
+ return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
+ UV2H_EVENT_OCCURRED2_RTC_1_MASK;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -114,8 +118,12 @@ static int uv_setup_intr(int cpu, u64 expires)
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
- UVH_EVENT_OCCURRED0_RTC1_MASK);
+ if (is_uv1_hub())
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+ UV1H_EVENT_OCCURRED0_RTC1_MASK);
+ else
+ uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
+ UV2H_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
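
The same UV1/UV2 split now appears in both uv_intr_pending() and uv_setup_intr(). One possible consolidation, not part of this patch, would gather the per-revision register and mask once:

	/*
	 * Hypothetical helper (not in the patch): select the event-occurred
	 * register and RTC mask per hub revision, so uv_intr_pending()
	 * reduces to a single read-and-mask.
	 */
	static void uv_rtc_event_regs(unsigned long *reg, unsigned long *mask)
	{
		if (is_uv1_hub()) {
			*reg = UVH_EVENT_OCCURRED0;
			*mask = UV1H_EVENT_OCCURRED0_RTC1_MASK;
		} else {
			*reg = UV2H_EVENT_OCCURRED2;
			*mask = UV2H_EVENT_OCCURRED2_RTC_1_MASK;
		}
	}

	static int uv_intr_pending(int pnode)
	{
		unsigned long reg, mask;

		uv_rtc_event_regs(&reg, &mask);
		return uv_read_global_mmr64(pnode, reg) & mask;
	}
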
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 528042c..a6f934f 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -683,8 +683,10 @@ __SYSCALL(305, sys_ni_syscall, 0)
__SYSCALL(306, sys_eventfd, 1)
#define __NR_recvmmsg 307
__SYSCALL(307, sys_recvmmsg, 5)
+#define __NR_setns 308
+__SYSCALL(308, sys_setns, 2)
-#define __NR_syscall_count 308
+#define __NR_syscall_count 309
/*
* sysxtensa syscall handler
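
The new slot wires up the generic setns() system call for xtensa. A hedged userspace sketch, assuming a namespace file descriptor taken from /proc and a libc that provides the setns() wrapper; the target path is hypothetical:

	/*
	 * Userspace sketch (assumption, not part of the patch): join another
	 * process's network namespace via the newly wired-up setns().
	 */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = open("/proc/1/ns/net", O_RDONLY);	/* hypothetical target */

		if (fd < 0 || setns(fd, CLONE_NEWNET)) {
			perror("setns");
			return 1;
		}
		return 0;
	}
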
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f58..09f3232 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218d..de0e3df 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
+config ACPI_CUSTOM_METHOD
+ tristate "Allow ACPI methods to be inserted/replaced at run time"
+ depends on DEBUG_FS
+ default n
+ help
+	  This debug facility allows ACPI AML methods to be inserted and/or
+ replaced without rebooting the system. For details refer to:
+ Documentation/acpi/method-customizing.txt.
+
+ NOTE: This option is security sensitive, because it allows arbitrary
+ kernel memory to be written to by root (uid=0) users, allowing them
+ to bypass certain security measures (e.g. if root is not allowed to
+ load additional kernel modules after boot, this feature may be used
+ to override that restriction).
+
source "drivers/acpi/apei/Kconfig"
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2..ecb26b4 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
+obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a122471..301bd2d 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396..bc533dd 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
/* Operation regions */
-#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247d..bea3b48 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
*/
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
+ * evglock - Global Lock support
+ */
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*
* evgpe - Low-level GPE support
*/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b..73863d86 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*
* Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
+ * Global lock semaphore works in conjunction with the actual global lock
+ * Global lock spinlock is used for "pending" handshake
*/
ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
-#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
+ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998..1077f17 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
#define AML_CLASS_METHOD_CALL 0x09
#define AML_CLASS_UNKNOWN 0x0A
-/* Predefined Operation Region space_iDs */
-
-typedef enum {
- REGION_MEMORY = 0,
- REGION_IO,
- REGION_PCI_CONFIG,
- REGION_EC,
- REGION_SMBUS,
- REGION_CMOS,
- REGION_PCI_BAR,
- REGION_IPMI,
- REGION_DATA_TABLE, /* Internal use only */
- REGION_FIXED_HW = 0x7F
-} AML_REGION_TYPES;
-
/* Comparison operation codes for match_op operator */
typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1a..324acec 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ex_create_region(op->named.data,
op->named.length,
- REGION_DATA_TABLE,
+ ACPI_ADR_SPACE_DATA_TABLE,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e92..9763181 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
((op->common.value.arg)->common.value.
integer);
} else {
- region_space = REGION_DATA_TABLE;
+ region_space = ACPI_ADR_SPACE_DATA_TABLE;
}
/*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 0000000..56a562a
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
+/******************************************************************************
+ *
+ * Module Name: evglock - Global Lock support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evglock")
+
+/* Local prototypes */
+static u32 acpi_ev_global_lock_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ /* Attempt installation of the global lock handler */
+
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt to
+ * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+ * Map to AE_OK, but mark global lock as not present. Any attempt to
+ * actually use the global lock will be flagged with an error.
+ */
+ acpi_gbl_global_lock_present = FALSE;
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_gbl_global_lock_present = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. If there is actually a pending
+ * request for the lock, signal the waiting thread.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ /*
+ * If a request for the global lock is not actually pending,
+ * we are done. This handles "spurious" global lock interrupts
+ * which are possible (and have been seen) with bad BIOSs.
+ */
+ if (!acpi_gbl_global_lock_pending) {
+ goto cleanup_and_exit;
+ }
+
+ /*
+ * Send a unit to the global lock semaphore. The actual acquisition
+ * of the global lock will be performed by the waiting thread.
+ */
+ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+
+ cleanup_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+ return (ACPI_INTERRUPT_HANDLED);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_cpu_flags flags;
+ acpi_status status;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
+ os_mutex, timeout);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just
+ * treat the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ do {
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+ if (acquired) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+ break;
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and
+ * we must now wait until we receive the global lock
+ * released interrupt.
+ */
+ acpi_gbl_global_lock_pending = TRUE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_semaphore
+ (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ } while (ACPI_SUCCESS(status));
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_write_bit_register
+ (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ ACPI_ENABLE_EVENT);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
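+
+Drivers normally reach this handshake through ACPICA's external interfaces rather than the acpi_ev_* routines directly. A minimal usage sketch, assuming the standard acpi_acquire_global_lock()/acpi_release_global_lock() exports and a hypothetical caller that must serialize against firmware:
+
+	/*
+	 * Usage sketch (not part of this patch): a hypothetical caller taking
+	 * the ACPI Global Lock around a resource shared with the BIOS. These
+	 * external interfaces funnel into acpi_ev_acquire_global_lock() and
+	 * acpi_ev_release_global_lock() above.
+	 */
+	#include <acpi/acpi.h>
+
+	static acpi_status locked_firmware_access(void)
+	{
+		acpi_status status;
+		u32 handle;
+
+		status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &handle);
+		if (ACPI_FAILURE(status))
+			return status;
+
+		/* ... touch the firmware-shared resource here ... */
+
+		return acpi_release_global_lock(handle);
+	}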
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc8094..d0b3318 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_global_lock_handler
- *
- * PARAMETERS: Context - From thread interface, not used
- *
- * RETURN: ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- * release interrupt occurs. If there's a thread waiting for
- * the global lock, signal it.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-static u8 acpi_ev_global_lock_pending;
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- if (!acpi_ev_global_lock_pending) {
- goto out;
- }
-
- /* Send a unit to the semaphore */
-
- status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
- }
-
- acpi_ev_global_lock_pending = FALSE;
-
- out:
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_init_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
- /* Attempt installation of the global lock handler */
-
- status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler,
- NULL);
-
- /*
- * If the global lock does not exist on this platform, the attempt to
- * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
- * Map to AE_OK, but mark global lock as not present. Any attempt to
- * actually use the global lock will be flagged with an error.
- */
- if (status == AE_NO_HARDWARE_RESPONSE) {
- ACPI_ERROR((AE_INFO,
- "No response from Global Lock hardware, disabling lock"));
-
- acpi_gbl_global_lock_present = FALSE;
- return_ACPI_STATUS(AE_OK);
- }
-
- acpi_gbl_global_lock_present = TRUE;
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
- acpi_gbl_global_lock_present = FALSE;
- status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ev_acquire_global_lock
- *
- * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX: Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
- acpi_cpu_flags flags;
- acpi_status status = AE_OK;
- u8 acquired = FALSE;
-
- ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
- /*
- * Only one thread can acquire the GL at a time, the global_lock_mutex
- * enforces this. This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
- if (status == AE_TIME) {
- if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
- acpi_ev_global_lock_acquired++;
- return AE_OK;
- }
- }
-
- if (ACPI_FAILURE(status)) {
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex,
- timeout);
- }
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
- acpi_ev_global_lock_acquired++;
-
- /*
- * Update the global lock handle and check for wraparound. The handle is
- * only used for the external global lock interfaces, but it is updated
- * here to properly handle the case where a single thread may acquire the
- * lock via both the AML and the acpi_acquire_global_lock interfaces. The
- * handle is therefore updated on the first acquire from a given thread
- * regardless of where the acquisition request originated.
- */
- acpi_gbl_global_lock_handle++;
- if (acpi_gbl_global_lock_handle == 0) {
- acpi_gbl_global_lock_handle = 1;
- }
-
- /*
- * Make sure that a global lock actually exists. If not, just treat the
- * lock as a standard mutex.
- */
- if (!acpi_gbl_global_lock_present) {
- acpi_gbl_global_lock_acquired = TRUE;
- return_ACPI_STATUS(AE_OK);
- }
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- do {
-
- /* Attempt to acquire the actual hardware lock */
-
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
- acpi_gbl_global_lock_acquired = TRUE;
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Acquired hardware Global Lock\n"));
- break;
- }
-
- acpi_ev_global_lock_pending = TRUE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- /*
- * Did not get the lock. The pending bit was set above, and we
- * must wait until we get the global lock released interrupt.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Waiting for hardware Global Lock\n"));
-
- /*
- * Wait for handshake with the global lock interrupt handler.
- * This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_semaphore(
- acpi_gbl_global_lock_semaphore,
- ACPI_WAIT_FOREVER);
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- } while (ACPI_SUCCESS(status));
-
- acpi_ev_global_lock_pending = FALSE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_release_global_lock
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
- u8 pending = FALSE;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
- /* Lock must be already acquired */
-
- if (!acpi_gbl_global_lock_acquired) {
- ACPI_WARNING((AE_INFO,
- "Cannot release the ACPI Global Lock, it has not been acquired"));
- return_ACPI_STATUS(AE_NOT_ACQUIRED);
- }
-
- acpi_ev_global_lock_acquired--;
- if (acpi_ev_global_lock_acquired > 0) {
- return AE_OK;
- }
-
- if (acpi_gbl_global_lock_present) {
-
- /* Allow any thread to release the lock */
-
- ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
-
- /*
- * If the pending bit was set, we must write GBL_RLS to the control
- * register
- */
- if (pending) {
- status =
- acpi_write_bit_register
- (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
- ACPI_ENABLE_EVENT);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Released hardware Global Lock\n"));
- }
-
- acpi_gbl_global_lock_acquired = FALSE;
-
- /* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = 0;
- acpi_ev_global_lock_acquired = 0;
- acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
- return_ACPI_STATUS(status);
-}
-
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223..f0edf5c 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
+static void acpi_ev_orphan_ec_reg_method(void);
+
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Now stop region accesses by executing the _REG method */
- status = acpi_ev_execute_reg_method(region_obj, 0);
+ status =
+ acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_DISCONNECT);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
NULL, &space_id, NULL);
+ /* Special case for EC: handle "orphan" _REG methods with no region */
+
+ if (space_id == ACPI_ADR_SPACE_EC) {
+ acpi_ev_orphan_ec_reg_method();
+ }
+
return_ACPI_STATUS(status);
}
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
return (AE_OK);
}
- status = acpi_ev_execute_reg_method(obj_desc, 1);
+ status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_orphan_ec_reg_method
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ * device. This is a _REG method that has no corresponding region
+ * within the EC device scope. The orphan _REG method appears to
+ * have been enabled by the description of the ECDT in the ACPI
+ * specification: "The availability of the region space can be
+ * detected by providing a _REG method object underneath the
+ * Embedded Controller device."
+ *
+ * To quickly access the EC device, we use the EC_ID that appears
+ * within the ECDT. Otherwise, we would need to perform a time-
+ * consuming namespace walk, executing _HID methods to find the
+ * EC device.
+ *
+ ******************************************************************************/
+
+static void acpi_ev_orphan_ec_reg_method(void)
+{
+ struct acpi_table_ecdt *table;
+ acpi_status status;
+ struct acpi_object_list args;
+ union acpi_object objects[2];
+ struct acpi_namespace_node *ec_device_node;
+ struct acpi_namespace_node *reg_method;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+
+ /* Get the ECDT (if present in system) */
+
+ status = acpi_get_table(ACPI_SIG_ECDT, 0,
+ ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
+ &table));
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* We need a valid EC_ID string */
+
+ if (!(*table->id)) {
+ return_VOID;
+ }
+
+ /* Namespace is currently locked, must release */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Get a handle to the EC device referenced in the ECDT */
+
+ status = acpi_get_handle(NULL,
+ ACPI_CAST_PTR(char, table->id),
+ ACPI_CAST_PTR(acpi_handle, &ec_device_node));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Get a handle to a _REG method immediately under the EC device */
+
+ status = acpi_get_handle(ec_device_node,
+ METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
+ &reg_method));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /*
+ * Execute the _REG method only if there is no Operation Region in
+ * this scope with the Embedded Controller space ID. Otherwise, it
+ * will already have been executed. Note, this allows for Regions
+ * with other space IDs to be present; but the code below will then
+ * execute the _REG method with the EC space ID argument.
+ */
+ next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+ while (next_node) {
+ if ((next_node->type == ACPI_TYPE_REGION) &&
+ (next_node->object) &&
+ (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+ goto exit; /* Do not execute _REG */
+ }
+ next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+ }
+
+ /* Evaluate the _REG(EC,Connect) method */
+
+ args.count = 2;
+ args.pointer = objects;
+ objects[0].type = ACPI_TYPE_INTEGER;
+ objects[0].integer.value = ACPI_ADR_SPACE_EC;
+ objects[1].type = ACPI_TYPE_INTEGER;
+ objects[1].integer.value = ACPI_REG_CONNECT;
+
+ status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+
+ exit:
+ /* We ignore all errors from above, don't care */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ return_VOID;
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee..55a5d35 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
status =
acpi_ev_execute_reg_method
- (region_obj, 1);
+ (region_obj, ACPI_REG_CONNECT);
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c4..00cd956 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
- if (acpi_gbl_reg_methods_executed) {
+ if (!acpi_gbl_reg_methods_executed) {
- /* Run all _REG methods for this address space */
-
- status = acpi_ev_execute_reg_methods(node, space_id);
+ /* We will defer execution of the _REG methods for this space */
+ goto unlock_and_exit;
}
break;
default:
-
- status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d..110711a 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
* range
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN)) {
+ (region_space < ACPI_USER_REGION_BEGIN) &&
+ (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac8..ac7b854 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
*
* Additional possible repairs:
*
- * Optional/unnecessary NULL package elements removed
* Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package
*
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
ACPI_FUNCTION_NAME(ns_remove_null_elements);
/*
- * PTYPE1 packages contain no subpackages.
- * PTYPE2 packages contain a variable number of sub-packages. We can
- * safely remove all NULL elements from the PTYPE2 packages.
+ * We can safely remove all NULL elements from these package types:
+ * PTYPE1_VAR packages contain a variable number of simple data types.
+ * PTYPE2 packages contain a variable number of sub-packages.
*/
switch (package_type) {
- case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_VAR:
- case ACPI_PTYPE1_OPTION:
- return;
-
case ACPI_PTYPE2:
case ACPI_PTYPE2_COUNT:
case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
break;
default:
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_OPTION:
return;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814..97cb36f 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI",
- "DataTable"
+ "IPMI"
};
char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
+ } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
+ return ("DataTable");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c68..7d797e2 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
/* Create the spinlocks for use at interrupt level */
- spin_lock_init(acpi_gbl_gpe_lock);
- spin_lock_init(acpi_gbl_hardware_lock);
- spin_lock_init(acpi_ev_global_lock_pending_lock);
+ status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+
+ status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980..d1e06c1 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
acpi_status status = AE_OK;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 0000000..5d42c24
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
+/*
+ * debugfs.c - ACPI debugfs interface to userspace.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("custom_method");
+MODULE_LICENSE("GPL");
+
+static struct dentry *cm_dentry;
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
+ return -EINVAL;
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ buf = NULL;
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ buf = NULL;
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+ .llseek = default_llseek,
+};
+
+static int __init acpi_custom_method_init(void)
+{
+ if (acpi_debugfs_dir == NULL)
+ return -ENOENT;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
+ acpi_debugfs_dir, NULL, &cm_fops);
+ if (cm_dentry == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_custom_method_exit(void)
+{
+ if (cm_dentry)
+ debugfs_remove(cm_dentry);
+ }
+
+module_init(acpi_custom_method_init);
+module_exit(acpi_custom_method_exit);
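
From userspace the interface is a plain write of a complete AML table to the debugfs file: cm_write() sizes its buffer from the table header's length field and installs the method once every byte has arrived. A short sketch, assuming an AML blob named method.aml produced by an ASL compiler and a mounted debugfs:

	/*
	 * Userspace sketch (assumption, not part of the patch): copy an AML
	 * blob into /sys/kernel/debug/acpi/custom_method. The input file name
	 * "method.aml" is hypothetical.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int in = open("method.aml", O_RDONLY);
		int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

		if (in < 0 || out < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(in, buf, sizeof(buf))) > 0) {
			if (write(out, buf, n) != n) {
				perror("write");
				return 1;
			}
		}
		close(out);
		close(in);
		return 0;
	}
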
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7ab..182a9fc 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
+struct dentry *acpi_debugfs_dir;
+EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
-/* /sys/modules/acpi/parameters/aml_debug_output */
-
-module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
-MODULE_PARM_DESC(aml_debug_output,
- "To enable/disable the ACPI Debug Object output.");
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user * user_buf,
- size_t count, loff_t *ppos)
+void __init acpi_debugfs_init(void)
{
- static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
- struct acpi_table_header table;
- acpi_status status;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = max_size = table.length;
- buf = kzalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes))
- return -EINVAL;
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- buf = NULL;
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- buf = NULL;
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
- .llseek = default_llseek,
-};
-
-int __init acpi_debugfs_init(void)
-{
- struct dentry *acpi_dir, *cm_dentry;
-
- acpi_dir = debugfs_create_dir("acpi", NULL);
- if (!acpi_dir)
- goto err;
-
- cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
- acpi_dir, NULL, &cm_fops);
- if (!cm_dentry)
- goto err;
-
- return 0;
-
-err:
- if (acpi_dir)
- debugfs_remove(acpi_dir);
- return -EINVAL;
+ acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4..b19a18d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
-#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
- int force_poll)
+ u8 * rdata, unsigned rdata_len)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
mutex_unlock(&ec->lock);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
-
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
return -EINVAL;
}
- ec->handle = device->handle;
-
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
{
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
+ {
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
{},
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759d..ca75b9c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
int acpi_sysfs_init(void);
#ifdef CONFIG_DEBUG_FS
+extern struct dentry *acpi_debugfs_dir;
int acpi_debugfs_init(void);
#else
-static inline int acpi_debugfs_init(void) { return 0; }
+static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ff..52ca964 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
-/*
- * Deallocate the memory for a spinlock.
- */
-void acpi_os_delete_lock(acpi_spinlock handle)
-{
- return;
-}
-
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
+ * Create and initialize a spinlock.
+ */
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle)
+{
+ spinlock_t *lock;
+
+ lock = ACPI_ALLOCATE(sizeof(spinlock_t));
+ if (!lock)
+ return AE_NO_MEMORY;
+ spin_lock_init(lock);
+ *out_handle = lock;
+
+ return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ ACPI_FREE(handle);
+}
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
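
This osl.c change gives the OSL spinlock helpers real Linux bodies: acpi_os_create_lock() now allocates and initializes a spinlock_t, and acpi_os_delete_lock() frees it instead of being a stub. The expected pairing with the existing acquire/release helpers, as a hedged sketch (the lock and the critical section are hypothetical, and the acquire/release prototypes are assumed to be the usual osl.c ones):

#include <acpi/acpi.h>

static acpi_spinlock example_lock;	/* hypothetical lock, for illustration */

static acpi_status example_osl_lock_usage(void)
{
	acpi_cpu_flags flags;
	acpi_status status;

	status = acpi_os_create_lock(&example_lock);
	if (ACPI_FAILURE(status))
		return status;

	flags = acpi_os_acquire_lock(example_lock);
	/* ... touch data shared with interrupt/SCI context ... */
	acpi_os_release_lock(example_lock, flags);

	acpi_os_delete_lock(example_lock);
	return AE_OK;
}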
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17d..02d2a4c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
{},
};
-#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -165,7 +164,9 @@ exit:
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
+#ifdef CONFIG_SMP
int i;
+#endif
int apic_id = -1;
apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1)
return apic_id;
+#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
+#else
+ /* In a UP kernel, only processor 0 is valid */
+ if (apic_id == 0)
+ return apic_id;
+#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#endif
static bool __init processor_physically_present(acpi_handle handle)
{
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
- if ((cpuid == -1) && (num_possible_cpus() > 1))
+ if (cpuid == -1)
return false;
return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d..431ab11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (c1e_detected)
+ if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e7..77255f2 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+ bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+ "To enable/disable the ACPI Debug Object output.");
+
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7025593..d74926e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -603,6 +603,10 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_out;
+ /* Hard-coded primecell ID instead of plug-n-play */
+ if (dev->periphid != 0)
+ goto skip_probe;
+
/*
* Dynamically calculate the size of the resource
* and use this for iomap
@@ -643,6 +647,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_release;
+ skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
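
The bus.c change lets a board file that already knows the PrimeCell ID pre-set periphid, in which case amba_device_register() no longer maps the device just to read the CID/PID words. Roughly what such a static registration might look like; the device name, base address, IRQ and ID below are made-up examples, not taken from this patch:

#include <linux/amba/bus.h>
#include <linux/ioport.h>
#include <linux/init.h>

static struct amba_device example_uart_device = {	/* hypothetical device */
	.dev = {
		.init_name = "example-uart",
	},
	.res = {
		.start	= 0x10009000,
		.end	= 0x10009fff,
		.flags	= IORESOURCE_MEM,
	},
	.irq		= { 36 },
	.periphid	= 0x00041011,	/* ID known up front: probing is skipped */
};

static int __init example_board_amba_init(void)
{
	return amba_device_register(&example_uart_device, &iomem_resource);
}
device_initcall(example_board_amba_init);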
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f885..98de8f4 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
{
unsigned long flags;
+ WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
spin_lock_irqsave(&floppy_hlt_lock, flags);
if (!hlt_disabled) {
hlt_disabled = 1;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a0aabd9..46b8136 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -321,7 +321,6 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89c..079c088 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,13 @@
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <scsi/scsi_cmnd.h>
#define PART_BITS 4
static int major, index;
+struct workqueue_struct *virtblk_wq;
struct virtio_blk
{
@@ -26,6 +29,9 @@ struct virtio_blk
mempool_t *pool;
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -141,7 +147,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
- sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
sizeof(vbr->in_hdr));
}
@@ -291,6 +297,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+ struct virtio_blk *vblk =
+ container_of(work, struct virtio_blk, config_work);
+ struct virtio_device *vdev = vblk->vdev;
+ struct request_queue *q = vblk->disk->queue;
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+
+ /* If capacity is too big, truncate with warning. */
+ if ((sector_t)capacity != capacity) {
+ dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+ (unsigned long long)capacity);
+ capacity = (sector_t)-1;
+ }
+
+ size = capacity * queue_logical_block_size(q);
+ string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+
+ dev_notice(&vdev->dev,
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ (unsigned long long)capacity,
+ queue_logical_block_size(q),
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ queue_work(virtblk_wq, &vblk->config_work);
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -327,6 +373,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
/* We expect one virtqueue, for output. */
vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +524,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ flush_work(&vblk->config_work);
+
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -508,27 +557,47 @@ static unsigned int features[] = {
* Use __refdata to avoid this warning.
*/
static struct virtio_driver __refdata virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = __devexit_p(virtblk_remove),
+ .config_changed = virtblk_config_changed,
};
static int __init init(void)
{
+ int error;
+
+ virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ if (!virtblk_wq)
+ return -ENOMEM;
+
major = register_blkdev(0, "virtblk");
- if (major < 0)
- return major;
- return register_virtio_driver(&virtio_blk);
+ if (major < 0) {
+ error = major;
+ goto out_destroy_workqueue;
+ }
+
+ error = register_virtio_driver(&virtio_blk);
+ if (error)
+ goto out_unregister_blkdev;
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(major, "virtblk");
+out_destroy_workqueue:
+ destroy_workqueue(virtblk_wq);
+ return error;
}
static void __exit fini(void)
{
unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);
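
A pattern worth noting in the virtio_blk changes: the config-changed notification arrives in a context where the driver does not want to do the resize work directly, so it defers it to its own workqueue and flushes the work item in remove so nothing can run against a dead device. The same deferral skeleton with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {				/* hypothetical driver state */
	struct work_struct config_work;
	/* ... */
};

static struct workqueue_struct *example_wq;

static void example_config_work(struct work_struct *work)
{
	struct example_dev *d = container_of(work, struct example_dev,
					     config_work);

	/* Process context: safe to sleep, re-read config, resize, log. */
	(void)d;
}

/* Called from the notification path: just queue and return quickly. */
static void example_config_changed(struct example_dev *d)
{
	queue_work(example_wq, &d->config_work);
}

static int example_setup(struct example_dev *d)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&d->config_work, example_config_work);
	return 0;
}

static void example_teardown(struct example_dev *d)
{
	flush_work(&d->config_work);	/* no work may run past this point */
	destroy_workqueue(example_wq);
}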
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index ae15a4d..7878da8 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- gendisk->events = DISK_EVENT_MEDIA_CHANGE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 838568a..fb68b12 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1677,17 +1677,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
portdev->config.max_nr_ports = 1;
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
- vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
-
vdev->config->get(vdev, offsetof(struct virtio_console_config,
max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
}
- /* Let the Host know we support multiple ports.*/
- vdev->config->finalize_features(vdev);
-
err = init_vqs(portdev);
if (err < 0) {
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690..c47f3d0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
unsigned int power_usage = -1;
int i;
int multiplier;
+ struct timespec t;
if (data->needs_update) {
menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
return 0;
/* determine the expected residency time, round up */
+ t = ktime_to_timespec(tick_nohz_get_sleep_length());
data->expected_us =
- DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
data->bucket = which_bucket(data->expected_us);
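
The menu governor hunk is an overflow fix: ktime_to_ns() returns a 64-bit value, and the old (u32) cast truncated any predicted sleep longer than 2^32 ns (about 4.29 s) before the division, skewing the expected residency. Going through a timespec keeps every intermediate wide enough. A tiny standalone C illustration of the truncation; this is not kernel code, just the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t sleep_ns = 6000000000LL;	/* a 6 s predicted sleep */

	/* Old path: cast to u32 first, then round up to microseconds. */
	uint32_t truncated_us = ((uint32_t)sleep_ns + 999) / 1000;

	/* New path: split into seconds and nanoseconds, stay in 64 bits. */
	int64_t sec  = sleep_ns / 1000000000LL;
	int64_t nsec = sleep_ns % 1000000000LL;
	int64_t expected_us = sec * 1000000LL + nsec / 1000LL;

	printf("old: %u us, new: %lld us\n",
	       truncated_us, (long long)expected_us);
	return 0;
}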
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600..25cf327 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
- Output Hub) which is for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
+ Output Hub) devices ML7213 and ML7223.
+ The ML7213 IOH is for IVI (In-Vehicle Infotainment) use and the
+ ML7223 IOH is for MP (Media Phone) use.
+ ML7213/ML7223 are companion chips for the Intel Atom E6xx series
+ and are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 0000000..a4af858
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending start the transaction in the drivers below
+ - mpc512x_dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+ - dw_dmac
+ - intel_mid_dma
+ - ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53b..36144f8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA (0)
-#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
- |ATC_DIF(1))
+#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
+ |ATC_DIF(AT_DMA_MEM_IF))
/*
* Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
}
/**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+ struct at_desc *desc)
+{
+ if (!(*first)) {
+ *first = desc;
+ } else {
+ /* inform the HW lli about chaining */
+ (*prev)->lli.dscr = desc->txd.phys;
+ /* insert the link descriptor to the LD ring */
+ list_add_tail(&desc->desc_node,
+ &(*first)->tx_list);
+ }
+ *prev = desc;
+}
+
+/**
* atc_assign_cookie - compute and assign new cookie
* @atchan: channel we work on
* @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
atchan->completed_cookie = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
- callback(param);
+ /* for cyclic transfers,
+ * no need to replay the callback function while stopping */
+ if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+ }
dma_run_dependencies(txd);
}
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
atc_chain_complete(atchan, bad_desc);
}
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_desc *first = atc_first_active(atchan);
+ struct dma_async_tx_descriptor *txd = &first->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ dev_vdbg(chan2dev(&atchan->chan_common),
+ "new cyclic period llp 0x%08x\n",
+ channel_readl(atchan, DSCR));
+
+ if (callback)
+ callback(param);
+}
/*-- IRQ & Tasklet ---------------------------------------------------*/
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- /* Channel cannot be enabled here */
- if (atc_chan_is_enabled(atchan)) {
- dev_err(chan2dev(&atchan->chan_common),
- "BUG: channel enabled in tasklet\n");
- return;
- }
-
spin_lock(&atchan->lock);
- if (test_and_clear_bit(0, &atchan->error_status))
+ if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
+ else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
for (i = 0; i < atdma->dma_common.chancnt; i++) {
atchan = &atdma->chan[i];
- if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+ if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
if (pending & AT_DMA_ERR(i)) {
/* Disable channel on AHB error */
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHDR,
+ AT_DMA_RES(i) | atchan->mask);
/* Give information to tasklet */
- set_bit(0, &atchan->error_status);
+ set_bit(ATC_IS_ERROR, &atchan->status);
}
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
ctrla = ATC_DEFAULT_CTRLA;
- ctrlb = ATC_DEFAULT_CTRLB
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->txd.cookie = 0;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
}
/* First descriptor of the chain embeds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+ sg_len,
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
flags);
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width;
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+ ctrlb = ATC_IEN;
switch (direction) {
case DMA_TO_DEVICE:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER;
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
reg = atslave->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> mem_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM;
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
reg = atslave->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> reg_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -759,41 +777,211 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ if (period_len > (ATC_BTSIZE_MAX << reg_width))
+ goto err_out;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+ unsigned int period_index, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ u32 ctrla;
+ unsigned int reg_width = atslave->reg_width;
+
+ /* prepare common CTRLA value */
+ ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ | ATC_DST_WIDTH(reg_width)
+ | ATC_SRC_WIDTH(reg_width)
+ | period_len >> reg_width;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.saddr = buf_addr + (period_len * period_index);
+ desc->lli.daddr = atslave->tx_reg;
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF)
+ | ATC_DIF(AT_DMA_PER_IF);
+ break;
+
+ case DMA_FROM_DEVICE:
+ desc->lli.saddr = atslave->rx_reg;
+ desc->lli.daddr = buf_addr + (period_len * period_index);
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF)
+ | ATC_DIF(AT_DMA_MEM_IF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave = chan->private;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned long was_cyclic;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ buf_addr,
+ periods, buf_len, period_len);
+
+ if (unlikely(!atslave || !buf_len || !period_len)) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+ return NULL;
+ }
+
+ was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+ return NULL;
+ }
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer */
+ if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+ period_len, direction))
+ goto err_out;
+
+ /* build cyclic linked list */
+ for (i = 0; i < periods; i++) {
+ struct at_desc *desc;
+
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+ period_len, direction))
+ goto err_desc_get;
+
+ atc_desc_chain(&first, &prev, desc);
+ }
+
+ /* link the last descriptor back to the first to close the cycle */
+ prev->lli.dscr = first->txd.phys;
+
+ /* First descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->len = buf_len;
+
+ return &first->txd;
+
+err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
+ atc_desc_put(atchan, first);
+err_out:
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ return NULL;
+}
+
+
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc, *_desc;
+ int chan_id = atchan->chan_common.chan_id;
+
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&atchan->lock);
+ if (cmd == DMA_PAUSE) {
+ spin_lock_bh(&atchan->lock);
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+ set_bit(ATC_IS_PAUSED, &atchan->status);
- /* confirm that this channel is disabled */
- while (dma_readl(atdma, CHSR) & atchan->mask)
- cpu_relax();
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_RESUME) {
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ return 0;
- /* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
+ spin_lock_bh(&atchan->lock);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ struct at_desc *desc, *_desc;
+ /*
+ * This is only called when something went wrong elsewhere, so
+ * we don't really care about the data. Just disable the
+ * channel. We still have to poll the channel enable bit due
+ * to AHB/HSB limitations.
+ */
+ spin_lock_bh(&atchan->lock);
+
+ /* disabling channel: must also remove suspend state */
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+ /* confirm that this channel is disabled */
+ while (dma_readl(atdma, CHSR) & atchan->mask)
+ cpu_relax();
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&atchan->queue, &list);
+ list_splice_init(&atchan->active_list, &list);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ atc_chain_complete(atchan, desc);
+
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
+ /* if channel dedicated to cyclic operations, free it */
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+ spin_unlock_bh(&atchan->lock);
+ } else {
+ return -ENXIO;
+ }
return 0;
}
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
- cookie, last_complete ? last_complete : 0,
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ atc_first_active(atchan)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ ret = DMA_PAUSED;
+
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+ ret, cookie, last_complete ? last_complete : 0,
last_used ? last_used : 0);
return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ /* Not needed for cyclic transfers */
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return;
+
spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
}
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
+ atchan->status = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+ dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
- }
dma_writel(atdma, EN, AT_DMA_ENABLE);
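
With device_prep_dma_cyclic and the DMA_PAUSE/DMA_RESUME commands now wired up, a client (typically audio) can keep a single buffer circulating and receive a callback per period. A hedged sketch of how a client might drive this; the channel, buffer and sizes are hypothetical, and the raw dmaengine entry points are used directly since the convenience wrappers differ between kernel versions:

#include <linux/dmaengine.h>

static void example_period_done(void *arg)
{
	/* Runs once per completed period of the cyclic transfer. */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -EBUSY;

	txd->callback = example_period_done;
	txd->callback_param = NULL;
	txd->tx_submit(txd);

	/* at_hdmac ignores issue_pending for cyclic channels, but calling it
	 * keeps the client portable across dmaengine drivers. */
	chan->device->device_issue_pending(chan);
	return 0;
}

static void example_pause_resume(struct dma_chan *chan)
{
	chan->device->device_control(chan, DMA_PAUSE, 0);
	/* ... e.g. while the stream is suspended ... */
	chan->device->device_control(chan, DMA_RESUME, 0);
}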
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e..087dbf1 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@
/* Bitfields in CTRLB */
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
+ /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */
+
#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
/*-- Channels --------------------------------------------------------*/
/**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+ ATC_IS_ERROR = 0,
+ ATC_IS_PAUSED = 1,
+ ATC_IS_CYCLIC = 24,
+};
+
+/**
* struct at_dma_chan - internal representation of an Atmel HDMAC channel
* @chan_common: common dmaengine channel object members
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
* @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
- unsigned long error_status;
+ unsigned long status;
struct tasklet_struct tasklet;
spinlock_t lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
u32 ebci;
- /* enable interrupts on buffer chain completion & error */
- ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+ /* enable interrupts on buffer transfer completion & error */
+ ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
| AT_DMA_ERR(atchan->chan_common.chan_id);
if (on)
dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
*/
static void set_desc_eol(struct at_desc *desc)
{
- desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+ u32 ctrlb = desc->lli.ctrlb;
+
+ ctrlb &= ~ATC_IEN;
+ ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+ desc->lli.ctrlb = ctrlb;
desc->lli.dscr = 0;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e540..af8c0b5 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa..4d180ca 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *ret = NULL;
unsigned int i = 0;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
*/
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
+ unsigned long flags;
+
if (desc) {
struct dw_desc *child;
dwc_sync_desc_for_cpu(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/
static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
+ unsigned long flags;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&dwc->lock, flags);
dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
dwc_sync_desc_for_cpu(dwc, desc);
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ if (callback_required && callback)
callback(param);
}
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *child;
u32 status_xfer;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/*
* Clear block interrupt flag before scanning so that we don't
* miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
dwc_complete_all(dw, dwc);
return;
}
- if (list_empty(&dwc->active_list))
+ if (list_empty(&dwc->active_list)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- if (desc->lli.llp == llp)
+ /* check the first descriptor's address */
+ if (desc->txd.phys == llp) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* check the first descriptor's llp */
+ if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
list_for_each_entry(child, &desc->tx_list, desc_node)
- if (child->lli.llp == llp)
+ if (child->lli.llp == llp) {
/* Currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc_descriptor_complete(dwc, desc, true);
+ spin_lock_irqsave(&dwc->lock, flags);
}
dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *bad_desc;
struct dw_desc *child;
+ unsigned long flags;
dwc_scan_descriptors(dw, dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
/*
* The descriptor currently at the head of the active list is
* borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_block, u32 status_err, u32 status_xfer)
{
+ unsigned long flags;
+
if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
- if (callback) {
- spin_unlock(&dwc->lock);
+
+ if (callback)
callback(callback_param);
- spin_lock(&dwc->lock);
- }
}
/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
"interrupt, stopping DMA transfer\n",
status_xfer ? "xfer" : "error");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
for (i = 0; i < dwc->cdesc->periods; i++)
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
- spin_lock(&dwc->lock);
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
dwc_handle_cyclic(dw, dwc, status_block, status_err,
status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
- spin_unlock(&dwc->lock);
}
/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
struct dw_desc *desc = txd_to_dw_desc(tx);
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
cookie = dwc_assign_cookie(dwc, desc);
/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return cookie;
}
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}
- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
if (!first) {
first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
if (!first) {
first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
+ u32 cfglo;
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&dwc->lock);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
- channel_clear_bit(dw, CH_EN, dwc->mask);
+ dwc->paused = true;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_RESUME) {
+ if (!dwc->paused)
+ return 0;
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ spin_lock_irqsave(&dwc->lock, flags);
- /* active_list entries will end up before queued entries */
- list_splice_init(&dwc->queue, &list);
- list_splice_init(&dwc->active_list, &list);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dwc->paused = false;
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dwc->paused = false;
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc, false);
+ } else
+ return -ENXIO;
return 0;
}
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (dwc->paused)
+ return DMA_PAUSED;
return ret;
}
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- spin_lock_bh(&dwc->lock);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
int i;
u32 cfghi;
u32 cfglo;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
break;
}
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = ++dwc->descs_allocated;
}
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
LIST_HEAD(list);
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
return -ENODEV;
}
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
/* assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_readl(dwc, LLP),
channel_readl(dwc, CTL_HI),
channel_readl(dwc, CTL_LO));
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_set_bit(dw, CH_EN, dwc->mask);
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
cpu_relax();
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int reg_width;
unsigned int periods;
unsigned int i;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc = dwc->cdesc;
int i;
+ unsigned long flags;
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
if (!cdesc)
return;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821..c341951 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -2,6 +2,7 @@
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
void __iomem *ch_regs;
u8 mask;
u8 priority;
+ bool paused;
spinlock_t lock;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38..f653517 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
@@ -1322,6 +1321,9 @@ err_enable_device:
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
middma_shutdown(pdev);
pci_dev_put(pdev);
kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
static int dma_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_suspend(pci_dev, PMSG_SUSPEND);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = SUSPENDED;
+ return 0;
}
static int dma_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_resume(pci_dev);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = RUNNING;
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ return 0;
}
static int dma_runtime_idle(struct device *dev)
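
The intel_mid_dma probe/remove changes rebalance runtime-PM references rather than re-enabling runtime PM by hand: probe drops a reference with pm_runtime_put_noidle() and allows autosuspend, and remove takes the reference back with pm_runtime_get_noresume() and forbids suspend before tearing the hardware down, so the counts end up where the PCI core expects them. The shape of that pairing for a hypothetical PCI driver (setup details omitted):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int example_probe(struct pci_dev *pdev)
{
	/* ... enable the device, map BARs, register with subsystems ... */

	pm_runtime_put_noidle(&pdev->dev);	/* drop the held reference */
	pm_runtime_allow(&pdev->dev);		/* permit autosuspend */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pm_runtime_get_noresume(&pdev->dev);	/* rebalance before teardown */
	pm_runtime_forbid(&pdev->dev);

	/* ... quiesce hardware, free resources ... */
}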
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4..5d65f83 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat_ring_ent **ring;
u64 status;
int order;
+ int i = 0;
/* have we already been set up? */
if (ioat->ring)
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
- udelay(5);
- status = ioat_chansts(chan);
+ do {
+ udelay(1);
+ status = ioat_chansts(chan);
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
if (is_ioat_active(status) || is_ioat_idle(status)) {
set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
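
The ioat2 hunk replaces a single fixed 5 µs wait with a bounded poll: some channels take longer to report active/idle after the null descriptor is started, so the status is re-read up to roughly 20 times at 1 µs intervals before the driver gives up. The generic shape of such a bounded busy-wait, with a hypothetical register accessor:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll a ready bit for at most 'tries' microseconds; illustrative helper. */
static bool example_wait_ready(void __iomem *status_reg, u32 ready_bit,
			       int tries)
{
	u32 status;
	int i = 0;

	do {
		udelay(1);
		status = readl(status_reg);
	} while (i++ < tries && !(status & ready_bit));

	return status & ready_bit;
}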
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f5..e03f811 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f6..954e334 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan->device->common.dev,
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1..ff5b38f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs {
u32 dma_ctl0;
u32 dma_ctl1;
u32 dma_ctl2;
- u32 reserved1;
+ u32 dma_ctl3;
u32 dma_sts0;
u32 dma_sts1;
- u32 reserved2;
+ u32 dma_sts2;
u32 reserved3;
struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
@@ -130,6 +130,7 @@ struct pch_dma {
#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
+#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
@@ -138,7 +139,8 @@ struct pch_dma {
#define dma_writel(pd, name, val) \
writel((val), (pd)->membase + PCH_DMA_##name)
-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct pch_dma_desc, txd);
}
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
return chan->dev->device.parent;
}
-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->active_list,
struct pch_dma_desc, desc_node);
}
-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->queue,
struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
- if (pd_chan->dir == DMA_TO_DEVICE)
- val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS);
- else
- val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS));
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8->0, ch9->1, ... ch11->3 */
+ val = dma_readl(pd, CTL3);
- dma_writel(pd, CTL0, val);
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL3, val);
+ }
dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
- val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8->0, ch9->1, ... ch11->3 */
+
+ val = dma_readl(pd, CTL3);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
- dma_writel(pd, CTL0, val);
+ dma_writel(pd, CTL3, val);
+
+ }
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
{
- struct pch_dma *pd = to_pd(pd_chan->chan.device);
- u32 val;
-
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
"BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
channel_writel(pd_chan, NEXT, desc->txd.phys);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
}
-
- val = dma_readl(pd, CTL2);
- val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
- dma_writel(pd, CTL2, val);
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
struct pch_dma_desc *desc, *_d;
struct pch_dma_desc *ret = NULL;
- int i;
+ int i = 0;
spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&pd_chan->lock);
pdc_enable_irq(chan, 1);
- pdc_set_dir(chan);
return pd_chan->descs_allocated;
}
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
else
return NULL;
+ pd_chan->dir = direction;
+ pdc_set_dir(chan);
+
for_each_sg(sgl, sg, sg_len, i) {
desc = pdc_desc_get(pd_chan);
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+ pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
dma_writel(pd, CTL0, pd->regs.dma_ctl0);
dma_writel(pd, CTL1, pd->regs.dma_ctl1);
dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+ dma_writel(pd, CTL3, pd->regs.dma_ctl3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->membase = &regs->desc[i];
- pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
spin_lock_init(&pd_chan->lock);
INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
{ 0, },
};
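The pch_dma changes above split the per-channel control fields across two registers: channels 0-7 keep using CTL0, while channels 8-11 (e.g. the new 12-channel ML7213 DMA4 device) use the new CTL3 register with the same per-channel bit layout. A small sketch of the register/bit selection; EX_BITS_PER_CH and EX_DIR_SHIFT stand in for DMA_CTL0_BITS_PER_CH and DMA_CTL0_DIR_SHIFT_BITS, and their values here are assumptions.

#define EX_BITS_PER_CH  4       /* assumed width of one channel's control field */
#define EX_DIR_SHIFT    2       /* assumed position of the direction bit */

/* Pick the control register offset and the direction-bit mask for a channel. */
static void example_dir_bit(int chan_id, unsigned int *reg_off, unsigned int *mask)
{
        int ch = (chan_id < 8) ? chan_id : chan_id - 8; /* ch8..ch11 map to 0..3 in CTL3 */

        *reg_off = (chan_id < 8) ? 0x00 /* PCH_DMA_CTL0 */ : 0x0C /* PCH_DMA_CTL3 */;
        *mask = 0x1 << (EX_BITS_PER_CH * ch + EX_DIR_SHIFT);
}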
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e..fc457a7 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
dma_dest, dma_src, src_cnt));
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(ppc440spe_chan->device->common.dev,
"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
dst, src, src_cnt));
BUG_ON(!len);
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
BUG_ON(!src_cnt);
if (src_cnt == 1 && dst[1] == src[0]) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15d..8f222d4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
if (chan->runtime_addr)
return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d213646..4a7f631 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -86,6 +86,34 @@ config GPIO_IT8761E
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EXYNOS4
+ bool "Samsung Exynos4 GPIO library support"
+ default y if CPU_EXYNOS4210
+ depends on ARM
+ help
+ Say yes here to support Samsung Exynos4 series SoCs GPIO library
+
+config GPIO_PLAT_SAMSUNG
+ bool "Samsung SoCs GPIO library support"
+ default y if SAMSUNG_GPIOLIB_4BIT
+ depends on ARM
+ help
+ Say yes here to support Samsung SoCs GPIO library
+
+config GPIO_S5PC100
+ bool "Samsung S5PC100 GPIO library support"
+ default y if CPU_S5PC100
+ depends on ARM
+ help
+ Say yes here to support Samsung S5PC100 SoCs GPIO library
+
+config GPIO_S5PV210
+ bool "Samsung S5PV210/S5PC110 GPIO library support"
+ default y if CPU_S5PV210
+ depends on ARM
+ help
+ Say yes here to support Samsung S5PV210/S5PC110 SoCs GPIO library
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -303,7 +331,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO
+ depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -334,13 +362,19 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "PCH GPIO of Intel Topcliff"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
depends on PCI && X86
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
+ This driver can also be used for the OKI SEMICONDUCTOR ML7223 IOH
+ (Input/Output Hub). The ML7223 IOH is intended for MP (Media Phone)
+ use, is a companion chip for the Intel Atom E6xx series, and is
+ fully compatible with the Intel EG20T PCH.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on PCI
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 6a3387a..b605f8e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
+obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
+obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -16,6 +20,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_74X164) += 74x164.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
@@ -34,6 +39,8 @@ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_MACH_U300) += gpio-u300.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
diff --git a/arch/arm/mach-exynos4/gpiolib.c b/drivers/gpio/gpio-exynos4.c
index d54ca6a..d54ca6a 100644
--- a/arch/arm/mach-exynos4/gpiolib.c
+++ b/drivers/gpio/gpio-exynos4.c
diff --git a/arch/arm/plat-nomadik/gpio.c b/drivers/gpio/gpio-nomadik.c
index 307b813..4961ef9 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -57,6 +57,7 @@ struct nmk_gpio_chip {
u32 fwimsc;
u32 slpm;
u32 enabled;
+ u32 pull_up;
};
static struct nmk_gpio_chip *
@@ -103,16 +104,22 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
u32 pdis;
pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
- if (pull == NMK_GPIO_PULL_NONE)
+ if (pull == NMK_GPIO_PULL_NONE) {
pdis |= bit;
- else
+ nmk_chip->pull_up &= ~bit;
+ } else {
pdis &= ~bit;
+ }
+
writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
- if (pull == NMK_GPIO_PULL_UP)
+ if (pull == NMK_GPIO_PULL_UP) {
+ nmk_chip->pull_up |= bit;
writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
- else if (pull == NMK_GPIO_PULL_DOWN)
+ } else if (pull == NMK_GPIO_PULL_DOWN) {
+ nmk_chip->pull_up &= ~bit;
writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ }
}
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
@@ -811,20 +818,43 @@ static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
bool pull;
u32 bit = 1 << i;
- if (!label)
- continue;
-
is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
mode = nmk_gpio_get_mode(gpio);
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
- gpio, label,
+ gpio, label ?: "(none)",
is_out ? "out" : "in ",
chip->get
? (chip->get(chip, i) ? "hi" : "lo")
: "? ",
(mode < 0) ? "unknown" : modes[mode],
pull ? "pull" : "none");
+
+ if (label && !is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+
+ if (nmk_chip->edge_rising & bitmask)
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & bitmask)
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ irqd_is_wakeup_set(&desc->irq_data)
+ ? " wakeup" : "");
+ }
+ }
+
seq_printf(s, "\n");
}
}
@@ -898,6 +928,25 @@ void nmk_gpio_wakeups_resume(void)
}
}
+/*
+ * Read the pull up/pull down status.
+ * A bit set in 'pull_up' means that pull up
+ * is selected if pull is enabled in PDIS register.
+ * Note: only pull up/down set via this driver can
+ * be detected due to HW limitations.
+ */
+void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
+{
+ if (gpio_bank < NUM_BANKS) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
+
+ if (!chip)
+ return;
+
+ *pull_up = chip->pull_up;
+ }
+}
+
static int __devinit nmk_gpio_probe(struct platform_device *dev)
{
struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
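The gpio-nomadik hunks above cache the pull-up selection per bank and expose it through the new nmk_gpio_read_pull() helper; as the added comment notes, only pulls configured through this driver can be reported. A hedged caller sketch (bank and pin numbers are illustrative; the declaration normally comes from the platform's Nomadik GPIO header):

#include <linux/bitops.h>
#include <linux/types.h>

void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up);   /* from the platform header */

static bool example_pin_had_pull_up(int bank, int pin)
{
        u32 pull_up = 0;

        nmk_gpio_read_pull(bank, &pull_up);     /* fills the cached per-bank bitmap */
        return pull_up & BIT(pin);              /* set => pull-up selected when pull enabled */
}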
diff --git a/arch/arm/plat-omap/gpio.c b/drivers/gpio/gpio-omap.c
index efb8693..6c51191 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/plat-omap/gpio.c
- *
* Support functions for OMAP GPIO
*
* Copyright (C) 2003-2005 Nokia Corporation
@@ -30,109 +28,6 @@
#include <mach/gpio.h>
#include <asm/mach/irq.h>
-/*
- * OMAP1510 GPIO registers
- */
-#define OMAP1510_GPIO_DATA_INPUT 0x00
-#define OMAP1510_GPIO_DATA_OUTPUT 0x04
-#define OMAP1510_GPIO_DIR_CONTROL 0x08
-#define OMAP1510_GPIO_INT_CONTROL 0x0c
-#define OMAP1510_GPIO_INT_MASK 0x10
-#define OMAP1510_GPIO_INT_STATUS 0x14
-#define OMAP1510_GPIO_PIN_CONTROL 0x18
-
-#define OMAP1510_IH_GPIO_BASE 64
-
-/*
- * OMAP1610 specific GPIO registers
- */
-#define OMAP1610_GPIO_REVISION 0x0000
-#define OMAP1610_GPIO_SYSCONFIG 0x0010
-#define OMAP1610_GPIO_SYSSTATUS 0x0014
-#define OMAP1610_GPIO_IRQSTATUS1 0x0018
-#define OMAP1610_GPIO_IRQENABLE1 0x001c
-#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
-#define OMAP1610_GPIO_DATAIN 0x002c
-#define OMAP1610_GPIO_DATAOUT 0x0030
-#define OMAP1610_GPIO_DIRECTION 0x0034
-#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
-#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
-#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
-#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
-#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
-#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
-#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
-#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
-
-/*
- * OMAP7XX specific GPIO registers
- */
-#define OMAP7XX_GPIO_DATA_INPUT 0x00
-#define OMAP7XX_GPIO_DATA_OUTPUT 0x04
-#define OMAP7XX_GPIO_DIR_CONTROL 0x08
-#define OMAP7XX_GPIO_INT_CONTROL 0x0c
-#define OMAP7XX_GPIO_INT_MASK 0x10
-#define OMAP7XX_GPIO_INT_STATUS 0x14
-
-/*
- * omap2+ specific GPIO registers
- */
-#define OMAP24XX_GPIO_REVISION 0x0000
-#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
-#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
-#define OMAP24XX_GPIO_IRQENABLE2 0x002c
-#define OMAP24XX_GPIO_IRQENABLE1 0x001c
-#define OMAP24XX_GPIO_WAKE_EN 0x0020
-#define OMAP24XX_GPIO_CTRL 0x0030
-#define OMAP24XX_GPIO_OE 0x0034
-#define OMAP24XX_GPIO_DATAIN 0x0038
-#define OMAP24XX_GPIO_DATAOUT 0x003c
-#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
-#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
-#define OMAP24XX_GPIO_RISINGDETECT 0x0048
-#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
-#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050
-#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054
-#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
-#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
-#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
-#define OMAP24XX_GPIO_SETWKUENA 0x0084
-#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
-#define OMAP24XX_GPIO_SETDATAOUT 0x0094
-
-#define OMAP4_GPIO_REVISION 0x0000
-#define OMAP4_GPIO_EOI 0x0020
-#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
-#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
-#define OMAP4_GPIO_IRQSTATUS0 0x002c
-#define OMAP4_GPIO_IRQSTATUS1 0x0030
-#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
-#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
-#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
-#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
-#define OMAP4_GPIO_IRQWAKEN0 0x0044
-#define OMAP4_GPIO_IRQWAKEN1 0x0048
-#define OMAP4_GPIO_IRQENABLE1 0x011c
-#define OMAP4_GPIO_WAKE_EN 0x0120
-#define OMAP4_GPIO_IRQSTATUS2 0x0128
-#define OMAP4_GPIO_IRQENABLE2 0x012c
-#define OMAP4_GPIO_CTRL 0x0130
-#define OMAP4_GPIO_OE 0x0134
-#define OMAP4_GPIO_DATAIN 0x0138
-#define OMAP4_GPIO_DATAOUT 0x013c
-#define OMAP4_GPIO_LEVELDETECT0 0x0140
-#define OMAP4_GPIO_LEVELDETECT1 0x0144
-#define OMAP4_GPIO_RISINGDETECT 0x0148
-#define OMAP4_GPIO_FALLINGDETECT 0x014c
-#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
-#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
-#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
-#define OMAP4_GPIO_SETIRQENABLE1 0x0164
-#define OMAP4_GPIO_CLEARWKUENA 0x0180
-#define OMAP4_GPIO_SETWKUENA 0x0184
-#define OMAP4_GPIO_CLEARDATAOUT 0x0190
-#define OMAP4_GPIO_SETDATAOUT 0x0194
-
struct gpio_bank {
unsigned long pbase;
void __iomem *base;
diff --git a/arch/arm/plat-samsung/gpiolib.c b/drivers/gpio/gpio-plat-samsung.c
index ea37c04..ea37c04 100644
--- a/arch/arm/plat-samsung/gpiolib.c
+++ b/drivers/gpio/gpio-plat-samsung.c
diff --git a/arch/arm/mach-s5pc100/gpiolib.c b/drivers/gpio/gpio-s5pc100.c
index 2842394..2842394 100644
--- a/arch/arm/mach-s5pc100/gpiolib.c
+++ b/drivers/gpio/gpio-s5pc100.c
diff --git a/arch/arm/mach-s5pv210/gpiolib.c b/drivers/gpio/gpio-s5pv210.c
index 1ba20a7..1ba20a7 100644
--- a/arch/arm/mach-s5pv210/gpiolib.c
+++ b/drivers/gpio/gpio-s5pv210.c
diff --git a/arch/arm/mach-u300/gpio.c b/drivers/gpio/gpio-u300.c
index d927901..d927901 100644
--- a/arch/arm/mach-u300/gpio.c
+++ b/drivers/gpio/gpio-u300.c
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 137a8ca..a971e3d 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1296,7 +1296,7 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-int gpio_request_array(struct gpio *array, size_t num)
+int gpio_request_array(const struct gpio *array, size_t num)
{
int i, err;
@@ -1319,7 +1319,7 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-void gpio_free_array(struct gpio *array, size_t num)
+void gpio_free_array(const struct gpio *array, size_t num)
{
while (num--)
gpio_free((array++)->gpio);
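Constifying gpio_request_array()/gpio_free_array() above lets callers keep their GPIO tables in read-only data. A minimal usage sketch (GPIO numbers and labels are illustrative):

#include <linux/gpio.h>
#include <linux/kernel.h>

static const struct gpio example_gpios[] = {
        { 42, GPIOF_OUT_INIT_LOW, "example-reset" },
        { 43, GPIOF_IN,           "example-ready" },
};

static int example_request(void)
{
        return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
}

static void example_release(void)
{
        gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
}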
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 1b06f67..bd6571e 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -63,6 +64,7 @@ struct lnw_gpio {
void *reg_base;
spinlock_t lock;
unsigned irq_base;
+ struct pci_dev *pdev;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -104,11 +106,18 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 value;
unsigned long flags;
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -120,11 +129,19 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned long flags;
lnw_gpio_set(chip, offset, value);
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -145,6 +162,10 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
value = readl(grer) | BIT(gpio % 32);
@@ -159,6 +180,9 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
writel(value, gfer);
spin_unlock_irqrestore(&lnw->lock, flags);
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -211,6 +235,39 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
chip->irq_eoi(data);
}
+#ifdef CONFIG_PM
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_idle(struct device *dev)
+{
+ int err = pm_schedule_suspend(dev, 500);
+
+ if (!err)
+ return 0;
+
+ return -EBUSY;
+}
+
+#else
+#define lnw_gpio_runtime_suspend NULL
+#define lnw_gpio_runtime_resume NULL
+#define lnw_gpio_runtime_idle NULL
+#endif
+
+static const struct dev_pm_ops lnw_gpio_pm_ops = {
+ .runtime_suspend = lnw_gpio_runtime_suspend,
+ .runtime_resume = lnw_gpio_runtime_resume,
+ .runtime_idle = lnw_gpio_runtime_idle,
+};
+
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -270,6 +327,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.base = gpio_base;
lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
+ lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
@@ -285,6 +343,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
}
spin_lock_init(&lnw->lock);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
goto done;
err5:
kfree(lnw);
@@ -302,6 +364,9 @@ static struct pci_driver lnw_gpio_driver = {
.name = "langwell_gpio",
.id_table = lnw_gpio_ids,
.probe = lnw_gpio_probe,
+ .driver = {
+ .pm = &lnw_gpio_pm_ops,
+ },
};
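The langwell_gpio changes above bracket register writes with pm_runtime_get()/pm_runtime_put() so the block is powered around each access, and hook a dev_pm_ops whose runtime_idle schedules a suspend 500 ms later. A hedged sketch of the get/put bracket, assuming the driver keeps a pci_dev pointer in its private data:

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void example_update_reg(struct pci_dev *pdev, void __iomem *reg, u32 bit)
{
        pm_runtime_get(&pdev->dev);             /* make sure the block is powered */
        writel(readl(reg) | bit, reg);
        pm_runtime_put(&pdev->dev);             /* allow it to go idle again */
}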
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 78a8439..0451d7a 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -24,33 +24,46 @@
#include <linux/of_gpio.h>
#endif
-#define PCA953X_INPUT 0
-#define PCA953X_OUTPUT 1
-#define PCA953X_INVERT 2
-#define PCA953X_DIRECTION 3
-
-#define PCA953X_GPIOS 0x00FF
-#define PCA953X_INT 0x0100
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+#define PCA957X_IN 0
+#define PCA957X_INVRT 1
+#define PCA957X_BKEN 2
+#define PCA957X_PUPD 3
+#define PCA957X_CFG 4
+#define PCA957X_OUT 5
+#define PCA957X_MSK 6
+#define PCA957X_INTS 7
+
+#define PCA_GPIO_MASK 0x00FF
+#define PCA_INT 0x0100
+#define PCA953X_TYPE 0x1000
+#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
- { "pca9534", 8 | PCA953X_INT, },
- { "pca9535", 16 | PCA953X_INT, },
- { "pca9536", 4, },
- { "pca9537", 4 | PCA953X_INT, },
- { "pca9538", 8 | PCA953X_INT, },
- { "pca9539", 16 | PCA953X_INT, },
- { "pca9554", 8 | PCA953X_INT, },
- { "pca9555", 16 | PCA953X_INT, },
- { "pca9556", 8, },
- { "pca9557", 8, },
-
- { "max7310", 8, },
- { "max7312", 16 | PCA953X_INT, },
- { "max7313", 16 | PCA953X_INT, },
- { "max7315", 8 | PCA953X_INT, },
- { "pca6107", 8 | PCA953X_INT, },
- { "tca6408", 8 | PCA953X_INT, },
- { "tca6416", 16 | PCA953X_INT, },
+ { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9536", 4 | PCA953X_TYPE, },
+ { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
+ { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9556", 8 | PCA953X_TYPE, },
+ { "pca9557", 8 | PCA953X_TYPE, },
+ { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
+ { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+
+ { "max7310", 8 | PCA953X_TYPE, },
+ { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
/* NYET: { "tca6424", 24, }, */
{ }
};
@@ -75,16 +88,32 @@ struct pca953x_chip {
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
+ int chip_type;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
{
- int ret;
+ int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else
- ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ else {
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ ret = i2c_smbus_write_word_data(chip->client,
+ reg << 1, val);
+ break;
+ case PCA957X_TYPE:
+ ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+ val & 0xff);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 1) + 1,
+ (val & 0xff00) >> 8);
+ break;
+ }
+ }
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
@@ -116,13 +145,22 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -138,7 +176,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -149,7 +187,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -157,7 +203,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
/* then direction */
reg_val = chip->reg_direction & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -172,12 +226,20 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -194,7 +256,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -204,7 +266,15 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -322,9 +392,17 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
- int ret;
-
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat);
+ int ret, offset = 0;
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &cur_stat);
if (ret)
return 0;
@@ -372,14 +450,21 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- int ret;
+ int ret, offset = 0;
if (pdata->irq_base != -1
- && (id->driver_data & PCA953X_INT)) {
+ && (id->driver_data & PCA_INT)) {
int lvl;
- ret = pca953x_read_reg(chip, PCA953X_INPUT,
- &chip->irq_stat);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
if (ret)
goto out_failed;
@@ -439,7 +524,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -499,12 +584,65 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
#endif
+static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
+ &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+ uint16_t val = 0;
+
+ /* Put every port into a proper state; that can save power */
+ pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
+ pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
+ pca953x_write_reg(chip, PCA957X_OUT, 0x0);
+
+ ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ pca953x_write_reg(chip, PCA957X_INVRT, invert);
+
+ /* Enable registers 6 and 7 to control pull up and pull down */
+ pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+
+ return 0;
+out:
+ return ret;
+}
+
static int __devinit pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int ret;
+ int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
@@ -531,25 +669,20 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->gpio_start = pdata->gpio_base;
chip->names = pdata->names;
+ chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS);
+ pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
- if (ret)
- goto out_failed;
-
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
- if (ret)
- goto out_failed;
-
- /* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
- if (ret)
+ if (chip->chip_type == PCA953X_TYPE)
+ device_pca953x_init(chip, pdata->invert);
+ else if (chip->chip_type == PCA957X_TYPE)
+ device_pca957x_init(chip, pdata->invert);
+ else
goto out_failed;
ret = pca953x_irq_setup(chip, id);
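In the pca953x table above, each i2c_device_id's driver_data now packs three things: the GPIO count in the low byte, an interrupt-capable flag in bit 8, and the chip family (classic PCA953X registers vs. the newly supported PCA957X map) in the high bits. A small decoding sketch using the masks the patch defines (the helper itself is illustrative):

#include <linux/types.h>

/* Masks as defined by the patch above. */
#define PCA_GPIO_MASK   0x00FF
#define PCA_INT         0x0100
#define PCA953X_TYPE    0x1000
#define PCA957X_TYPE    0x2000

static void example_decode(unsigned long driver_data,
                           int *ngpio, bool *has_irq, int *family)
{
        *ngpio   = driver_data & PCA_GPIO_MASK;                  /* e.g. 8 or 16 lines */
        *has_irq = driver_data & PCA_INT;                        /* interrupt line wired up */
        *family  = driver_data & (PCA953X_TYPE | PCA957X_TYPE);  /* selects register map */
}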
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index f970a5f..36919e7 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -283,8 +283,10 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
+#define PCI_VENDOR_ID_ROHM 0x10DB
static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e5123b..144d272 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af0..2067288 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
#define DM_MSG_PREFIX "io"
#define DM_IO_MAX_REGIONS BITS_PER_LONG
+#define MIN_IOS 16
+#define MIN_BIOS 16
struct dm_io_client {
mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
static struct kmem_cache *_dm_io_cache;
/*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
- return 4 * pages; /* too many ? */
-}
-
-/*
* Create a client with mempool and bioset.
*/
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
{
- unsigned ios = pages_to_ios(num_pages);
struct dm_io_client *client;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+ client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 0);
+ client->bios = bioset_create(MIN_BIOS, 0);
if (!client->bios)
goto bad;
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
}
EXPORT_SYMBOL(dm_io_client_create);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
- return mempool_resize(client->pool, pages_to_ios(num_pages),
- GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
void dm_io_client_destroy(struct dm_io_client *client)
{
mempool_destroy(client->pool);
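dm_io_client_create() above no longer takes a page count and sizes its mempool and bioset from fixed minimums, so callers (dm-log, dm-raid1, dm-snap-persistent below) simply drop the argument and keep the IS_ERR() check. A minimal caller sketch:

#include <linux/dm-io.h>
#include <linux/err.h>

static int example_setup(struct dm_io_client **out)
{
        struct dm_io_client *client = dm_io_client_create();

        if (IS_ERR(client))
                return PTR_ERR(client);         /* typically -ENOMEM */
        *out = client;
        return 0;
}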
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a1..819e37e 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
#include "dm.h"
+#define SUB_JOB_SIZE 128
+#define SPLIT_COUNT 8
+#define MIN_JOBS 8
+#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
/*-----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
- spinlock_t lock;
struct page_list *pages;
- unsigned int nr_pages;
- unsigned int nr_free_pages;
+ unsigned nr_reserved_pages;
+ unsigned nr_free_pages;
struct dm_io_client *io_client;
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
{
struct page_list *pl;
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kmalloc(sizeof(*pl), gfp);
if (!pl)
return NULL;
- pl->page = alloc_page(GFP_KERNEL);
+ pl->page = alloc_page(gfp);
if (!pl->page) {
kfree(pl);
return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
kfree(pl);
}
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
- unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
- struct page_list *pl;
-
- spin_lock(&kc->lock);
- if (kc->nr_free_pages < nr) {
- spin_unlock(&kc->lock);
- return -ENOMEM;
- }
-
- kc->nr_free_pages -= nr;
- for (*pages = pl = kc->pages; --nr; pl = pl->next)
- ;
+ struct page_list *next;
- kc->pages = pl->next;
- pl->next = NULL;
+ do {
+ next = pl->next;
- spin_unlock(&kc->lock);
+ if (kc->nr_free_pages >= kc->nr_reserved_pages)
+ free_pl(pl);
+ else {
+ pl->next = kc->pages;
+ kc->pages = pl;
+ kc->nr_free_pages++;
+ }
- return 0;
+ pl = next;
+ } while (pl);
}
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+ unsigned int nr, struct page_list **pages)
{
- struct page_list *cursor;
+ struct page_list *pl;
+
+ *pages = NULL;
+
+ do {
+ pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+ if (unlikely(!pl)) {
+ /* Use reserved pages */
+ pl = kc->pages;
+ if (unlikely(!pl))
+ goto out_of_memory;
+ kc->pages = pl->next;
+ kc->nr_free_pages--;
+ }
+ pl->next = *pages;
+ *pages = pl;
+ } while (--nr);
- spin_lock(&kc->lock);
- for (cursor = pl; cursor->next; cursor = cursor->next)
- kc->nr_free_pages++;
+ return 0;
- kc->nr_free_pages++;
- cursor->next = kc->pages;
- kc->pages = pl;
- spin_unlock(&kc->lock);
+out_of_memory:
+ if (*pages)
+ kcopyd_put_pages(kc, *pages);
+ return -ENOMEM;
}
/*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
}
}
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
- unsigned int i;
+ unsigned i;
struct page_list *pl = NULL, *next;
- for (i = 0; i < nr; i++) {
- next = alloc_pl();
+ for (i = 0; i < nr_pages; i++) {
+ next = alloc_pl(GFP_KERNEL);
if (!next) {
if (pl)
drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
pl = next;
}
+ kc->nr_reserved_pages += nr_pages;
kcopyd_put_pages(kc, pl);
- kc->nr_pages += nr;
+
return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
- BUG_ON(kc->nr_free_pages != kc->nr_pages);
+ BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
drop_pages(kc->pages);
kc->pages = NULL;
- kc->nr_free_pages = kc->nr_pages = 0;
+ kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
struct mutex lock;
atomic_t sub_jobs;
sector_t progress;
-};
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+ struct kcopyd_job *master_job;
+};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
- _job_cache = KMEM_CACHE(kcopyd_job, 0);
+ _job_cache = kmem_cache_create("kcopyd_job",
+ sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+ __alignof__(struct kcopyd_job), 0, NULL);
if (!_job_cache)
return -ENOMEM;
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
if (job->pages)
kcopyd_put_pages(kc, job->pages);
- mempool_free(job, kc->job_pool);
+ /*
+ * If this is the master job, the sub jobs have already
+ * completed so we can free everything.
+ */
+ if (job->master_job == job)
+ mempool_free(job, kc->job_pool);
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
wake(kc);
}
-#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
void *context)
{
/* FIXME: tidy this function */
sector_t progress = 0;
sector_t count = 0;
- struct kcopyd_job *job = (struct kcopyd_job *) context;
+ struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+ struct kcopyd_job *job = sub_job->master_job;
struct dm_kcopyd_client *kc = job->kc;
mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
if (count) {
int i;
- struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
- GFP_NOIO);
*sub_job = *job;
sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
}
sub_job->fn = segment_complete;
- sub_job->context = job;
+ sub_job->context = sub_job;
dispatch_job(sub_job);
} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
}
/*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
*/
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
{
int i;
- atomic_inc(&job->kc->nr_jobs);
+ atomic_inc(&master_job->kc->nr_jobs);
- atomic_set(&job->sub_jobs, SPLIT_COUNT);
- for (i = 0; i < SPLIT_COUNT; i++)
- segment_complete(0, 0u, job);
+ atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+ for (i = 0; i < SPLIT_COUNT; i++) {
+ master_job[i + 1].master_job = master_job;
+ segment_complete(0, 0u, &master_job[i + 1]);
+ }
}
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
struct kcopyd_job *job;
/*
- * Allocate a new job.
+ * Allocate an array of jobs consisting of one master job
+ * followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->fn = fn;
job->context = context;
+ job->master_job = job;
- if (job->source.count < SUB_JOB_SIZE)
+ if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
-
else {
mutex_init(&job->lock);
job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
- struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- spin_lock_init(&kc->lock);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
goto bad_workqueue;
kc->pages = NULL;
- kc->nr_pages = kc->nr_free_pages = 0;
- r = client_alloc_pages(kc, nr_pages);
+ kc->nr_reserved_pages = kc->nr_free_pages = 0;
+ r = client_reserve_pages(kc, RESERVE_PAGES);
if (r)
goto bad_client_pages;
- kc->io_client = dm_io_client_create(nr_pages);
+ kc->io_client = dm_io_client_create();
if (IS_ERR(kc->io_client)) {
r = PTR_ERR(kc->io_client);
goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
init_waitqueue_head(&kc->destroyq);
atomic_set(&kc->nr_jobs, 0);
- *result = kc;
- return 0;
+ return kc;
bad_io_client:
client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
bad_slab:
kfree(kc);
- return r;
+ return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
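dm_kcopyd_client_create() above likewise drops its page-count argument and returns the client (or an ERR_PTR) directly, and the job slab is sized so one allocation holds a master job followed by its SPLIT_COUNT sub jobs, addressed as master_job[i + 1]. A hedged sketch of that embedded-sub-job layout (the structure and names are illustrative, not the dm-kcopyd types):

#define EX_SPLIT_COUNT 8        /* matches the SPLIT_COUNT used above */

struct example_job {
        struct example_job *master_job;         /* points back at slot 0 */
        /* ... per-job source/destination state ... */
};

/* One slab object = EX_SPLIT_COUNT + 1 jobs, so splitting needs no extra allocation. */
static void example_split(struct example_job *master_job)
{
        int i;

        for (i = 0; i < EX_SPLIT_COUNT; i++)
                master_job[i + 1].master_job = master_job;
}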
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f3218..948e3f4 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
lc->io_req.mem.type = DM_IO_VMA;
lc->io_req.notify.fn = NULL;
- lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
- PAGE_SIZE));
+ lc->io_req.client = dm_io_client_create();
if (IS_ERR(lc->io_req.client)) {
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a05..aa4e570 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
return error;
if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad46..9bfd057 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
- if (r)
+ ms->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(ms->kcopyd_client)) {
+ r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
+ }
wakeup_mirrord(ms);
return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891df..135c2f1 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
struct workqueue_struct *metadata_wq;
};
-static unsigned sectors_to_pages(unsigned sectors)
-{
- return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
static int alloc_area(struct pstore *ps)
{
int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
chunk_size_supplied = 0;
}
- ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
- chunk_size));
+ ps->io_client = dm_io_client_create();
if (IS_ERR(ps->io_client))
return PTR_ERR(ps->io_client);
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return r;
}
- r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
- ps->io_client);
- if (r)
- return r;
-
r = alloc_area(ps);
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d3309..9ecff5f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
#define SNAPSHOT_COPY_PRIORITY 2
/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
- if (r) {
+ s->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(s->kcopyd_client)) {
+ r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
goto bad_kcopyd;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c..451c3bb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
+ struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
+ /*
+ * Some devices exist without request functions,
+ * such as loop devices not yet bound to backing files.
+ * Forbid the use of such devices.
+ */
+ q = bdev_get_queue(bdev);
+ if (!q || !q->make_request_fn) {
+ DMWARN("%s: %s is not yet initialised: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
+ }
+
if (!dev_size)
return 0;
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
/*
- * Ensure that at least one underlying device supports discards.
+ * Unless any target used by the table set discards_supported,
+ * require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (ti->discards_supported)
+ return 1;
+
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
return 1;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b6c2677..0f09c05 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -721,7 +721,7 @@ config MFD_PM8XXX_IRQ
config MFD_TPS65910
bool "TPS65910 Power Management chip"
- depends on I2C=y
+ depends on I2C=y && GPIOLIB
select MFD_CORE
select GPIO_TPS65910
help
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e637821..02a15d7 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2005,7 +2005,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
- .mfd_data = &db8500_regulators,
+ .platform_data = &db8500_regulators,
+ .pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-u8500",
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index b0c5631..8cebec5 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
return 1;
}
/* Readjust the instruction pointer if needed */
- instruction_pointer_set(&kgdbts_regs, ip + offset);
+ ip += offset;
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+ instruction_pointer_set(&kgdbts_regs, ip);
+#endif
return 0;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4941e06..5da5bea0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static unsigned int fmax = 515633;
* is asserted (likewise for RX)
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @blksz_datactrl16: true if the block size is at bits 16..30 of the datactrl register
*/
struct variant_data {
unsigned int clkreg;
@@ -60,6 +61,7 @@ struct variant_data {
unsigned int fifohalfsize;
bool sdio;
bool st_clkdiv;
+ bool blksz_datactrl16;
};
static struct variant_data variant_arm = {
@@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = {
.st_clkdiv = true,
};
+static struct variant_data variant_ux500v2 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .datalength_bits = 24,
+ .sdio = true,
+ .st_clkdiv = true,
+ .blksz_datactrl16 = true,
+};
+
/*
* This must be called with host->lock held
*/
@@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+ if (variant->blksz_datactrl16)
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ else
+ datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
@@ -1311,9 +1327,14 @@ static struct amba_id mmci_ids[] = {
},
{
.id = 0x00480180,
- .mask = 0x00ffffff,
+ .mask = 0xf0ffffff,
.data = &variant_ux500,
},
+ {
+ .id = 0x10480180,
+ .mask = 0xf0ffffff,
+ .data = &variant_ux500v2,
+ },
{ 0, 0 },
};
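The mmci hunks above add a ux500v2 variant whose datactrl register takes the raw block size in bits 16..30 instead of the usual power-of-two exponent in bits 4..7. A hedged sketch of the two encodings, omitting the enable/direction bits the driver ORs in separately:

#include <linux/bitops.h>
#include <linux/types.h>

static u32 example_datactrl_blksz(bool blksz_datactrl16, unsigned int blksz)
{
        if (blksz_datactrl16)
                return blksz << 16;             /* raw byte count in b16..b30 */

        return (ffs(blksz) - 1) << 4;           /* power-of-two exponent in b4..b7 */
}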
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index bc50d5e..4be8373 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,20 +33,6 @@ config MTD_TESTS
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
-config MTD_PARTITIONS
- bool "MTD partitioning support"
- help
- If you have a device which needs to divide its flash chip(s) up
- into multiple 'partitions', each of which appears to the user as
- a separate MTD device, you require this option to be enabled. If
- unsure, say 'Y'.
-
- Note, however, that you don't need this option for the DiskOnChip
- devices. Partitioning on NFTL 'devices' is a different - that's the
- 'normal' form of partitioning used on a block device.
-
-if MTD_PARTITIONS
-
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
@@ -99,7 +85,7 @@ endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
bool "Command line partition table parsing"
- depends on MTD_PARTITIONS = "y" && MTD = "y"
+ depends on MTD = "y"
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
@@ -163,8 +149,6 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
-endif # MTD_PARTITIONS
-
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d578095..39664c4 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,8 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o
-mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 09cb7c8..e1e122f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -812,12 +812,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
break;
if (time_after(jiffies, timeo)) {
- /* Urgh. Resume and pretend we weren't here. */
- map_write(map, CMD(0xd0), adr);
- /* Make sure we're in 'read status' mode if it had finished */
- map_write(map, CMD(0x70), adr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ /* Urgh. Resume and pretend we weren't here.
+ * Make sure we're in 'read status' mode if it had finished */
+ put_chip(map, chip, adr);
printk(KERN_ERR "%s: Chip not ready after erase "
"suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
@@ -997,7 +994,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0b49266..23175ed 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -462,13 +462,14 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_fixup_major_minor(cfi, extp);
/*
- * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
* see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
*/
if (extp->MajorVersion != '1' ||
- (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c (%#02x/%#02x).\n",
extp->MajorVersion, extp->MinorVersion,
@@ -710,9 +711,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* there was an error (so leave the erase
* routine to recover from it) or we trying to
* use the erase-in-progress sector. */
- map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip, adr);
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
return -EIO;
}
@@ -762,7 +761,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
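
Both CFI command sets stop open-coding the "resume the suspended erase and pretend we were never here" sequence in their chip-ready paths and call put_chip() instead, and the FL_ERASING case of put_chip() drops its leading chip->state = chip->oldstate assignment. For the AMD/Fujitsu command set, that branch now amounts to roughly the following (a paraphrase of the hunk above using the real field names, not a verbatim copy):

#include <linux/mtd/map.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/cfi.h>

/* Sketch of put_chip()'s FL_ERASING handling in cfi_cmdset_0002.c. */
static void resume_suspended_erase(struct map_info *map, struct flchip *chip,
                                   struct cfi_private *cfi)
{
        /* Re-issue the sector erase command at the suspended block... */
        map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
        /* ...and mark the chip as erasing again. */
        chip->oldstate = FL_READY;
        chip->state = FL_ERASING;
}
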
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index ed56ad3..179814a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -296,6 +296,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
+ wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97183c8..b78f231 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -294,7 +294,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (add_mtd_device(&dev->mtd)) {
+ if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
@@ -465,7 +465,7 @@ static void __devexit block2mtd_exit(void)
list_for_each_safe(pos, next, &blkmtd_device_list) {
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
- del_mtd_device(&dev->mtd);
+ mtd_device_unregister(&dev->mtd);
INFO("mtd%d: [%s] removed", dev->mtd.index,
dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 5bf5f46..f7fbf60 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -597,7 +597,7 @@ void DoC2k_init(struct mtd_info *mtd)
doc2klist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1185,7 +1185,7 @@ static void __exit cleanup_doc2000(void)
this = mtd->priv;
doc2klist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0990f78..241192f 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -376,7 +376,7 @@ void DoCMil_init(struct mtd_info *mtd)
this->nextdoc = docmillist;
docmillist = mtd;
mtd->size = this->totlen;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -826,7 +826,7 @@ static void __exit cleanup_doc2001(void)
this = mtd->priv;
docmillist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 8b36fa7..09ae0ad 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -499,7 +499,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
docmilpluslist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1091,7 +1091,7 @@ static void __exit cleanup_doc2001plus(void)
this = mtd->priv;
docmilpluslist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4b829f9..772a0ff 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -684,9 +684,10 @@ static int __init lart_flash_init (void)
#endif
#ifndef HAVE_PARTITIONS
- result = add_mtd_device (&mtd);
+ result = mtd_device_register(&mtd, NULL, 0);
#else
- result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
+ result = mtd_device_register(&mtd, lart_partitions,
+ ARRAY_SIZE(lart_partitions));
#endif
return (result);
@@ -695,9 +696,9 @@ static int __init lart_flash_init (void)
static void __exit lart_flash_exit (void)
{
#ifndef HAVE_PARTITIONS
- del_mtd_device (&mtd);
+ mtd_device_unregister(&mtd);
#else
- del_mtd_partitions (&mtd);
+ mtd_device_unregister(&mtd);
#endif
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 3fb981d..35180e4 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/mod_devicetable.h>
+#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -55,6 +56,9 @@
#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
+/* Used for Spansion flashes only. */
+#define OPCODE_BRWR 0x17 /* Bank register write */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -76,6 +80,8 @@
#define FAST_READ_DUMMY_BYTE 0
#endif
+#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
/****************************************************************************/
struct m25p {
@@ -158,11 +164,18 @@ static inline int write_disable(struct m25p *flash)
/*
* Enable/disable 4-byte addressing mode.
*/
-static inline int set_4byte(struct m25p *flash, int enable)
+static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
- u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
+ return spi_write(flash->spi, flash->command, 1);
+ default:
+ /* Spansion style */
+ flash->command[0] = OPCODE_BRWR;
+ flash->command[1] = enable << 7;
+ return spi_write(flash->spi, flash->command, 2);
+ }
}
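
set_4byte() now keys off the manufacturer byte of the JEDEC ID: Macronix parts get the dedicated EN4B/EX4B opcodes, everything else is treated Spansion-style with a bank register write (OPCODE_BRWR) carrying the enable flag in bit 7. JEDEC_MFR() is simply the top byte of the 24-bit ID, so 0xc22018 (mx25l12805d) yields 0xc2, which is CFI_MFR_MACRONIX. A small user-space sketch of that selection, with the opcode values taken from the hunks above:

#include <stdint.h>
#include <stdio.h>

#define OPCODE_EN4B      0xb7   /* Enter 4-byte mode */
#define OPCODE_EX4B      0xe9   /* Exit 4-byte mode */
#define OPCODE_BRWR      0x17   /* Spansion bank register write */
#define CFI_MFR_MACRONIX 0xc2   /* JEDEC manufacturer ID of Macronix */

#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)

/* Build the command bytes set_4byte() would send for a given chip. */
static int build_4byte_cmd(uint32_t jedec_id, int enable, uint8_t cmd[2])
{
        if (JEDEC_MFR(jedec_id) == CFI_MFR_MACRONIX) {
                cmd[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
                return 1;                       /* single opcode byte */
        }
        /* Spansion style: bank register write, enable bit in bit 7. */
        cmd[0] = OPCODE_BRWR;
        cmd[1] = enable << 7;
        return 2;
}

int main(void)
{
        uint8_t cmd[2];
        int len;

        len = build_4byte_cmd(0xc22018, 1, cmd);        /* mx25l12805d */
        printf("macronix: %d byte(s), 0x%02x\n", len, cmd[0]);
        len = build_4byte_cmd(0x010219, 1, cmd);        /* s25fl256s0/1 */
        printf("spansion: %d byte(s), 0x%02x 0x%02x\n", len, cmd[0], cmd[1]);
        return 0;
}
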
/*
@@ -668,6 +681,7 @@ static const struct spi_device_id m25p_ids[] = {
/* Macronix */
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -684,6 +698,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
@@ -729,7 +747,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
@@ -804,6 +825,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct m25p *flash;
struct flash_info *info;
unsigned i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -868,9 +891,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
* up with the software protection bits set
*/
- if (info->jedec_id >> 16 == 0x1f ||
- info->jedec_id >> 16 == 0x89 ||
- info->jedec_id >> 16 == 0xbf) {
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
write_enable(flash);
write_sr(flash, 0);
}
@@ -888,7 +911,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.read = m25p80_read;
/* sst flash chips use AAI word program */
- if (info->jedec_id >> 16 == 0xbf)
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd.write = sst_write;
else
flash->mtd.write = m25p80_write;
@@ -914,7 +937,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* enable 4-byte addressing if the device exceeds 16MiB */
if (flash->mtd.size > 0x1000000) {
flash->addr_width = 4;
- set_4byte(flash, 1);
+ set_4byte(flash, info->jedec_id, 1);
} else
flash->addr_width = 3;
}
@@ -945,48 +968,41 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* partitions should match sector boundaries; and it may be good to
* use readonly partitions for writeprotected sectors (BP2..BP0).
*/
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
-
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[]
+ = { "cmdlinepart", NULL, };
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes, &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
#ifdef CONFIG_MTD_OF_PARTS
- if (nr_parts <= 0 && spi->dev.of_node) {
- nr_parts = of_mtd_parse_partitions(&spi->dev,
- spi->dev.of_node, &parts);
- }
+ if (nr_parts <= 0 && spi->dev.of_node) {
+ nr_parts = of_mtd_parse_partitions(&spi->dev,
+ spi->dev.of_node, &parts);
+ }
#endif
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd, parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ }
- return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0;
+ return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
+ -ENODEV : 0;
}
@@ -996,10 +1012,7 @@ static int __devexit m25p_remove(struct spi_device *spi)
int status;
/* Clean up MTD stuff. */
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
kfree(flash->command);
kfree(flash);
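
With the mtd_has_partitions() wrapper gone, m25p80's probe keeps the same partition-source priority it had before: command-line partitions first (cmdlinepart), then the platform data defaults, then the device-tree node under CONFIG_MTD_OF_PARTS, and whatever wins (possibly nothing) goes into one mtd_device_register() call. The same shape appears below in mtd_dataflash and sst25l. A condensed kernel-style sketch of the ordering, with the OF step left out and illustrative names:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical condensation of the probe-time partition lookup order. */
static int example_register_flash(struct mtd_info *mtd,
                                  struct mtd_partition *default_parts,
                                  int default_nr_parts)
{
        static const char *part_probes[] = { "cmdlinepart", NULL, };
        struct mtd_partition *parts = NULL;
        int nr_parts;

        /* 1) command line ("mtdparts=...") via the cmdlinepart parser */
        nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);

        /* 2) fall back to board-supplied defaults (platform data / OF) */
        if (nr_parts <= 0) {
                parts = default_parts;          /* may be NULL */
                nr_parts = default_nr_parts;    /* may be 0 */
        }

        /* 3) one registration call, with or without partitions */
        return mtd_device_register(mtd, parts, nr_parts);
}
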
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 6a9a24a..8423fb6 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -220,7 +220,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->writesize = 1;
ret = -EIO;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_ERR
"ms02-nv: Unable to register MTD device, aborting!\n");
goto err_out_csr_res;
@@ -262,7 +262,7 @@ static void __exit ms02nv_remove_one(void)
root_ms02nv_mtd = mp->next;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
release_resource(mp->resource.csr);
kfree(mp->resource.csr);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index c5015cc..13749d4 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -637,6 +637,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
int err = 0;
+ struct mtd_partition *parts;
+ int nr_parts = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -675,33 +677,25 @@ add_dataflash_otp(struct spi_device *spi, char *name,
pagesize, otp_tag);
dev_set_drvdata(&spi->dev, priv);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts;
- int nr_parts = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = { "cmdlinepart", NULL, };
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(device,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(device, part_probes, &parts,
+ 0);
+ }
- if (nr_parts <= 0 && pdata && pdata->parts) {
- parts = pdata->parts;
- nr_parts = pdata->nr_parts;
- }
+ if (nr_parts <= 0 && pdata && pdata->parts) {
+ parts = pdata->parts;
+ nr_parts = pdata->nr_parts;
+ }
- if (nr_parts > 0) {
- priv->partitioned = 1;
- err = add_mtd_partitions(device, parts, nr_parts);
- goto out;
- }
- } else if (pdata && pdata->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, device->name);
+ if (nr_parts > 0) {
+ priv->partitioned = 1;
+ err = mtd_device_register(device, parts, nr_parts);
+ goto out;
+ }
- if (add_mtd_device(device) == 1)
+ if (mtd_device_register(device, NULL, 0) == 1)
err = -ENODEV;
out:
@@ -939,10 +933,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
dev_set_drvdata(&spi->dev, NULL);
kfree(flash);
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1483e18..2562689 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -104,7 +104,7 @@ static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
static void __exit cleanup_mtdram(void)
{
if (mtd_info) {
- del_mtd_device(mtd_info);
+ mtd_device_unregister(mtd_info);
vfree(mtd_info->priv);
kfree(mtd_info);
}
@@ -133,9 +133,8 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->read = ram_read;
mtd->write = ram_write;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0))
return -EIO;
- }
return 0;
}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8d28fa0..23423bd 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -115,7 +115,7 @@ static void unregister_devices(void)
struct phram_mtd_list *this, *safe;
list_for_each_entry_safe(this, safe, &phram_list, list) {
- del_mtd_device(&this->mtd);
+ mtd_device_unregister(&this->mtd);
iounmap(this->mtd.priv);
kfree(this->mtd.name);
kfree(this);
@@ -153,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.writesize = 1;
ret = -EAGAIN;
- if (add_mtd_device(&new->mtd)) {
+ if (mtd_device_register(&new->mtd, NULL, 0)) {
pr_err("Failed to register new device\n");
goto out2;
}
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 41b8cdc..ecff765 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -798,7 +798,7 @@ static int __init init_pmc551(void)
mtd->writesize = 1;
mtd->owner = THIS_MODULE;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
pci_iounmap(PCI_Device, priv->start);
kfree(mtd->priv);
@@ -806,7 +806,7 @@ static int __init init_pmc551(void)
break;
}
- /* Keep a reference as the add_mtd_device worked */
+ /* Keep a reference since mtd_device_register() worked */
pci_dev_get(PCI_Device);
printk(KERN_NOTICE "Registered pmc551 memory device.\n");
@@ -856,7 +856,7 @@ static void __exit cleanup_pmc551(void)
pci_dev_put(priv->dev);
kfree(mtd->priv);
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
kfree(mtd);
found++;
}
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 592016a..e585263 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -210,7 +210,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
(*curmtd)->mtdinfo->writesize = 1;
- if (add_mtd_device((*curmtd)->mtdinfo)) {
+ if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
@@ -231,7 +231,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
- del_mtd_device(slram_mtdlist->mtdinfo);
+ mtd_device_unregister(slram_mtdlist->mtdinfo);
iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c163e61..1e2c430 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -66,7 +66,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __initdata sst25l_flash_info[] = {
+static struct flash_info __devinitdata sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -381,6 +381,8 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct sst25l_flash *flash;
struct flash_platform_data *data;
int ret, i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -420,46 +422,37 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] =
- {"cmdlinepart", NULL};
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = {"cmdlinepart", NULL};
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes,
- &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes,
+ &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
-
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd,
- parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts) {
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ return mtd_device_register(&flash->mtd, parts,
+ nr_parts);
}
- ret = add_mtd_device(&flash->mtd);
+ ret = mtd_device_register(&flash->mtd, NULL, 0);
if (ret == 1) {
kfree(flash);
dev_set_drvdata(&spi->dev, NULL);
@@ -469,15 +462,12 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __exit sst25l_remove(struct spi_device *spi)
+static int __devexit sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
- if (mtd_has_partitions() && flash->partitioned)
- ret = del_mtd_partitions(&flash->mtd);
- else
- ret = del_mtd_device(&flash->mtd);
+ ret = mtd_device_unregister(&flash->mtd);
if (ret == 0)
kfree(flash);
return ret;
@@ -490,7 +480,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __exit_p(sst25l_remove),
+ .remove = __devexit_p(sst25l_remove),
};
static int __init sst25l_init(void)
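
Beyond the registration conversion, sst25l also fixes its section annotations: the flash ID table moves from __initdata to __devinitdata and the remove callback from __exit/__exit_p() to __devexit/__devexit_p(), so neither is discarded while the SPI core can still probe or remove such devices after boot. A skeleton of that convention, with purely illustrative names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

/* Illustrative SPI driver skeleton, not taken from sst25l itself. */
static int __devinit example_probe(struct spi_device *spi)
{
        return 0;
}

static int __devexit example_remove(struct spi_device *spi)
{
        return 0;
}

static struct spi_driver example_driver = {
        .driver = {
                .name   = "example-flash",
                .owner  = THIS_MODULE,
        },
        .probe  = example_probe,
        .remove = __devexit_p(example_remove),
};
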
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1267992..65655dd 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -313,12 +313,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
if (ret) {
/* Oops. Something went wrong. */
/* Resume and pretend we weren't here. */
- map_write(map, CMD(LPDDR_RESUME),
- map->pfow_base + PFOW_COMMAND_CODE);
- map_write(map, CMD(LPDDR_START_EXECUTION),
- map->pfow_base + PFOW_COMMAND_EXECUTE);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip);
printk(KERN_ERR "%s: suspend operation failed."
"State may be wrong \n", map->name);
return -EIO;
@@ -383,7 +378,6 @@ static void put_chip(struct map_info *map, struct flchip *chip)
switch (chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, CMD(LPDDR_RESUME),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(LPDDR_START_EXECUTION),
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5069111..c0c328c 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -82,7 +82,6 @@ config MTD_PHYSMAP_OF
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
- select MTD_PARTITIONS
help
This provides a 'mapping' driver which supports the way
in which user-programmable flash chips are connected on the
@@ -122,7 +121,7 @@ config MTD_SC520CDP
config MTD_NETSC520
tristate "CFI Flash device mapped on AMD NetSc520"
- depends on X86 && MTD_CFI && MTD_PARTITIONS
+ depends on X86 && MTD_CFI
help
This enables access routines for the flash chips on the AMD NetSc520
demonstration board. If you have one of these boards and would like
@@ -131,7 +130,6 @@ config MTD_NETSC520
config MTD_TS5500
tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
depends on X86
- select MTD_PARTITIONS
select MTD_JEDECPROBE
select MTD_CFI_AMDSTD
help
@@ -149,7 +147,7 @@ config MTD_TS5500
config MTD_SBC_GXX
tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
- depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS
+ depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
help
This provides a driver for the on-board flash of Arcom Control
Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
@@ -161,7 +159,6 @@ config MTD_SBC_GXX
config MTD_PXA2XX
tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
- select MTD_PARTITIONS
help
This provides a driver for the NOR flash attached to a PXA2xx chip.
@@ -185,7 +182,7 @@ config MTD_VMAX
config MTD_SCx200_DOCFLASH
tristate "Flash device mapped with DOCCS on NatSemi SCx200"
- depends on SCx200 && MTD_CFI && MTD_PARTITIONS
+ depends on SCx200 && MTD_CFI
help
Enable support for a flash chip mapped using the DOCCS signal on a
National Semiconductor SCx200 processor.
@@ -247,7 +244,7 @@ config MTD_TSUNAMI
config MTD_NETtel
tristate "CFI flash device on SnapGear/SecureEdge"
- depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
@@ -269,7 +266,7 @@ config MTD_LANTIQ
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
+ depends on X86 && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -355,7 +352,7 @@ config MTD_CDB89712
config MTD_SA1100
tristate "CFI Flash device mapped on StrongARM SA11x0"
- depends on MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS
+ depends on MTD_CFI && ARCH_SA1100
help
This enables access to the flash chips on most platforms based on
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -389,7 +386,7 @@ config MTD_IXP2000
config MTD_FORTUNET
tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET
+ depends on MTD_CFI && SA1100_FORTUNET
help
This enables access to the Flash on the FortuNet board. If you
have such a board, say 'Y'.
@@ -461,7 +458,6 @@ config MTD_PCMCIA_ANONYMOUS
config MTD_BFIN_ASYNC
tristate "Blackfin BF533-STAMP Flash Chip Support"
depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
default y
help
Map driver which allows for simultaneous utilization of
@@ -473,7 +469,6 @@ config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
depends on GENERIC_GPIO || GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
help
Map driver which allows flashes to be partially physically addressed
and assisted by GPIOs.
@@ -482,14 +477,13 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_PARTITIONS && MTD_RAM=y && !MMU
+ depends on MTD_RAM=y && !MMU
help
Map driver to support image based filesystems for uClinux.
config MTD_WRSBC8260
tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
depends on (SBC82xx || SBC8560)
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_4
select MTD_MAP_BANK_WIDTH_1
select MTD_CFI_I1
@@ -502,7 +496,6 @@ config MTD_WRSBC8260
config MTD_DMV182
tristate "Map driver for Dy-4 SVME/DMV-182 board."
depends on DMV182
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_32
select MTD_CFI_I8
select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 92de7e3..e2875d6 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -82,7 +82,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
if (map->rsrc.parent) {
release_resource(&map->rsrc);
}
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -262,7 +262,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 5366418..e5bfd0e 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -88,7 +88,7 @@ map:
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("NV-RAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -111,7 +111,7 @@ out:
static void __exit cleanup_autcpu12_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)autcpu12_sram_map.virt);
}
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 1f30495..608967f 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -224,8 +224,8 @@ probe_ok:
goto err_probe;
}
- return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
+ return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
+ parsed_nr_parts);
err_probe:
iounmap(bcm963xx_map.virt);
@@ -235,7 +235,7 @@ err_probe:
static int bcm963xx_remove(struct platform_device *pdev)
{
if (bcm963xx_mtd_info) {
- del_mtd_partitions(bcm963xx_mtd_info);
+ mtd_device_unregister(bcm963xx_mtd_info);
map_destroy(bcm963xx_mtd_info);
}
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 85dd181..d4297a9 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,9 +41,7 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
static void switch_to_flash(struct async_state *state)
@@ -124,9 +122,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
switch_back(state);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
static int __devinit bfin_flash_probe(struct platform_device *pdev)
{
@@ -169,22 +165,17 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
+ mtd_device_register(state->mtd, pdata->parts, ret);
state->parts = pdata->parts;
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
+ } else {
pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ mtd_device_register(state->mtd, NULL, 0);
}
platform_set_drvdata(pdev, state);
@@ -196,10 +187,8 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
+ mtd_device_unregister(state->mtd);
kfree(state->parts);
-#endif
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 8d92d8d..c29cbf8 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -75,7 +75,7 @@ static int __init init_cdb89712_flash (void)
flash_mtd->owner = THIS_MODULE;
- if (add_mtd_device(flash_mtd)) {
+ if (mtd_device_register(flash_mtd, NULL, 0)) {
printk("FLASH device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -141,7 +141,7 @@ static int __init init_cdb89712_sram (void)
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("SRAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -209,7 +209,7 @@ static int __init init_cdb89712_bootrom (void)
bootrom_mtd->owner = THIS_MODULE;
bootrom_mtd->erasesize = 0x10000;
- if (add_mtd_device(bootrom_mtd)) {
+ if (mtd_device_register(bootrom_mtd, NULL, 0)) {
printk("BootROM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -249,21 +249,21 @@ static int __init init_cdb89712_maps(void)
static void __exit cleanup_cdb89712_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)cdb89712_sram_map.virt);
release_resource (&cdb89712_sram_resource);
}
if (flash_mtd) {
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
iounmap((void *)cdb89712_flash_map.virt);
release_resource (&cdb89712_flash_resource);
}
if (bootrom_mtd) {
- del_mtd_device(bootrom_mtd);
+ mtd_device_unregister(bootrom_mtd);
map_destroy(bootrom_mtd);
iounmap((void *)cdb89712_bootrom_map.virt);
release_resource (&cdb89712_bootrom_resource);
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 23f551d..06f9c98 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -224,7 +224,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
{
int i;
- del_mtd_partitions(mtd);
+ mtd_device_unregister(mtd);
if (mtd != clps[0].mtd)
mtd_concat_destroy(mtd);
@@ -292,11 +292,11 @@ static void __init clps_locate_partitions(struct mtd_info *mtd)
if (nr_parts == 0) {
printk(KERN_NOTICE "clps flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
} else {
printk(KERN_NOTICE "clps flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(mtd, parsed_parts, nr_parts);
+ mtd_device_register(mtd, parsed_parts, nr_parts);
}
/* Always succeeds. */
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index f71343c..d16fc9d 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -107,7 +107,7 @@ static int __init init_flagadm(void)
mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
+ mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
printk(KERN_NOTICE "FlagaDM flash device initialized\n");
return 0;
}
@@ -119,7 +119,7 @@ static int __init init_flagadm(void)
static void __exit cleanup_flagadm(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (flagadm_map.virt) {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5fdb7b2..3d0e762 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -94,7 +94,7 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -291,7 +291,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index cfacfa6..85bdece 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -93,7 +93,7 @@ static int __init init_dbox2_flash(void)
mymtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -105,7 +105,7 @@ static int __init init_dbox2_flash(void)
static void __exit cleanup_dbox2_flash(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dbox2_flash_map.virt) {
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b3cb3a1..7a9e198 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,17 +145,13 @@ static struct map_info dc21285_map = {
/* Partition stuff */
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *dc21285_parts;
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static int __init init_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
int nrparts;
-#endif
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
@@ -204,13 +200,8 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
- if (nrparts > 0)
- add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
- else
-#endif
- add_mtd_device(dc21285_mtd);
+ mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
if(machine_is_ebsa285()) {
/*
@@ -232,14 +223,9 @@ static int __init init_dc21285(void)
static void __exit cleanup_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
- if (dc21285_parts) {
- del_mtd_partitions(dc21285_mtd);
+ mtd_device_unregister(dc21285_mtd);
+ if (dc21285_parts)
kfree(dc21285_parts);
- } else
-#endif
- del_mtd_device(dc21285_mtd);
-
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0713e3a..3e393f0 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -450,7 +450,7 @@ static int __init init_dnpc(void)
partition_info[2].mtdp = &lowlvl_parts[1];
partition_info[3].mtdp = &lowlvl_parts[3];
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
/*
** now create a virtual MTD device by concatenating the for partitions
@@ -463,7 +463,8 @@ static int __init init_dnpc(void)
** we do not supply mtd pointers in higlvl_partition_info, so
** add_mtd_partitions() will register the devices.
*/
- add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS);
+ mtd_device_register(merged_mtd, higlvl_partition_info,
+ NUM_HIGHLVL_PARTITIONS);
}
return 0;
@@ -472,12 +473,12 @@ static int __init init_dnpc(void)
static void __exit cleanup_dnpc(void)
{
if(merged_mtd) {
- del_mtd_partitions(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dnpc_map.virt) {
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index d171674..6538ac6 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -120,7 +120,7 @@ static int __init init_svme182(void)
this_mtd->size >> 20, FLASH_BASE_ADDR);
this_mtd->owner = THIS_MODULE;
- add_mtd_partitions(this_mtd, partitions, num_parts);
+ mtd_device_register(this_mtd, partitions, num_parts);
return 0;
}
@@ -129,7 +129,7 @@ static void __exit cleanup_svme182(void)
{
if (this_mtd)
{
- del_mtd_partitions(this_mtd);
+ mtd_device_unregister(this_mtd);
map_destroy(this_mtd);
}
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index be9e90b..fe42a21 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE 0x01000000
@@ -40,8 +37,6 @@ struct map_info edb7312nor_map = {
.phys = WINDOW_ADDR,
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[3] =
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
-
static int mtd_parts_nb = 0;
static struct mtd_partition *mtd_parts = 0;
@@ -96,27 +89,24 @@ static int __init init_edb7312nor(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
if (mtd_parts_nb > 0)
- part_type = "detected";
+ part_type = "detected";
- if (mtd_parts_nb == 0)
- {
+ if (mtd_parts_nb == 0) {
mtd_parts = static_partitions;
mtd_parts_nb = ARRAY_SIZE(static_partitions);
part_type = "static";
}
-#endif
- add_mtd_device(mymtd);
+
if (mtd_parts_nb == 0)
- printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
+ printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
else
- {
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
- }
+ /* Register the whole device first. */
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
return 0;
}
@@ -127,7 +117,7 @@ static int __init init_edb7312nor(void)
static void __exit cleanup_edb7312nor(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (edb7312nor_map.virt) {
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 4feb750..08322b1 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -128,7 +128,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -352,7 +352,7 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 1e43124..956e2e4 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -243,8 +243,9 @@ static int __init init_fortunet(void)
&map_regions[ix].map_info);
}
map_regions[ix].mymtd->owner = THIS_MODULE;
- add_mtd_partitions(map_regions[ix].mymtd,
- map_regions[ix].parts,map_regions_parts[ix]);
+ mtd_device_register(map_regions[ix].mymtd,
+ map_regions[ix].parts,
+ map_regions_parts[ix]);
}
}
if(iy)
@@ -261,7 +262,7 @@ static void __exit cleanup_fortunet(void)
{
if( map_regions[ix].mymtd )
{
- del_mtd_partitions( map_regions[ix].mymtd );
+ mtd_device_unregister(map_regions[ix].mymtd);
map_destroy( map_regions[ix].mymtd );
}
iounmap((void *)map_regions[ix].map_info.virt);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index af5707a..7568c5f 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -155,9 +155,7 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -189,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
*/
static int __devinit gpio_flash_probe(struct platform_device *pdev)
{
- int ret;
+ int nr_parts;
size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
@@ -254,24 +252,21 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
- ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
- if (ret > 0) {
+ nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
+ &pdata->parts, 0);
+ if (nr_parts > 0) {
pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
kfree(pdata->parts);
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ nr_parts = pdata->nr_parts;
+ } else {
pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ nr_parts = 0;
}
+ mtd_device_register(state->mtd, pdata->parts, nr_parts);
+
return 0;
}
@@ -282,9 +277,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
do {
gpio_free(state->gpio_addrs[i]);
} while (++i < state->gpio_count);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
-#endif
+ mtd_device_unregister(state->mtd);
map_destroy(state->mtd);
kfree(state);
return 0;
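
gpio-addr-flash (like bfin-async-flash and ixp4xx above) now funnels all three cases, command-line partitions, board-supplied partitions, or none at all, into a single mtd_device_register() call, forcing nr_parts to 0 in the last case so the whole flash is registered at once. A condensed sketch of that control flow against physmap-style platform data; names and error handling are simplified:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>

/* Condensed sketch of the probe-time branch structure used above. */
static int example_probe_register(struct mtd_info *mtd,
                                  struct physmap_flash_data *pdata)
{
        static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
        struct mtd_partition *parts = NULL;
        int nr_parts;

        nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
        if (nr_parts <= 0) {
                parts = pdata->parts;           /* board definition, if any */
                nr_parts = pdata->nr_parts;     /* 0 means whole flash at once */
        }
        return mtd_device_register(mtd, parts, nr_parts);
}
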
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 72c724f..7f03586 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -92,18 +92,16 @@ static int __init h720x_mtd_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
if (nr_mtd_parts > 0)
part_type = "command line";
-#endif
if (nr_mtd_parts <= 0) {
mtd_parts = h720x_partitions;
nr_mtd_parts = NUM_PARTITIONS;
part_type = "builtin";
}
printk(KERN_INFO "Using %s partition table\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts);
+ mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
return 0;
}
@@ -118,7 +116,7 @@ static void __exit h720x_mtd_cleanup(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 1337a41..6689dcb 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -67,7 +67,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -287,7 +287,7 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 998a27d..404a50c 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE0 0x00800000
@@ -49,8 +46,6 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
},
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[] =
static int mtd_parts_nb[NUM_FLASHBANKS];
static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-#endif
-
static const char *probes[] = { "cmdlinepart", NULL };
static int __init init_impa7(void)
@@ -104,7 +97,6 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
probes,
&mtd_parts[i],
@@ -120,12 +112,8 @@ static int __init init_impa7(void)
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n",
part_type);
- add_mtd_partitions(impa7_mtd[i],
- mtd_parts[i], mtd_parts_nb[i]);
-#else
- add_mtd_device(impa7_mtd[i]);
-
-#endif
+ mtd_device_register(impa7_mtd[i],
+ mtd_parts[i], mtd_parts_nb[i]);
}
else
iounmap((void *)impa7_map[i].virt);
@@ -138,11 +126,7 @@ static void __exit cleanup_impa7(void)
int i;
for (i=0; i<NUM_FLASHBANKS; i++) {
if (impa7_mtd[i]) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(impa7_mtd[i]);
-#else
- del_mtd_device(impa7_mtd[i]);
-#endif
+ mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
iounmap((void *)impa7_map[i].virt);
impa7_map[i].virt = 0;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index fc19985..d2f47be 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -66,33 +66,18 @@ struct vr_nor_mtd {
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
- if (p->nr_parts > 0) {
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
- del_mtd_partitions(p->info);
-#endif
- } else
- del_mtd_device(p->info);
+ mtd_device_unregister(p->info);
}
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
- int err = 0;
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
struct mtd_partition *parts;
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* register the flash bank */
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
/* partition the flash bank */
p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
- if (p->nr_parts > 0)
- err = add_mtd_partitions(p->info, parts, p->nr_parts);
-#endif
- if (p->nr_parts <= 0)
- err = add_mtd_device(p->info);
-
- return err;
+ return mtd_device_register(p->info, parts, p->nr_parts);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 9639d83..c00b917 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -119,7 +119,7 @@ static int ixp2000_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.map_priv_1)
@@ -230,7 +230,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
if (err > 0) {
- err = add_mtd_partitions(info->mtd, info->partitions, err);
+ err = mtd_device_register(info->mtd, info->partitions, err);
if(err)
dev_err(&dev->dev, "Could not parse partitions\n");
}
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 1f9fde0..155b219 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -162,7 +162,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.virt)
@@ -252,10 +252,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
dev->resource->start);
-#endif
if (nr_parts > 0) {
part_type = "dynamic";
} else {
@@ -263,18 +261,16 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "IXP4xx flash: no partition info "
"available, registering whole flash\n");
- err = add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "IXP4xx flash: using %s partition "
"definition\n", part_type);
- err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
- if(err)
- printk(KERN_ERR "Could not parse partitions\n");
- }
+ err = mtd_device_register(info->mtd, info->partitions, nr_parts);
+ if (err)
+ printk(KERN_ERR "Could not parse partitions\n");
if (err)
goto Error;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 9e05450..dd0360b 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -138,7 +138,7 @@ static int __init init_l440gx(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -148,7 +148,7 @@ static int __init init_l440gx(void)
static void __exit cleanup_l440gx(void)
{
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
iounmap(l440gx_map.virt);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ee25480..5936c46 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -112,18 +112,9 @@ static int latch_addr_flash_remove(struct platform_device *dev)
latch_addr_data = dev->dev.platform_data;
if (info->mtd != NULL) {
- if (mtd_has_partitions()) {
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else if (latch_addr_data->nr_parts) {
- del_mtd_partitions(info->mtd);
- } else {
- del_mtd_device(info->mtd);
- }
- } else {
- del_mtd_device(info->mtd);
- }
+ if (info->nr_parts)
+ kfree(info->parts);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -215,23 +206,21 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- if (mtd_has_partitions()) {
-
- err = parse_mtd_partitions(info->mtd,
- (const char **)part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
- return 0;
- }
- if (latch_addr_data->nr_parts) {
- pr_notice("Using latch-addr-flash partition information\n");
- add_mtd_partitions(info->mtd, latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
- }
+ err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
+ &info->parts, 0);
+ if (err > 0) {
+ mtd_device_register(info->mtd, info->parts, err);
+ return 0;
+ }
+ if (latch_addr_data->nr_parts) {
+ pr_notice("Using latch-addr-flash partition information\n");
+ mtd_device_register(info->mtd,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
+ return 0;
}
- add_mtd_device(info->mtd);
+
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 0eb5a7c..93fa56c 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -69,8 +69,8 @@ static int __init init_mbx(void)
mymtd = do_map_probe("jedec_probe", &mbx_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -81,7 +81,7 @@ static int __init init_mbx(void)
static void __exit cleanup_mbx(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (mbx_map.virt) {
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index c0cb319..81dc259 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -116,14 +116,14 @@ static int __init init_netsc520(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
static void __exit cleanup_netsc520(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (netsc520_map.virt) {
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a97133e..eadcfff 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -383,13 +383,13 @@ static int __init nettel_init(void)
/* No BIOS regions when AMD boot */
num_intel_partitions -= 2;
}
- rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions,
- num_intel_partitions);
+ rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
+ num_intel_partitions);
#endif
if (amd_mtd) {
- rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions,
- num_amd_partitions);
+ rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
+ num_amd_partitions);
}
#ifdef CONFIG_MTD_CFI_INTELEXT
@@ -419,7 +419,7 @@ static void __exit nettel_cleanup(void)
unregister_reboot_notifier(&nettel_notifier_block);
#endif
if (amd_mtd) {
- del_mtd_partitions(amd_mtd);
+ mtd_device_unregister(amd_mtd);
map_destroy(amd_mtd);
}
if (nettel_mmcrp) {
@@ -432,7 +432,7 @@ static void __exit nettel_cleanup(void)
}
#ifdef CONFIG_MTD_CFI_INTELEXT
if (intel_mtd) {
- del_mtd_partitions(intel_mtd);
+ mtd_device_unregister(intel_mtd);
map_destroy(intel_mtd);
}
if (nettel_intel_map.virt) {
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 23fe178..807ac2a 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -175,7 +175,7 @@ void cleanup_oct5066(void)
int i;
for (i=0; i<2; i++) {
if (oct5066_mtd[i]) {
- del_mtd_device(oct5066_mtd[i]);
+ mtd_device_unregister(oct5066_mtd[i]);
map_destroy(oct5066_mtd[i]);
}
}
@@ -220,7 +220,7 @@ static int __init init_oct5066(void)
oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
if (oct5066_mtd[i]) {
oct5066_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(oct5066_mtd[i]);
+ mtd_device_register(oct5066_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 48f4cf5..1d005a3 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -313,7 +313,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto release;
mtd->owner = THIS_MODULE;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
pci_set_drvdata(dev, mtd);
@@ -336,7 +336,7 @@ mtd_pci_remove(struct pci_dev *dev)
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
map->exit(dev, map);
kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 33dc282..bbe168b 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -630,7 +630,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.copy_to = pcmcia_copy_to;
}
- if(add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
map_destroy(mtd);
dev->mtd_info = NULL;
dev_err(&dev->p_dev->dev,
@@ -669,7 +669,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
DEBUG(3, "link=0x%p", link);
if(dev->mtd_info) {
- del_mtd_device(dev->mtd_info);
+ mtd_device_unregister(dev->mtd_info);
dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
dev->mtd_info->index);
map_destroy(dev->mtd_info);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1a9b94f..f64cee4 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,10 +27,8 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -47,18 +45,9 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
if (info->cmtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts || physmap_data->nr_parts) {
- del_mtd_partitions(info->cmtd);
-
- if (info->nr_parts)
- kfree(info->parts);
- } else {
- del_mtd_device(info->cmtd);
- }
-#else
- del_mtd_device(info->cmtd);
-#endif
+ mtd_device_unregister(info->cmtd);
+ if (info->nr_parts)
+ kfree(info->parts);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
}
@@ -92,10 +81,8 @@ static const char *rom_probe_types[] = {
"qinfo_probe",
"map_rom",
NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
NULL };
-#endif
static int physmap_flash_probe(struct platform_device *dev)
{
@@ -188,24 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
+ &info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->cmtd, info->parts, err);
+ mtd_device_register(info->cmtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (physmap_data->nr_parts) {
printk(KERN_NOTICE "Using physmap partition information\n");
- add_mtd_partitions(info->cmtd, physmap_data->parts,
- physmap_data->nr_parts);
+ mtd_device_register(info->cmtd, physmap_data->parts,
+ physmap_data->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, NULL, 0);
+
return 0;
err_out:
@@ -269,14 +255,12 @@ void physmap_configure(unsigned long addr, unsigned long size,
physmap_flash_data.set_vpp = set_vpp;
}
-#ifdef CONFIG_MTD_PARTITIONS
void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
{
physmap_flash_data.nr_parts = num_parts;
physmap_flash_data.parts = parts;
}
#endif
-#endif
static int __init physmap_init(void)
{
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d3346..d251d1d 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,16 +34,12 @@ struct of_flash_list {
struct of_flash {
struct mtd_info *cmtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
int list_size; /* number of elements in of_flash_list */
struct of_flash_list list[0];
};
-#ifdef CONFIG_MTD_PARTITIONS
#define OF_FLASH_PARTS(info) ((info)->parts)
-
static int parse_obsolete_partitions(struct platform_device *dev,
struct of_flash *info,
struct device_node *dp)
@@ -89,10 +85,6 @@ static int parse_obsolete_partitions(struct platform_device *dev,
return nr_parts;
}
-#else /* MTD_PARTITIONS */
-#define OF_FLASH_PARTS(info) (0)
-#define parse_partitions(info, dev) (0)
-#endif /* MTD_PARTITIONS */
static int of_flash_remove(struct platform_device *dev)
{
@@ -105,17 +97,14 @@ static int of_flash_remove(struct platform_device *dev)
dev_set_drvdata(&dev->dev, NULL);
if (info->cmtd != info->list[0].mtd) {
- del_mtd_device(info->cmtd);
+ mtd_device_unregister(info->cmtd);
mtd_concat_destroy(info->cmtd);
}
if (info->cmtd) {
- if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->cmtd);
+ if (OF_FLASH_PARTS(info))
kfree(OF_FLASH_PARTS(info));
- } else {
- del_mtd_device(info->cmtd);
- }
+ mtd_device_unregister(info->cmtd);
}
for (i = 0; i < info->list_size; i++) {
@@ -172,7 +161,6 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
}
}
-#ifdef CONFIG_MTD_PARTITIONS
/* When partitions are set we look for a linux,part-probe property which
specifies the list of partition probers to use. If none is given then the
 default is used. These take precedence over other device tree
@@ -212,14 +200,11 @@ static void __devinit of_free_probes(const char **probes)
if (probes != part_probe_types_def)
kfree(probes);
}
-#endif
static struct of_device_id of_flash_match[];
static int __devinit of_flash_probe(struct platform_device *dev)
{
-#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
-#endif
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
@@ -346,7 +331,6 @@ static int __devinit of_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
@@ -356,13 +340,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
of_free_probes(part_probe_types);
-#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
if (err < 0)
goto err_out;
}
-#endif
if (err == 0) {
err = parse_obsolete_partitions(dev, info, dp);
@@ -370,11 +352,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- if (err > 0)
- add_mtd_partitions(info->cmtd, info->parts, err);
- else
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, info->parts, err);
kfree(mtd_list);
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 76a76be..9ca1ecc 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -94,14 +94,11 @@ static int platram_remove(struct platform_device *pdev)
return 0;
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_unregister(info->mtd);
if (info->partitions) {
- del_mtd_partitions(info->mtd);
if (info->free_partitions)
kfree(info->partitions);
}
-#endif
- del_mtd_device(info->mtd);
map_destroy(info->mtd);
}
@@ -231,7 +228,6 @@ static int platram_probe(struct platform_device *pdev)
 	/* check to see if there are any available partitions, or whether
* to add this device whole */
-#ifdef CONFIG_MTD_PARTITIONS
if (!pdata->nr_partitions) {
/* try to probe using the supplied probe type */
if (pdata->probes) {
@@ -239,24 +235,22 @@ static int platram_probe(struct platform_device *pdev)
&info->partitions, 0);
info->free_partitions = 1;
if (err > 0)
- err = add_mtd_partitions(info->mtd,
+ err = mtd_device_register(info->mtd,
info->partitions, err);
}
}
/* use the static mapping */
else
- err = add_mtd_partitions(info->mtd, pdata->partitions,
- pdata->nr_partitions);
-#endif /* CONFIG_MTD_PARTITIONS */
-
- if (add_mtd_device(info->mtd)) {
- dev_err(&pdev->dev, "add_mtd_device() failed\n");
- err = -ENOMEM;
- }
-
+ err = mtd_device_register(info->mtd, pdata->partitions,
+ pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err)
+ dev_err(&pdev->dev, "failed to register the entire device\n");
+
return err;
exit_free:
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 64aea6a..744ca5c 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -173,7 +173,7 @@ static int __init init_msp_flash(void)
msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
if (msp_flash[i]) {
msp_flash[i]->owner = THIS_MODULE;
- add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
+ mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
} else {
printk(KERN_ERR "map probe failed for flash\n");
ret = -ENXIO;
@@ -188,7 +188,7 @@ static int __init init_msp_flash(void)
cleanup_loop:
while (i--) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
kfree(msp_maps[i].name);
iounmap(msp_maps[i].virt);
@@ -207,7 +207,7 @@ static void __exit cleanup_msp_flash(void)
int i;
for (i = 0; i < fcnt; i++) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
iounmap((void *)msp_maps[i].virt);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d8ae634..f59d62f 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -104,23 +104,18 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (ret > 0) {
info->nr_parts = ret;
info->parts = parts;
}
-#endif
- if (info->nr_parts) {
- add_mtd_partitions(info->mtd, info->parts,
- info->nr_parts);
- } else {
+ if (!info->nr_parts)
printk("Registering %s as whole device\n",
info->map.name);
- add_mtd_device(info->mtd);
- }
+
+ mtd_device_register(info->mtd, info->parts, info->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
@@ -132,12 +127,7 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts)
- del_mtd_partitions(info->mtd);
- else
-#endif
- del_mtd_device(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 83ed645..761fb45 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,10 +25,8 @@
struct rbtx4939_flash_info {
struct mtd_info *mtd;
struct map_info map;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,28 +39,18 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
+ if (info->nr_parts)
kfree(info->parts);
- } else if (pdata->nr_parts)
- del_mtd_partitions(info->mtd);
- else
- del_mtd_device(info->mtd);
-#else
- del_mtd_device(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
return 0;
}
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", NULL };
-#endif
static int rbtx4939_flash_probe(struct platform_device *dev)
{
@@ -120,23 +108,21 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->mtd, part_probe_types,
&info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
+ mtd_device_register(info->mtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (pdata->nr_parts) {
pr_notice("Using rbtx4939 partition information\n");
- add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->mtd);
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
err_out:
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 3e3ef53..ed88225 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -36,7 +36,7 @@ static int __init init_rpxlite(void)
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -47,7 +47,7 @@ static int __init init_rpxlite(void)
static void __exit cleanup_rpxlite(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (rpxlite_map.virt) {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index da875908..a9b5e0e 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -226,12 +226,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
int i;
if (info->mtd) {
- if (info->nr_parts == 0)
- del_mtd_device(info->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- else
- del_mtd_partitions(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
}
@@ -363,28 +358,24 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
if (nr_parts > 0) {
info->parts = parts;
part_type = "dynamic";
- } else
-#endif
- {
+ } else {
parts = plat->parts;
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "SA1100 flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "SA1100 flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(info->mtd, parts, nr_parts);
- }
+
+ mtd_device_register(info->mtd, parts, nr_parts);
info->nr_parts = nr_parts;
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 04b2781..556a2df 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -182,7 +182,7 @@ static struct mtd_info *all_mtd;
static void cleanup_sbc_gxx(void)
{
if( all_mtd ) {
- del_mtd_partitions( all_mtd );
+ mtd_device_unregister(all_mtd);
map_destroy( all_mtd );
}
@@ -223,7 +223,7 @@ static int __init init_sbc_gxx(void)
all_mtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
return 0;
}
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4d8aaaf..8fead8e 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -266,10 +266,10 @@ static int __init init_sc520cdp(void)
/* Combine the two flash banks into a single MTD device & register it: */
merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
if(merged_mtd)
- add_mtd_device(merged_mtd);
+ mtd_device_register(merged_mtd, NULL, 0);
}
if(devices_found == 3) /* register the third (DIL-Flash) device */
- add_mtd_device(mymtd[2]);
+ mtd_device_register(mymtd[2], NULL, 0);
return(devices_found ? 0 : -ENXIO);
}
@@ -278,11 +278,11 @@ static void __exit cleanup_sc520cdp(void)
int i;
if (merged_mtd) {
- del_mtd_device(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd[2])
- del_mtd_device(mymtd[2]);
+ mtd_device_unregister(mymtd[2]);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
if (mymtd[i])
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 7e329f0..d88c842 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -180,7 +180,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
scb2_mtd->owner = THIS_MODULE;
if (scb2_fixup_mtd(scb2_mtd) < 0) {
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
if (!region_fail)
@@ -192,7 +192,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
(unsigned long long)scb2_mtd->size,
(unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
- add_mtd_device(scb2_mtd);
+ mtd_device_register(scb2_mtd, NULL, 0);
return 0;
}
@@ -207,7 +207,7 @@ scb2_flash_remove(struct pci_dev *dev)
if (scb2_mtd->lock)
scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 027e628..f1c1f73 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -44,7 +44,6 @@ static struct resource docmem = {
static struct mtd_info *mymtd;
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition partition_info[] = {
{
.name = "DOCCS Boot kernel",
@@ -68,8 +67,6 @@ static struct mtd_partition partition_info[] = {
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-#endif
-
static struct map_info scx200_docflash_map = {
.name = "NatSemi SCx200 DOCCS Flash",
@@ -198,24 +195,17 @@ static int __init init_scx200_docflash(void)
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
partition_info[3].offset = mymtd->size-partition_info[3].size;
partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
-#else
- add_mtd_device(mymtd);
-#endif
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+
return 0;
}
static void __exit cleanup_scx200_docflash(void)
{
if (mymtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mymtd);
-#else
- del_mtd_device(mymtd);
-#endif
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (scx200_docflash_map.virt) {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 0eb41d9..cbf6bad 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -89,7 +89,7 @@ static int __init init_soleng_maps(void)
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
if (eprom_mtd) {
eprom_mtd->owner = THIS_MODULE;
- add_mtd_device(eprom_mtd);
+ mtd_device_register(eprom_mtd, NULL, 0);
}
nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
@@ -104,9 +104,9 @@ static int __init init_soleng_maps(void)
#endif /* CONFIG_MTD_SUPERH_RESERVE */
if (nr_parts > 0)
- add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
+ mtd_device_register(flash_mtd, parsed_parts, nr_parts);
else
- add_mtd_device(flash_mtd);
+ mtd_device_register(flash_mtd, NULL, 0);
return 0;
}
@@ -114,14 +114,14 @@ static int __init init_soleng_maps(void)
static void __exit cleanup_soleng_maps(void)
{
if (eprom_mtd) {
- del_mtd_device(eprom_mtd);
+ mtd_device_unregister(eprom_mtd);
map_destroy(eprom_mtd);
}
if (parsed_parts)
- del_mtd_partitions(flash_mtd);
+ mtd_device_unregister(flash_mtd);
else
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3f1cb32..2d66234 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -101,7 +101,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->mtd->owner = THIS_MODULE;
- add_mtd_device(up->mtd);
+ mtd_device_register(up->mtd, NULL, 0);
dev_set_drvdata(&op->dev, up);
@@ -126,7 +126,7 @@ static int __devexit uflash_remove(struct platform_device *op)
struct uflash_dev *up = dev_get_drvdata(&op->dev);
if (up->mtd) {
- del_mtd_device(up->mtd);
+ mtd_device_unregister(up->mtd);
map_destroy(up->mtd);
}
if (up->map.virt) {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 0718dfb..d785879 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -62,7 +62,6 @@ static void __iomem *start_scan_addr;
* "struct map_desc *_io_desc" for the corresponding machine.
*/
-#ifdef CONFIG_MTD_PARTITIONS
/* Currently, TQM8xxL has up to 8MiB flash */
static unsigned long tqm8xxl_max_flash_size = 0x00800000;
@@ -107,7 +106,6 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
//.size = MTDPART_SIZ_FULL,
}
};
-#endif
static int __init init_tqm_mtd(void)
{
@@ -188,7 +186,6 @@ static int __init init_tqm_mtd(void)
goto error_mem;
}
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Select Static partition definitions
*/
@@ -201,21 +198,14 @@ static int __init init_tqm_mtd(void)
part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0) {
+ if (part_banks[idx].nums == 0)
printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- add_mtd_device(mtd_banks[idx]);
- } else {
+ else
printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
idx, part_banks[idx].type);
- add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
+ mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
+ part_banks[idx].nums);
}
-#else
- printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
- for(idx = 0 ; idx < num_banks ; idx++)
- add_mtd_device(mtd_banks[idx]);
-#endif
return 0;
error_mem:
for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
@@ -237,7 +227,7 @@ static void __exit cleanup_tqm_mtd(void)
for(idx = 0 ; idx < num_banks ; idx++) {
/* destroy mtd_info previously allocated */
if (mtd_banks[idx]) {
- del_mtd_partitions(mtd_banks[idx]);
+ mtd_device_unregister(mtd_banks[idx]);
map_destroy(mtd_banks[idx]);
}
/* release map_info not used anymore */
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e02dfa9..d1d671d 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -89,7 +89,7 @@ static int __init init_ts5500_map(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS);
+ mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
return 0;
@@ -102,7 +102,7 @@ err2:
static void __exit cleanup_ts5500_map(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 77a8bfc..1de390e 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -76,7 +76,7 @@ static void __exit cleanup_tsunami_flash(void)
struct mtd_info *mtd;
mtd = tsunami_flash_mtd;
if (mtd) {
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
}
tsunami_flash_mtd = 0;
@@ -97,7 +97,7 @@ static int __init init_tsunami_flash(void)
}
if (tsunami_flash_mtd) {
tsunami_flash_mtd->owner = THIS_MODULE;
- add_mtd_device(tsunami_flash_mtd);
+ mtd_device_register(tsunami_flash_mtd, NULL, 0);
return 0;
}
return -ENXIO;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 3500929..6793074 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,11 +89,7 @@ static int __init uclinux_mtd_init(void)
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
-#else
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
return(0);
}
@@ -103,11 +99,7 @@ static int __init uclinux_mtd_init(void)
static void __exit uclinux_mtd_cleanup(void)
{
if (uclinux_ram_mtdinfo) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(uclinux_ram_mtdinfo);
-#else
- del_mtd_device(uclinux_ram_mtdinfo);
-#endif
+ mtd_device_unregister(uclinux_ram_mtdinfo);
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 6adaa6a..5e68de7 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -138,7 +138,7 @@ static void __exit cleanup_vmax301(void)
for (i=0; i<2; i++) {
if (vmax_mtd[i]) {
- del_mtd_device(vmax_mtd[i]);
+ mtd_device_unregister(vmax_mtd[i]);
map_destroy(vmax_mtd[i]);
}
}
@@ -176,7 +176,7 @@ static int __init init_vmax301(void)
vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
vmax_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(vmax_mtd[i]);
+ mtd_device_register(vmax_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 4afc167..3a04b07 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -563,7 +563,7 @@ static void vmu_queryblocks(struct mapleq *mq)
goto fail_cache_create;
part_cur->pcache = pcache;
- error = add_mtd_device(mtd_cur);
+ error = mtd_device_register(mtd_cur, NULL, 0);
if (error)
goto fail_mtd_register;
@@ -709,7 +709,7 @@ static void __devexit vmu_disconnect(struct maple_device *mdev)
for (x = 0; x < card->partitions; x++) {
mpart = ((card->mtd)[x]).priv;
mpart->mdev = NULL;
- del_mtd_device(&((card->mtd)[x]));
+ mtd_device_unregister(&((card->mtd)[x]));
kfree(((card->parts)[x]).name);
}
kfree(card->parts);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 933a2b6..901ce96 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -132,17 +132,20 @@ static int __init init_sbc82xx_flash(void)
nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
&sbcmtd_parts[i], 0);
if (nr_parts > 0) {
- add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
+ mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
+ nr_parts);
continue;
}
/* No partitioning detected. Use default */
if (i == 2) {
- add_mtd_device(sbcmtd[i]);
+ mtd_device_register(sbcmtd[i], NULL, 0);
} else if (i == bigflash) {
- add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
+ mtd_device_register(sbcmtd[i], bigflash_parts,
+ ARRAY_SIZE(bigflash_parts));
} else {
- add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
+ mtd_device_register(sbcmtd[i], smallflash_parts,
+ ARRAY_SIZE(smallflash_parts));
}
}
return 0;
@@ -157,9 +160,9 @@ static void __exit cleanup_sbc82xx_flash(void)
continue;
if (i<2 || sbcmtd_parts[i])
- del_mtd_partitions(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
else
- del_mtd_device(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
kfree(sbcmtd_parts[i]);
map_destroy(sbcmtd[i]);
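The map-driver conversions above all reduce to the same shape: probe tries parse_mtd_partitions() and hands whatever it finds (or NULL) to mtd_device_register(), while teardown collapses to a single mtd_device_unregister(). A minimal sketch of that shape, assuming the 2.6.39-era MTD API; the example_* names are hypothetical and not taken from any in-tree driver:

/*
 * Minimal sketch of the registration pattern the map drivers above are
 * converted to.  example_* names are hypothetical and error handling is
 * abbreviated; assumes parse_mtd_partitions(), mtd_device_register() and
 * mtd_device_unregister() as introduced/used in this series.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

static struct mtd_info *example_mtd;
static struct mtd_partition *example_parts;
static const char *example_probes[] = { "cmdlinepart", NULL };

static int example_register(struct map_info *map)
{
	int nr_parts;

	example_mtd = do_map_probe("cfi_probe", map);
	if (!example_mtd)
		return -ENXIO;
	example_mtd->owner = THIS_MODULE;

	/* Prefer dynamically parsed partitions, else register the whole chip. */
	nr_parts = parse_mtd_partitions(example_mtd, example_probes,
					&example_parts, 0);
	if (nr_parts <= 0) {
		example_parts = NULL;
		nr_parts = 0;
	}
	return mtd_device_register(example_mtd, example_parts, nr_parts);
}

static void example_unregister(void)
{
	if (example_mtd) {
		mtd_device_unregister(example_mtd);	/* partitions or whole device */
		map_destroy(example_mtd);
		kfree(example_parts);			/* NULL-safe */
	}
}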
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a534e1f..ca38569 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -221,15 +221,33 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
kref_get(&dev->ref);
__module_get(dev->tr->owner);
- if (dev->mtd) {
- ret = dev->tr->open ? dev->tr->open(dev) : 0;
- __get_mtd_device(dev->mtd);
+ if (!dev->mtd)
+ goto unlock;
+
+ if (dev->tr->open) {
+ ret = dev->tr->open(dev);
+ if (ret)
+ goto error_put;
}
+ ret = __get_mtd_device(dev->mtd);
+ if (ret)
+ goto error_release;
+
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
+
+error_release:
+ if (dev->tr->release)
+ dev->tr->release(dev);
+error_put:
+ module_put(dev->tr->owner);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef6..3f92731 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
return 0;
} /* mtd_close */
-/* FIXME: This _really_ needs to die. In 2.5, we should lock the
- userspace buffer down and use it directly with readv/writev.
-*/
-#define MAX_KMALLOC_SIZE 0x20000
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is under low-memory situations
+ * or if memory is highly fragmented at the cost of reducing the
+ * performance of the requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
size_t total_retlen=0;
int ret=0;
int len;
+ size_t size = count;
char *kbuf;
DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
if (!count)
return 0;
- /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
- and pass them directly to the MTD functions */
-
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
switch (mfi->mode) {
case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ size_t size = count;
char *kbuf;
size_t retlen;
size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
if (!count)
return 0;
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
if (copy_from_user(kbuf, buf, len)) {
kfree(kbuf);
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static int mtd_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
return -EINVAL;
}
}
-#endif
-
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
-#ifdef CONFIG_MTD_PARTITIONS
case BLKPG:
{
ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
-#endif
default:
ret = -ENOTTY;
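The comment block added near the top of this file explains why mtd_kmalloc_up_to() is used for the bounce buffer. As a reading aid, here is a rough, self-contained sketch of the resulting read loop; the helper name and the simplified -EUCLEAN handling are illustrative, not copied from mtd_read():

/*
 * Rough shape of a bounce-buffer read once mtd_kmalloc_up_to() is in
 * place.  Hypothetical helper; the real mtdchar.c code also handles the
 * OTP modes and keeps more detailed error accounting.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/mtd/mtd.h>

static ssize_t example_bounce_read(struct mtd_info *mtd, char __user *buf,
				   size_t count, loff_t *ppos)
{
	size_t size = count;		/* may shrink under memory pressure */
	size_t total = 0;
	int ret = 0;
	u_char *kbuf;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		size_t len = min_t(size_t, count, size);
		size_t retlen = 0;

		ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		if (ret && ret != -EUCLEAN)	/* -EUCLEAN: data OK, bits corrected */
			break;
		if (copy_to_user(buf, kbuf, retlen)) {
			ret = -EFAULT;
			break;
		}
		*ppos += retlen;
		total += retlen;
		buf += retlen;
		count -= retlen;
		if (!retlen)			/* nothing more to read */
			break;
	}

	kfree(kbuf);
	return total ? total : ret;
}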
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5060e60..e601672 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- ops->retlen = 0;
+ ops->retlen = ops->oobretlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
devops.len = subdev->size - to;
err = subdev->write_oob(subdev, to, &devops);
- ops->retlen += devops.retlen;
+ ops->retlen += devops.oobretlen;
if (err)
return err;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index da69bc8..c510aff 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
@@ -37,6 +38,7 @@
#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include "mtdcore.h"
/*
@@ -391,7 +393,7 @@ fail_locked:
* if the requested device does not appear to be present in the list.
*/
-int del_mtd_device (struct mtd_info *mtd)
+int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
@@ -427,6 +429,50 @@ out_error:
}
/**
+ * mtd_device_register - register an MTD device.
+ *
+ * @master: the MTD device to register
+ * @parts: the partitions to register - only valid if nr_parts > 0
+ * @nr_parts: the number of partitions in parts. If zero then the full MTD
+ * device is registered
+ *
+ * Register an MTD device with the system and optionally, a number of
+ * partitions. If nr_parts is 0 then the whole device is registered, otherwise
+ * only the partitions are registered. To register both the full device *and*
+ * the partitions, call mtd_device_register() twice, once with nr_parts == 0
+ * and once with nr_parts equal to the number of partitions.
+ */
+int mtd_device_register(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ return parts ? add_mtd_partitions(master, parts, nr_parts) :
+ add_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_register);
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
* register_mtd_user - register a 'user' of MTD devices.
* @new: pointer to notifier info structure
*
@@ -443,7 +489,7 @@ void register_mtd_user (struct mtd_notifier *new)
list_add(&new->list, &mtd_notifiers);
- __module_get(THIS_MODULE);
+ __module_get(THIS_MODULE);
mtd_for_each_device(mtd)
new->add(mtd);
@@ -532,7 +578,6 @@ int __get_mtd_device(struct mtd_info *mtd)
return -ENODEV;
if (mtd->get_device) {
-
err = mtd->get_device(mtd);
if (err) {
@@ -570,21 +615,13 @@ struct mtd_info *get_mtd_device_nm(const char *name)
if (!mtd)
goto out_unlock;
- if (!try_module_get(mtd->owner))
+ err = __get_mtd_device(mtd);
+ if (err)
goto out_unlock;
- if (mtd->get_device) {
- err = mtd->get_device(mtd);
- if (err)
- goto out_put;
- }
-
- mtd->usecount++;
mutex_unlock(&mtd_table_mutex);
return mtd;
-out_put:
- module_put(mtd->owner);
out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
@@ -638,8 +675,54 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
return ret;
}
-EXPORT_SYMBOL_GPL(add_mtd_device);
-EXPORT_SYMBOL_GPL(del_mtd_device);
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low- or
+ * fragmented-memory situations where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+ __GFP_NORETRY | __GFP_NO_KSWAPD;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last resort allocation allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -648,6 +731,7 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
@@ -656,44 +740,32 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info(char *buf, struct mtd_info *this)
-{
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
- (unsigned long long)this->size,
- this->erasesize, this->name);
-}
-
-static int mtd_read_proc (char *page, char **start, off_t off, int count,
- int *eof, void *data_unused)
+static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
- int len, l;
- off_t begin = 0;
+ seq_puts(m, "dev: size erasesize name\n");
mutex_lock(&mtd_table_mutex);
-
- len = sprintf(page, "dev: size erasesize name\n");
mtd_for_each_device(mtd) {
- l = mtd_proc_info(page + len, mtd);
- len += l;
- if (len+begin > off+count)
- goto done;
- if (len+begin < off) {
- begin += len;
- len = 0;
- }
- }
-
- *eof = 1;
-
-done:
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
mutex_unlock(&mtd_table_mutex);
- if (off >= len+begin)
- return 0;
- *start = page + (off-begin);
- return ((count < begin+len-off) ? count : begin+len-off);
+ return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtd_proc_show, NULL);
}
+static const struct file_operations mtd_proc_ops = {
+ .open = mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
@@ -734,8 +806,7 @@ static int __init init_mtd(void)
goto err_bdi3;
#ifdef CONFIG_PROC_FS
- if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
- proc_mtd->read_proc = mtd_read_proc;
+ proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -753,7 +824,7 @@ err_reg:
static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
- if (proc_mtd)
+ if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
class_unregister(&mtd_class);
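The kerneldoc added for mtd_device_register() above notes that a driver can register both the master device and its partitions by calling it twice, and that a single mtd_device_unregister() tears both down. A small usage sketch of that, assuming a hypothetical caller; the example_* names and the partition layout are made up:

/*
 * Usage sketch for mtd_device_register()/mtd_device_unregister() as
 * documented above.  Names and partition sizes are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition example_parts[] = {
	{
		.name	= "boot",
		.offset	= 0,
		.size	= 0x00080000,		/* 512 KiB */
	}, {
		.name	= "filesystem",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static int example_attach(struct mtd_info *mtd)
{
	int err;

	/* parts == NULL, nr_parts == 0: register the whole chip ... */
	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		return err;

	/* ... then a second call registers the partitions on top of it */
	return mtd_device_register(mtd, example_parts,
				   ARRAY_SIZE(example_parts));
}

static void example_detach(struct mtd_info *mtd)
{
	/* removes the partitions and, if registered, the master device */
	mtd_device_unregister(mtd);
}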
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 6a64fde..0ed6126 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,6 +10,12 @@
extern struct mutex mtd_table_mutex;
extern struct mtd_info *__mtd_next_device(int i);
+extern int add_mtd_device(struct mtd_info *mtd);
+extern int del_mtd_device(struct mtd_info *mtd);
+extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
+ int);
+extern int del_mtd_partitions(struct mtd_info *);
+
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0a47601..630be3e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -31,6 +31,8 @@
#include <linux/mtd/partitions.h>
#include <linux/err.h>
+#include "mtdcore.h"
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -376,7 +378,6 @@ int del_mtd_partitions(struct mtd_info *master)
return err;
}
-EXPORT_SYMBOL(del_mtd_partitions);
static struct mtd_part *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
@@ -671,7 +672,6 @@ int add_mtd_partitions(struct mtd_info *master,
return 0;
}
-EXPORT_SYMBOL(add_mtd_partitions);
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
@@ -722,11 +722,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
- if (!parser) {
- printk(KERN_NOTICE "%s partition parsing not available\n",
- *types);
+ if (!parser)
continue;
- }
ret = (*parser->parse_fn)(master, pparts, origin);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fed215c..fd78853 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1450,7 +1450,13 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
}
oinfo = mtd->ecclayout;
- if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ if (!oinfo) {
+ printk(KERN_ERR "%s: mtd%d does not have OOB\n",
+ MTDSWAP_PREFIX, mtd->index);
+ return;
+ }
+
+ if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index edec457..4c34252 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -92,7 +92,7 @@ config MTD_NAND_EDB7312
config MTD_NAND_H1900
tristate "iPAQ H1900 flash"
- depends on ARCH_PXA && MTD_PARTITIONS
+ depends on ARCH_PXA
help
This enables the driver for the iPAQ h1900 flash.
@@ -419,7 +419,6 @@ config MTD_NAND_TMIO
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
- depends on MTD_PARTITIONS
help
The simulator may simulate various NAND flash chips for the
MTD nand layer.
@@ -513,7 +512,7 @@ config MTD_NAND_SOCRATES
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
- depends on ARCH_W90X900 && MTD_PARTITIONS
+ depends on ARCH_W90X900
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 8691e04..eb40ea8 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -120,7 +120,7 @@ static void alauda_delete(struct kref *kref)
struct alauda *al = container_of(kref, struct alauda, kref);
if (al->mtd) {
- del_mtd_device(al->mtd);
+ mtd_device_unregister(al->mtd);
kfree(al->mtd);
}
usb_put_dev(al->dev);
@@ -592,7 +592,7 @@ static int alauda_init_media(struct alauda *al)
mtd->priv = al;
mtd->owner = THIS_MODULE;
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
if (err) {
err = -ENFILE;
goto error;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index bc65bf7..78017eb 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -235,8 +235,8 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
}
/* Register the partitions */
- add_mtd_partitions(ams_delta_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(ams_delta_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
goto out;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 950646a..b300705 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -30,6 +30,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -494,11 +495,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct resource *regs;
struct resource *mem;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -656,7 +654,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_scan_tail;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "atmel_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes,
@@ -672,17 +669,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_no_partitions;
}
- res = add_mtd_partitions(mtd, partitions, num_partitions);
-#else
- res = add_mtd_device(mtd);
-#endif
-
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
err_no_partitions:
-#endif
nand_release(mtd);
err_scan_tail:
err_scan_ident:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5d513b5..e7767ee 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -581,7 +581,8 @@ static int __init au1xxx_nand_init(void)
}
/* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
+ mtd_device_register(au1550_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
return 0;
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 0911cf0..eddc9a2 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -185,20 +185,20 @@ static int __init autcpu12_init(void)
/* Register the partitions */
switch (autcpu12_mtd->size) {
case SZ_16M:
- add_mtd_partitions(autcpu12_mtd, partition_info16k,
- NUM_PARTITIONS16K);
+ mtd_device_register(autcpu12_mtd, partition_info16k,
+ NUM_PARTITIONS16K);
break;
case SZ_32M:
- add_mtd_partitions(autcpu12_mtd, partition_info32k,
- NUM_PARTITIONS32K);
+ mtd_device_register(autcpu12_mtd, partition_info32k,
+ NUM_PARTITIONS32K);
break;
case SZ_64M:
- add_mtd_partitions(autcpu12_mtd, partition_info64k,
- NUM_PARTITIONS64K);
+ mtd_device_register(autcpu12_mtd, partition_info64k,
+ NUM_PARTITIONS64K);
break;
case SZ_128M:
- add_mtd_partitions(autcpu12_mtd, partition_info128k,
- NUM_PARTITIONS128K);
+ mtd_device_register(autcpu12_mtd, partition_info128k,
+ NUM_PARTITIONS128K);
break;
default:
printk("Unsupported SmartMedia device\n");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index dfe262c..9ec2807 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,9 +52,7 @@
static const __devinitconst char gBanner[] = KERN_INFO \
"BCM UMI MTD NAND Driver: 1.00\n";
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
#if NAND_ECC_BCH
static uint8_t scan_ff_pattern[] = { 0xff };
@@ -509,7 +507,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
kfree(board_mtd);
return -EIO;
}
- add_mtd_partitions(board_mtd, partition_info, nr_partitions);
+ mtd_device_register(board_mtd, partition_info, nr_partitions);
}
/* Return happy */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 79947be..dd899cb 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -659,15 +659,10 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts = info->platform->partitions;
int nr = info->platform->nr_partitions;
- return add_mtd_partitions(mtd, parts, nr);
-#else
- return add_mtd_device(mtd);
-#endif
+ return mtd_device_register(mtd, parts, nr);
}
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e06c898..87ebb4e 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,9 +90,7 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -632,10 +630,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nr_parts;
-#endif
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -804,9 +800,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mtd);
/* We register the whole device first, separate from the partitions */
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "cafe_nand";
#endif
@@ -814,9 +809,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
if (nr_parts > 0) {
cafe->parts = parts;
dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
- add_mtd_partitions(mtd, parts, nr_parts);
+ mtd_device_register(mtd, parts, nr_parts);
}
-#endif
goto out;
out_irq:
@@ -838,7 +832,6 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
- del_mtd_device(mtd);
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6e64952..6fc043a 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -238,7 +238,7 @@ static int __init cmx270_init(void)
/* Register the partitions */
pr_notice("Using %s partition definition\n", part_type);
- ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+ ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 71c35a0..f59ad1f 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -277,22 +277,15 @@ static int is_geode(void)
return 0;
}
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
-
-#ifdef CONFIG_MTD_PARTITIONS
int mtd_parts_nb = 0;
struct mtd_partition *mtd_parts = NULL;
-#endif
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
@@ -324,17 +317,11 @@ static int __init cs553x_init(void)
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0) {
+ if (mtd_parts_nb > 0)
printk(KERN_NOTICE "Using command line partition definition\n");
- add_mtd_partitions(cs553x_mtd[i], mtd_parts, mtd_parts_nb);
- } else {
- add_mtd_device(cs553x_mtd[i]);
- }
-#else
- add_mtd_device(cs553x_mtd[i]);
-#endif
+ mtd_device_register(cs553x_mtd[i], mtd_parts,
+ mtd_parts_nb);
err = 0;
}
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index aff3468..1f34951 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,6 +530,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ struct mtd_partition *mtd_parts = NULL;
+ int mtd_parts_nb = 0;
/* insist on board-specific configuration */
if (!pdata)
@@ -749,41 +751,33 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- if (mtd_has_partitions()) {
- struct mtd_partition *mtd_parts = NULL;
- int mtd_parts_nb = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] __initconst = {
+ "cmdlinepart", NULL
+ };
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] __initconst =
- { "cmdlinepart", NULL };
-
- mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
- &mtd_parts, 0);
- }
-
- if (mtd_parts_nb <= 0) {
- mtd_parts = pdata->parts;
- mtd_parts_nb = pdata->nr_parts;
- }
+ mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
+ &mtd_parts, 0);
+ }
- /* Register any partitions */
- if (mtd_parts_nb > 0) {
- ret = add_mtd_partitions(&info->mtd,
- mtd_parts, mtd_parts_nb);
- if (ret == 0)
- info->partitioned = true;
- }
+ if (mtd_parts_nb <= 0) {
+ mtd_parts = pdata->parts;
+ mtd_parts_nb = pdata->nr_parts;
+ }
- } else if (pdata->nr_parts) {
- dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, info->mtd.name);
+ /* Register any partitions */
+ if (mtd_parts_nb > 0) {
+ ret = mtd_device_register(&info->mtd, mtd_parts,
+ mtd_parts_nb);
+ if (ret == 0)
+ info->partitioned = true;
}
/* If there's no partition info, just package the whole chip
* as a single MTD device.
*/
if (!info->partitioned)
- ret = add_mtd_device(&info->mtd) ? -ENODEV : 0;
+ ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
if (ret < 0)
goto err_scan;
@@ -824,10 +818,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
struct davinci_nand_info *info = platform_get_drvdata(pdev);
int status;
- if (mtd_has_partitions() && info->partitioned)
- status = del_mtd_partitions(&info->mtd);
- else
- status = del_mtd_device(&info->mtd);
+ status = mtd_device_unregister(&info->mtd);
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 4633f09..d527621 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -44,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
/* We define a macro here that combines all interrupts this driver uses into
* a single constant value, for convenience. */
-#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
- INTR_STATUS0__ECC_TRANSACTION_DONE | \
- INTR_STATUS0__ECC_ERR | \
- INTR_STATUS0__PROGRAM_FAIL | \
- INTR_STATUS0__LOAD_COMP | \
- INTR_STATUS0__PROGRAM_COMP | \
- INTR_STATUS0__TIME_OUT | \
- INTR_STATUS0__ERASE_FAIL | \
- INTR_STATUS0__RST_COMP | \
- INTR_STATUS0__ERASE_COMP)
+#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
+ INTR_STATUS__ECC_TRANSACTION_DONE | \
+ INTR_STATUS__ECC_ERR | \
+ INTR_STATUS__PROGRAM_FAIL | \
+ INTR_STATUS__LOAD_COMP | \
+ INTR_STATUS__PROGRAM_COMP | \
+ INTR_STATUS__TIME_OUT | \
+ INTR_STATUS__ERASE_FAIL | \
+ INTR_STATUS__RST_COMP | \
+ INTR_STATUS__ERASE_COMP)
/* indicates whether the internal value for the flash bank is
valid */
@@ -95,30 +96,6 @@ static const struct pci_device_id denali_pci_ids[] = {
{ /* end: all zeroes */ }
};
-
-/* these are static lookup tables that give us easy access to
- * registers in the NAND controller.
- */
-static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1,
- INTR_STATUS2,
- INTR_STATUS3};
-
-static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
-static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
-
-static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -180,19 +157,17 @@ static void read_status(struct denali_nand_info *denali)
static void reset_bank(struct denali_nand_info *denali)
{
uint32_t irq_status = 0;
- uint32_t irq_mask = reset_complete[denali->flash_bank] |
- operation_timeout[denali->flash_bank];
- int bank = 0;
+ uint32_t irq_mask = INTR_STATUS__RST_COMP |
+ INTR_STATUS__TIME_OUT;
clear_interrupts(denali);
- bank = device_reset_banks[denali->flash_bank];
- iowrite32(bank, denali->flash_reg + DEVICE_RESET);
+ iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status & operation_timeout[denali->flash_bank])
- dev_err(&denali->dev->dev, "reset bank failed.\n");
+ if (irq_status & INTR_STATUS__TIME_OUT)
+ dev_err(denali->dev, "reset bank failed.\n");
}
/* Reset the flash controller */
@@ -200,29 +175,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
uint32_t i;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0 ; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i],
- denali->flash_reg + DEVICE_RESET);
+ for (i = 0 ; i < denali->max_banks; i++) {
+ iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
while (!(ioread32(denali->flash_reg +
- intr_status_addresses[i]) &
- (reset_complete[i] | operation_timeout[i])))
+ INTR_STATUS(i)) &
+ (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
cpu_relax();
- if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
- operation_timeout[i])
- dev_dbg(&denali->dev->dev,
+ if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ INTR_STATUS__TIME_OUT)
+ dev_dbg(denali->dev,
"NAND Reset operation timed out on bank %d\n", i);
}
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
return PASS;
}
@@ -254,7 +228,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -291,7 +265,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
acc_clks++;
if ((data_invalid - acc_clks * CLK_X) < 2)
- dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
+ dev_warn(denali->dev, "%s, Line %d: Warning!\n",
__FILE__, __LINE__);
addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -419,7 +393,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
#endif
break;
default:
- dev_warn(&denali->dev->dev,
+ dev_warn(denali->dev,
"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
"Will use default parameter values instead.\n",
device_id);
@@ -431,17 +405,17 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
*/
static void find_valid_banks(struct denali_nand_info *denali)
{
- uint32_t id[LLD_MAX_FLASH_BANKS];
+ uint32_t id[denali->max_banks];
int i;
denali->total_used_banks = 1;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ for (i = 0; i < denali->max_banks; i++) {
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
index_addr_read_data(denali,
(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"Return 1st ID for bank[%d]: %x\n", i, id[i]);
if (i == 0) {
@@ -461,16 +435,27 @@ static void find_valid_banks(struct denali_nand_info *denali)
* Multichip support is not enabled.
*/
if (denali->total_used_banks != 1) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"Sorry, Intel CE4100 only supports "
"a single NAND device.\n");
BUG();
}
}
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"denali->total_used_banks: %d\n", denali->total_used_banks);
}
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->flash_reg + FEATURES);
+
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+}
+
static void detect_partition_feature(struct denali_nand_info *denali)
{
/* For MRST platform, denali->fwblks represent the
@@ -480,15 +465,15 @@ static void detect_partition_feature(struct denali_nand_info *denali)
* blocks it can't touch.
* */
if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+ PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
denali->fwblks =
- ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+ MIN_MAX_BANK__MIN_VALUE) *
denali->blksperchip)
+
- (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+ MIN_BLK_ADDR__VALUE);
} else
denali->fwblks = SPECTRA_START_BLOCK;
} else
@@ -501,7 +486,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint32_t id_bytes[5], addr;
uint8_t i, maf_id, device_id;
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
@@ -530,7 +515,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
get_hynix_nand_para(denali, device_id);
}
- dev_info(&denali->dev->dev,
+ dev_info(denali->dev,
"Dump timing register values:"
"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
@@ -560,7 +545,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
if (INT_ENABLE)
@@ -580,6 +565,7 @@ static inline bool is_flash_bank_valid(int flash_bank)
static void denali_irq_init(struct denali_nand_info *denali)
{
uint32_t int_mask = 0;
+ int i;
/* Disable global interrupts */
denali_set_intr_modes(denali, false);
@@ -587,10 +573,8 @@ static void denali_irq_init(struct denali_nand_info *denali)
int_mask = DENALI_IRQ_ALL;
/* Clear all status bits */
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
denali_irq_enable(denali, int_mask);
}
@@ -604,10 +588,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
static void denali_irq_enable(struct denali_nand_info *denali,
uint32_t int_mask)
{
- iowrite32(int_mask, denali->flash_reg + INTR_EN0);
- iowrite32(int_mask, denali->flash_reg + INTR_EN1);
- iowrite32(int_mask, denali->flash_reg + INTR_EN2);
- iowrite32(int_mask, denali->flash_reg + INTR_EN3);
+ int i;
+
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
}
/* This function only returns when an interrupt that this driver cares about
@@ -624,7 +608,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
}
@@ -645,7 +629,7 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
return ioread32(denali->flash_reg + intr_status_reg);
}
@@ -754,7 +738,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_mask = 0;
if (op == DENALI_READ)
- irq_mask = INTR_STATUS0__LOAD_COMP;
+ irq_mask = INTR_STATUS__LOAD_COMP;
else if (op == DENALI_WRITE)
irq_mask = 0;
else
@@ -800,7 +784,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"cmd, page, addr on timeout "
"(0x%x, 0x%x, 0x%x)\n",
cmd, denali->page, addr);
@@ -861,8 +845,8 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
int status = 0;
denali->page = page;
@@ -875,11 +859,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev, "OOB write failed\n");
+ dev_err(denali->dev, "OOB write failed\n");
status = -EIO;
}
} else {
- dev_err(&denali->dev->dev, "unable to send pipeline command\n");
+ dev_err(denali->dev, "unable to send pipeline command\n");
status = -EIO;
}
return status;
@@ -889,7 +873,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
+ uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
irq_status = 0, addr = 0x0, cmd = 0x0;
denali->page = page;
@@ -904,7 +888,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0)
- dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
+ dev_err(denali->dev, "page on OOB timeout %d\n",
denali->page);
/* We set the device back to MAIN_ACCESS here as I observed
@@ -944,7 +928,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
{
bool check_erased_page = false;
- if (irq_status & INTR_STATUS0__ECC_ERR) {
+ if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
uint32_t err_address = 0, err_correction_info = 0;
uint32_t err_byte = 0, err_sector = 0, err_device = 0;
@@ -995,7 +979,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
* for a while for this interrupt
* */
while (!(read_interrupt_status(denali) &
- INTR_STATUS0__ECC_TRANSACTION_DONE))
+ INTR_STATUS__ECC_TRANSACTION_DONE))
cpu_relax();
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
@@ -1045,14 +1029,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
/* if it is a raw xfer, we want to disable ecc, and send
* the spare area.
@@ -1071,7 +1054,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize);
}
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
clear_interrupts(denali);
denali_enable_dma(denali, true);
@@ -1082,16 +1065,16 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"timeout on write_page (type = %d)\n",
raw_xfer);
denali->status =
- (irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
+ (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
denali_enable_dma(denali, false);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
/* NAND core entry points */
@@ -1139,18 +1122,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR;
+ uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+ INTR_STATUS__ECC_ERR;
bool check_erased_page = false;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1159,7 +1141,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, true, false);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1167,7 +1149,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
memcpy(buf, denali->buf.buf, mtd->writesize);
@@ -1192,16 +1174,15 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1210,7 +1191,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1218,7 +1199,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
denali_enable_dma(denali, false);
@@ -1271,10 +1252,10 @@ static void denali_erase(struct mtd_info *mtd, int page)
index_addr(denali, (uint32_t)cmd, 0x1);
/* wait for erase to complete or failure to occur */
- irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
- INTR_STATUS0__ERASE_FAIL);
+ irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+ INTR_STATUS__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
+ denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
@@ -1330,7 +1311,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
uint8_t *ecc_code)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_calculate called unexpectedly\n");
BUG();
return -EIO;
@@ -1340,7 +1321,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_correct called unexpectedly\n");
BUG();
return -EIO;
@@ -1349,7 +1330,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_hwctl called unexpectedly\n");
BUG();
}
@@ -1375,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
/* Should set value for these registers when init */
iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ detect_max_banks(denali);
denali_nand_timing_set(denali);
denali_irq_init(denali);
}
@@ -1484,24 +1466,22 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Is 32-bit DMA supported? */
- ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
-
+ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (ret) {
printk(KERN_ERR "Spectra: no usable DMA configuration\n");
goto failed_enable_dev;
}
- denali->buf.dma_buf =
- pci_map_single(dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
+ if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
goto failed_enable_dev;
}
pci_set_master(dev);
- denali->dev = dev;
+ denali->dev = &dev->dev;
denali->mtd.dev.parent = &dev->dev;
ret = pci_request_regions(dev, DENALI_NAND_NAME);
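The probe hunk above is where the driver stops using the PCI-specific DMA wrappers and switches to the generic DMA API on &dev->dev, which is what lets denali->dev become a plain struct device later in the patch. A condensed sketch of that setup sequence, with error unwinding abbreviated (denali_map_dma_buf is an illustrative name):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int denali_map_dma_buf(struct pci_dev *pdev, void *buf, size_t size,
                              dma_addr_t *handle)
{
        /* Same 32-bit DMA mask as before, via the device-neutral call. */
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;

        *handle = dma_map_single(&pdev->dev, buf, size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, *handle))
                return -EIO;

        return 0;
}

The per-page transfer paths then bracket each DMA with dma_sync_single_for_device()/dma_sync_single_for_cpu(), as the write_page() and denali_read_page() hunks earlier show.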
@@ -1554,7 +1534,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
* with the nand subsystem */
- if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
+ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
ret = -ENXIO;
goto failed_req_irq;
}
@@ -1664,7 +1644,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_irq;
}
- ret = add_mtd_device(&denali->mtd);
+ ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
ret);
@@ -1681,8 +1661,8 @@ failed_remap_reg:
failed_req_regions:
pci_release_regions(dev);
failed_dma_map:
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
failed_enable_dev:
pci_disable_device(dev);
failed_alloc_memery:
@@ -1696,7 +1676,7 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_nand_info *denali = pci_get_drvdata(dev);
nand_release(&denali->mtd);
- del_mtd_device(&denali->mtd);
+ mtd_device_unregister(&denali->mtd);
denali_irq_cleanup(dev->irq, denali);
@@ -1704,8 +1684,8 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
pci_set_drvdata(dev, NULL);
kfree(denali);
}
@@ -1721,8 +1701,7 @@ static struct pci_driver denali_pci_driver = {
static int __devinit denali_init(void)
{
- printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
- __DATE__, __TIME__);
+ printk(KERN_INFO "Spectra MTD driver\n");
return pci_register_driver(&denali_pci_driver);
}
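Taken together, the denali.c changes replace the fixed four-entry lookup tables with a max_banks value probed from the FEATURES register plus the per-bank INTR_STATUS()/INTR_EN() macros defined in denali.h below. A sketch of the resulting bank setup, assuming FEATURES__N_BANKS masks the low-order bank-count field as the detect_max_banks() expression implies (the helper name is illustrative):

static void sketch_setup_banks(struct denali_nand_info *denali)
{
        uint32_t features = ioread32(denali->flash_reg + FEATURES);
        int i;

        /* 2 << 0 = 2 banks, 2 << 1 = 4, 2 << 2 = 8, and so on. */
        denali->max_banks = 2 << (features & FEATURES__N_BANKS);

        for (i = 0; i < denali->max_banks; i++) {
                /* One interrupt status/enable register pair per bank. */
                iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
                iowrite32(DENALI_IRQ_ALL, denali->flash_reg + INTR_EN(i));
        }
}

Bank resets follow the same pattern: the device_reset_banks[] table becomes iowrite32(1 << bank, ... + DEVICE_RESET), one DEVICE_RESET bit per bank, as the reset_bank()/denali_nand_reset() hunks above show.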
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 3918bcb..fabb9d5 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -211,185 +211,46 @@
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
+#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
+
+#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS__ECC_ERR 0x0002
+#define INTR_STATUS__DMA_CMD_COMP 0x0004
+#define INTR_STATUS__TIME_OUT 0x0008
+#define INTR_STATUS__PROGRAM_FAIL 0x0010
+#define INTR_STATUS__ERASE_FAIL 0x0020
+#define INTR_STATUS__LOAD_COMP 0x0040
+#define INTR_STATUS__PROGRAM_COMP 0x0080
+#define INTR_STATUS__ERASE_COMP 0x0100
+#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS__LOCKED_BLK 0x0400
+#define INTR_STATUS__UNSUP_CMD 0x0800
+#define INTR_STATUS__INT_ACT 0x1000
+#define INTR_STATUS__RST_COMP 0x2000
+#define INTR_STATUS__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS__PAGE_XFER_INC 0x8000
+
+#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN__ECC_ERR 0x0002
+#define INTR_EN__DMA_CMD_COMP 0x0004
+#define INTR_EN__TIME_OUT 0x0008
+#define INTR_EN__PROGRAM_FAIL 0x0010
+#define INTR_EN__ERASE_FAIL 0x0020
+#define INTR_EN__LOAD_COMP 0x0040
+#define INTR_EN__PROGRAM_COMP 0x0080
+#define INTR_EN__ERASE_COMP 0x0100
+#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN__LOCKED_BLK 0x0400
+#define INTR_EN__UNSUP_CMD 0x0800
+#define INTR_EN__INT_ACT 0x1000
+#define INTR_EN__RST_COMP 0x2000
+#define INTR_EN__PIPE_CMD_ERR 0x4000
+#define INTR_EN__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
#define DATA_INTR 0x550
#define DATA_INTR__WRITE_SPACE_AV 0x0001
@@ -484,141 +345,23 @@
#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
+#define PERM_SRC_ID__SRCID 0x00ff
+#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID__READ_ACTIVE 0x4000
+#define PERM_SRC_ID__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
+#define MIN_BLK_ADDR__VALUE 0xffff
+
+#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
+#define MAX_BLK_ADDR__VALUE 0xffff
+
+#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
+#define MIN_MAX_BANK__MIN_VALUE 0x0003
+#define MIN_MAX_BANK__MAX_VALUE 0x000c
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
/* ffsdefs.h */
#define CLEAR 0 /*use this to clear a field instead of "fail"*/
@@ -711,7 +454,6 @@
#define READ_WRITE_ENABLE_HIGH_COUNT 22
#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
@@ -732,7 +474,7 @@ struct denali_nand_info {
int status;
int platform;
struct nand_buf buf;
- struct pci_dev *dev;
+ struct device *dev;
int total_used_banks;
uint32_t block; /* stored for future use */
uint16_t page;
@@ -751,6 +493,7 @@ struct denali_nand_info {
uint32_t totalblks;
uint32_t blksperchip;
uint32_t bbtskipbytes;
+ uint32_t max_banks;
};
#endif /*_LLD_NAND_*/
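The per-bank macros above encode the register strides directly (0x50 per bank for the interrupt block, 0x40 per bank for the partition block), so expanding them reproduces the constants they replace, for example:

        INTR_STATUS(0)  = 0x410 + 0 * 0x50 = 0x410   (was INTR_STATUS0)
        INTR_STATUS(1)  = 0x410 + 1 * 0x50 = 0x460   (was INTR_STATUS1)
        INTR_STATUS(3)  = 0x410 + 3 * 0x50 = 0x500   (was INTR_STATUS3)
        INTR_EN(2)      = 0x420 + 2 * 0x50 = 0x4c0   (was INTR_EN2)
        PERM_SRC_ID(1)  = 0x830 + 1 * 0x40 = 0x870   (was PERM_SRC_ID_1)
        PERM_SRC_ID(7)  = 0x830 + 7 * 0x40 = 0x9f0   (was PERM_SRC_ID_7)
        MIN_MAX_BANK(7) = 0x860 + 7 * 0x40 = 0xa20   (was MIN_MAX_BANK_7)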
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 657b9f4..7837728 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1360,11 +1360,9 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
At least as nand_bbt.c is currently written. */
if ((ret = nand_scan_bbt(mtd, NULL)))
return ret;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1419,11 +1417,9 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
autopartitioning, but I want to give it more thought. */
if (!numparts)
return -EIO;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1678,9 +1674,9 @@ static int __init doc_probe(unsigned long physadr)
/* DBB note: I believe nand_release is necessary here, as
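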
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
- /* nand_release will call del_mtd_device, but we haven't yet
- added it. This is handled without incident by
- del_mtd_device, as far as I can tell. */
+ /* nand_release will call mtd_device_unregister, but we
+ haven't yet added it. This is handled without incident by
+ mtd_device_unregister, as far as I can tell. */
nand_release(mtd);
kfree(mtd);
goto fail;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 86366bf..8400d0f 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -55,7 +55,6 @@ static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -67,8 +66,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -101,9 +98,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -162,14 +157,12 @@ static int __init ep7312_init(void)
kfree(ep7312_mtd);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ep7312_mtd->name = "edb7312-nand";
mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
if (mtd_parts_nb == 0) {
mtd_parts = partition_info;
mtd_parts_nb = NUM_PARTITIONS;
@@ -178,7 +171,7 @@ static int __init ep7312_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 537e380..0bb254c 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -841,12 +841,9 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
struct fsl_elbc_mtd *priv;
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
struct mtd_partition *parts;
-#endif
int ret;
int bank;
struct device *dev;
@@ -935,26 +932,19 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line; these take precedence over device tree information */
ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(priv->dev, node, &parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- add_mtd_partitions(&priv->mtd, parts, ret);
- else
-#endif
- add_mtd_device(&priv->mtd);
+ mtd_device_register(&priv->mtd, parts, ret);
printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 073ee02..23752fd 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -33,10 +33,7 @@ struct fsl_upm_nand {
struct mtd_info mtd;
struct nand_chip chip;
int last_ctrl;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
-
struct fsl_upm upm;
uint8_t upm_addr_offset;
uint8_t upm_cmd_offset;
@@ -161,9 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
{
int ret;
struct device_node *flash_np;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_types[] = { "cmdlinepart", NULL, };
-#endif
fun->chip.IO_ADDR_R = fun->io_base;
fun->chip.IO_ADDR_W = fun->io_base;
@@ -197,7 +192,6 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
@@ -207,11 +201,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
goto err;
}
#endif
- if (ret > 0)
- ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
- else
-#endif
- ret = add_mtd_device(&fun->mtd);
+ ret = mtd_device_register(&fun->mtd, fun->parts, ret);
err:
of_node_put(flash_np);
return ret;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 0d45ef3..e9b275a 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -120,8 +120,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Default partition tables to be used if the partition information is not
* provided through platform data.
@@ -182,7 +180,6 @@ static struct mtd_partition partition_info_128KB_blk[] = {
#ifdef CONFIG_MTD_CMDLINE_PARTS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#endif
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
@@ -719,7 +716,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* platform data,
* default partition information present in driver.
*/
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
/*
* Check if partition info passed via command line
@@ -777,19 +773,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
#endif
- if (host->partitions) {
- ret = add_mtd_partitions(&host->mtd, host->partitions,
- host->nr_partitions);
- if (ret)
- goto err_probe;
- }
-#else
- dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
- if (!add_mtd_device(mtd)) {
- ret = -ENXIO;
+ ret = mtd_device_register(&host->mtd, host->partitions,
+ host->nr_partitions);
+ if (ret)
goto err_probe;
- }
-#endif
platform_set_drvdata(pdev, host);
dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
@@ -835,11 +822,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (host) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(&host->mtd);
-#else
- del_mtd_device(&host->mtd);
-#endif
+ mtd_device_unregister(&host->mtd);
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0cde618..2c2060b 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -316,8 +316,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
platform_set_drvdata(dev, gpiomtd);
return 0;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f8ce79b..02a03e6 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -38,7 +38,6 @@ static struct mtd_info *h1910_nand_mtd = NULL;
* Module stuff
*/
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -50,8 +49,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -154,7 +151,7 @@ static int __init h1910_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index cea38a5..6e813da 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -299,10 +299,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partition_info;
int num_partitions = 0;
-#endif
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
if (!nand) {
@@ -375,7 +373,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
goto err_gpio_free;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partition_info, 0);
@@ -384,12 +381,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
num_partitions = pdata->num_partitions;
partition_info = pdata->partitions;
}
-
- if (num_partitions > 0)
- ret = add_mtd_partitions(mtd, partition_info, num_partitions);
- else
-#endif
- ret = add_mtd_device(mtd);
+ ret = mtd_device_register(mtd, partition_info, num_partitions);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0b81b5b..2f7c930 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,9 +131,7 @@ struct mpc5121_nfc_prv {
static void mpc5121_nfc_done(struct mtd_info *mtd);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-#endif
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
@@ -658,9 +656,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
@@ -841,7 +837,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
-#ifdef CONFIG_MTD_PARTITIONS
retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
if (retval == 0)
@@ -854,12 +849,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
goto error;
}
- if (retval > 0)
- retval = add_mtd_partitions(mtd, parts, retval);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, retval);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 42a95fb..90df34c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -56,8 +56,14 @@
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
-#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20)
-#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
@@ -152,6 +158,7 @@ struct mxc_nand_host {
int clk_act;
int irq;
int eccsize;
+ int active_cs;
struct completion op_completion;
@@ -236,9 +243,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
@@ -445,7 +450,7 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
for (i = 0; i < bufs; i++) {
/* NANDFC buffer 0 is used for page read/write */
- writew(i, NFC_V1_V2_BUF_ADDR);
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
writew(ops, NFC_V1_V2_CONFIG2);
@@ -470,7 +475,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
struct nand_chip *this = &host->nand;
/* NANDFC buffer 0 is used for device ID output */
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
writew(NFC_ID, NFC_V1_V2_CONFIG2);
@@ -505,7 +510,7 @@ static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
uint32_t store;
uint16_t ret;
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
/*
* The device status is stored in main_area0. To
@@ -686,24 +691,24 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- switch (chip) {
- case -1:
+ if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
clk_disable(host->clk);
host->clk_act = 0;
}
- break;
- case 0:
+ return;
+ }
+
+ if (!host->clk_act) {
/* Enable the NFC clock */
- if (!host->clk_act) {
- clk_enable(host->clk);
- host->clk_act = 1;
- }
- break;
+ clk_enable(host->clk);
+ host->clk_act = 1;
+ }
- default:
- break;
+ if (nfc_is_v21()) {
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
}
@@ -834,8 +839,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
/* Blocks to be unlocked */
if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
} else if (nfc_is_v1()) {
writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
@@ -1200,7 +1211,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
irq_control_v1_v2(host, 1);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
+ if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -1220,18 +1231,15 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts =
parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
if (nr_parts > 0)
- add_mtd_partitions(mtd, host->parts, nr_parts);
+ mtd_device_register(mtd, host->parts, nr_parts);
else if (pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
- else
-#endif
- {
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else {
pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
}
platform_set_drvdata(pdev, host);
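The mxc_nand hunks add multi-chip-select support on v2.1 controllers: the selected chip is cached in host->active_cs and written into the buffer-address register, all four unlock windows are opened, and nand_scan_ident() now probes up to four chips. A condensed sketch of how the chip select and the NFC RAM-buffer index share that register, based on the select_chip and send_page_v1_v2() hunks above (clock gating elided, sketch_select_chip is an illustrative name):

static void sketch_select_chip(struct mtd_info *mtd, int chip)
{
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;

        if (chip < 0)
                return;                 /* deselect: clock handling elided */

        if (nfc_is_v21()) {
                /* remembered so later transfers address the right CS */
                host->active_cs = chip;
                writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
        }
}

/*
 * Per-buffer transfers then OR in the RAM buffer index, keeping the chip
 * select in the upper nibble:
 *
 *      writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
 */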
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c54a4cb..a46e9bb 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -47,10 +47,7 @@
#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/io.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
@@ -976,9 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -1046,9 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -3112,6 +3103,8 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ chip->badblockbits = 8;
+
/* Set the bad block position */
if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
@@ -3539,12 +3532,7 @@ void nand_release(struct mtd_info *mtd)
if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
-#ifdef CONFIG_MTD_PARTITIONS
- /* Deregister partitions */
- del_mtd_partitions(mtd);
-#endif
- /* Deregister the device */
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory */
kfree(chip->bbt);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index af46428..ccbeaa1 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1276,20 +1276,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
* while scanning a device for factory marked good / bad blocks. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static struct nand_bbt_descr smallpage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_SMALL_BADBLOCK_POS,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_LARGE_BADBLOCK_POS,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
static struct nand_bbt_descr agand_flashbased = {
@@ -1355,10 +1341,6 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
*
- * TODO: Handle other flags, replace other static structs
- * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
- * replace smallpage_flashbased)
- *
*/
static int nand_create_default_bbt_descr(struct nand_chip *this)
{
@@ -1422,15 +1404,14 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_md = &bbt_mirror_descr;
}
}
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
- }
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
- if (!this->badblock_pattern)
- nand_create_default_bbt_descr(this);
}
+
+ if (!this->badblock_pattern)
+ nand_create_default_bbt_descr(this);
+
return nand_scan_bbt(mtd, this->badblock_pattern);
}
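With smallpage_flashbased and largepage_flashbased gone, nand_default_bbt() relies on nand_create_default_bbt_descr() whenever no badblock_pattern was supplied. A hedged sketch of what an equivalent default descriptor amounts to, with the field values taken from the structs deleted above (the helper name and the caller-provided descriptor are illustrative; the real function handles more cases and allocation):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static void sketch_default_bbt_descr(struct mtd_info *mtd,
                                     struct nand_chip *this,
                                     struct nand_bbt_descr *bd)
{
        bd->options = NAND_BBT_SCAN2NDPAGE;
        /* Large-page devices use a two-byte marker at the large-page offset. */
        bd->offs = (mtd->writesize > 512) ? NAND_LARGE_BADBLOCK_POS
                                          : NAND_SMALL_BADBLOCK_POS;
        bd->len  = (mtd->writesize > 512) ? 2 : 1;
        bd->pattern = scan_ff_pattern;

        this->badblock_pattern = bd;
}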
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 893d95b..357e8c5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2383,7 +2383,9 @@ static int __init ns_init_module(void)
goto err_exit;
/* Register NAND partitions */
- if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
+ retval = mtd_device_register(nsmtd, &nand->partitions[0],
+ nand->nbparts);
+ if (retval != 0)
goto err_exit;
return 0;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index bbe6d45..ea2dea8 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -33,6 +33,7 @@
#include <linux/of_platform.h>
#include <asm/io.h>
+#define NDFC_MAX_CS 4
struct ndfc_controller {
struct platform_device *ofdev;
@@ -41,17 +42,16 @@ struct ndfc_controller {
struct nand_chip chip;
int chip_select;
struct nand_hw_control ndfc_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
-static struct ndfc_controller ndfc_ctrl;
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
static void ndfc_select_chip(struct mtd_info *mtd, int chip)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *nchip = mtd->priv;
+ struct ndfc_controller *ndfc = nchip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
if (chip >= 0) {
@@ -64,7 +64,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
if (cmd == NAND_CMD_NONE)
return;
@@ -77,7 +78,8 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
static int ndfc_ready(struct mtd_info *mtd)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
}
@@ -85,7 +87,8 @@ static int ndfc_ready(struct mtd_info *mtd)
static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
ccr |= NDFC_CCR_RESET_ECC;
@@ -96,7 +99,8 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
static int ndfc_calculate_ecc(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t ecc;
uint8_t *p = (uint8_t *)&ecc;
@@ -119,7 +123,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
*/
static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -128,7 +133,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -137,7 +143,8 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -152,13 +159,11 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_types[] = { "cmdlinepart", NULL };
#else
static const char *part_types[] = { NULL };
#endif
-#endif
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
int ret;
@@ -179,6 +184,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->priv = ndfc;
ndfc->mtd.priv = chip;
ndfc->mtd.owner = THIS_MODULE;
@@ -198,25 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
&ndfc->parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
- else
-#endif
- ret = add_mtd_device(&ndfc->mtd);
+ ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
err:
of_node_put(flash_np);
@@ -227,15 +226,10 @@ err:
static int __devinit ndfc_probe(struct platform_device *ofdev)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct ndfc_controller *ndfc;
const __be32 *reg;
u32 ccr;
- int err, len;
-
- spin_lock_init(&ndfc->ndfc_control.lock);
- init_waitqueue_head(&ndfc->ndfc_control.wq);
- ndfc->ofdev = ofdev;
- dev_set_drvdata(&ofdev->dev, ndfc);
+ int err, len, cs;
/* Read the reg property to get the chip select */
reg = of_get_property(ofdev->dev.of_node, "reg", &len);
@@ -243,7 +237,20 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
- ndfc->chip_select = be32_to_cpu(reg[0]);
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ spin_lock_init(&ndfc->ndfc_control.lock);
+ init_waitqueue_head(&ndfc->ndfc_control.wq);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
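
The ndfc.c changes above replace the single static ndfc_ctrl with a small per-chip-select array (bounded by the new NDFC_MAX_CS) and make each callback recover its controller through the MTD private pointers instead of the global. Below is a sketch of that lookup chain, assuming the driver's own ndfc_controller type; example_to_ndfc() is an illustrative name.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

struct ndfc_controller;			/* defined in ndfc.c */

static inline struct ndfc_controller *example_to_ndfc(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;	/* probe sets ndfc->mtd.priv = chip */

	return chip->priv;			/* probe sets chip->priv = ndfc */
}
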
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a045a4a..b6a5c86 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -158,12 +158,7 @@ static int nomadik_nand_probe(struct platform_device *pdev)
goto err_unmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
-#else
- pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
platform_set_drvdata(pdev, host);
return 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 6eddf73..9c30a0b 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -321,8 +321,8 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
goto fail3;
}
- add_mtd_partitions(&(nuc900_nand->mtd), partitions,
- ARRAY_SIZE(partitions));
+ mtd_device_register(&(nuc900_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index da9a351..0db2c0e 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -94,9 +94,7 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
@@ -263,11 +261,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
+ omap_read_buf16(mtd, (u_char *)p, len);
else
- omap_read_buf8(mtd, buf, len);
+ omap_read_buf8(mtd, (u_char *)p, len);
} else {
- p = (u32 *) buf;
do {
r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
r_count = r_count >> 2;
@@ -293,7 +290,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
uint32_t w_count = 0;
int i = 0, ret = 0;
- u16 *p;
+ u16 *p = (u16 *)buf;
unsigned long tim, limit;
/* take care of subpage writes */
@@ -309,11 +306,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
+ omap_write_buf16(mtd, (u_char *)p, len);
else
- omap_write_buf8(mtd, buf, len);
+ omap_write_buf8(mtd, (u_char *)p, len);
} else {
- p = (u16 *) buf;
while (len) {
w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
w_count = w_count >> 1;
@@ -1073,9 +1069,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
*/
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
err = -ENXIO;
goto out_release_mem_region;
}
@@ -1101,15 +1097,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.layout = &omap_oobinfo;
}
-#ifdef CONFIG_MTD_PARTITIONS
+ /* second phase scan */
+ if (nand_scan_tail(&info->mtd)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- add_mtd_device(&info->mtd);
+ mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index da6e753..7794d06 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,9 +21,7 @@
#include <mach/hardware.h>
#include <plat/orion_nand.h>
-#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
@@ -83,10 +81,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *io_base;
int ret = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_part = 0;
-#endif
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
@@ -136,7 +132,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
goto no_dev;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
@@ -147,14 +142,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
partitions = board->parts;
}
- if (partitions && num_part > 0)
- ret = add_mtd_partitions(mtd, partitions, num_part);
- else
- ret = add_mtd_device(mtd);
-#else
- ret = add_mtd_device(mtd);
-#endif
-
+ ret = mtd_device_register(mtd, partitions, num_part);
if (ret) {
nand_release(mtd);
goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 20bfe5f..b1aa41b 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -163,7 +163,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
goto out_lpc;
}
- if (add_mtd_device(pasemi_nand_mtd)) {
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
err = -ENODEV;
goto out_lpc;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index caf5a73..633c04b 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,10 +21,8 @@ struct plat_nand_data {
struct nand_chip chip;
struct mtd_info mtd;
void __iomem *io_base;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
/*
@@ -101,13 +99,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (pdata->chip.part_probe_types) {
err = parse_mtd_partitions(&data->mtd,
pdata->chip.part_probe_types,
&data->parts, 0);
if (err > 0) {
- add_mtd_partitions(&data->mtd, data->parts, err);
+ mtd_device_register(&data->mtd, data->parts, err);
return 0;
}
}
@@ -115,11 +112,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
- err = add_mtd_partitions(&data->mtd, data->parts,
+ err = mtd_device_register(&data->mtd, data->parts,
pdata->chip.nr_partitions);
} else
-#endif
- err = add_mtd_device(&data->mtd);
+ err = mtd_device_register(&data->mtd, NULL, 0);
if (!err)
return err;
@@ -149,10 +145,8 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
-#endif
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index cc86584..3bbb796 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -73,7 +73,6 @@ __setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
#endif
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash devices
*/
@@ -101,7 +100,6 @@ static struct mtd_partition partition_info_evb[] = {
#define NUM_PARTITIONS 1
extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-#endif
/*
* hardware specific access to control-lines
@@ -189,10 +187,8 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
}
#endif
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
const char *part_probes_evb[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -284,14 +280,13 @@ static int __init ppchameleonevb_init(void)
this->chip_delay = NAND_SMALL_DELAY_US;
#endif
-#ifdef CONFIG_MTD_PARTITIONS
ppchameleon_mtd->name = "ppchameleon-nand";
mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
mtd_parts = partition_info_me;
@@ -303,7 +298,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
nand_evb_init:
/****************************
@@ -385,14 +380,14 @@ static int __init ppchameleonevb_init(void)
iounmap(ppchameleon_fio_base);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
+
ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
mtd_parts = partition_info_evb;
mtd_parts_nb = NUM_PARTITIONS;
@@ -401,7 +396,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ff07012..1fb3b3a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1119,10 +1119,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
clk_put(info->clk);
if (mtd) {
- del_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mtd);
-#endif
+ mtd_device_unregister(mtd);
kfree(mtd);
}
return 0;
@@ -1149,7 +1146,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1158,13 +1154,10 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (nr_parts)
- return add_mtd_partitions(info->mtd, parts, nr_parts);
+ return mtd_device_register(info->mtd, parts, nr_parts);
}
- return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
-#else
- return 0;
-#endif
+ return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
}
#ifdef CONFIG_PM
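
The pxa3xx_nand_remove() hunk above shows the matching teardown side of the conversion: the del_mtd_device()/del_mtd_partitions() pair gives way to one mtd_device_unregister() call, with no CONFIG_MTD_PARTITIONS branching. A minimal sketch, with example_remove() as an illustrative name:

#include <linux/mtd/mtd.h>

static void example_remove(struct mtd_info *mtd)
{
	/* replaces del_mtd_partitions()/del_mtd_device() */
	mtd_device_unregister(mtd);

	/* driver-specific cleanup (unmap, clk_put, kfree, ...) follows as before */
}
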
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 67440b5..c9f9127 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -580,7 +580,8 @@ static int __init rtc_from4_init(void)
#endif
/* Register the partitions */
- ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+ ret = mtd_device_register(rtc_from4_mtd, partition_info,
+ NUM_PARTITIONS);
if (ret)
goto err_3;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 33d832d..4405468 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -55,7 +55,7 @@ static int hardware_ecc = 0;
#endif
#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static int clock_stop = 1;
+static const int clock_stop = 1;
#else
static const int clock_stop = 0;
#endif
@@ -96,6 +96,12 @@ enum s3c_cpu_type {
TYPE_S3C2440,
};
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
/* overview of the s3c2410 nand state */
/**
@@ -111,6 +117,7 @@ enum s3c_cpu_type {
* @mtd_count: The number of MTDs created from this controller.
* @save_sel: The contents of @sel_reg to be saved over suspend.
* @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
*/
struct s3c2410_nand_info {
@@ -129,6 +136,7 @@ struct s3c2410_nand_info {
int mtd_count;
unsigned long save_sel;
unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
enum s3c_cpu_type cpu_type;
@@ -159,11 +167,33 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
return dev->dev.platform_data;
}
-static inline int allow_clk_stop(struct s3c2410_nand_info *info)
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
return clock_stop;
}
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
/* timing calculations */
#define NS_IN_KHZ 1000000
@@ -333,8 +363,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
nmtd = this->priv;
info = nmtd->info;
- if (chip != -1 && allow_clk_stop(info))
- clk_enable(info->clk);
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
cur = readl(info->sel_reg);
@@ -356,8 +386,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
writel(cur, info->sel_reg);
- if (chip == -1 && allow_clk_stop(info))
- clk_disable(info->clk);
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
/* s3c2410_nand_hwcontrol
@@ -694,8 +724,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
/* free the common resources */
if (info->clk != NULL && !IS_ERR(info->clk)) {
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
clk_put(info->clk);
}
@@ -715,7 +744,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
@@ -725,7 +753,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
int nr_part = 0;
if (set == NULL)
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, NULL, 0);
mtd->mtd.name = set->name;
nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
@@ -735,19 +763,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
part_info = set->partitions;
}
- if (nr_part > 0 && part_info)
- return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
-
- return add_mtd_device(&mtd->mtd);
-}
-#else
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *mtd,
- struct s3c2410_nand_set *set)
-{
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, part_info, nr_part);
}
-#endif
/**
* s3c2410_nand_init_chip - initialise a single instance of an chip
@@ -947,7 +964,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
/* allocate and map the resource */
@@ -1026,9 +1043,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- if (allow_clk_stop(info)) {
+ if (allow_clk_suspend(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
pr_debug("initialised ok\n");
@@ -1059,8 +1076,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
writel(info->save_sel | info->sel_bit, info->sel_reg);
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
}
return 0;
@@ -1072,7 +1088,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
unsigned long sel;
if (info) {
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
s3c2410_nand_inithw(info);
/* Restore the state of the nFCE line. */
@@ -1082,8 +1098,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
sel |= info->save_sel & info->sel_bit;
writel(sel, info->sel_reg);
- if (allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
return 0;
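
The s3c2410.c changes above fold the scattered allow_clk_stop()/clk_enable()/clk_disable() logic into s3c2410_nand_clk_set_state(), which only touches the clock when crossing into or out of CLOCK_ENABLE and ignores CLOCK_SUSPEND requests when clock stopping is not allowed. A stand-alone model of that transition rule is below; clk_on()/clk_off() stand in for the real clk API and the example is illustrative, not driver code.

/* Stand-alone model of the transition rule in s3c2410_nand_clk_set_state(). */
#include <stdbool.h>
#include <stdio.h>

enum clk_state { CLOCK_DISABLE, CLOCK_ENABLE, CLOCK_SUSPEND };

static void clk_on(void)  { puts("clk_enable");  }
static void clk_off(void) { puts("clk_disable"); }

static void set_state(enum clk_state *cur, enum clk_state new_state,
		      bool allow_suspend)
{
	if (!allow_suspend && new_state == CLOCK_SUSPEND)
		return;				/* keep the clock running */

	if (*cur == CLOCK_ENABLE) {
		if (new_state != CLOCK_ENABLE)
			clk_off();		/* leaving ENABLE: gate the clock */
	} else {
		if (new_state == CLOCK_ENABLE)
			clk_on();		/* entering ENABLE: ungate it */
	}
	*cur = new_state;
}

int main(void)
{
	enum clk_state s = CLOCK_DISABLE;

	set_state(&s, CLOCK_ENABLE, true);	/* clk_enable            */
	set_state(&s, CLOCK_SUSPEND, true);	/* clk_disable (idle)    */
	set_state(&s, CLOCK_ENABLE, true);	/* clk_enable again      */
	set_state(&s, CLOCK_DISABLE, true);	/* clk_disable (remove)  */
	return 0;
}
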
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 81bbb5e..93b1f74 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -867,7 +867,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err;
- add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 54ec754..19e24ed 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,9 +103,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
return readb(sharpsl->io + ECCCNTR) != 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -113,10 +111,8 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *sharpsl_partition_info;
int nr_partitions;
-#endif
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
@@ -188,18 +184,14 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
-#ifdef CONFIG_MTD_PARTITIONS
nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
if (nr_partitions <= 0) {
nr_partitions = data->nr_partitions;
sharpsl_partition_info = data->partitions;
}
- if (nr_partitions > 0)
- err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
- else
-#endif
- err = add_mtd_device(&sharpsl->mtd);
+ err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
+ nr_partitions);
if (err)
goto err_add;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 57cc80c..b6332e8 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -139,7 +139,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
if (ret)
return ret;
- return add_mtd_device(mtd);
+ return mtd_device_register(mtd, NULL, 0);
}
EXPORT_SYMBOL_GPL(sm_register_device);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a853548..ca2d055 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,9 +155,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Probe for the NAND device.
@@ -168,11 +166,8 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
/* Allocate memory for the device structure (and zero it) */
host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -230,7 +225,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partitions, 0);
@@ -240,7 +234,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
}
#endif
-#ifdef CONFIG_MTD_OF_PARTS
if (num_partitions == 0) {
num_partitions = of_mtd_parse_partitions(&ofdev->dev,
ofdev->dev.of_node,
@@ -250,19 +243,12 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto release;
}
}
-#endif
- if (partitions && (num_partitions > 0))
- res = add_mtd_partitions(mtd, partitions, num_partitions);
- else
-#endif
- res = add_mtd_device(mtd);
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
release:
-#endif
nand_release(mtd);
out:
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 0cc6d0a..bef76cd 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -149,7 +149,7 @@ static int __init spia_init(void)
}
/* Register the partitions */
- add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index c004e47..11e8371 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -381,10 +381,8 @@ static int tmio_probe(struct platform_device *dev)
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nbparts = 0;
-#endif
int retval;
if (data == NULL)
@@ -463,7 +461,6 @@ static int tmio_probe(struct platform_device *dev)
goto err_scan;
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
#endif
@@ -472,12 +469,7 @@ static int tmio_probe(struct platform_device *dev)
nbparts = data->num_partitions;
}
- if (nbparts)
- retval = add_mtd_partitions(mtd, parts, nbparts);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, nbparts);
if (!retval)
return retval;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ca270a4..bfba4e3 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,9 +74,7 @@ struct txx9ndfmc_drvdata {
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_hw_control hw_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
-#endif
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -289,9 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *probes[] = { "cmdlinepart", NULL };
-#endif
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
@@ -337,9 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
-#endif
if (!(plat->ch_mask & (1 << i)))
continue;
@@ -399,13 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mtd, probes,
&drvdata->parts[i], 0);
- if (nr_parts > 0)
- add_mtd_partitions(mtd, drvdata->parts[i], nr_parts);
-#endif
- add_mtd_device(mtd);
+ mtd_device_register(mtd, drvdata->parts[i], nr_parts);
drvdata->mtds[i] = mtd;
}
@@ -431,9 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
txx9_priv = chip->priv;
nand_release(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
kfree(drvdata->parts[i]);
-#endif
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4f42619..772ad29 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
- select MTD_PARTITIONS
help
This enables support for accessing all type of OneNAND flash
devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ac08750..2d70d35 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,9 +30,7 @@
*/
#define DRIVER_NAME "onenand-flash"
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
struct onenand_info {
struct mtd_info mtd;
@@ -75,15 +73,13 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(&info->mtd);
+ err = mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, info);
@@ -108,11 +104,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (info) {
- if (info->parts)
- del_mtd_partitions(&info->mtd);
- else
- del_mtd_device(&info->mtd);
-
+ mtd_device_unregister(&info->mtd);
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1fcb41a..a916dec 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -67,9 +67,7 @@ struct omap2_onenand {
struct regulator *regulator;
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
@@ -755,15 +753,13 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
-#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
- r = add_mtd_partitions(&c->mtd, c->parts, r);
+ r = mtd_device_register(&c->mtd, c->parts, r);
else if (pdata->parts != NULL)
- r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
+ r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- r = add_mtd_device(&c->mtd);
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 56a8b20..ac9e959 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -65,11 +65,11 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
" : 2 -> 1st Block lock"
" : 3 -> BOTH OTP Block and 1st Block lock");
-/**
- * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
- * For now, we expose only 64 out of 80 ecc bytes
+/*
+ * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
*/
-static struct nand_ecclayout onenand_oob_128 = {
+static struct nand_ecclayout flexonenand_oob_128 = {
.eccbytes = 64,
.eccpos = {
6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -86,6 +86,35 @@ static struct nand_ecclayout onenand_oob_128 = {
}
};
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
+ *
+ * oobfree uses the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119
+ },
+ .oobfree = {
+ {2, 3}, {18, 3}, {34, 3}, {50, 3},
+ {66, 3}, {82, 3}, {98, 3}, {114, 3}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -2424,7 +2453,7 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
len -= block_size;
addr += block_size;
- if (addr == region_end) {
+ if (region && addr == region_end) {
if (!len)
break;
region++;
@@ -4018,8 +4047,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
*/
switch (mtd->oobsize) {
case 128:
- this->ecclayout = &onenand_oob_128;
- mtd->subpage_sft = 0;
+ if (FLEXONENAND(this)) {
+ this->ecclayout = &flexonenand_oob_128;
+ mtd->subpage_sft = 0;
+ } else {
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 2;
+ }
break;
case 64:
this->ecclayout = &onenand_oob_64;
@@ -4108,12 +4142,8 @@ void onenand_release(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
-#ifdef CONFIG_MTD_PARTITIONS
/* Deregister partitions */
- del_mtd_partitions (mtd);
-#endif
- /* Deregister the device */
- del_mtd_device (mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory, if allocated */
if (this->bbm) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 5ef3bd5..85399e3 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -539,7 +539,8 @@ static int __init onenand_sim_init(void)
return -ENXIO;
}
- add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions));
+ mtd_device_register(&info->mtd, info->parts,
+ ARRAY_SIZE(os_partitions));
return 0;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a4c74a9..3306b5b 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,9 +147,7 @@ struct s3c_onenand {
struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -159,9 +157,7 @@ struct s3c_onenand {
static struct s3c_onenand *onenand;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static inline int s3c_read_reg(int offset)
{
@@ -1021,15 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
if (err > 0)
- add_mtd_partitions(mtd, onenand->parts, err);
+ mtd_device_register(mtd, onenand->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 9aa8158..941bc3c 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -365,7 +365,7 @@ static int gluebi_create(struct ubi_device_info *di,
vi->vol_id);
mutex_unlock(&devices_mutex);
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
err_msg("cannot add MTD device");
kfree(mtd->name);
kfree(gluebi);
@@ -407,7 +407,7 @@ static int gluebi_remove(struct ubi_volume_info *vi)
return err;
mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err) {
err_msg("cannot remove fake MTD device %d, UBI device %d, "
"volume %d, error %d", mtd->index, gluebi->ubi_num,
@@ -524,7 +524,7 @@ static void __exit ubi_gluebi_exit(void)
int err;
struct mtd_info *mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err)
err_msg("error %d while removing gluebi MTD device %d, "
"UBI device %d, volume %d - ignoring", err,
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index e646bfc..b630448 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
int rc;
for (;;) {
- rc = del_mtd_device(&part->mtd);
+ rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
part->mtd.write = efx_mtd->ops->write;
part->mtd.sync = efx_mtd_sync;
- if (add_mtd_device(&part->mtd))
+ if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
}
@@ -280,7 +280,7 @@ fail:
--part;
efx_mtd_remove_partition(part);
}
- /* add_mtd_device() returns 1 if the MTD table is full */
+ /* mtd_device_register() returns 1 if the MTD table is full */
return -ENOMEM;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0cb0b06..f685324 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8b63a69..65200af 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -670,7 +670,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
- if (depth != 1 ||
+ if (depth != 1 || !data ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
@@ -679,16 +679,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+ strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", cmd_line);
+ pr_debug("Command line is: %s\n", (char*)data);
/* break now */
return 1;
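
The fdt.c hunk above makes early_init_dt_scan_chosen() write the chosen node's bootargs into the buffer supplied through its data argument instead of the global cmd_line, so each architecture can pass in its own command-line storage. Below is a sketch of the caller side, modelled on how arch setup code ends up invoking the scan; example_scan_chosen() is an illustrative wrapper, not a quote of any in-tree caller.

#include <linux/init.h>		/* boot_command_line */
#include <linux/of_fdt.h>	/* of_scan_flat_dt(), early_init_dt_scan_chosen() */

static void __init example_scan_chosen(void)
{
	/* early_init_dt_scan_chosen() copies bootargs into the buffer we pass */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
}
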
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749..a8d5bb3 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
#define EVENT_BUFFER_H
#include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
int alloc_event_buffer(void);
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64..dccd863 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
#include "oprof.h"
#include "event_buffer.h"
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 12e02bf..3dc9bef 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
{
#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
- /*
- * for now we will disable dma-remapping when interrupt
- * remapping is enabled.
- * When support for queued invalidation for IOTLB invalidation
- * is added, we will not need this any more.
- */
+
dmar = (struct acpi_table_dmar *) dmar_tbl;
if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6af6b62..59f17ac 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -47,6 +47,8 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+ ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
return (pfn + level_size(level) - 1) & level_mask(level);
}
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+ return 1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -143,6 +150,12 @@ static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
+ * set to 1 to panic kernel if can't successfully enable VT-d
+ * (used when kernel is launched w/ TXT)
+ */
+static int force_on = 0;
+
+/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
@@ -338,6 +351,9 @@ struct dmar_domain {
int iommu_coherency;/* indicate coherency of iommu access */
int iommu_snooping; /* indicate snooping control feature*/
int iommu_count; /* reference count of iommu */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
};
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
+static int intel_iommu_superpage = 1;
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable batched IOTLB flush\n");
intel_iommu_strict = 1;
+ } else if (!strncmp(str, "sp_off", 6)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable supported super page\n");
+ intel_iommu_superpage = 0;
}
str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
}
}
+static void domain_update_iommu_superpage(struct dmar_domain *domain)
+{
+ int i, mask = 0xf;
+
+ if (!intel_iommu_superpage) {
+ domain->iommu_superpage = 0;
+ return;
+ }
+
+ domain->iommu_superpage = 4; /* 1TiB */
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ mask |= cap_super_page_val(g_iommus[i]->cap);
+ if (!mask) {
+ break;
+ }
+ }
+ domain->iommu_superpage = fls(mask);
+}
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain_update_iommu_snooping(domain);
+ domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
@@ -689,23 +731,31 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn)
+ unsigned long pfn, int large_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
- int offset;
+ int offset, target_level;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
+ /* Search pte */
+ if (!large_level)
+ target_level = 1;
+ else
+ target_level = large_level;
+
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (level == 1)
+ if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+ break;
+ if (level == target_level)
break;
if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
+
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
- int level)
+ int level, int *large_page)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
if (level == total)
return pte;
- if (!dma_pte_present(pte))
+ if (!dma_pte_present(pte)) {
+ *large_page = total;
break;
+ }
+
+ if (pte->val & DMA_PTE_LARGE_PAGE) {
+ *large_page = total;
+ return pte;
+ }
+
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
/* we don't need lock here; nobody else touches the iova range */
do {
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ large_page = 1;
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, 2);
+ start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
- do {
+ do {
dma_clear_pte(pte);
- start_pfn++;
+ start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int total = agaw_to_level(domain->agaw);
int level;
unsigned long tmp;
+ int large_page = 2;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
return;
do {
- first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ large_page = level;
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+ if (large_page > level)
+ level = large_page + 1;
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_snooping = 0;
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
domain->iommu_count = 1;
domain->nid = iommu->node;
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
if (!domain)
return;
+ /* Flush any lazy unmaps that may reference this domain */
+ if (!intel_iommu_strict)
+ flush_unmaps_timeout(0);
+
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
+/* Return largest possible superpage level for a given mapping */
+static inline int hardware_largepage_caps(struct dmar_domain *domain,
+ unsigned long iov_pfn,
+ unsigned long phy_pfn,
+ unsigned long pages)
+{
+ int support, level = 1;
+ unsigned long pfnmerge;
+
+ support = domain->iommu_superpage;
+
+ /* To use a large page, the virtual *and* physical addresses
+ must be aligned to 2MiB/1GiB/etc. Lower bits set in either
+ of them will mean we have to use smaller pages. So just
+ merge them and check both at once. */
+ pfnmerge = iov_pfn | phy_pfn;
+
+ while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
+ pages >>= VTD_STRIDE_SHIFT;
+ if (!pages)
+ break;
+ pfnmerge >>= VTD_STRIDE_SHIFT;
+ level++;
+ support--;
+ }
+ return level;
+}
+
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
phys_addr_t uninitialized_var(pteval);
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned long sg_res;
+ unsigned int largepage_lvl = 0;
+ unsigned long lvl_pages = 0;
BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
- while (nr_pages--) {
+ while (nr_pages > 0) {
uint64_t tmp;
if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
+ phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
+
if (!pte) {
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
if (!pte)
return -ENOMEM;
+ /* It is a large page */
+ if (largepage_lvl > 1)
+ pteval |= DMA_PTE_LARGE_PAGE;
+ else
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+
}
/* We don't need lock here, nobody else
* touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
WARN_ON(1);
}
+
+ lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+ BUG_ON(nr_pages < lvl_pages);
+ BUG_ON(sg_res < lvl_pages);
+
+ nr_pages -= lvl_pages;
+ iov_pfn += lvl_pages;
+ phys_pfn += lvl_pages;
+ pteval += lvl_pages * VTD_PAGE_SIZE;
+ sg_res -= lvl_pages;
+
+ /* If the next PTE would be the first in a new page, then we
+ need to flush the cache on the entries we've just written.
+ And then we'll need to recalculate 'pte', so clear it and
+ let it get set again in the if (!pte) block above.
+
+ If we're done (!nr_pages) we need to flush the cache too.
+
+ Also if we've been setting superpages, we may need to
+ recalculate 'pte' and switch back to smaller pages for the
+ end of the mapping, if the trailing size is not enough to
+ use another superpage (i.e. sg_res < lvl_pages). */
pte++;
- if (!nr_pages || first_pte_in_page(pte)) {
+ if (!nr_pages || first_pte_in_page(pte) ||
+ (largepage_lvl > 1 && sg_res < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
- iov_pfn++;
- pteval += VTD_PAGE_SIZE;
- sg_res--;
- if (!sg_res)
+
+ if (!sg_res && nr_pages)
sg = sg_next(sg);
}
return 0;
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(pdev, rmrr->base_address,
- rmrr->end_address + 1);
+ rmrr->end_address);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
return;
printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
if (ret)
printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
if (likely(!iommu_identity_mapping))
return 0;
+ info = pdev->dev.archdata.iommu;
+ if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+ return (info->domain == si_domain);
- list_for_each_entry(info, &si_domain->devices, link)
- if (info->dev == pdev)
- return 1;
return 0;
}
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* Assume that they will -- if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
- if (!startup)
- return pdev->dma_mask > DMA_BIT_MASK(32);
+ if (!startup) {
+ /*
+ * If the device's dma_mask is less than the system's memory
+ * size then this is not a candidate for identity mapping.
+ */
+ u64 dma_mask = pdev->dma_mask;
+
+ if (pdev->dev.coherent_dma_mask &&
+ pdev->dev.coherent_dma_mask < dma_mask)
+ dma_mask = pdev->dev.coherent_dma_mask;
+
+ return dma_mask >= dma_get_required_mask(&pdev->dev);
+ }
return 1;
}
@@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return -EFAULT;
for_each_pci_dev(pdev) {
+ /* Skip Host/PCI Bridge devices */
+ if (IS_BRIDGE_HOST_DEVICE(pdev))
+ continue;
if (iommu_should_identity_map(pdev, 1)) {
printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
hw ? "hardware" : "software", pci_name(pdev));
@@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
-static int __init init_dmars(int force_on)
+static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova)
goto error;
@@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
if (iommu->qi)
dmar_reenable_qi(iommu);
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ continue;
+ }
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
+ if (iommu_enable_translation(iommu))
+ return 1;
iommu_disable_protect_mem_regions(iommu);
}
@@ -3194,7 +3350,10 @@ static void iommu_resume(void)
unsigned long flag;
if (init_iommu_hw()) {
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+ if (force_on)
+ panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
+ else
+ WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return;
}
@@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
- int force_on = 0;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
init_no_remapping_devices();
- ret = init_dmars(force_on);
+ ret = init_dmars();
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
- /* No need to compare PCI domain; it has to be the same */
- if (info->bus == pdev->bus->number &&
+ if (info->segment == pci_domain_nr(pdev->bus) &&
+ info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
list_del(&info->global);
@@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
- spin_lock_irqsave(&iommu->lock, tmp_flags);
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
domain->max_addr = 0;
domain->nid = -1;
@@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
if (pte)
phys = dma_pte_addr(pte);
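
hardware_largepage_caps(), added in the intel-iommu.c hunks above, picks the superpage level by OR-ing the IOVA and physical PFNs so a single alignment test covers both, then climbs one level for every nine clear low bits while enough pages remain and the hardware advertises support. A stand-alone illustration of that arithmetic follows, assuming the VT-d 9-bit stride (512 entries per table level); the macro and function names are local to the example.

/*
 * Stand-alone illustration of the level calculation in
 * hardware_largepage_caps(): level 1 = 4KiB PTEs, each extra level needs
 * 9 more clear low bits in (iov_pfn | phy_pfn) and at least 512x as many
 * pages, up to what the hardware supports.
 */
#include <stdio.h>

#define STRIDE_SHIFT	9
#define STRIDE_MASK	((1UL << STRIDE_SHIFT) - 1)	/* low 9 bits */

static int largepage_level(unsigned long iov_pfn, unsigned long phy_pfn,
			   unsigned long pages, int hw_support)
{
	unsigned long pfnmerge = iov_pfn | phy_pfn;
	int level = 1;

	while (hw_support && !(pfnmerge & STRIDE_MASK)) {
		pages >>= STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= STRIDE_SHIFT;
		level++;
		hw_support--;
	}
	return level;
}

int main(void)
{
	/* 2MiB-aligned IOVA and phys, 1024 pages -> level 2 (2MiB superpage) */
	printf("%d\n", largepage_level(0x200, 0x400, 1024, 4));
	return 0;
}
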
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 9606e59..c5c274a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
curr = iovad->cached32_node;
cached_iova = container_of(curr, struct iova, node);
- if (free->pfn_lo >= cached_iova->pfn_lo)
- iovad->cached32_node = rb_next(&free->node);
+ if (free->pfn_lo >= cached_iova->pfn_lo) {
+ struct rb_node *node = rb_next(&free->node);
+ struct iova *iova = container_of(node, struct iova, node);
+
+ /* only cache if it's below 32bit pfn */
+ if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
+ iovad->cached32_node = node;
+ else
+ iovad->cached32_node = NULL;
+ }
}
/* Computes the padding size required, to make the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e..d36f41e 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D2;
case ACPI_STATE_D3:
return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5cb999b..45e0191 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -39,7 +39,7 @@ config ACER_WMI
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on THERMAL && THERMAL_HWMON && ACPI
+ depends on THERMAL && ACPI
---help---
This is a driver for Acer Aspire One netbooks. It allows to access
the temperature sensor and to control the fan.
@@ -760,4 +760,13 @@ config MXM_WMI
MXM is a standard for laptop graphics cards, the WMI interface
is required for switchable nvidia graphics machines
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ ---help---
+ Intel Oaktrail platforms need this driver to provide interfaces to
+ enable/disable the camera, WiFi, Bluetooth, etc. devices. If in
+ doubt, say Y here; it will only load on supported platforms.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a7ab3bc..afc1f83 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ac4e7f83..005417b 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -98,13 +98,26 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+ {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} },
+ {KE_IGNORE, 0x45, {KEY_STOP} },
+ {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
+ {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
+ {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
+ {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+ {KE_IGNORE, 0x81, {KEY_SLEEP} },
{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
{KE_END, 0}
};
@@ -122,6 +135,7 @@ struct event_return_value {
*/
#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
+#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
struct lm_input_params {
@@ -737,8 +751,11 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
obj = (union acpi_object *) result.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
} else {
tmp = 0;
}
@@ -866,8 +883,11 @@ static acpi_status WMID_set_capabilities(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
} else {
kfree(out.pointer);
return AE_ERROR;
@@ -876,7 +896,8 @@ static acpi_status WMID_set_capabilities(void)
dmi_walk(type_aa_dmi_decode, NULL);
if (!has_type_aa) {
interface->capability |= ACER_CAP_WIRELESS;
- interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x40)
+ interface->capability |= ACER_CAP_THREEG;
if (devices & 0x10)
interface->capability |= ACER_CAP_BLUETOOTH;
}
@@ -961,10 +982,12 @@ static void __init acer_commandline_init(void)
* These will all fail silently if the value given is invalid, or the
* capability isn't available on the given interface
*/
- set_u32(mailled, ACER_CAP_MAILLED);
- if (!has_type_aa)
+ if (mailled >= 0)
+ set_u32(mailled, ACER_CAP_MAILLED);
+ if (!has_type_aa && threeg >= 0)
set_u32(threeg, ACER_CAP_THREEG);
- set_u32(brightness, ACER_CAP_BRIGHTNESS);
+ if (brightness >= 0)
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
}
/*
@@ -1081,7 +1104,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
return AE_ERROR;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1090,8 +1113,8 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
kfree(obj);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Get Device Status failed: "
- "0x%x - 0x%x\n", return_value.error_code,
+ pr_warn("Get Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
return_value.ec_return_value);
else
*value = !!(return_value.devices & device);
@@ -1124,6 +1147,114 @@ static acpi_status get_device_status(u32 *value, u32 cap)
}
}
+static acpi_status wmid3_set_device_status(u32 value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ u16 devices;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = ACER_WMID3_GDS_WIRELESS |
+ ACER_WMID3_GDS_THREEG |
+ ACER_WMID3_GDS_WIMAX |
+ ACER_WMID3_GDS_BLUETOOTH,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value) {
+ pr_warn("Get Current Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+ return status;
+ }
+
+ devices = return_value.devices;
+ params.function_num = 0x2;
+ params.hotkey_number = 0x01;
+ params.devices = (value) ? (devices | device) : (devices & ~device);
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output2.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Set Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static acpi_status set_device_status(u32 value, u32 cap)
+{
+ if (wmi_has_guid(WMID_GUID3)) {
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_set_device_status(value, device);
+
+ } else {
+ return set_u32(value, cap);
+ }
+}
+
/*
* Rfkill devices
*/
@@ -1160,7 +1291,7 @@ static int acer_rfkill_set(void *data, bool blocked)
u32 cap = (unsigned long)data;
if (rfkill_inited) {
- status = set_u32(!blocked, cap);
+ status = set_device_status(!blocked, cap);
if (ACPI_FAILURE(status))
return -ENODEV;
}
@@ -1317,7 +1448,7 @@ static void acer_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_warning("bad event status 0x%x\n", status);
+ pr_warn("bad event status 0x%x\n", status);
return;
}
@@ -1326,12 +1457,12 @@ static void acer_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- pr_warning("Unknown response received %d\n", obj->type);
+ pr_warn("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -1343,7 +1474,7 @@ static void acer_wmi_notify(u32 value, void *context)
case WMID_HOTKEY_EVENT:
if (return_value.device_state) {
u16 device_state = return_value.device_state;
- pr_debug("deivces states: 0x%x\n", device_state);
+ pr_debug("device state: 0x%x\n", device_state);
if (has_cap(ACER_CAP_WIRELESS))
rfkill_set_sw_state(wireless_rfkill,
!(device_state & ACER_WMID3_GDS_WIRELESS));
@@ -1356,11 +1487,11 @@ static void acer_wmi_notify(u32 value, void *context)
}
if (!sparse_keymap_report_event(acer_wmi_input_dev,
return_value.key_num, 1, true))
- pr_warning("Unknown key number - 0x%x\n",
+ pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
break;
default:
- pr_warning("Unknown function number - %d - %d\n",
+ pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
break;
}
@@ -1389,7 +1520,7 @@ wmid3_set_lm_mode(struct lm_input_params *params,
return AE_ERROR;
}
if (obj->buffer.length != 4) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1414,11 +1545,11 @@ static int acer_wmi_enable_ec_raw(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling EC raw mode failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
else
- pr_info("Enabled EC raw mode");
+ pr_info("Enabled EC raw mode\n");
return status;
}
@@ -1437,9 +1568,9 @@ static int acer_wmi_enable_lm(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling Launch Manager failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
return status;
}
@@ -1506,8 +1637,11 @@ static u32 get_wmid_devices(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
}
kfree(out.pointer);
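Three hunks above (WMI_execute_u32, WMID_set_capabilities and get_wmid_devices) grow the same decode pattern: accept a 4- or 8-byte ACPI buffer, or a bare integer, and take the low 32 bits. A hedged helper that factors the pattern out is sketched below; the name acer_wmi_obj_to_u32 is hypothetical and not part of the driver, and it keeps the NULL and type checks in one place:

static bool acer_wmi_obj_to_u32(const union acpi_object *obj, u32 *out)
{
	if (!obj)
		return false;

	if (obj->type == ACPI_TYPE_BUFFER &&
	    (obj->buffer.length == sizeof(u32) ||
	     obj->buffer.length == sizeof(u64))) {
		/* Some firmware returns a 64-bit buffer; only the low
		 * 32 bits carry the value. */
		*out = *((u32 *) obj->buffer.pointer);
		return true;
	}

	if (obj->type == ACPI_TYPE_INTEGER) {
		*out = (u32) obj->integer.value;
		return true;
	}

	return false;
}

Each call site would then reduce to a single acer_wmi_obj_to_u32(obj, &tmp) check with one error path.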
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 60f9cfc..fca3489 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -35,10 +35,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/fs.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/sched.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index c53b3ff..d65df92 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -318,7 +318,7 @@ static int acpi_check_handle(acpi_handle handle, const char *method,
if (status != AE_OK) {
if (ret)
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -383,7 +383,7 @@ static int asus_kled_lvl(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
&params, &kblv);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading kled level\n");
+ pr_warn("Error reading kled level\n");
return -ENODEV;
}
return kblv;
@@ -397,7 +397,7 @@ static int asus_kled_set(struct asus_laptop *asus, int kblv)
kblv = 0;
if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
- pr_warning("Keyboard LED display write failed\n");
+ pr_warn("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
@@ -531,7 +531,7 @@ static int asus_read_brightness(struct backlight_device *bd)
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
if (ACPI_FAILURE(rv))
- pr_warning("Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
return value;
}
@@ -541,7 +541,7 @@ static int asus_set_brightness(struct backlight_device *bd, int value)
struct asus_laptop *asus = bl_get_data(bd);
if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
- pr_warning("Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
return -EIO;
}
return 0;
@@ -730,7 +730,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
- pr_warning("LED display write failed\n");
+ pr_warn("LED display write failed\n");
return -ENODEV;
}
asus->ledd_status = (u32) value;
@@ -752,7 +752,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading Wireless status\n");
+ pr_warn("Error reading Wireless status\n");
return -EINVAL;
}
return !!(status & mask);
@@ -764,7 +764,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
static int asus_wlan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
- pr_warning("Error setting wlan status to %d", status);
+ pr_warn("Error setting wlan status to %d\n", status);
return -EIO;
}
return 0;
@@ -792,7 +792,7 @@ static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
static int asus_bluetooth_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
- pr_warning("Error setting bluetooth status to %d", status);
+ pr_warn("Error setting bluetooth status to %d\n", status);
return -EIO;
}
return 0;
@@ -821,7 +821,7 @@ static ssize_t store_bluetooth(struct device *dev,
static int asus_wimax_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
- pr_warning("Error setting wimax status to %d", status);
+ pr_warn("Error setting wimax status to %d\n", status);
return -EIO;
}
return 0;
@@ -850,7 +850,7 @@ static ssize_t store_wimax(struct device *dev,
static int asus_wwan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
- pr_warning("Error setting wwan status to %d", status);
+ pr_warn("Error setting wwan status to %d\n", status);
return -EIO;
}
return 0;
@@ -880,7 +880,7 @@ static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
- pr_warning("Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -909,7 +909,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
static void asus_als_switch(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
- pr_warning("Error setting light sensor switch\n");
+ pr_warn("Error setting light sensor switch\n");
asus->light_switch = value;
}
@@ -937,7 +937,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void asus_als_level(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
- pr_warning("Error setting light sensor level\n");
+ pr_warn("Error setting light sensor level\n");
asus->light_level = value;
}
@@ -976,7 +976,7 @@ static int asus_gps_status(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading GPS status\n");
+ pr_warn("Error reading GPS status\n");
return -ENODEV;
}
return !!status;
@@ -1284,7 +1284,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
- pr_warning("Couldn't get the DSDT table header\n");
+ pr_warn("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
@@ -1296,7 +1296,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
status =
acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- pr_warning("Error calling BSTS\n");
+ pr_warn("Error calling BSTS\n");
else if (bsts_result)
pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 832a3fd7..00460cb 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -425,7 +425,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->hotplug_slot) {
bus = pci_find_bus(0, 1);
if (!bus) {
- pr_warning("Unable to find PCI bus 1?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -436,12 +436,12 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
- blocked ? "blocked" : "unblocked",
- absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -500,7 +500,7 @@ static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
ACPI_SYSTEM_NOTIFY,
asus_rfkill_notify, asus);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -1223,7 +1223,7 @@ static int asus_wmi_sysfs_init(struct platform_device *device)
/*
* Platform device
*/
-static int __init asus_wmi_platform_init(struct asus_wmi *asus)
+static int asus_wmi_platform_init(struct asus_wmi *asus)
{
int rv;
@@ -1583,12 +1583,12 @@ static int asus_wmi_probe(struct platform_device *pdev)
int ret;
if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
- pr_warning("Management GUID not found\n");
+ pr_warn("Management GUID not found\n");
return -ENODEV;
}
if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
- pr_warning("Event GUID not found\n");
+ pr_warn("Event GUID not found\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index f503607..d9312b3 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -30,6 +30,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -581,8 +583,7 @@ static int read_led(const char *ledname, int ledmask)
if (read_acpi_int(NULL, ledname, &led_status))
return led_status;
else
- printk(KERN_WARNING "Asus ACPI: Error reading LED "
- "status\n");
+ pr_warn("Error reading LED status\n");
}
return (hotk->status & ledmask) ? 1 : 0;
}
@@ -621,8 +622,7 @@ write_led(const char __user *buffer, unsigned long count,
led_out = !led_out;
if (!write_acpi_int(hotk->handle, ledname, led_out, NULL))
- printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n",
- ledname);
+ pr_warn("LED (%s) write failed\n", ledname);
return rv;
}
@@ -679,8 +679,7 @@ static ssize_t ledd_proc_write(struct file *file, const char __user *buffer,
if (rv > 0) {
if (!write_acpi_int
(hotk->handle, hotk->methods->mt_ledd, value, NULL))
- printk(KERN_WARNING
- "Asus ACPI: LED display write failed\n");
+ pr_warn("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -838,8 +837,7 @@ static int get_lcd_state(void)
} else {
/* We don't have to check anything if we are here */
if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
- printk(KERN_WARNING
- "Asus ACPI: Error reading LCD status\n");
+ pr_warn("Error reading LCD status\n");
if (hotk->model == L2D)
lcd = ~lcd;
@@ -871,7 +869,7 @@ static int set_lcd_state(int value)
the exact behaviour is simulated here */
}
if (ACPI_FAILURE(status))
- printk(KERN_WARNING "Asus ACPI: Error switching LCD\n");
+ pr_warn("Error switching LCD\n");
}
return 0;
@@ -915,13 +913,11 @@ static int read_brightness(struct backlight_device *bd)
if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */
if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else if (hotk->methods->brightness_status) { /* For D1 for example */
if (!read_acpi_int(NULL, hotk->methods->brightness_status,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else /* No GPLV method */
value = hotk->brightness;
return value;
@@ -939,8 +935,7 @@ static int set_brightness(int value)
if (hotk->methods->brightness_set) {
if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
value, NULL)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
goto out;
@@ -955,8 +950,7 @@ static int set_brightness(int value)
NULL, NULL);
(value > 0) ? value-- : value++;
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
}
@@ -1008,7 +1002,7 @@ static void set_display(int value)
/* no sanity check needed for now */
if (!write_acpi_int(hotk->handle, hotk->methods->display_set,
value, NULL))
- printk(KERN_WARNING "Asus ACPI: Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -1021,8 +1015,7 @@ static int disp_proc_show(struct seq_file *m, void *v)
int value = 0;
if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading display status\n");
+ pr_warn("Error reading display status\n");
value &= 0x07; /* needed for some models, shouldn't hurt others */
seq_printf(m, "%d\n", value);
return 0;
@@ -1068,7 +1061,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
proc = proc_create_data(name, mode, acpi_device_dir(device),
proc_fops, acpi_driver_data(device));
if (!proc) {
- printk(KERN_WARNING " Unable to create %s fs entry\n", name);
+ pr_warn(" Unable to create %s fs entry\n", name);
return -1;
}
proc->uid = asus_uid;
@@ -1085,8 +1078,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
- printk(KERN_WARNING " asus_uid and asus_gid parameters are "
- "deprecated, use chown and chmod instead!\n");
+ pr_warn(" asus_uid and asus_gid parameters are "
+ "deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
@@ -1099,8 +1092,7 @@ static int asus_hotk_add_fs(struct acpi_device *device)
proc->uid = asus_uid;
proc->gid = asus_gid;
} else {
- printk(KERN_WARNING " Unable to create " PROC_INFO
- " fs entry\n");
+ pr_warn(" Unable to create " PROC_INFO " fs entry\n");
}
if (hotk->methods->mt_wled) {
@@ -1283,20 +1275,19 @@ static int asus_hotk_get_info(void)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Couldn't get the DSDT table header\n");
+ pr_warn(" Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(KERN_ERR " Hotkey initialization failed\n");
+ pr_err(" Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result))
- printk(KERN_WARNING " Error calling BSTS\n");
+ pr_warn(" Error calling BSTS\n");
else if (bsts_result)
- printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
- bsts_result);
+ pr_notice(" BSTS called, 0x%02x returned\n", bsts_result);
/*
* Try to match the object returned by INIT to the specific model.
@@ -1324,23 +1315,21 @@ static int asus_hotk_get_info(void)
if (asus_info &&
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
- printk(KERN_NOTICE
- " Samsung P30 detected, supported\n");
+ pr_notice(" Samsung P30 detected, supported\n");
hotk->methods = &model_conf[hotk->model];
kfree(model);
return 0;
} else {
hotk->model = M2E;
- printk(KERN_NOTICE " unsupported model %s, trying "
- "default values\n", string);
- printk(KERN_NOTICE
- " send /proc/acpi/dsdt to the developers\n");
+ pr_notice(" unsupported model %s, trying default values\n",
+ string);
+ pr_notice(" send /proc/acpi/dsdt to the developers\n");
kfree(model);
return -ENODEV;
}
}
hotk->methods = &model_conf[hotk->model];
- printk(KERN_NOTICE " %s model detected, supported\n", string);
+ pr_notice(" %s model detected, supported\n", string);
/* Sort of per-model blacklist */
if (strncmp(string, "L2B", 3) == 0)
@@ -1385,7 +1374,7 @@ static int asus_hotk_check(void)
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(KERN_ERR " Hotkey device not present, aborting\n");
+ pr_err(" Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1399,8 +1388,7 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_status status = AE_OK;
int result;
- printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
- ASUS_ACPI_VERSION);
+ pr_notice("Asus Laptop ACPI Extras version %s\n", ASUS_ACPI_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
if (!hotk)
@@ -1428,15 +1416,14 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_evaluate_object(NULL, hotk->methods->brightness_down,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Error changing brightness\n");
+ pr_warn(" Error changing brightness\n");
else {
status =
acpi_evaluate_object(NULL,
hotk->methods->brightness_up,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Strange, error changing"
- " brightness\n");
+ pr_warn(" Strange, error changing brightness\n");
}
}
@@ -1488,7 +1475,7 @@ static int __init asus_acpi_init(void)
asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
if (!asus_proc_dir) {
- printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ pr_err("Unable to create /proc entry\n");
acpi_bus_unregister_driver(&asus_hotk_driver);
return -ENODEV;
}
@@ -1513,7 +1500,7 @@ static int __init asus_acpi_init(void)
&asus_backlight_data,
&props);
if (IS_ERR(asus_backlight_device)) {
- printk(KERN_ERR "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
asus_acpi_exit();
return -ENODEV;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index c16a276..3f204fd 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -68,6 +68,8 @@
* only enabled on a JHL90 board until it is verified that they work on the
* other boards too. See the extra_features variable. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -200,8 +202,8 @@ static bool extra_features;
* watching the output of address 0x4F (do an ec_transaction writing 0x33
* into 0x4F and read a few bytes from the output, like so:
* u8 writeData = 0x33;
- * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0);
- * That address is labelled "fan1 table information" in the service manual.
+ * ec_transaction(0x4F, &writeData, 1, buffer, 32);
+ * That address is labeled "fan1 table information" in the service manual.
* It should be clear which value in 'buffer' changes). This seems to be
* related to fan speed. It isn't a proper 'realtime' fan speed value
* though, because physically stopping or speeding up the fan doesn't
@@ -286,7 +288,7 @@ static int get_backlight_level(void)
static void set_backlight_state(bool on)
{
u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
- ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0);
+ ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
}
@@ -294,24 +296,24 @@ static void set_backlight_state(bool on)
static void pwm_enable_control(void)
{
unsigned char writeData = PWM_ENABLE_DATA;
- ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
}
static void pwm_disable_control(void)
{
unsigned char writeData = PWM_DISABLE_DATA;
- ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
}
static void set_pwm(int pwm)
{
- ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0);
+ ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
}
static int get_fan_rpm(void)
{
u8 value, data = FAN_DATA;
- ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0);
+ ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
return 100 * (int)value;
}
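The hunks above drop the trailing argument from ec_transaction(), so the probe described in the earlier "fan1 table information" comment now takes five arguments. A sketch of that probe under the new signature; the function name compal_dump_fan_table is hypothetical and only illustrates the call sequence the comment describes:

static void compal_dump_fan_table(void)
{
	u8 writeData = 0x33;	/* value the comment says to write to 0x4F */
	u8 buffer[32];
	int i;

	ec_transaction(0x4F, &writeData, 1, buffer, 32);
	for (i = 0; i < 32; i++)
		pr_debug("fan1 table[%d] = 0x%02x\n", i, buffer[i]);
}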
@@ -760,16 +762,14 @@ static struct rfkill *bt_rfkill;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
extra_features = false;
return 1;
}
static int dmi_check_cb_extra(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s', "
- "enabling extra features\n",
+ pr_info("Identified laptop model '%s', enabling extra features\n",
id->ident);
extra_features = true;
return 1;
@@ -956,14 +956,12 @@ static int __init compal_init(void)
int ret;
if (acpi_disabled) {
- printk(KERN_ERR DRIVER_NAME": ACPI needs to be enabled for "
- "this driver to work!\n");
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
return -ENODEV;
}
if (!force && !dmi_check_system(compal_dmi_table)) {
- printk(KERN_ERR DRIVER_NAME": Motherboard not recognized (You "
- "could try the module's force-parameter)");
+ pr_err("Motherboard not recognized (You could try the module's force-parameter)\n");
return -ENODEV;
}
@@ -998,8 +996,7 @@ static int __init compal_init(void)
if (ret)
goto err_rfkill;
- printk(KERN_INFO DRIVER_NAME": Driver "DRIVER_VERSION
- " successfully loaded\n");
+ pr_info("Driver " DRIVER_VERSION " successfully loaded\n");
return 0;
err_rfkill:
@@ -1064,7 +1061,7 @@ static void __exit compal_cleanup(void)
rfkill_destroy(wifi_rfkill);
rfkill_destroy(bt_rfkill);
- printk(KERN_INFO DRIVER_NAME": Driver unloaded\n");
+ pr_info("Driver unloaded\n");
}
static int __devexit compal_remove(struct platform_device *pdev)
@@ -1074,8 +1071,7 @@ static int __devexit compal_remove(struct platform_device *pdev)
if (!extra_features)
return 0;
- printk(KERN_INFO DRIVER_NAME": Unloading: resetting fan control "
- "to motherboard\n");
+ pr_info("Unloading: resetting fan control to motherboard\n");
pwm_disable_control();
data = platform_get_drvdata(pdev);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index de301aa8..d3841de 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -434,8 +436,7 @@ static int __init dell_setup_rfkill(void)
int ret;
if (dmi_check_system(dell_blacklist)) {
- printk(KERN_INFO "dell-laptop: Blacklisted hardware detected - "
- "not enabling rfkill\n");
+ pr_info("Blacklisted hardware detected - not enabling rfkill\n");
return 0;
}
@@ -606,7 +607,7 @@ static int __init dell_init(void)
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
- printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
+ pr_info("Unable to find dmi tokens\n");
return -ENODEV;
}
@@ -636,14 +637,13 @@ static int __init dell_init(void)
ret = dell_setup_rfkill();
if (ret) {
- printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
+ pr_warn("Unable to setup rfkill\n");
goto fail_rfkill;
}
ret = i8042_install_filter(dell_laptop_i8042_filter);
if (ret) {
- printk(KERN_WARNING
- "dell-laptop: Unable to install key filter\n");
+ pr_warn("Unable to install key filter\n");
goto fail_filter;
}
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 0ed8457..3f94545 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -15,6 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -138,7 +139,7 @@ static int __init dell_wmi_aio_init(void)
guid = dell_wmi_aio_find();
if (!guid) {
- pr_warning("No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENXIO;
}
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 77f1d55..ce79082 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -141,7 +143,7 @@ static void dell_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -153,8 +155,8 @@ static void dell_wmi_notify(u32 value, void *context)
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
- printk(KERN_INFO "dell-wmi: Received unknown WMI event"
- " (0x%x)\n", buffer_entry[1]);
+ pr_info("Received unknown WMI event (0x%x)\n",
+ buffer_entry[1]);
kfree(obj);
return;
}
@@ -167,8 +169,7 @@ static void dell_wmi_notify(u32 value, void *context)
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
reported_key);
if (!key) {
- printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- reported_key);
+ pr_info("Unknown key %x pressed\n", reported_key);
} else if ((key->keycode == KEY_BRIGHTNESSUP ||
key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
/* Don't report brightness notifications that will also
@@ -275,7 +276,7 @@ static int __init dell_wmi_init(void)
acpi_status status;
if (!wmi_has_guid(DELL_EVENT_GUID)) {
- printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENODEV;
}
@@ -290,9 +291,7 @@ static int __init dell_wmi_init(void)
dell_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
dell_wmi_input_destroy();
- printk(KERN_ERR
- "dell-wmi: Unable to register notify handler - %d\n",
- status);
+ pr_err("Unable to register notify handler - %d\n", status);
return -ENODEV;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 2c1abf6..1c45d92 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -228,7 +228,7 @@ static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
return -ENODEV;
if (write_acpi_int(eeepc->handle, method, value))
- pr_warning("Error writing %s\n", method);
+ pr_warn("Error writing %s\n", method);
return 0;
}
@@ -243,7 +243,7 @@ static int get_acpi(struct eeepc_laptop *eeepc, int cm)
return -ENODEV;
if (read_acpi_int(eeepc->handle, method, &value))
- pr_warning("Error reading %s\n", method);
+ pr_warn("Error reading %s\n", method);
return value;
}
@@ -261,7 +261,7 @@ static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
status = acpi_get_handle(eeepc->handle, (char *)method,
handle);
if (status != AE_OK) {
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -417,7 +417,7 @@ static ssize_t store_cpufv_disabled(struct device *dev,
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
- pr_warning("cpufv enabled (not officially supported "
+ pr_warn("cpufv enabled (not officially supported "
"on this model)\n");
eeepc->cpufv_disabled = false;
return rv;
@@ -609,7 +609,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
bus = port->subordinate;
if (!bus) {
- pr_warning("Unable to find PCI bus?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -621,12 +621,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -691,7 +691,8 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
eeepc_rfkill_notify,
eeepc);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
+
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 649dcad..4aa867a 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -84,7 +84,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
- pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
*(bool *)context = true;
return AE_CTRL_TERMINATE;
}
@@ -105,12 +105,12 @@ static int eeepc_wmi_check_atkd(void)
static int eeepc_wmi_probe(struct platform_device *pdev)
{
if (eeepc_wmi_check_atkd()) {
- pr_warning("WMI device present, but legacy ATKD device is also "
- "present and enabled.");
- pr_warning("You probably booted with acpi_osi=\"Linux\" or "
- "acpi_osi=\"!Windows 2009\"");
- pr_warning("Can't load eeepc-wmi, use default acpi_osi "
- "(preferred) or eeepc-laptop");
+ pr_warn("WMI device present, but legacy ATKD device is also "
+ "present and enabled\n");
+ pr_warn("You probably booted with acpi_osi=\"Linux\" or "
+ "acpi_osi=\"!Windows 2009\"\n");
+ pr_warn("Can't load eeepc-wmi, use default acpi_osi "
+ "(preferred) or eeepc-laptop\n");
return -EBUSY;
}
return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 493054c..6b26666 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -56,6 +56,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -585,8 +587,7 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
acpi_handle handle;
- printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
if (ACPI_SUCCESS(acpi_get_handle(NULL,
"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
@@ -691,11 +692,11 @@ static int acpi_fujitsu_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
+ pr_info("ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -707,7 +708,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
/* do config (detect defaults) */
@@ -827,7 +828,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
- printk(KERN_ERR "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_stop;
}
@@ -859,13 +860,13 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- !device->power.state ? "on" : "off");
+ pr_info("ACPI: %s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ !device->power.state ? "on" : "off");
fujitsu_hotkey->dev = device;
@@ -875,7 +876,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
i = 0;
@@ -897,8 +898,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
/* Suspect this is a keymap of the application panel, print it */
- printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
- call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+ pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
@@ -907,8 +907,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->logolamp_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for logo lamp, error %i\n", result);
+ pr_err("Could not register LED handler for logo lamp, error %i\n",
+ result);
}
}
@@ -919,8 +919,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->kblamps_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for keyboard lamps, error %i\n", result);
+ pr_err("Could not register LED handler for keyboard lamps, error %i\n",
+ result);
}
}
#endif
@@ -1169,8 +1169,7 @@ static int __init fujitsu_init(void)
fujitsu->bl_device->props.power = 0;
}
- printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
- " successfully loaded.\n");
+ pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -1216,7 +1215,7 @@ static void __exit fujitsu_cleanup(void)
kfree(fujitsu);
- printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(fujitsu_init);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 067bf36..5a34973 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -26,6 +26,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
@@ -238,7 +240,7 @@ static int hdaps_device_init(void)
__check_latch(0x1611, 0x01))
goto out;
- printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x).\n",
+ printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
__get_latch(0x1611));
outb(0x17, 0x1610);
@@ -299,7 +301,7 @@ static int hdaps_probe(struct platform_device *dev)
if (ret)
return ret;
- printk(KERN_INFO "hdaps: device successfully initialized.\n");
+ pr_info("device successfully initialized\n");
return 0;
}
@@ -480,7 +482,7 @@ static struct attribute_group hdaps_attribute_group = {
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int __init hdaps_dmi_match(const struct dmi_system_id *id)
{
- printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
+ pr_info("%s detected\n", id->ident);
return 1;
}
@@ -488,8 +490,7 @@ static int __init hdaps_dmi_match(const struct dmi_system_id *id)
static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
{
hdaps_invert = (unsigned long)id->driver_data;
- printk(KERN_INFO "hdaps: inverting axis (%u) readings.\n",
- hdaps_invert);
+ pr_info("inverting axis (%u) readings\n", hdaps_invert);
return hdaps_dmi_match(id);
}
@@ -543,7 +544,7 @@ static int __init hdaps_init(void)
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
- printk(KERN_WARNING "hdaps: supported laptop not found!\n");
+ pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
@@ -595,7 +596,7 @@ static int __init hdaps_init(void)
if (ret)
goto out_idev;
- printk(KERN_INFO "hdaps: driver successfully loaded.\n");
+ pr_info("driver successfully loaded\n");
return 0;
out_idev:
@@ -609,7 +610,7 @@ out_driver:
out_region:
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
out:
- printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
+ pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
@@ -622,7 +623,7 @@ static void __exit hdaps_exit(void)
platform_driver_unregister(&hdaps_driver);
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
- printk(KERN_INFO "hdaps: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(hdaps_init);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1bc4a75..f94017b 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -54,9 +56,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
-#define PREFIX "HP WMI: "
-#define UNIMP "Unimplemented "
-
enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
@@ -228,9 +227,8 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
if (bios_return->return_code) {
if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- printk(KERN_WARNING PREFIX "query 0x%x returned "
- "error 0x%x\n",
- query, bios_return->return_code);
+ pr_warn("query 0x%x returned error 0x%x\n",
+ query, bios_return->return_code);
kfree(obj);
return bios_return->return_code;
}
@@ -384,8 +382,7 @@ static int hp_wmi_rfkill2_refresh(void)
if (num >= state.count ||
devstate->rfkill_id != rfkill2[i].id) {
- printk(KERN_WARNING PREFIX "power configuration of "
- "the wireless devices unexpectedly changed\n");
+ pr_warn("power configuration of the wireless devices unexpectedly changed\n");
continue;
}
@@ -471,7 +468,7 @@ static void hp_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO PREFIX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -480,8 +477,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_INFO "hp-wmi: Unknown response received %d\n",
- obj->type);
+ pr_info("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
@@ -498,8 +494,7 @@ static void hp_wmi_notify(u32 value, void *context)
event_id = *location;
event_data = *(location + 2);
} else {
- printk(KERN_INFO "hp-wmi: Unknown buffer length %d\n",
- obj->buffer.length);
+ pr_info("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -527,8 +522,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!sparse_keymap_report_event(hp_wmi_input_dev,
key_code, 1, true))
- printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n",
- key_code);
+ pr_info("Unknown key code - 0x%x\n", key_code);
break;
case HPWMI_WIRELESS:
if (rfkill2_count) {
@@ -550,14 +544,12 @@ static void hp_wmi_notify(u32 value, void *context)
hp_wmi_get_hw_state(HPWMI_WWAN));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
- printk(KERN_INFO PREFIX UNIMP "CPU throttle because of 3 Cell"
- " battery event detected\n");
+ pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
break;
case HPWMI_LOCK_SWITCH:
break;
default:
- printk(KERN_INFO PREFIX "Unknown event_id - %d - 0x%x\n",
- event_id, event_data);
+ pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
}
}
@@ -705,7 +697,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
return err;
if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
- printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n");
+ pr_warn("unable to parse 0x1b query output\n");
return -EINVAL;
}
@@ -727,14 +719,14 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
name = "hp-wwan";
break;
default:
- printk(KERN_WARNING PREFIX "unknown device type 0x%x\n",
- state.device[i].radio_type);
+ pr_warn("unknown device type 0x%x\n",
+ state.device[i].radio_type);
continue;
}
if (!state.device[i].vendor_id) {
- printk(KERN_WARNING PREFIX "zero device %d while %d "
- "reported\n", i, state.count);
+ pr_warn("zero device %d while %d reported\n",
+ i, state.count);
continue;
}
@@ -755,8 +747,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
IS_HWBLOCKED(state.device[i].power));
if (!(state.device[i].power & HPWMI_POWER_BIOS))
- printk(KERN_INFO PREFIX "device %s blocked by BIOS\n",
- name);
+ pr_info("device %s blocked by BIOS\n", name);
err = rfkill_register(rfkill);
if (err) {
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index b1396e5..811d436 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -69,9 +71,10 @@ struct ibm_rtl_table {
#define RTL_SIGNATURE 0x0000005f4c54525fULL
#define RTL_MASK 0x000000ffffffffffULL
-#define RTL_DEBUG(A, ...) do { \
- if (debug) \
- pr_info("ibm-rtl: " A, ##__VA_ARGS__ ); \
+#define RTL_DEBUG(fmt, ...) \
+do { \
+ if (debug) \
+ pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static DEFINE_MUTEX(rtl_lock);
@@ -114,7 +117,7 @@ static int ibm_rtl_write(u8 value)
int ret = 0, count = 0;
static u32 cmd_port_val;
- RTL_DEBUG("%s(%d)\n", __FUNCTION__, value);
+ RTL_DEBUG("%s(%d)\n", __func__, value);
value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
@@ -144,8 +147,8 @@ static int ibm_rtl_write(u8 value)
while (ioread8(&rtl_table->command)) {
msleep(10);
if (count++ > 500) {
- pr_err("ibm-rtl: Hardware not responding to "
- "mode switch request\n");
+ pr_err("Hardware not responding to "
+ "mode switch request\n");
ret = -EIO;
break;
}
@@ -250,7 +253,7 @@ static int __init ibm_rtl_init(void) {
int ret = -ENODEV, i;
if (force)
- pr_warning("ibm-rtl: module loaded by force\n");
+ pr_warn("module loaded by force\n");
/* first ensure that we are running on IBM HW */
else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
return -ENODEV;
@@ -288,19 +291,19 @@ static int __init ibm_rtl_init(void) {
if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
phys_addr_t addr;
unsigned int plen;
- RTL_DEBUG("found RTL_SIGNATURE at %#llx\n", (u64)tmp);
+ RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
rtl_table = tmp;
/* The address, value, width and offset are platform
* dependent and found in the ibm_rtl_table */
rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
- rtl_cmd_width, rtl_cmd_type);
+ rtl_cmd_width, rtl_cmd_type);
addr = ioread32(&rtl_table->cmd_port_address);
RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
plen = rtl_cmd_width/sizeof(char);
rtl_cmd_addr = rtl_port_map(addr, plen);
- RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
+ RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
if (!rtl_cmd_addr) {
ret = -ENOMEM;
break;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 21b1018..bfdda33 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -20,6 +20,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index eacd5da..809adea4 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -27,6 +27,8 @@
* to get/set bandwidth.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -135,8 +137,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
&temp);
- printk(KERN_INFO
- "Bandwidth value was %ld: status is %d\n", state, status);
+ pr_info("Bandwidth value was %ld: status is %d\n", state, status);
if (ACPI_FAILURE(status))
return -EFAULT;
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 213e79b..f1ae507 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -23,58 +23,48 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
+
#include <asm/intel_scu_ipc.h>
#define DRIVER_NAME "msic_power_btn"
-#define MSIC_IRQ_STAT 0x02
- #define MSIC_IRQ_PB (1 << 0)
-#define MSIC_PB_CONFIG 0x3e
#define MSIC_PB_STATUS 0x3f
- #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
-
-struct mfld_pb_priv {
- struct input_dev *input;
- unsigned int irq;
-};
+#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
{
- struct mfld_pb_priv *priv = dev_id;
+ struct input_dev *input = dev_id;
int ret;
u8 pbstat;
ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
- if (ret < 0)
- return IRQ_HANDLED;
-
- input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL));
- input_sync(priv->input);
+ if (ret < 0) {
+ dev_err(input->dev.parent, "Read error %d while reading"
+ " MSIC_PB_STATUS\n", ret);
+ } else {
+ input_event(input, EV_KEY, KEY_POWER,
+ !(pbstat & MSIC_PB_LEVEL));
+ input_sync(input);
+ }
return IRQ_HANDLED;
}
static int __devinit mfld_pb_probe(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv;
struct input_dev *input;
- int irq;
+ int irq = platform_get_irq(pdev, 0);
int error;
- irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
- priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL);
input = input_allocate_device();
- if (!priv || !input) {
- error = -ENOMEM;
- goto err_free_mem;
+ if (!input) {
+ dev_err(&pdev->dev, "Input device allocation error\n");
+ return -ENOMEM;
}
- priv->input = input;
- priv->irq = irq;
-
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
@@ -82,42 +72,40 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr,
- 0, DRIVER_NAME, priv);
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ DRIVER_NAME, input);
if (error) {
- dev_err(&pdev->dev,
- "unable to request irq %d for mfld power button\n",
- irq);
- goto err_free_mem;
+ dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+ " button\n", irq);
+ goto err_free_input;
}
error = input_register_device(input);
if (error) {
- dev_err(&pdev->dev,
- "unable to register input dev, error %d\n", error);
+ dev_err(&pdev->dev, "Unable to register input dev, error "
+ "%d\n", error);
goto err_free_irq;
}
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, input);
return 0;
err_free_irq:
- free_irq(priv->irq, priv);
-err_free_mem:
+ free_irq(irq, input);
+err_free_input:
input_free_device(input);
- kfree(priv);
return error;
}
static int __devexit mfld_pb_remove(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv = platform_get_drvdata(pdev);
-
- free_irq(priv->irq, priv);
- input_unregister_device(priv->input);
- kfree(priv);
+ struct input_dev *input = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ free_irq(irq, input);
+ input_unregister_device(input);
platform_set_drvdata(pdev, NULL);
+
return 0;
}
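
The powerbtn rework above reduces to one idiom: the input device itself serves as the IRQ cookie and the platform driver data, so the private wrapper struct disappears. A minimal sketch of just that plumbing, with hypothetical example_* names and error unwinding trimmed for brevity:

#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>

/* The input device is passed straight through as dev_id. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct input_dev *input = dev_id;

        /* Report a press; the real driver reads MSIC_PB_STATUS to decide. */
        input_event(input, EV_KEY, KEY_POWER, 1);
        input_sync(input);
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);
        struct input_dev *input;
        int err;

        if (irq < 0)
                return irq;

        input = input_allocate_device();
        if (!input)
                return -ENOMEM;
        input_set_capability(input, EV_KEY, KEY_POWER);

        err = request_threaded_irq(irq, NULL, example_isr, 0, "example", input);
        if (err) {
                input_free_device(input);
                return err;
        }

        /* remove() fetches the same pointer back with platform_get_drvdata(). */
        platform_set_drvdata(pdev, input);
        return input_register_device(input);
}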
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index c2f4bd8..3a57832 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -37,49 +37,50 @@
#include <asm/intel_scu_ipc.h>
/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS 4
+#define MSIC_THERMAL_SENSORS 4
/* ADC1 - thermal registers */
-#define MSIC_THERM_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
+#define MSIC_THERM_ADC1CNTL1 0x1C0
+#define MSIC_ADC_ENBL 0x10
+#define MSIC_ADC_START 0x08
-#define MSIC_THERM_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-#define MSIC_CHANL_MASK_VAL 0x0F
+#define MSIC_THERM_ADC1CNTL3 0x1C2
+#define MSIC_ADCTHERM_ENBL 0x04
+#define MSIC_ADCRRDATA_ENBL 0x05
+#define MSIC_CHANL_MASK_VAL 0x0F
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
+#define MSIC_STOPBIT_MASK 16
+#define MSIC_ADCTHERM_MASK 4
+/* Number of ADC channels */
+#define ADC_CHANLS_MAX 15
+#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
/* ADC channel code values */
-#define SKIN_SENSOR0_CODE 0x08
-#define SKIN_SENSOR1_CODE 0x09
-#define SYS_SENSOR_CODE 0x0A
-#define MSIC_DIE_SENSOR_CODE 0x03
+#define SKIN_SENSOR0_CODE 0x08
+#define SKIN_SENSOR1_CODE 0x09
+#define SYS_SENSOR_CODE 0x0A
+#define MSIC_DIE_SENSOR_CODE 0x03
-#define SKIN_THERM_SENSOR0 0
-#define SKIN_THERM_SENSOR1 1
-#define SYS_THERM_SENSOR2 2
-#define MSIC_DIE_THERM_SENSOR3 3
+#define SKIN_THERM_SENSOR0 0
+#define SKIN_THERM_SENSOR1 1
+#define SYS_THERM_SENSOR2 2
+#define MSIC_DIE_THERM_SENSOR3 3
/* ADC code range */
-#define ADC_MAX 977
-#define ADC_MIN 162
-#define ADC_VAL0C 887
-#define ADC_VAL20C 720
-#define ADC_VAL40C 508
-#define ADC_VAL60C 315
+#define ADC_MAX 977
+#define ADC_MIN 162
+#define ADC_VAL0C 887
+#define ADC_VAL20C 720
+#define ADC_VAL40C 508
+#define ADC_VAL60C 315
/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
+#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
+#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN 488
-#define MSIC_DIE_ADC_MAX 1004
+#define MSIC_DIE_ADC_MIN 488
+#define MSIC_DIE_ADC_MAX 1004
/* This holds the address of the first free ADC channel,
* among the 15 channels
@@ -87,15 +88,15 @@
static int channel_index;
struct platform_info {
- struct platform_device *pdev;
- struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
+ struct platform_device *pdev;
+ struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
};
struct thermal_device_info {
- unsigned int chnl_addr;
- int direct;
- /* This holds the current temperature in millidegree celsius */
- long curr_temp;
+ unsigned int chnl_addr;
+ int direct;
+ /* This holds the current temperature in millidegree celsius */
+ long curr_temp;
};
/**
@@ -106,7 +107,7 @@ struct thermal_device_info {
*/
static int to_msic_die_temp(uint16_t adc_val)
{
- return (368 * (adc_val) / 1000) - 220;
+ return (368 * (adc_val) / 1000) - 220;
}
/**
@@ -118,7 +119,7 @@ static int to_msic_die_temp(uint16_t adc_val)
*/
static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
{
- return (adc_val >= min) && (adc_val <= max);
+ return (adc_val >= min) && (adc_val <= max);
}
/**
@@ -136,35 +137,35 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
*/
static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
{
- int temp;
-
- /* Direct conversion for die temperature */
- if (direct) {
- if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
- *tp = to_msic_die_temp(adc_val) * 1000;
- return 0;
- }
- return -ERANGE;
- }
-
- if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
- return -ERANGE;
-
- /* Linear approximation for skin temperature */
- if (adc_val > ADC_VAL0C)
- temp = 177 - (adc_val/5);
- else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
- temp = 111 - (adc_val/8);
- else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
- temp = 92 - (adc_val/10);
- else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
- temp = 91 - (adc_val/10);
- else
- temp = 112 - (adc_val/6);
-
- /* Convert temperature in celsius to milli degree celsius */
- *tp = temp * 1000;
- return 0;
+ int temp;
+
+ /* Direct conversion for die temperature */
+ if (direct) {
+ if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
+ *tp = to_msic_die_temp(adc_val) * 1000;
+ return 0;
+ }
+ return -ERANGE;
+ }
+
+ if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
+ return -ERANGE;
+
+ /* Linear approximation for skin temperature */
+ if (adc_val > ADC_VAL0C)
+ temp = 177 - (adc_val/5);
+ else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
+ temp = 111 - (adc_val/8);
+ else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
+ temp = 92 - (adc_val/10);
+ else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
+ temp = 91 - (adc_val/10);
+ else
+ temp = 112 - (adc_val/6);
+
+ /* Convert temperature in celsius to milli degree celsius */
+ *tp = temp * 1000;
+ return 0;
}
/**
@@ -178,47 +179,47 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
*/
static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- struct thermal_device_info *td_info = tzd->devdata;
- uint16_t adc_val, addr;
- uint8_t data = 0;
- int ret;
- unsigned long curr_temp;
-
-
- addr = td_info->chnl_addr;
-
- /* Enable the msic for conversion before reading */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
- if (ret)
- return ret;
-
- /* Re-toggle the RRDATARD bit (temporary workaround) */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
- if (ret)
- return ret;
-
- /* Read the higher bits of data */
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (data << 2);
- addr++;
-
- ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
- if (ret)
- return ret;
-
- /* Adding lower two bits to the higher bits */
- data &= 03;
- adc_val += data;
-
- /* Convert ADC value to temperature */
- ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
- if (ret == 0)
- *temp = td_info->curr_temp = curr_temp;
- return ret;
+ struct thermal_device_info *td_info = tzd->devdata;
+ uint16_t adc_val, addr;
+ uint8_t data = 0;
+ int ret;
+ unsigned long curr_temp;
+
+
+ addr = td_info->chnl_addr;
+
+ /* Enable the msic for conversion before reading */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
+ if (ret)
+ return ret;
+
+ /* Re-toggle the RRDATARD bit (temporary workaround) */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
+ if (ret)
+ return ret;
+
+ /* Read the higher bits of data */
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+
+ /* Shift bits to accommodate the lower two data bits */
+ adc_val = (data << 2);
+ addr++;
+
+ ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
+ if (ret)
+ return ret;
+
+ /* Adding lower two bits to the higher bits */
+ data &= 03;
+ adc_val += data;
+
+ /* Convert ADC value to temperature */
+ ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
+ if (ret == 0)
+ *temp = td_info->curr_temp = curr_temp;
+ return ret;
}
/**
@@ -231,22 +232,21 @@ static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
*/
static int configure_adc(int val)
{
- int ret;
- uint8_t data;
-
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if (val) {
- /* Enable and start the ADC */
- data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- } else {
- /* Just stop the ADC */
- data &= (~MSIC_ADC_START);
- }
-
- return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
+ int ret;
+ uint8_t data;
+
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if (val) {
+ /* Enable and start the ADC */
+ data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
+ } else {
+ /* Just stop the ADC */
+ data &= (~MSIC_ADC_START);
+ }
+ return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
}
/**
@@ -259,30 +259,30 @@ static int configure_adc(int val)
*/
static int set_up_therm_channel(u16 base_addr)
{
- int ret;
-
- /* Enable all the sensor channels */
- ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
- if (ret)
- return ret;
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- ret = intel_scu_ipc_iowrite8(base_addr + 3,
- (MSIC_DIE_SENSOR_CODE | 0x10));
- if (ret)
- return ret;
-
- /* Enable ADC and start it */
- return configure_adc(1);
+ int ret;
+
+ /* Enable all the sensor channels */
+ ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
+ if (ret)
+ return ret;
+
+ /* Since this is the last channel, set the stop bit
+ * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
+ ret = intel_scu_ipc_iowrite8(base_addr + 3,
+ (MSIC_DIE_SENSOR_CODE | 0x10));
+ if (ret)
+ return ret;
+
+ /* Enable ADC and start it */
+ return configure_adc(1);
}
/**
@@ -293,13 +293,13 @@ static int set_up_therm_channel(u16 base_addr)
*/
static int reset_stopbit(uint16_t addr)
{
- int ret;
- uint8_t data;
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
+ int ret;
+ uint8_t data;
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+ /* Set the stop bit to zero */
+ return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
}
/**
@@ -317,30 +317,30 @@ static int reset_stopbit(uint16_t addr)
*/
static int find_free_channel(void)
{
- int ret;
- int i;
- uint8_t data;
-
- /* check whether ADC is enabled */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if ((data & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
- ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
+ int ret;
+ int i;
+ uint8_t data;
+
+ /* check whether ADC is enabled */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MSIC_ADC_ENBL) == 0)
+ return 0;
+
+ /* ADC is already enabled; Looking for an empty channel */
+ for (i = 0; i < ADC_CHANLS_MAX; i++) {
+ ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_STOPBIT_MASK) {
+ ret = i;
+ break;
+ }
+ }
+ return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
}
/**
@@ -351,48 +351,48 @@ static int find_free_channel(void)
*/
static int mid_initialize_adc(struct device *dev)
{
- u8 data;
- u16 base_addr;
- int ret;
-
- /*
- * Ensure that adctherm is disabled before we
- * initialize the ADC
- */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_ADCTHERM_MASK)
- dev_warn(dev, "ADCTHERM already set");
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- dev_err(dev, "No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- ret = set_up_therm_channel(base_addr);
- if (ret) {
- dev_err(dev, "unable to enable ADC");
- return ret;
- }
- dev_dbg(dev, "ADC initialization successful");
- return ret;
+ u8 data;
+ u16 base_addr;
+ int ret;
+
+ /*
+ * Ensure that adctherm is disabled before we
+ * initialize the ADC
+ */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_ADCTHERM_MASK)
+ dev_warn(dev, "ADCTHERM already set");
+
+ /* Index of the first channel in which the stop bit is set */
+ channel_index = find_free_channel();
+ if (channel_index < 0) {
+ dev_err(dev, "No free ADC channels");
+ return channel_index;
+ }
+
+ base_addr = ADC_CHNL_START_ADDR + channel_index;
+
+ if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
+ /* Reset stop bit for channels other than 0 and 12 */
+ ret = reset_stopbit(base_addr);
+ if (ret)
+ return ret;
+
+ /* Index of the first free channel */
+ base_addr++;
+ channel_index++;
+ }
+
+ ret = set_up_therm_channel(base_addr);
+ if (ret) {
+ dev_err(dev, "unable to enable ADC");
+ return ret;
+ }
+ dev_dbg(dev, "ADC initialization successful");
+ return ret;
}
/**
@@ -403,18 +403,18 @@ static int mid_initialize_adc(struct device *dev)
*/
static struct thermal_device_info *initialize_sensor(int index)
{
- struct thermal_device_info *td_info =
- kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
- if (!td_info)
- return NULL;
-
- /* Set the base addr of the channel for this sensor */
- td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
- /* Sensor 3 is direct conversion */
- if (index == 3)
- td_info->direct = 1;
- return td_info;
+ struct thermal_device_info *td_info =
+ kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+ if (!td_info)
+ return NULL;
+
+ /* Set the base addr of the channel for this sensor */
+ td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
+ /* Sensor 3 is direct conversion */
+ if (index == 3)
+ td_info->direct = 1;
+ return td_info;
}
/**
@@ -425,7 +425,7 @@ static struct thermal_device_info *initialize_sensor(int index)
*/
static int mid_thermal_resume(struct platform_device *pdev)
{
- return mid_initialize_adc(&pdev->dev);
+ return mid_initialize_adc(&pdev->dev);
}
/**
@@ -437,12 +437,12 @@ static int mid_thermal_resume(struct platform_device *pdev)
*/
static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
{
- /*
- * This just stops the ADC and does not disable it.
- * temporary workaround until we have a generic ADC driver.
- * If 0 is passed, it disables the ADC.
- */
- return configure_adc(0);
+ /*
+ * This just stops the ADC and does not disable it.
+ * temporary workaround until we have a generic ADC driver.
+ * If 0 is passed, it disables the ADC.
+ */
+ return configure_adc(0);
}
/**
@@ -453,16 +453,15 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
*/
static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- WARN_ON(tzd == NULL);
- return mid_read_temp(tzd, temp);
+ WARN_ON(tzd == NULL);
+ return mid_read_temp(tzd, temp);
}
/* Can't be const */
static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = read_curr_temp,
+ .get_temp = read_curr_temp,
};
-
/**
* mid_thermal_probe - mfld thermal initialize
* @pdev: platform device structure
@@ -472,46 +471,45 @@ static struct thermal_zone_device_ops tzd_ops = {
*/
static int mid_thermal_probe(struct platform_device *pdev)
{
- static char *name[MSIC_THERMAL_SENSORS] = {
- "skin0", "skin1", "sys", "msicdie"
- };
-
- int ret;
- int i;
- struct platform_info *pinfo;
-
- pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
-
- /* Initializing the hardware */
- ret = mid_initialize_adc(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "ADC init failed");
- kfree(pinfo);
- return ret;
- }
-
- /* Register each sensor with the generic thermal framework*/
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i),
- &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
- }
-
- pinfo->pdev = pdev;
- platform_set_drvdata(pdev, pinfo);
- return 0;
+ static char *name[MSIC_THERMAL_SENSORS] = {
+ "skin0", "skin1", "sys", "msicdie"
+ };
+
+ int ret;
+ int i;
+ struct platform_info *pinfo;
+
+ pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ /* Initializing the hardware */
+ ret = mid_initialize_adc(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "ADC init failed");
+ kfree(pinfo);
+ return ret;
+ }
+
+ /* Register each sensor with the generic thermal framework*/
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ pinfo->tzd[i] = thermal_zone_device_register(name[i],
+ 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i]))
+ goto reg_fail;
+ }
+
+ pinfo->pdev = pdev;
+ platform_set_drvdata(pdev, pinfo);
+ return 0;
reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
- thermal_zone_device_unregister(pinfo->tzd[i]);
- configure_adc(0);
- kfree(pinfo);
- return ret;
+ ret = PTR_ERR(pinfo->tzd[i]);
+ while (--i >= 0)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
+ configure_adc(0);
+ kfree(pinfo);
+ return ret;
}
/**
@@ -523,49 +521,46 @@ reg_fail:
*/
static int mid_thermal_remove(struct platform_device *pdev)
{
- int i;
- struct platform_info *pinfo = platform_get_drvdata(pdev);
+ int i;
+ struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
- thermal_zone_device_unregister(pinfo->tzd[i]);
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
- platform_set_drvdata(pdev, NULL);
+ kfree(pinfo);
+ platform_set_drvdata(pdev, NULL);
- /* Stop the ADC */
- return configure_adc(0);
+ /* Stop the ADC */
+ return configure_adc(0);
}
-/*********************************************************************
- * Driver initialisation and finalization
- *********************************************************************/
-
#define DRIVER_NAME "msic_sensor"
static const struct platform_device_id therm_id_table[] = {
- { DRIVER_NAME, 1 },
- { }
+ { DRIVER_NAME, 1 },
+ { }
};
static struct platform_driver mid_thermal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
- .probe = mid_thermal_probe,
- .suspend = mid_thermal_suspend,
- .resume = mid_thermal_resume,
- .remove = __devexit_p(mid_thermal_remove),
- .id_table = therm_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mid_thermal_probe,
+ .suspend = mid_thermal_suspend,
+ .resume = mid_thermal_resume,
+ .remove = __devexit_p(mid_thermal_remove),
+ .id_table = therm_id_table,
};
static int __init mid_thermal_module_init(void)
{
- return platform_driver_register(&mid_thermal_driver);
+ return platform_driver_register(&mid_thermal_driver);
}
static void __exit mid_thermal_module_exit(void)
{
- platform_driver_unregister(&mid_thermal_driver);
+ platform_driver_unregister(&mid_thermal_driver);
}
module_init(mid_thermal_module_init);
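
For reference, adc_to_temp() above converts skin-sensor ADC codes with a four-band linear fit before scaling to millidegrees. A standalone sketch of the same arithmetic with one worked value (assumed input of 600; plain userspace C, not driver code):

#include <stdio.h>

/* Band limits match the driver's ADC_VAL0C/20C/40C/60C defines. */
static int skin_adc_to_celsius(int adc_val)
{
        if (adc_val > 887)
                return 177 - adc_val / 5;
        if (adc_val > 720)
                return 111 - adc_val / 8;
        if (adc_val > 508)
                return 92 - adc_val / 10;
        if (adc_val > 315)
                return 91 - adc_val / 10;
        return 112 - adc_val / 6;
}

int main(void)
{
        /* 600 falls in the 508..720 band: 92 - 600/10 = 32 C, i.e. 32000 mC. */
        printf("%d mC\n", skin_adc_to_celsius(600) * 1000);
        return 0;
}

The driver additionally rejects codes outside 162..977 (ADC_MIN/ADC_MAX) with -ERANGE before applying the fit.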
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
new file mode 100644
index 0000000..e936364
--- /dev/null
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -0,0 +1,396 @@
+/*
+ * intel_oaktrail.c - Intel OakTrail Platform support.
+ *
+ * Copyright (C) 2010-2011 Intel Corporation
+ * Author: Yin Kangkai (kangkai.yin@intel.com)
+ *
+ * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
+ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
+ * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * This driver does the following:
+ * 1. registers itself in the Linux backlight control in
+ * /sys/class/backlight/intel_oaktrail/
+ *
+ * 2. registers in the rfkill subsystem here: /sys/class/rfkill/rfkillX/
+ * for these components: wifi, bluetooth, wwan (3g), gps
+ *
+ * This driver might work on other products based on Oaktrail. If you
+ * want to try it you can pass force=1 as argument to the module which
+ * will force it to load even when the DMI data doesn't identify the
+ * product as compatible.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/rfkill.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+
+#define DRIVER_NAME "intel_oaktrail"
+#define DRIVER_VERSION "0.4ac1"
+
+/*
+ * This is the devices status address in EC space, and the control bits
+ * definition:
+ *
+ * (1 << 0): Camera enable/disable, RW.
+ * (1 << 1): Bluetooth enable/disable, RW.
+ * (1 << 2): GPS enable/disable, RW.
+ * (1 << 3): WiFi enable/disable, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
+ * (1 << 5): Touchscreen enable/disable, Read Only.
+ */
+#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
+
+#define OT_EC_CAMERA_MASK (1 << 0)
+#define OT_EC_BT_MASK (1 << 1)
+#define OT_EC_GPS_MASK (1 << 2)
+#define OT_EC_WIFI_MASK (1 << 3)
+#define OT_EC_WWAN_MASK (1 << 4)
+#define OT_EC_TS_MASK (1 << 5)
+
+/*
+ * This is the address in EC space and commands used to control LCD backlight:
+ *
+ * Two steps needed to change the LCD backlight:
+ * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
+ * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
+ *
+ * To read the LCD back light, just read out the value from
+ * OT_EC_BL_BRIGHTNESS_ADDRESS.
+ *
+ * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
+ */
+#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
+#define OT_EC_BL_BRIGHTNESS_MAX 100
+#define OT_EC_BL_CONTROL_ADDRESS 0x3A
+#define OT_EC_BL_CONTROL_ON_DATA 0x1A
+
+
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static struct platform_device *oaktrail_device;
+static struct backlight_device *oaktrail_bl_device;
+static struct rfkill *bt_rfkill;
+static struct rfkill *gps_rfkill;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *wwan_rfkill;
+
+
+/* rfkill */
+static int oaktrail_rfkill_set(void *data, bool blocked)
+{
+ u8 value;
+ u8 result;
+ unsigned long radio = (unsigned long) data;
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+
+ ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
+
+ return 0;
+}
+
+static const struct rfkill_ops oaktrail_rfkill_ops = {
+ .set_block = oaktrail_rfkill_set,
+};
+
+static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
+ unsigned long mask)
+{
+ struct rfkill *rfkill_dev;
+ u8 value;
+ int err;
+
+ rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
+ &oaktrail_rfkill_ops, (void *)mask);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
+ rfkill_init_sw_state(rfkill_dev, !(value & mask));
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ return rfkill_dev;
+}
+
+static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
+{
+ if (rf) {
+ rfkill_unregister(rf);
+ rfkill_destroy(rf);
+ }
+}
+
+static void oaktrail_rfkill_cleanup(void)
+{
+ __oaktrail_rfkill_cleanup(wifi_rfkill);
+ __oaktrail_rfkill_cleanup(bt_rfkill);
+ __oaktrail_rfkill_cleanup(gps_rfkill);
+ __oaktrail_rfkill_cleanup(wwan_rfkill);
+}
+
+static int oaktrail_rfkill_init(void)
+{
+ int ret;
+
+ wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
+ RFKILL_TYPE_WLAN,
+ OT_EC_WIFI_MASK);
+ if (IS_ERR(wifi_rfkill)) {
+ ret = PTR_ERR(wifi_rfkill);
+ wifi_rfkill = NULL;
+ goto cleanup;
+ }
+
+ bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ OT_EC_BT_MASK);
+ if (IS_ERR(bt_rfkill)) {
+ ret = PTR_ERR(bt_rfkill);
+ bt_rfkill = NULL;
+ goto cleanup;
+ }
+
+ gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
+ RFKILL_TYPE_GPS,
+ OT_EC_GPS_MASK);
+ if (IS_ERR(gps_rfkill)) {
+ ret = PTR_ERR(gps_rfkill);
+ gps_rfkill = NULL;
+ goto cleanup;
+ }
+
+ wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
+ RFKILL_TYPE_WWAN,
+ OT_EC_WWAN_MASK);
+ if (IS_ERR(wwan_rfkill)) {
+ ret = PTR_ERR(wwan_rfkill);
+ wwan_rfkill = NULL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ oaktrail_rfkill_cleanup();
+ return ret;
+}
+
+
+/* backlight */
+static int get_backlight_brightness(struct backlight_device *b)
+{
+ u8 value;
+ ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
+
+ return value;
+}
+
+static int set_backlight_brightness(struct backlight_device *b)
+{
+ u8 percent = (u8) b->props.brightness;
+ if (percent < 0 || percent > OT_EC_BL_BRIGHTNESS_MAX)
+ return -EINVAL;
+
+ ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
+ ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_bl_ops = {
+ .get_brightness = get_backlight_brightness,
+ .update_status = set_backlight_brightness,
+};
+
+static int oaktrail_backlight_init(void)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
+ bd = backlight_device_register(DRIVER_NAME,
+ &oaktrail_device->dev, NULL,
+ &oaktrail_bl_ops,
+ &props);
+
+ if (IS_ERR(bd)) {
+ oaktrail_bl_device = NULL;
+ pr_warning("Unable to register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ oaktrail_bl_device = bd;
+
+ bd->props.brightness = get_backlight_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void oaktrail_backlight_exit(void)
+{
+ if (oaktrail_bl_device)
+ backlight_device_unregister(oaktrail_bl_device);
+}
+
+static int __devinit oaktrail_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __devexit oaktrail_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver oaktrail_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = oaktrail_probe,
+ .remove = __devexit_p(oaktrail_remove)
+};
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified model '%s'\n", id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
+ {
+ .ident = "OakTrail platform",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
+ },
+ .callback = dmi_check_cb
+ },
+ { }
+};
+
+static int __init oaktrail_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(oaktrail_dmi_table)) {
+ pr_err("Platform not recognized (You could try the module's force-parameter)");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&oaktrail_driver);
+ if (ret) {
+ pr_warning("Unable to register platform driver\n");
+ goto err_driver_reg;
+ }
+
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
+ if (!oaktrail_device) {
+ pr_warning("Unable to allocate platform device\n");
+ ret = -ENOMEM;
+ goto err_device_alloc;
+ }
+
+ ret = platform_device_add(oaktrail_device);
+ if (ret) {
+ pr_warning("Unable to add platform device\n");
+ goto err_device_add;
+ }
+
+ if (!acpi_video_backlight_support()) {
+ ret = oaktrail_backlight_init();
+ if (ret)
+ goto err_backlight;
+
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
+
+ ret = oaktrail_rfkill_init();
+ if (ret) {
+ pr_warning("Setup rfkill failed\n");
+ goto err_rfkill;
+ }
+
+ pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ oaktrail_backlight_exit();
+err_backlight:
+ platform_device_del(oaktrail_device);
+err_device_add:
+ platform_device_put(oaktrail_device);
+err_device_alloc:
+ platform_driver_unregister(&oaktrail_driver);
+err_driver_reg:
+
+ return ret;
+}
+
+static void __exit oaktrail_cleanup(void)
+{
+ oaktrail_backlight_exit();
+ oaktrail_rfkill_cleanup();
+ platform_device_unregister(oaktrail_device);
+ platform_driver_unregister(&oaktrail_driver);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(oaktrail_init);
+module_exit(oaktrail_cleanup);
+
+MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dmi:*:svnIntelCorporation:pnOakTrailplatform:*");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 464bb3f..1686c1e 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -19,6 +19,8 @@
* Moorestown platform PMIC chip
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -90,8 +92,7 @@ static void pmic_program_irqtype(int gpio, int type)
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
- printk(KERN_ERR
- "%s: only pin 0-7 support input\n", __func__);
+ pr_err("only pin 0-7 support input\n");
return -1;/* we only have 8 GPIO can use as input */
}
return intel_scu_ipc_update_register(GPIO0 + offset,
@@ -116,8 +117,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
value ? 1 << (offset - 16) : 0,
1 << (offset - 16));
else {
- printk(KERN_ERR
- "%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
+ pr_err("invalid PMIC GPIO pin %d!\n", offset);
WARN_ON(1);
}
@@ -260,7 +260,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
/* setting up SRAM mapping for GPIOINT register */
pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
if (!pg->gpiointr) {
- printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
+ pr_err("Can not map GPIOINT\n");
retval = -EINVAL;
goto err2;
}
@@ -281,13 +281,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
if (retval) {
- printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
+ pr_err("Can not add pmic gpio chip\n");
goto err;
}
retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
if (retval) {
- printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ pr_warn("Interrupt request failed\n");
goto err;
}
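
The pmic_gpio hunks above use a pr_fmt() variant worth spelling out: the format gains a "%s: " and __func__ rides along as an extra printk argument, so every pr_err()/pr_warn() in the file is prefixed with the calling function. A minimal sketch of how it expands (hypothetical example() function):

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>

static void example(void)
{
        /*
         * Expands to printk(KERN_ERR "%s: " "Can not map GPIOINT\n", __func__),
         * so the log line reads "example: Can not map GPIOINT".
         */
        pr_err("Can not map GPIOINT\n");
}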
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 23fb2af..3ff629d 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
- NULL, 0, 1);
+ NULL, 0);
}
static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
- NULL, 0, 1);
+ NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return -1;
@@ -447,7 +447,7 @@ static struct platform_device *msipf_device;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- pr_info("Identified laptop model '%s'.\n", id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
return 1;
}
@@ -800,7 +800,7 @@ static void msi_laptop_input_destroy(void)
input_unregister_device(msi_laptop_input_dev);
}
-static int load_scm_model_init(struct platform_device *sdev)
+static int __init load_scm_model_init(struct platform_device *sdev)
{
u8 data;
int result;
@@ -875,8 +875,7 @@ static int __init msi_init(void)
/* Register backlight stuff */
if (acpi_video_backlight_support()) {
- pr_info("Brightness ignored, must be controlled "
- "by ACPI video driver\n");
+ pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
} else {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -930,7 +929,7 @@ static int __init msi_init(void)
if (auto_brightness != 2)
set_auto_brightness(auto_brightness);
- pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n");
+ pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -978,7 +977,7 @@ static void __exit msi_cleanup(void)
if (auto_brightness != 2)
set_auto_brightness(1);
- pr_info("driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(msi_init);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index d5419c9..c832e33 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/input.h>
@@ -36,13 +37,10 @@ MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
#define DRV_NAME "msi-wmi"
-#define DRV_PFX DRV_NAME ": "
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-#define dprintk(msg...) pr_debug(DRV_PFX msg)
-
#define SCANCODE_BASE 0xD0
#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
@@ -78,7 +76,7 @@ static int msi_wmi_query_block(int instance, int *ret)
if (!obj || obj->type != ACPI_TYPE_INTEGER) {
if (obj) {
- printk(KERN_ERR DRV_PFX "query block returned object "
+ pr_err("query block returned object "
"type: %d - buffer length:%d\n", obj->type,
obj->type == ACPI_TYPE_BUFFER ?
obj->buffer.length : 0);
@@ -97,8 +95,8 @@ static int msi_wmi_set_block(int instance, int value)
struct acpi_buffer input = { sizeof(int), &value };
- dprintk("Going to set block of instance: %d - value: %d\n",
- instance, value);
+ pr_debug("Going to set block of instance: %d - value: %d\n",
+ instance, value);
status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
@@ -112,20 +110,19 @@ static int bl_get(struct backlight_device *bd)
/* Instance 1 is "get backlight", cmp with DSDT */
err = msi_wmi_query_block(1, &ret);
if (err) {
- printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err);
+ pr_err("Could not query backlight: %d\n", err);
return -EINVAL;
}
- dprintk("Get: Query block returned: %d\n", ret);
+ pr_debug("Get: Query block returned: %d\n", ret);
for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
if (backlight_map[level] == ret) {
- dprintk("Current backlight level: 0x%X - index: %d\n",
- backlight_map[level], level);
+ pr_debug("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
break;
}
}
if (level == ARRAY_SIZE(backlight_map)) {
- printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n",
- ret);
+ pr_err("get: Invalid brightness value: 0x%X\n", ret);
return -EINVAL;
}
return level;
@@ -156,7 +153,7 @@ static void msi_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -164,7 +161,7 @@ static void msi_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
- dprintk("Eventcode: 0x%x\n", eventcode);
+ pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
if (key) {
@@ -175,8 +172,8 @@ static void msi_wmi_notify(u32 value, void *context)
/* Ignore event if the same event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
- dprintk("Suppressed key event 0x%X - "
- "Last press was %lld us ago\n",
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
return;
}
@@ -187,17 +184,16 @@ static void msi_wmi_notify(u32 value, void *context)
(!acpi_video_backlight_support() ||
(key->code != MSI_WMI_BRIGHTNESSUP &&
key->code != MSI_WMI_BRIGHTNESSDOWN))) {
- dprintk("Send key: 0x%X - "
- "Input layer keycode: %d\n", key->code,
- key->keycode);
+ pr_debug("Send key: 0x%X - "
+ "Input layer keycode: %d\n",
+ key->code, key->keycode);
sparse_keymap_report_entry(msi_wmi_input_dev,
key, 1, true);
}
} else
- printk(KERN_INFO "Unknown key pressed - %x\n",
- eventcode);
+ pr_info("Unknown key pressed - %x\n", eventcode);
} else
- printk(KERN_INFO DRV_PFX "Unknown event received\n");
+ pr_info("Unknown event received\n");
kfree(response.pointer);
}
@@ -238,8 +234,7 @@ static int __init msi_wmi_init(void)
int err;
if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
- printk(KERN_ERR
- "This machine doesn't have MSI-hotkeys through WMI\n");
+ pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
return -ENODEV;
}
err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
@@ -270,7 +265,7 @@ static int __init msi_wmi_init(void)
backlight->props.brightness = err;
}
- dprintk("Event handler installed\n");
+ pr_debug("Event handler installed\n");
return 0;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6fe8cd6..bbd182e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -42,6 +42,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -70,10 +72,10 @@
#include <linux/miscdevice.h>
#endif
-#define DRV_PFX "sony-laptop: "
-#define dprintk(msg...) do { \
- if (debug) \
- pr_warn(DRV_PFX msg); \
+#define dprintk(fmt, ...) \
+do { \
+ if (debug) \
+ pr_warn(fmt, ##__VA_ARGS__); \
} while (0)
#define SONY_LAPTOP_DRIVER_VERSION "0.6"
@@ -418,7 +420,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
error = kfifo_alloc(&sony_laptop_input.fifo,
SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_dec_users;
}
@@ -702,7 +704,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
return 0;
}
- pr_warn(DRV_PFX "acpi_callreadfunc failed\n");
+ pr_warn("acpi_callreadfunc failed\n");
return -1;
}
@@ -728,8 +730,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
if (status == AE_OK) {
if (result != NULL) {
if (out_obj.type != ACPI_TYPE_INTEGER) {
- pr_warn(DRV_PFX "acpi_evaluate_object bad "
- "return type\n");
+ pr_warn("acpi_evaluate_object bad return type\n");
return -1;
}
*result = out_obj.integer.value;
@@ -737,7 +738,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
return 0;
}
- pr_warn(DRV_PFX "acpi_evaluate_object failed\n");
+ pr_warn("acpi_evaluate_object failed\n");
return -1;
}
@@ -961,7 +962,6 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
int result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -973,7 +973,6 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
static int sony_nc_update_status_ng(struct backlight_device *bd)
{
int value, result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -1104,10 +1103,8 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
}
if (!key_event->data)
- pr_info(DRV_PFX
- "Unknown event: 0x%x 0x%x\n",
- key_handle,
- ev);
+ pr_info("Unknown event: 0x%x 0x%x\n",
+ key_handle, ev);
else
sony_laptop_report_input_event(ev);
}
@@ -1128,7 +1125,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
struct acpi_device_info *info;
if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
- pr_warn(DRV_PFX "method: name: %4.4s, args %X\n",
+ pr_warn("method: name: %4.4s, args %X\n",
(char *)&info->name, info->param_count);
kfree(info);
@@ -1169,7 +1166,7 @@ static int sony_nc_resume(struct acpi_device *device)
ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
item->value, NULL);
if (ret < 0) {
- pr_err(DRV_PFX "%s: %d\n", __func__, ret);
+ pr_err("%s: %d\n", __func__, ret);
break;
}
}
@@ -1336,12 +1333,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
device_enum = (union acpi_object *) buffer.pointer;
if (!device_enum) {
- pr_err(DRV_PFX "No SN06 return object.");
+ pr_err("No SN06 return object\n");
goto out_no_enum;
}
if (device_enum->type != ACPI_TYPE_BUFFER) {
- pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n",
- device_enum->type);
+ pr_err("Invalid SN06 return object 0x%.2x\n",
+ device_enum->type);
goto out_no_enum;
}
@@ -1662,7 +1659,7 @@ static void sony_nc_backlight_setup(void)
ops, &props);
if (IS_ERR(sony_bl_props.dev)) {
- pr_warn(DRV_PFX "unable to register backlight device\n");
+ pr_warn("unable to register backlight device\n");
sony_bl_props.dev = NULL;
} else
sony_bl_props.dev->props.brightness =
@@ -1682,8 +1679,7 @@ static int sony_nc_add(struct acpi_device *device)
acpi_handle handle;
struct sony_nc_value *item;
- pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
sony_nc_acpi_device = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -1708,7 +1704,7 @@ static int sony_nc_add(struct acpi_device *device)
sony_nc_acpi_handle, 1, sony_walk_callback,
NULL, NULL, NULL);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "unable to walk acpi resources\n");
+ pr_warn("unable to walk acpi resources\n");
result = -ENODEV;
goto outpresent;
}
@@ -1736,13 +1732,12 @@ static int sony_nc_add(struct acpi_device *device)
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto outkbdbacklight;
}
if (acpi_video_backlight_support()) {
- pr_info(DRV_PFX "brightness ignored, must be "
- "controlled by ACPI video driver\n");
+ pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
sony_nc_backlight_setup();
}
@@ -2265,9 +2260,9 @@ out:
if (pcidev)
pci_dev_put(pcidev);
- pr_info(DRV_PFX "detected Type%d model\n",
- dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
- dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
+ pr_info("detected Type%d model\n",
+ dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
}
/* camera tests and poweron/poweroff */
@@ -2313,7 +2308,7 @@ static int __sony_pic_camera_ready(void)
static int __sony_pic_camera_off(void)
{
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2333,7 +2328,7 @@ static int __sony_pic_camera_on(void)
int i, j, x;
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2356,7 +2351,7 @@ static int __sony_pic_camera_on(void)
}
if (j == 0) {
- pr_warn(DRV_PFX "failed to power on camera\n");
+ pr_warn("failed to power on camera\n");
return -ENODEV;
}
@@ -2412,8 +2407,7 @@ int sony_pic_camera_command(int command, u8 value)
ITERATIONS_SHORT);
break;
default:
- pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n",
- command);
+ pr_err("sony_pic_camera_command invalid: %d\n", command);
break;
}
mutex_unlock(&spic_dev.lock);
@@ -2819,7 +2813,7 @@ static int sonypi_compat_init(void)
error =
kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
return error;
}
@@ -2829,12 +2823,12 @@ static int sonypi_compat_init(void)
sonypi_misc_device.minor = minor;
error = misc_register(&sonypi_misc_device);
if (error) {
- pr_err(DRV_PFX "misc_register failed\n");
+ pr_err("misc_register failed\n");
goto err_free_kfifo;
}
if (minor == -1)
- pr_info(DRV_PFX "device allocated minor is %d\n",
- sonypi_misc_device.minor);
+ pr_info("device allocated minor is %d\n",
+ sonypi_misc_device.minor);
return 0;
@@ -2893,8 +2887,8 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
for (i = 0; i < p->interrupt_count; i++) {
if (!p->interrupts[i]) {
- pr_warn(DRV_PFX "Invalid IRQ %d\n",
- p->interrupts[i]);
+ pr_warn("Invalid IRQ %d\n",
+ p->interrupts[i]);
continue;
}
interrupt = kzalloc(sizeof(*interrupt),
@@ -2932,14 +2926,14 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
ioport->io2.address_length);
}
else {
- pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
+ pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
return AE_ERROR;
}
return AE_OK;
}
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
- resource->type);
+ resource->type);
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
@@ -2960,7 +2954,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
dprintk("Evaluating _STA\n");
result = acpi_bus_get_status(device);
if (result) {
- pr_warn(DRV_PFX "Unable to read status\n");
+ pr_warn("Unable to read status\n");
goto end;
}
@@ -2976,8 +2970,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
sony_pic_read_possible_resource, &spic_dev);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "Failure evaluating %s\n",
- METHOD_NAME__PRS);
+ pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
result = -ENODEV;
}
end:
@@ -3090,7 +3083,7 @@ static int sony_pic_enable(struct acpi_device *device,
/* check for total failure */
if (ACPI_FAILURE(status)) {
- pr_err(DRV_PFX "Error evaluating _SRS\n");
+ pr_err("Error evaluating _SRS\n");
result = -ENODEV;
goto end;
}
@@ -3182,7 +3175,7 @@ static int sony_pic_remove(struct acpi_device *device, int type)
struct sony_pic_irq *irq, *tmp_irq;
if (sony_pic_disable(device)) {
- pr_err(DRV_PFX "Couldn't disable device.\n");
+ pr_err("Couldn't disable device\n");
return -ENXIO;
}
@@ -3222,8 +3215,7 @@ static int sony_pic_add(struct acpi_device *device)
struct sony_pic_ioport *io, *tmp_io;
struct sony_pic_irq *irq, *tmp_irq;
- pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -3233,14 +3225,14 @@ static int sony_pic_add(struct acpi_device *device)
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
- pr_err(DRV_PFX "Unable to read possible resources.\n");
+ pr_err("Unable to read possible resources\n");
goto err_free_resources;
}
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto err_free_resources;
}
@@ -3281,7 +3273,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_ioport) {
- pr_err(DRV_PFX "Failed to request_region.\n");
+ pr_err("Failed to request_region\n");
result = -ENODEV;
goto err_remove_compat;
}
@@ -3301,7 +3293,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_irq) {
- pr_err(DRV_PFX "Failed to request_irq.\n");
+ pr_err("Failed to request_irq\n");
result = -ENODEV;
goto err_release_region;
}
@@ -3309,7 +3301,7 @@ static int sony_pic_add(struct acpi_device *device)
/* set resource status _SRS */
result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
if (result) {
- pr_err(DRV_PFX "Couldn't enable device.\n");
+ pr_err("Couldn't enable device\n");
goto err_free_irq;
}
@@ -3418,7 +3410,7 @@ static int __init sony_laptop_init(void)
if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
result = acpi_bus_register_driver(&sony_pic_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SPIC driver.");
+ pr_err("Unable to register SPIC driver\n");
goto out;
}
spic_drv_registered = 1;
@@ -3426,7 +3418,7 @@ static int __init sony_laptop_init(void)
result = acpi_bus_register_driver(&sony_nc_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SNC driver.");
+ pr_err("Unable to register SNC driver\n");
goto out_unregister_pic;
}
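
Most hunks in this series apply the same mechanical conversion: the per-driver DRV_PFX / KERN_* prefixes go away and a single pr_fmt() definition prepends the module name instead. A minimal sketch of the idiom (illustrative module, not part of the patch):

/* Must be defined before printk.h is pulled in (here via kernel.h). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        /* Logs roughly as "example: kfifo_alloc failed" at KERN_ERR. */
        pr_err("kfifo_alloc failed\n");
        return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");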
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 865ef78..e24f5ae 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -25,6 +25,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -40,9 +42,6 @@
#define TC1100_INSTANCE_WIRELESS 1
#define TC1100_INSTANCE_JOGDIAL 2
-#define TC1100_LOGPREFIX "tc1100-wmi: "
-#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
-
MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
@@ -264,7 +263,7 @@ static int __init tc1100_init(void)
if (error)
goto err_device_del;
- printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+ pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
return 0;
err_device_del:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 562fcf0..77f6e70 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,6 +21,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TPACPI_VERSION "0.24"
#define TPACPI_SYSFS_VERSION 0x020700
@@ -224,17 +226,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_MAX_ACPI_ARGS 3
-/* printk headers */
-#define TPACPI_LOG TPACPI_FILE ": "
-#define TPACPI_EMERG KERN_EMERG TPACPI_LOG
-#define TPACPI_ALERT KERN_ALERT TPACPI_LOG
-#define TPACPI_CRIT KERN_CRIT TPACPI_LOG
-#define TPACPI_ERR KERN_ERR TPACPI_LOG
-#define TPACPI_WARN KERN_WARNING TPACPI_LOG
-#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
-#define TPACPI_INFO KERN_INFO TPACPI_LOG
-#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
-
/* Debugging printk groups */
#define TPACPI_DBG_ALL 0xffff
#define TPACPI_DBG_DISCLOSETASK 0x8000
@@ -389,34 +380,36 @@ static int tpacpi_uwb_emulstate;
* Debugging helpers
*/
-#define dbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & (a_dbg_level)) \
- printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
- } while (0)
+#define dbg_printk(a_dbg_level, format, arg...) \
+do { \
+ if (dbg_level & (a_dbg_level)) \
+ printk(KERN_DEBUG pr_fmt("%s: " format), \
+ __func__, ##arg); \
+} while (0)
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
#define vdbg_printk dbg_printk
static const char *str_supported(int is_supported);
#else
-#define vdbg_printk(a_dbg_level, format, arg...) \
- do { } while (0)
+static inline const char *str_supported(int is_supported) { return ""; }
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ no_printk(format, ##arg)
#endif
static void tpacpi_log_usertask(const char * const what)
{
- printk(TPACPI_DEBUG "%s: access by process with PID %d\n",
- what, task_tgid_vnr(current));
+ printk(KERN_DEBUG pr_fmt("%s: access by process with PID %d\n"),
+ what, task_tgid_vnr(current));
}
-#define tpacpi_disclose_usertask(what, format, arg...) \
- do { \
- if (unlikely( \
- (dbg_level & TPACPI_DBG_DISCLOSETASK) && \
- (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
- printk(TPACPI_DEBUG "%s: PID %d: " format, \
- what, task_tgid_vnr(current), ## arg); \
- } \
- } while (0)
+#define tpacpi_disclose_usertask(what, format, arg...) \
+do { \
+ if (unlikely((dbg_level & TPACPI_DBG_DISCLOSETASK) && \
+ (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
+ printk(KERN_DEBUG pr_fmt("%s: PID %d: " format), \
+ what, task_tgid_vnr(current), ## arg); \
+ } \
+} while (0)
/*
* Quirk handling helpers
@@ -535,15 +528,6 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
"HKEY", /* all others */
); /* 570 */
-TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
- "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
- "\\_SB.PCI0.VID0", /* 770e */
- "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
- "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
- "\\_SB.PCI0.AGP.VID", /* all others */
- ); /* R30, R31 */
-
-
/*************************************************************************
* ACPI helpers
*/
@@ -563,7 +547,7 @@ static int acpi_evalf(acpi_handle handle,
int quiet;
if (!*fmt) {
- printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
+ pr_err("acpi_evalf() called with empty format\n");
return 0;
}
@@ -588,7 +572,7 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", c);
va_end(ap);
return 0;
@@ -617,13 +601,13 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
- printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %s\n",
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
method, fmt0, acpi_format_exception(status));
return success;
@@ -767,8 +751,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
- ibm->name, rc);
+ pr_err("acpi_bus_get_device(%s) failed: %d\n", ibm->name, rc);
return -ENODEV;
}
@@ -781,12 +764,10 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- printk(TPACPI_NOTICE
- "another device driver is already "
- "handling %s events\n", ibm->name);
+ pr_notice("another device driver is already "
+ "handling %s events\n", ibm->name);
} else {
- printk(TPACPI_ERR
- "acpi_install_notify_handler(%s) failed: %s\n",
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
ibm->name, acpi_format_exception(status));
}
return -ENODEV;
@@ -811,8 +792,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(TPACPI_ERR
- "failed to allocate memory for ibm->acpi->driver\n");
+ pr_err("failed to allocate memory for ibm->acpi->driver\n");
return -ENOMEM;
}
@@ -823,7 +803,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+ pr_err("acpi_bus_register_driver(%s) failed: %d\n",
ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
@@ -1081,15 +1061,14 @@ static int parse_strtoul(const char *buf,
static void tpacpi_disable_brightness_delay(void)
{
if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
- printk(TPACPI_NOTICE
- "ACPI backlight control delay disabled\n");
+ pr_notice("ACPI backlight control delay disabled\n");
}
static void printk_deprecated_attribute(const char * const what,
const char * const details)
{
tpacpi_log_usertask("deprecated sysfs attribute");
- printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and "
+ pr_warn("WARNING: sysfs attribute %s is deprecated and "
"will be removed. %s\n",
what, details);
}
@@ -1264,8 +1243,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
&tpacpi_rfk_rfkill_ops,
atp_rfk);
if (!atp_rfk || !atp_rfk->rfkill) {
- printk(TPACPI_ERR
- "failed to allocate memory for rfkill class\n");
+ pr_err("failed to allocate memory for rfkill class\n");
kfree(atp_rfk);
return -ENOMEM;
}
@@ -1275,9 +1253,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
sw_status = (tp_rfkops->get_status)();
if (sw_status < 0) {
- printk(TPACPI_ERR
- "failed to read initial state for %s, error %d\n",
- name, sw_status);
+ pr_err("failed to read initial state for %s, error %d\n",
+ name, sw_status);
} else {
sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
@@ -1291,9 +1268,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
- printk(TPACPI_ERR
- "failed to register %s rfkill switch: %d\n",
- name, res);
+ pr_err("failed to register %s rfkill switch: %d\n", name, res);
rfkill_destroy(atp_rfk->rfkill);
kfree(atp_rfk);
return res;
@@ -1301,7 +1276,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
tpacpi_rfkill_switches[id] = atp_rfk;
- printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ pr_info("rfkill switch %s: radio is %sblocked\n",
name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1825,10 +1800,8 @@ static void __init tpacpi_check_outdated_fw(void)
* broken, or really stable to begin with, so it is
* best if the user upgrades the firmware anyway.
*/
- printk(TPACPI_WARN
- "WARNING: Outdated ThinkPad BIOS/EC firmware\n");
- printk(TPACPI_WARN
- "WARNING: This firmware may be missing critical bug "
+ pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
+ pr_warn("WARNING: This firmware may be missing critical bug "
"fixes and/or important features\n");
}
}
@@ -2117,9 +2090,7 @@ void static hotkey_mask_warn_incomplete_mask(void)
(hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
if (wantedmask)
- printk(TPACPI_NOTICE
- "required events 0x%08x not enabled!\n",
- wantedmask);
+ pr_notice("required events 0x%08x not enabled!\n", wantedmask);
}
/*
@@ -2157,10 +2128,9 @@ static int hotkey_mask_set(u32 mask)
* a given event.
*/
if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
- printk(TPACPI_NOTICE
- "asked for hotkey mask 0x%08x, but "
- "firmware forced it to 0x%08x\n",
- fwmask, hotkey_acpi_mask);
+ pr_notice("asked for hotkey mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ fwmask, hotkey_acpi_mask);
}
if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
@@ -2184,13 +2154,11 @@ static int hotkey_user_mask_set(const u32 mask)
(mask == 0xffff || mask == 0xffffff ||
mask == 0xffffffff)) {
tp_warned.hotkey_mask_ff = 1;
- printk(TPACPI_NOTICE
- "setting the hotkey mask to 0x%08x is likely "
- "not the best way to go about it\n", mask);
- printk(TPACPI_NOTICE
- "please consider using the driver defaults, "
- "and refer to up-to-date thinkpad-acpi "
- "documentation\n");
+ pr_notice("setting the hotkey mask to 0x%08x is likely "
+ "not the best way to go about it\n", mask);
+ pr_notice("please consider using the driver defaults, "
+ "and refer to up-to-date thinkpad-acpi "
+ "documentation\n");
}
/* Try to enable what the user asked for, plus whatever we need.
@@ -2574,8 +2542,7 @@ static void hotkey_poll_setup(const bool may_warn)
NULL, TPACPI_NVRAM_KTHREAD_NAME);
if (IS_ERR(tpacpi_hotkey_task)) {
tpacpi_hotkey_task = NULL;
- printk(TPACPI_ERR
- "could not create kernel thread "
+ pr_err("could not create kernel thread "
"for hotkey polling\n");
}
}
@@ -2583,11 +2550,10 @@ static void hotkey_poll_setup(const bool may_warn)
hotkey_poll_stop_sync();
if (may_warn && (poll_driver_mask || poll_user_mask) &&
hotkey_poll_freq == 0) {
- printk(TPACPI_NOTICE
- "hot keys 0x%08x and/or events 0x%08x "
- "require polling, which is currently "
- "disabled\n",
- poll_user_mask, poll_driver_mask);
+ pr_notice("hot keys 0x%08x and/or events 0x%08x "
+ "require polling, which is currently "
+ "disabled\n",
+ poll_user_mask, poll_driver_mask);
}
}
}
@@ -2811,13 +2777,13 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
mutex_unlock(&hotkey_mutex);
if (rc < 0)
- printk(TPACPI_ERR "hotkey_source_mask: failed to update the"
- "firmware event mask!\n");
+ pr_err("hotkey_source_mask: "
+ "failed to update the firmware event mask!\n");
if (r_ev)
- printk(TPACPI_NOTICE "hotkey_source_mask: "
- "some important events were disabled: "
- "0x%04x\n", r_ev);
+ pr_notice("hotkey_source_mask: "
+ "some important events were disabled: 0x%04x\n",
+ r_ev);
tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
@@ -3048,8 +3014,7 @@ static void hotkey_exit(void)
if (((tp_features.hotkey_mask &&
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
- printk(TPACPI_ERR
- "failed to restore hot key mask "
+ pr_err("failed to restore hot key mask "
"to BIOS defaults\n");
}
@@ -3288,10 +3253,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
if ((hkeyv >> 8) != 1) {
- printk(TPACPI_ERR "unknown version of the "
- "HKEY interface: 0x%x\n", hkeyv);
- printk(TPACPI_ERR "please report this to %s\n",
- TPACPI_MAIL);
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
} else {
/*
* MHKV 0x100 in A31, R40, R40e,
@@ -3304,8 +3268,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- printk(TPACPI_ERR
- "missing MHKA handler, "
+ pr_err("missing MHKA handler, "
"please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
@@ -3343,16 +3306,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!tpacpi_wlsw_emulstate;
- printk(TPACPI_INFO
- "radio switch emulation enabled\n");
+ pr_info("radio switch emulation enabled\n");
} else
#endif
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- printk(TPACPI_INFO
- "radio switch found; radios are %s\n",
+ pr_info("radio switch found; radios are %s\n",
enabled(status, 0));
}
if (tp_features.hotkey_wlsw)
@@ -3363,8 +3324,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
- printk(TPACPI_INFO
- "possible tablet mode switch found; "
+ pr_info("possible tablet mode switch found; "
"ThinkPad in %s mode\n",
(tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
@@ -3382,8 +3342,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
- printk(TPACPI_ERR
- "failed to allocate memory for key map\n");
+ pr_err("failed to allocate memory for key map\n");
res = -ENOMEM;
goto err_exit;
}
@@ -3426,13 +3385,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
- printk(TPACPI_INFO
- "This ThinkPad has standard ACPI backlight "
- "brightness control, supported by the ACPI "
- "video driver\n");
- printk(TPACPI_NOTICE
- "Disabling thinkpad-acpi brightness events "
- "by default...\n");
+ pr_info("This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+ "video driver\n");
+ pr_notice("Disabling thinkpad-acpi brightness events "
+ "by default...\n");
/* Disable brightness up/down on Lenovo thinkpads when
* ACPI is handling them, otherwise it is plain impossible
@@ -3539,8 +3496,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
- printk(TPACPI_ALERT
- "EMERGENCY WAKEUP: battery almost empty\n");
+ pr_alert("EMERGENCY WAKEUP: battery almost empty\n");
/* how to auto-heal: */
/* 2313: woke up from S3, go to S4/S5 */
/* 2413: woke up from S4, go to S5 */
@@ -3551,9 +3507,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
}
if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
- printk(TPACPI_INFO
- "woke up due to a hot-unplug "
- "request...\n");
+ pr_info("woke up due to a hot-unplug request...\n");
hotkey_wakeup_reason_notify_change();
}
return true;
@@ -3605,37 +3559,31 @@ static bool hotkey_notify_thermal(const u32 hkey,
switch (hkey) {
case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
+ pr_info("EC reports that Thermal Table has changed\n");
/* recommended action: do nothing, we don't have
* Lenovo ATM information */
return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: battery is too hot!\n");
+ pr_crit("THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: battery is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: "
+ pr_crit("THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: "
- "a sensor reports something is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: "
+ "a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
default:
- printk(TPACPI_ALERT
- "THERMAL ALERT: unknown thermal alarm received\n");
+ pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
known = false;
}
@@ -3652,8 +3600,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
bool known_ev;
if (event != 0x80) {
- printk(TPACPI_ERR
- "unknown HKEY notification event %d\n", event);
+ pr_err("unknown HKEY notification event %d\n", event);
/* forward it to userspace, maybe it knows how to handle it */
acpi_bus_generate_netlink_event(
ibm->acpi->device->pnp.device_class,
@@ -3664,7 +3611,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
while (1) {
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
- printk(TPACPI_ERR "failed to retrieve HKEY event\n");
+ pr_err("failed to retrieve HKEY event\n");
return;
}
@@ -3692,8 +3639,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
switch (hkey) {
case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "bay ejected\n");
+ pr_info("bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
break;
@@ -3709,8 +3655,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
/* 0x4000-0x4FFF: dock-related wakeups */
if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "undocked\n");
+ pr_info("undocked\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
} else {
@@ -3741,11 +3686,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = false;
}
if (!known_ev) {
- printk(TPACPI_NOTICE
- "unhandled HKEY event 0x%04x\n", hkey);
- printk(TPACPI_NOTICE
- "please report the conditions when this "
- "event happened to %s\n", TPACPI_MAIL);
+ pr_notice("unhandled HKEY event 0x%04x\n", hkey);
+ pr_notice("please report the conditions when this "
+ "event happened to %s\n", TPACPI_MAIL);
}
/* Legacy events */
@@ -3778,8 +3721,7 @@ static void hotkey_resume(void)
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
- printk(TPACPI_ERR
- "error while attempting to reset the event "
+ pr_err("error while attempting to reset the event "
"firmware interface\n");
tpacpi_send_radiosw_update();
@@ -3824,14 +3766,12 @@ static void hotkey_enabledisable_warn(bool enable)
{
tpacpi_log_usertask("procfs hotkey enable/disable");
if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
- TPACPI_WARN
- "hotkey enable/disable functionality has been "
- "removed from the driver. Hotkeys are always "
- "enabled\n"))
- printk(TPACPI_ERR
- "Please remove the hotkey=enable module "
- "parameter, it is deprecated. Hotkeys are always "
- "enabled\n");
+ pr_fmt("hotkey enable/disable functionality has been "
+ "removed from the driver. "
+ "Hotkeys are always enabled.\n")))
+ pr_err("Please remove the hotkey=enable module "
+ "parameter, it is deprecated. "
+ "Hotkeys are always enabled.\n");
}
static int hotkey_write(char *buf)
@@ -4011,8 +3951,7 @@ static void bluetooth_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
TP_ACPI_BLTH_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save bluetooth state to NVRAM\n");
+ pr_notice("failed to save bluetooth state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"bluestooth state saved to NVRAM\n");
@@ -4051,8 +3990,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
tp_features.bluetooth = 1;
- printk(TPACPI_INFO
- "bluetooth switch emulation enabled\n");
+ pr_info("bluetooth switch emulation enabled\n");
} else
#endif
if (tp_features.bluetooth &&
@@ -4203,8 +4141,7 @@ static void wan_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
TP_ACPI_WGSV_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save WWAN state to NVRAM\n");
+ pr_notice("failed to save WWAN state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"WWAN state saved to NVRAM\n");
@@ -4241,8 +4178,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
tp_features.wan = 1;
- printk(TPACPI_INFO
- "wwan switch emulation enabled\n");
+ pr_info("wwan switch emulation enabled\n");
} else
#endif
if (tp_features.wan &&
@@ -4382,8 +4318,7 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
tp_features.uwb = 1;
- printk(TPACPI_INFO
- "uwb switch emulation enabled\n");
+ pr_info("uwb switch emulation enabled\n");
} else
#endif
if (tp_features.uwb &&
@@ -4444,6 +4379,15 @@ static int video_orig_autosw;
static int video_autosw_get(void);
static int video_autosw_set(int enable);
+TPACPI_HANDLE(vid, root,
+ "\\_SB.PCI.AGP.VGA", /* 570 */
+ "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
+ "\\_SB.PCI0.VID0", /* 770e */
+ "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
+ "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
+ "\\_SB.PCI0.AGP.VID", /* all others */
+ ); /* R30, R31 */
+
TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
static int __init video_init(struct ibm_init_struct *iibm)
@@ -4487,7 +4431,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- printk(TPACPI_ERR "error while trying to restore original "
+ pr_err("error while trying to restore original "
"video autoswitch mode\n");
}
@@ -4560,8 +4504,7 @@ static int video_outputsw_set(int status)
res = acpi_evalf(vid_handle, NULL,
"ASWT", "vdd", status * 0x100, 0);
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
break;
@@ -4630,8 +4573,7 @@ static int video_outputsw_cycle(void)
return -ENOSYS;
}
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
@@ -5348,7 +5290,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
- printk(TPACPI_ERR "Out of memory for LED data\n");
+ pr_err("Out of memory for LED data\n");
return -ENOMEM;
}
@@ -5367,9 +5309,8 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ pr_notice("warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
return 0;
}
@@ -5639,17 +5580,16 @@ static void thermal_dump_all_sensors(void)
if (n <= 0)
return;
- printk(TPACPI_NOTICE
- "temperatures (Celsius):");
+ pr_notice("temperatures (Celsius):");
for (i = 0; i < n; i++) {
if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
- printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ pr_cont(" %d", (int)(t.temp[i] / 1000));
else
- printk(KERN_CONT " N/A");
+ pr_cont(" N/A");
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
/* sysfs temp##_input -------------------------------------------------- */
@@ -5769,14 +5709,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"falling back to ACPI TMPx access "
"mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
}
@@ -6129,8 +6067,8 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(TPACPI_ERR "Unknown _BCL data, "
- "please report this to %s\n", TPACPI_MAIL);
+ pr_err("Unknown _BCL data, please report this to %s\n",
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
@@ -6214,18 +6152,15 @@ static void __init tpacpi_detect_brightness_capabilities(void)
switch (b) {
case 16:
bright_maxlvl = 15;
- printk(TPACPI_INFO
- "detected a 16-level brightness capable ThinkPad\n");
+ pr_info("detected a 16-level brightness capable ThinkPad\n");
break;
case 8:
case 0:
bright_maxlvl = 7;
- printk(TPACPI_INFO
- "detected a 8-level brightness capable ThinkPad\n");
+ pr_info("detected a 8-level brightness capable ThinkPad\n");
break;
default:
- printk(TPACPI_ERR
- "Unsupported brightness interface, "
+ pr_err("Unsupported brightness interface, "
"please contact %s\n", TPACPI_MAIL);
tp_features.bright_unkfw = 1;
bright_maxlvl = b - 1;
@@ -6260,22 +6195,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (acpi_video_backlight_support()) {
if (brightness_enable > 1) {
- printk(TPACPI_INFO
- "Standard ACPI backlight interface "
- "available, not loading native one.\n");
+ pr_info("Standard ACPI backlight interface "
+ "available, not loading native one\n");
return 1;
} else if (brightness_enable == 1) {
- printk(TPACPI_WARN
- "Cannot enable backlight brightness support, "
+ pr_warn("Cannot enable backlight brightness support, "
"ACPI is already handling it. Refer to the "
- "acpi_backlight kernel parameter\n");
+ "acpi_backlight kernel parameter.\n");
return 1;
}
} else if (tp_features.bright_acpimode && brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
+ pr_notice("Standard ACPI backlight interface not "
+ "available, thinkpad_acpi native "
+ "brightness control enabled\n");
}
/*
@@ -6319,19 +6251,17 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (IS_ERR(ibm_backlight_device)) {
int rc = PTR_ERR(ibm_backlight_device);
ibm_backlight_device = NULL;
- printk(TPACPI_ERR "Could not register backlight device\n");
+ pr_err("Could not register backlight device\n");
return rc;
}
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness is supported\n");
if (quirks & TPACPI_BRGHT_Q_ASK) {
- printk(TPACPI_NOTICE
- "brightness: will use unverified default: "
- "brightness_mode=%d\n", brightness_mode);
- printk(TPACPI_NOTICE
- "brightness: please report to %s whether it works well "
- "or not on your ThinkPad\n", TPACPI_MAIL);
+ pr_notice("brightness: will use unverified default: "
+ "brightness_mode=%d\n", brightness_mode);
+ pr_notice("brightness: please report to %s whether it works well "
+ "or not on your ThinkPad\n", TPACPI_MAIL);
}
/* Added by mistake in early 2007. Probably useless, but it could
@@ -6804,8 +6734,7 @@ static int __init volume_create_alsa_mixer(void)
rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
- printk(TPACPI_ERR
- "Failed to create ALSA card structures: %d\n", rc);
+ pr_err("Failed to create ALSA card structures: %d\n", rc);
return 1;
}
@@ -6839,9 +6768,8 @@ static int __init volume_create_alsa_mixer(void)
ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
rc = snd_ctl_add(card, ctl_vol);
if (rc < 0) {
- printk(TPACPI_ERR
- "Failed to create ALSA volume control: %d\n",
- rc);
+ pr_err("Failed to create ALSA volume control: %d\n",
+ rc);
goto err_exit;
}
data->ctl_vol_id = &ctl_vol->id;
@@ -6850,8 +6778,7 @@ static int __init volume_create_alsa_mixer(void)
ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
rc = snd_ctl_add(card, ctl_mute);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
- rc);
+ pr_err("Failed to create ALSA mute control: %d\n", rc);
goto err_exit;
}
data->ctl_mute_id = &ctl_mute->id;
@@ -6859,7 +6786,7 @@ static int __init volume_create_alsa_mixer(void)
snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
+ pr_err("Failed to register ALSA card: %d\n", rc);
goto err_exit;
}
@@ -6915,9 +6842,8 @@ static int __init volume_init(struct ibm_init_struct *iibm)
return -EINVAL;
if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
- printk(TPACPI_ERR
- "UCMS step volume mode not implemented, "
- "please contact %s\n", TPACPI_MAIL);
+ pr_err("UCMS step volume mode not implemented, "
+ "please contact %s\n", TPACPI_MAIL);
return 1;
}
@@ -6981,13 +6907,11 @@ static int __init volume_init(struct ibm_init_struct *iibm)
rc = volume_create_alsa_mixer();
if (rc) {
- printk(TPACPI_ERR
- "Could not create the ALSA mixer interface\n");
+ pr_err("Could not create the ALSA mixer interface\n");
return rc;
}
- printk(TPACPI_INFO
- "Console audio control enabled, mode: %s\n",
+ pr_info("Console audio control enabled, mode: %s\n",
(volume_control_allowed) ?
"override (read/write)" :
"monitor (read only)");
@@ -7049,12 +6973,10 @@ static int volume_write(char *buf)
if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
tp_warned.volume_ctrl_forbidden = 1;
- printk(TPACPI_NOTICE
- "Console audio control in monitor mode, "
- "changes are not allowed.\n");
- printk(TPACPI_NOTICE
- "Use the volume_control=1 module parameter "
- "to enable volume control\n");
+ pr_notice("Console audio control in monitor mode, "
+ "changes are not allowed\n");
+ pr_notice("Use the volume_control=1 module parameter "
+ "to enable volume control\n");
}
return -EPERM;
}
@@ -7129,8 +7051,7 @@ static void inline volume_alsa_notify_change(void)
static int __init volume_init(struct ibm_init_struct *iibm)
{
- printk(TPACPI_INFO
- "volume: disabled as there is no ALSA support in this kernel\n");
+ pr_info("volume: disabled as there is no ALSA support in this kernel\n");
return 1;
}
@@ -7337,9 +7258,8 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
static void fan_quirk1_setup(void)
{
if (fan_control_initial_status == 0x07) {
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
+ pr_notice("fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -7726,8 +7646,7 @@ static void fan_watchdog_reset(void)
if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
msecs_to_jiffies(fan_watchdog_maxinterval
* 1000))) {
- printk(TPACPI_ERR
- "failed to queue the fan watchdog, "
+ pr_err("failed to queue the fan watchdog, "
"watchdog will not trigger\n");
}
} else
@@ -7741,11 +7660,11 @@ static void fan_watchdog_fire(struct work_struct *ignored)
if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
return;
- printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+ pr_notice("fan watchdog: enabling fan\n");
rc = fan_set_enable();
if (rc < 0) {
- printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
+ pr_err("fan watchdog: error %d while enabling fan, "
+ "will try again later...\n", -rc);
/* reschedule for later */
fan_watchdog_reset();
}
@@ -8049,8 +7968,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
"secondary fan support enabled\n");
}
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"fan status and control unavailable\n");
return 1;
}
@@ -8150,9 +8068,8 @@ static void fan_suspend(pm_message_t state)
fan_control_resume_level = 0;
rc = fan_get_status_safe(&fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to read fan level for later "
- "restore during resume: %d\n", rc);
+ pr_notice("failed to read fan level for later "
+ "restore during resume: %d\n", rc);
/* if it is undefined, don't attempt to restore it.
* KEEP THIS LAST */
@@ -8207,13 +8124,11 @@ static void fan_resume(void)
return;
}
if (do_set) {
- printk(TPACPI_NOTICE
- "restoring fan level to 0x%02x\n",
- fan_control_resume_level);
+ pr_notice("restoring fan level to 0x%02x\n",
+ fan_control_resume_level);
rc = fan_set_level_safe(fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to restore fan level: %d\n", rc);
+ pr_notice("failed to restore fan level: %d\n", rc);
}
}
@@ -8305,8 +8220,8 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
*rc = fan_set_level_safe(level);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "level command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("level command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set level to %d\n", level);
@@ -8321,8 +8236,8 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
*rc = fan_set_enable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "enable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("enable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "enable\n");
@@ -8336,8 +8251,8 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
*rc = fan_set_disable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "disable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("disable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "disable\n");
@@ -8356,8 +8271,8 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
*rc = fan_set_speed(speed);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "speed command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("speed command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set speed to %d\n", speed);
@@ -8560,8 +8475,8 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->acpi->notify) {
ret = setup_acpi_notify(ibm);
if (ret == -ENODEV) {
- printk(TPACPI_NOTICE "disabling subdriver %s\n",
- ibm->name);
+ pr_notice("disabling subdriver %s\n",
+ ibm->name);
ret = 0;
goto err_out;
}
@@ -8583,8 +8498,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
entry = proc_create_data(ibm->name, mode, proc_dir,
&dispatch_proc_fops, ibm);
if (!entry) {
- printk(TPACPI_ERR "unable to create proc entry %s\n",
- ibm->name);
+ pr_err("unable to create proc entry %s\n", ibm->name);
ret = -ENODEV;
goto err_out;
}
@@ -8683,13 +8597,11 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_release = (ec_fw_string[4] << 8)
| ec_fw_string[5];
} else {
- printk(TPACPI_NOTICE
- "ThinkPad firmware release %s "
- "doesn't match the known patterns\n",
- ec_fw_string);
- printk(TPACPI_NOTICE
- "please report this to %s\n",
- TPACPI_MAIL);
+ pr_notice("ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n",
+ TPACPI_MAIL);
}
break;
}
@@ -8733,8 +8645,7 @@ static int __init probe_for_thinkpad(void)
tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
if (!ec_handle) {
if (is_thinkpad)
- printk(TPACPI_ERR
- "Not yet supported ThinkPad detected!\n");
+ pr_err("Not yet supported ThinkPad detected!\n");
return -ENODEV;
}
@@ -8746,10 +8657,10 @@ static int __init probe_for_thinkpad(void)
static void __init thinkpad_acpi_init_banner(void)
{
- printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
- printk(TPACPI_INFO "%s\n", TPACPI_URL);
+ pr_info("%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ pr_info("%s\n", TPACPI_URL);
- printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
+ pr_info("ThinkPad BIOS %s, EC %s\n",
(thinkpad_id.bios_version_str) ?
thinkpad_id.bios_version_str : "unknown",
(thinkpad_id.ec_version_str) ?
@@ -8758,7 +8669,7 @@ static void __init thinkpad_acpi_init_banner(void)
BUG_ON(!thinkpad_id.vendor);
if (thinkpad_id.model_str)
- printk(TPACPI_INFO "%s %s, model %s\n",
+ pr_info("%s %s, model %s\n",
(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
"IBM" : ((thinkpad_id.vendor ==
PCI_VENDOR_ID_LENOVO) ?
@@ -9024,8 +8935,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = get_thinkpad_model_data(&thinkpad_id);
if (ret) {
- printk(TPACPI_ERR
- "unable to get DMI data: %d\n", ret);
+ pr_err("unable to get DMI data: %d\n", ret);
thinkpad_acpi_module_exit();
return ret;
}
@@ -9051,16 +8961,14 @@ static int __init thinkpad_acpi_module_init(void)
proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
if (!proc_dir) {
- printk(TPACPI_ERR
- "unable to create proc dir " TPACPI_PROC_DIR);
+ pr_err("unable to create proc dir " TPACPI_PROC_DIR "\n");
thinkpad_acpi_module_exit();
return -ENODEV;
}
ret = platform_driver_register(&tpacpi_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register main platform driver\n");
+ pr_err("unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9068,8 +8976,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register hwmon platform driver\n");
+ pr_err("unable to register hwmon platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9082,8 +8989,7 @@ static int __init thinkpad_acpi_module_init(void)
&tpacpi_hwmon_pdriver.driver);
}
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs driver attributes\n");
+ pr_err("unable to create sysfs driver attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9096,7 +9002,7 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- printk(TPACPI_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9106,16 +9012,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- printk(TPACPI_ERR
- "unable to register hwmon platform device\n");
+ pr_err("unable to register hwmon platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
ret = device_create_file(&tpacpi_sensors_pdev->dev,
&dev_attr_thinkpad_acpi_pdev_name);
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs hwmon device attributes\n");
+ pr_err("unable to create sysfs hwmon device attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9124,14 +9028,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_hwmon)) {
ret = PTR_ERR(tpacpi_hwmon);
tpacpi_hwmon = NULL;
- printk(TPACPI_ERR "unable to register hwmon device\n");
+ pr_err("unable to register hwmon device\n");
thinkpad_acpi_module_exit();
return ret;
}
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- printk(TPACPI_ERR "unable to allocate input device\n");
+ pr_err("unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
@@ -9163,7 +9067,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
- printk(TPACPI_ERR "unable to register input device\n");
+ pr_err("unable to register input device\n");
thinkpad_acpi_module_exit();
return ret;
} else {
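The thinkpad_acpi.c hunks above all apply the same recipe: the driver's private TPACPI_ERR/TPACPI_NOTICE/TPACPI_INFO prefix macros are dropped and the kernel's pr_err()/pr_notice()/pr_info() helpers are used instead, with a pr_fmt() definition earlier in the file supplying the module-name prefix. A minimal, self-contained sketch of that pattern follows; the module, messages and values are hypothetical and are not part of this patch.

/*
 * Illustrative only: shows the pr_fmt()/pr_*() idiom this series converts
 * drivers to. pr_fmt() must be defined before the printk helpers are
 * pulled in, hence it precedes the includes.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init prfmt_example_init(void)
{
	/* Expands to printk(KERN_INFO KBUILD_MODNAME ": loaded, mode %d\n", 3) */
	pr_info("loaded, mode %d\n", 3);

	/*
	 * pr_err(), pr_warn(), pr_notice(), pr_crit(), pr_alert() and
	 * pr_cont() behave the same way, which is what makes per-driver
	 * prefix macros such as TPACPI_ERR or MY_ERR redundant.
	 */
	pr_err("example error path\n");
	return 0;
}

static void __exit prfmt_example_exit(void)
{
	pr_info("unloaded\n");
}

module_init(prfmt_example_init);
module_exit(prfmt_example_exit);
MODULE_LICENSE("GPL");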
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 1d07d6d..4c20447 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -194,7 +194,7 @@ static int __init topstar_laptop_init(void)
if (ret < 0)
return ret;
- printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n");
+ pr_info("ACPI extras driver loaded\n");
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 63f42a2..cb009b2 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -35,6 +35,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TOSHIBA_ACPI_VERSION "0.19"
#define PROC_INTERFACE_VERSION 1
@@ -60,11 +62,6 @@ MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
-#define MY_LOGPREFIX "toshiba_acpi: "
-#define MY_ERR KERN_ERR MY_LOGPREFIX
-#define MY_NOTICE KERN_NOTICE MY_LOGPREFIX
-#define MY_INFO KERN_INFO MY_LOGPREFIX
-
/* Toshiba ACPI method paths */
#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
#define TOSH_INTERFACE_1 "\\_SB_.VALD"
@@ -301,7 +298,7 @@ static int toshiba_illumination_available(void)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return 0;
}
in[0] = 0xf400;
@@ -320,7 +317,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return;
}
@@ -331,7 +328,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 1;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed\n");
return;
}
} else {
@@ -341,7 +338,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 0;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return;
}
}
@@ -364,7 +361,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return LED_OFF;
}
@@ -373,7 +370,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[1] = 0x14e;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return LED_OFF;
}
@@ -517,7 +514,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
seq_printf(m, "brightness_levels: %d\n",
HCI_LCD_BRIGHTNESS_LEVELS);
} else {
- printk(MY_ERR "Error reading LCD brightness\n");
+ pr_err("Error reading LCD brightness\n");
}
return 0;
@@ -592,7 +589,7 @@ static int video_proc_show(struct seq_file *m, void *v)
seq_printf(m, "crt_out: %d\n", is_crt);
seq_printf(m, "tv_out: %d\n", is_tv);
} else {
- printk(MY_ERR "Error reading video out status\n");
+ pr_err("Error reading video out status\n");
}
return 0;
@@ -686,7 +683,7 @@ static int fan_proc_show(struct seq_file *m, void *v)
seq_printf(m, "running: %d\n", (value > 0));
seq_printf(m, "force_on: %d\n", force_fan);
} else {
- printk(MY_ERR "Error reading fan status\n");
+ pr_err("Error reading fan status\n");
}
return 0;
@@ -750,9 +747,9 @@ static int keys_proc_show(struct seq_file *m, void *v)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
} else {
- printk(MY_ERR "Error reading hotkey status\n");
+ pr_err("Error reading hotkey status\n");
goto end;
}
}
@@ -863,7 +860,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
value, 1, true)) {
- printk(MY_INFO "Unknown key %x\n",
+ pr_info("Unknown key %x\n",
value);
}
} else if (hci_result == HCI_NOT_SUPPORTED) {
@@ -871,7 +868,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
}
} while (hci_result != HCI_EMPTY);
}
@@ -883,13 +880,13 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to get notification device\n");
+ pr_info("Unable to get notification device\n");
return -ENODEV;
}
toshiba_acpi.hotkey_dev = input_allocate_device();
if (!toshiba_acpi.hotkey_dev) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
return -ENOMEM;
}
@@ -905,21 +902,21 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_install_notify_handler(toshiba_acpi.handle,
ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to install hotkey notification\n");
+ pr_info("Unable to install hotkey notification\n");
error = -ENODEV;
goto err_free_keymap;
}
status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to enable hotkeys\n");
+ pr_info("Unable to enable hotkeys\n");
error = -ENODEV;
goto err_remove_notify;
}
error = input_register_device(toshiba_acpi.hotkey_dev);
if (error) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
goto err_remove_notify;
}
@@ -980,17 +977,17 @@ static int __init toshiba_acpi_init(void)
if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else
return -ENODEV;
- printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n",
+ pr_info("Toshiba Laptop ACPI Extras version %s\n",
TOSHIBA_ACPI_VERSION);
- printk(MY_INFO " HCI method: %s\n", method_hci);
+ pr_info(" HCI method: %s\n", method_hci);
mutex_init(&toshiba_acpi.mutex);
@@ -998,7 +995,7 @@ static int __init toshiba_acpi_init(void)
-1, NULL, 0);
if (IS_ERR(toshiba_acpi.p_dev)) {
ret = PTR_ERR(toshiba_acpi.p_dev);
- printk(MY_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
toshiba_acpi.p_dev = NULL;
toshiba_acpi_exit();
return ret;
@@ -1028,7 +1025,7 @@ static int __init toshiba_acpi_init(void)
if (IS_ERR(toshiba_backlight_device)) {
ret = PTR_ERR(toshiba_backlight_device);
- printk(KERN_ERR "Could not register toshiba backlight device\n");
+ pr_err("Could not register toshiba backlight device\n");
toshiba_backlight_device = NULL;
toshiba_acpi_exit();
return ret;
@@ -1042,14 +1039,14 @@ static int __init toshiba_acpi_init(void)
&toshiba_rfk_ops,
&toshiba_acpi);
if (!toshiba_acpi.bt_rfk) {
- printk(MY_ERR "unable to allocate rfkill device\n");
+ pr_err("unable to allocate rfkill device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
ret = rfkill_register(toshiba_acpi.bt_rfk);
if (ret) {
- printk(MY_ERR "unable to register rfkill device\n");
+ pr_err("unable to register rfkill device\n");
rfkill_destroy(toshiba_acpi.bt_rfk);
toshiba_acpi_exit();
return ret;
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 9440686..5fb7186 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -17,6 +17,8 @@
* delivered.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -70,14 +72,13 @@ static int toshiba_bluetooth_enable(acpi_handle handle)
if (!(result & 0x01))
return 0;
- printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n");
+ pr_info("Re-enabling Toshiba Bluetooth\n");
res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
return 0;
- printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable "
- "Toshiba Bluetooth\n");
+ pr_warn("Failed to re-enable Toshiba Bluetooth\n");
return -ENODEV;
}
@@ -107,8 +108,8 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
&bt_present);
if (!ACPI_FAILURE(status) && bt_present) {
- printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - "
- "installing RFKill handler\n");
+ pr_info("Detected Toshiba ACPI Bluetooth device - "
+ "installing RFKill handler\n");
result = toshiba_bluetooth_enable(device->handle);
}
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 05cc796..f23d5a8 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -486,16 +486,16 @@ static void wmi_dump_wdg(const struct guid_block *g)
pr_info("\tnotify_id: %02X\n", g->notify_id);
pr_info("\treserved: %02X\n", g->reserved);
pr_info("\tinstance_count: %d\n", g->instance_count);
- pr_info("\tflags: %#x ", g->flags);
+ pr_info("\tflags: %#x", g->flags);
if (g->flags) {
if (g->flags & ACPI_WMI_EXPENSIVE)
- pr_cont("ACPI_WMI_EXPENSIVE ");
+ pr_cont(" ACPI_WMI_EXPENSIVE");
if (g->flags & ACPI_WMI_METHOD)
- pr_cont("ACPI_WMI_METHOD ");
+ pr_cont(" ACPI_WMI_METHOD");
if (g->flags & ACPI_WMI_STRING)
- pr_cont("ACPI_WMI_STRING ");
+ pr_cont(" ACPI_WMI_STRING");
if (g->flags & ACPI_WMI_EVENT)
- pr_cont("ACPI_WMI_EVENT ");
+ pr_cont(" ACPI_WMI_EVENT");
}
pr_cont("\n");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index c1372ed..fad153d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -11,6 +11,8 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,7 +22,6 @@
#include <acpi/acpi_drivers.h>
#define MODULE_NAME "xo15-ebook"
-#define PREFIX MODULE_NAME ": "
#define XO15_EBOOK_CLASS MODULE_NAME
#define XO15_EBOOK_TYPE_UNKNOWN 0x00
@@ -105,7 +106,7 @@ static int ebook_switch_add(struct acpi_device *device)
class = acpi_device_class(device);
if (strcmp(hid, XO15_EBOOK_HID)) {
- printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
+ pr_err("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d..d5ff142 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
flash_error_table[i].reason);
}
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
asd_show_update_bios, asd_store_update_bios);
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c4..6c7e033 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08..c85182a 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
bfa_boolean_t msix);
void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713..89ae4c8 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+ hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
}
/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+ return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129..9361252 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
/*
* Synchronized IOC failure processing routines
*/
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd5902..6bdd25a 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
-#define BNX2I_SQ_WQES_MIN 16
-#define BNX2I_570X_SQ_WQES_MAX 128
-#define BNX2I_5770X_SQ_WQES_MAX 512
-#define BNX2I_570X_SQ_WQES_DEFAULT 128
-#define BNX2I_5770X_SQ_WQES_DEFAULT 256
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 128
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
+#define CNIC_ARM_CQE_FP 2
#define CNIC_DISARM_CQE 0
#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
* after HBA reset is completed by bnx2i/cnic/bnx2
* modules
* @state: tracks offload connection state machine
- * @teardown_mode: indicates if conn teardown is abortive or orderly
+ * @timestamp: tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift: the amount of shift as part of the event coal calc
* @qp: QP information
* @ids: contains chip allocated *context id* & driver assigned
* *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
u32 state;
unsigned long timestamp;
int num_active_cmds;
+ u32 ec_shift;
struct qp_info qp;
struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b8951..5c54a2d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
u16 next_index;
u32 num_active_cmds;
-
/* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
* interrupts and other unwanted results
*/
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
- if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
- return;
- if (action == CNIC_ARM_CQE) {
+ if (action != CNIC_ARM_CQE_FP)
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
+ if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
num_active_cmds = ep->num_active_cmds;
if (num_active_cmds <= event_coal_min)
next_index = 1;
else
next_index = event_coal_min +
- (num_active_cmds - event_coal_min) / event_coal_div;
+ ((num_active_cmds - event_coal_min) >>
+ ep->ec_shift);
if (!next_index)
next_index = 1;
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
iscsi_init.dummy_buffer_addr_hi =
(u32) ((u64) hba->dummy_buf_dma >> 32);
+ hba->num_ccell = hba->max_sqes >> 1;
hba->ctx_ccell_tasks =
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
qp->cq_cons_idx++;
}
}
- bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
/**
@@ -1948,22 +1950,23 @@ cqe_out:
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
struct iscsi_kcqe *new_cqe_kcqe)
{
- struct bnx2i_conn *conn;
+ struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
- conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
- if (!conn) {
+ if (!bnx2i_conn) {
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
return;
}
- if (!conn->ep) {
+ if (!bnx2i_conn->ep) {
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
return;
}
-
- bnx2i_process_new_cqes(conn);
+ bnx2i_process_new_cqes(bnx2i_conn);
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+ bnx2i_process_new_cqes(bnx2i_conn);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a28..6adbdc3 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
wait_event_interruptible_timeout(hba->eh_wait,
(list_empty(&hba->ep_ofld_list) &&
list_empty(&hba->ep_destroy_list)),
- 10 * HZ);
+ 2 * HZ);
/* Wait for all endpoints to be torn down, Chip will be reset once
* control returns to network driver. So it is required to cleanup and
* release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9c..041928b 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
struct iscsi_endpoint *ep;
struct bnx2i_endpoint *bnx2i_ep;
+ u32 ec_div;
ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->hba = hba;
bnx2i_ep->hba_age = hba->age;
+
+ ec_div = event_coal_div;
+ while (ec_div >>= 1)
+ bnx2i_ep->ec_shift += 1;
+
hba->ofld_conns_active++;
init_waitqueue_head(&bnx2i_ep->ofld_wait);
return ep;
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
mutex_init(&hba->net_dev_lock);
init_waitqueue_head(&hba->eh_wait);
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->hba_shutdown_tmo = 20 * HZ;
+ hba->hba_shutdown_tmo = 30 * HZ;
hba->conn_teardown_tmo = 20 * HZ;
hba->conn_ctx_destroy_tmo = 6 * HZ;
} else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_cmd *cmd = task->dd_data;
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+ return -ENOMEM;
+
/*
* If there is no scsi_cmnd this must be a mgmt task
*/
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
- .cmd_per_lun = 32,
+ .cmd_per_lun = 24,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
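The bnx2i changes above trade a division in the completion-queue arming path for a right shift: bnx2i_alloc_ep() derives ec_shift from the event_coal_div module parameter, and bnx2i_arm_cq_event_coalescing() then uses ">> ep->ec_shift" where it previously divided by event_coal_div. A small stand-alone sketch of that arithmetic, with assumed example values (the equivalence holds when event_coal_div is a power of two, which is what the shift-counting loop effectively relies on):

#include <stdio.h>

int main(void)
{
	unsigned int event_coal_div = 4;	/* assumed example value of the module parameter */
	unsigned int ec_div = event_coal_div;
	unsigned int ec_shift = 0;
	unsigned int active_cmds = 40;		/* stand-ins for the driver's counters */
	unsigned int coal_min = 24;

	/* Same loop as in bnx2i_alloc_ep(): ec_shift becomes floor(log2(event_coal_div)) */
	while (ec_div >>= 1)
		ec_shift += 1;

	/* For a power-of-two divisor the two expressions are identical */
	printf("divide: %u, shift: %u\n",
	       (active_cmds - coal_min) / event_coal_div,
	       (active_cmds - coal_min) >> ec_shift);
	return 0;
}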
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9..155d7b9 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR "fcoe: Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
mutex_lock(&fcoe_config_mutex);
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ */
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Check if the wwpn is not same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport with given WWPN 0x%s already "
+ "exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&n_port->lp_mutex);
+
+ return rc;
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd..c4a9399 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
#endif /* _FCOE_H_ */
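Since wwn_to_str() renders the 64-bit WWN as 16 hex digits plus a terminator, the 32-byte buffers used by its callers have comfortable headroom. A userspace model of the helper (u64_to_wwn() is kernel-internal, so the equivalent big-endian formatting is done with a single %016llx here):

#include <stdint.h>
#include <stdio.h>

static void wwn_to_str_model(uint64_t wwn, char *buf, int len)
{
        /* Equivalent to splitting into bytes and printing each with %02x. */
        snprintf(buf, len, "%016llx", (unsigned long long)wwn);
}

int main(void)
{
        char buf[32];

        wwn_to_str_model(0x2000001b3212aabbULL, buf, sizeof(buf));
        printf("WWPN 0x%s\n", buf);     /* -> WWPN 0x2000001b3212aabb */
        return 0;
}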
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af..c74c4b8 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
struct fc_lport *lport = fip->lp;
struct fc_lport *vn_port = NULL;
u32 desc_mask;
- int is_vn_port = 0;
+ int num_vlink_desc;
+ int reset_phys_port = 0;
+ struct fip_vn_desc **vlink_desc_arr = NULL;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* mask of required descriptors. Validating each one clears its bit.
*/
- desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fh + 1);
+
+ /*
+ * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+ * before determining max Vx_Port descriptor but a buggy FCF could have
+ * omitted either or both MAC Address and Name Identifier descriptors
+ */
+ num_vlink_desc = rlen / sizeof(*vp);
+ if (num_vlink_desc)
+ vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+ GFP_ATOMIC);
+ if (!vlink_desc_arr)
+ return;
+ num_vlink_desc = 0;
+
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW;
if (dlen > rlen)
- return;
+ goto err;
/* Drop CVL if there are duplicate critical descriptors */
if ((desc->fip_dtype < 32) &&
+ (desc->fip_dtype != FIP_DT_VN_ID) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP CVL\n");
- return;
+ goto err;
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp))
- return;
+ goto err;
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_MAC);
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
if (dlen < sizeof(*wp))
- return;
+ goto err;
if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_NAME);
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
if (dlen < sizeof(*vp))
- return;
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(lport)) == 0 &&
- get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) == lport->port_id) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- break;
+ goto err;
+ vlink_desc_arr[num_vlink_desc++] = vp;
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (vn_port && (vn_port == lport)) {
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
}
- /* check if clr_vlink is for NPIV port */
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list) {
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(vn_port)) == 0 &&
- (get_unaligned_be64(&vp->fd_wwpn)
- == vn_port->wwpn) &&
- (ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(vn_port->host))) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- is_vn_port = 1;
- break;
- }
- }
- mutex_unlock(&lport->lp_mutex);
-
break;
default:
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
- return;
+ goto err;
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* reset only if all required descriptors were present and valid.
*/
- if (desc_mask) {
+ if (desc_mask)
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
desc_mask);
+ else if (!num_vlink_desc) {
+ LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+ /*
+ * No Vx_Port description. Clear all NPIV ports,
+ * followed by physical port
+ */
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
} else {
- LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ int i;
- if (is_vn_port)
- fc_lport_reset(vn_port);
- else {
- mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
- fcoe_ctlr_reset(fip);
- mutex_unlock(&fip->ctlr_mutex);
+ LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ for (i = 0; i < num_vlink_desc; i++) {
+ vp = vlink_desc_arr[i];
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (!vn_port)
+ continue;
+
+ /*
+ * 'port_id' is already validated, check MAC address and
+ * wwpn
+ */
+ if (compare_ether_addr(fip->get_src_addr(vn_port),
+ vp->fd_mac) != 0 ||
+ get_unaligned_be64(&vp->fd_wwpn) !=
+ vn_port->wwpn)
+ continue;
+
+ if (vn_port == lport)
+ /*
+ * Physical port, defer processing till all
+ * listed NPIV ports are cleared
+ */
+ reset_phys_port = 1;
+ else /* NPIV port */
+ fc_lport_reset(vn_port);
+ }
+ if (reset_phys_port) {
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
}
}
+
+err:
+ kfree(vlink_desc_arr);
}
/**
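The rewritten CVL handler above now collects every Vx_Port descriptor first and only then acts on them, resetting the listed NPIV ports immediately and deferring a reset of the physical port, if it was also named, until last. A simplified model of that ordering (the types and names below are illustrative, not libfc structures):

#include <stdbool.h>
#include <stdio.h>

struct port { const char *name; bool is_physical; };

static void reset_port(const struct port *p)
{
        printf("reset %s\n", p->name);
}

static void clear_virtual_links(const struct port **listed, int n)
{
        const struct port *phys = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (listed[i]->is_physical)
                        phys = listed[i];       /* defer the physical port */
                else
                        reset_port(listed[i]);  /* NPIV port: reset now    */
        }
        if (phys)
                reset_port(phys);               /* physical port goes last */
}

int main(void)
{
        struct port lp = { "physical", true };
        struct port v1 = { "npiv-1", false }, v2 = { "npiv-2", false };
        const struct port *cvl[] = { &v1, &lp, &v2 };

        clear_virtual_links(cvl, 3);
        return 0;
}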
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c..41068e8 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
struct fcoe_transport *ft = NULL;
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca..888086c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
+ num_hrrq = 0;
do {
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
- if (int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
- }
-
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
+ } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+ int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
} else
break;
}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b273..b9cb814 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
default:
FC_DISC_DBG(disc, "Received an unsupported request, "
"the opcode is (%x)\n", op);
+ fc_frame_free(fp);
break;
}
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a7..3b8a645 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
+ resp = ep->resp;
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
+ fc_fcp_ddp_done(fr_fsp(fp));
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a472..9cd2149 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
* DDP related resources for a fcp_pkt
* @fsp: The FCP packet that DDP had been used on
*/
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
- fc_fcp_retry_cmd(fsp);
- return 0;
+ return error;
}
fp = NULL;
}
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
FC_FCTL_REQ, 0);
rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+ seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
fsp, jiffies_to_msecs(rec_tov));
if (!seq)
goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
return;
}
- fsp->recov_seq = NULL;
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
}
fc_fcp_unlock_pkt(fsp);
- fsp->lp->tt.exch_done(seq);
out:
+ fsp->lp->tt.exch_done(seq);
fc_frame_free(fp);
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
}
/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
if (fc_fcp_lock_pkt(fsp))
goto out;
- fsp->lp->tt.exch_done(fsp->recov_seq);
- fsp->recov_seq = NULL;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
fc_fcp_unlock_pkt(fsp);
out:
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+ fsp->lp->tt.exch_done(fsp->recov_seq);
}
/**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819..c7d0712 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
/*
* Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f..db9238f 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
struct sas_ha_struct *sas_ha;
enum ata_completion_errors ac;
unsigned long flags;
+ struct ata_link *link;
if (!qc)
goto qc_already_gone;
dev = qc->ap->private_data;
sas_ha = dev->port->ha;
+ link = &dev->sata_dev.ap->link;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+ if (!link->sactive) {
+ qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ } else {
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
- qc->err_mask = ac;
+ if (!link->sactive) {
+ qc->err_mask = ac;
+ } else {
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.tf.feature = 0x04; /* status err */
dev->sata_dev.tf.command = ATA_ERR;
}
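For NCQ commands the change above records the error on the ATA link's EH info and only flags the qc as failed, so libata error handling can work out which queued command actually failed; non-NCQ errors still go straight onto the qc. A compact model of that routing decision (plain stand-in types, not libata structures):

#include <stdbool.h>
#include <stdio.h>

struct qc    { unsigned err_mask; bool failed; };
struct linkb { unsigned sactive;  unsigned eh_err_mask; };

static void route_ata_error(struct linkb *link, struct qc *qc, unsigned err)
{
        if (!link->sactive) {           /* non-NCQ: error belongs to the qc */
                qc->err_mask |= err;
        } else {                        /* NCQ: record on the link, fail qc */
                link->eh_err_mask |= err;
                if (err)
                        qc->failed = true;
        }
}

int main(void)
{
        struct linkb l = { .sactive = 0x3 };    /* two queued commands */
        struct qc q = { 0 };

        route_ata_error(&l, &q, 0x4);
        printf("link err %#x, qc failed %d\n", l.eh_err_mask, q.failed);
        return 0;
}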
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
return ret;
}
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+ int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
+
+ if (i->dft->lldd_ata_soft_reset)
+ res = i->dft->lldd_ata_soft_reset(dev);
+
+ if (res != TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ switch (dev->sata_dev.command_set) {
+ case ATA_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+ *class = ATA_DEV_ATA;
+ break;
+ case ATAPI_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+ *class = ATA_DEV_ATAPI;
+ break;
+ default:
+ SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+ __func__, dev->sata_dev.command_set);
+ *class = ATA_DEV_UNKNOWN;
+ break;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+ return ret;
+}
+
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
- .softreset = NULL,
+ .softreset = sas_ata_soft_reset,
.hardreset = sas_ata_hard_reset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd..14e21b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
void sas_porte_bytes_dmaed(struct work_struct *work);
void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b..e0f5018e 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
phy->error = 0;
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
&phy->phy_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdf..42fd1f2 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
- sas_deform_port(phy);
+ sas_deform_port(phy, 0);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
+ struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
- if (port->port_dev)
- port->port_dev->pathways--;
+ dev = port->port_dev;
+ if (dev)
+ dev->pathways--;
if (port->num_phys == 1) {
+ if (dev && gone)
+ dev->gone = 1;
sas_unregister_domain_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else
sas_port_delete_phy(port->port, phy->phy);
-
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
- sas_deform_port(sas_ha->sas_phy[i]);
+ sas_deform_port(sas_ha->sas_phy[i], 0);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f..eeba76c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
+ /* If the device fell off, no sense in issuing commands */
+ if (dev->gone) {
+ cmd->result = DID_BAD_TARGET << 16;
+ scsi_done(cmd);
+ goto out;
+ }
+
if (dev_is_sata(dev)) {
unsigned long flags;
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
goto out;
}
- /* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
- cmd->result = DID_BAD_TARGET << 16;
- scsi_done(cmd);
- goto out;
- }
-
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d8..8ec2c86 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
(1 << LPFC_USER_LINK_SPEED_AUTO))
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
MAILBOX_t *mbox;
uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
-
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8f..135a53b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register access
+ * @phba: lpfc_hba pointer.
+ * @opcode: The requested register access operation (dump, firmware reset or
+ *          device reset).
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ uint32_t reg_val;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ msleep(100);
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
return -EINVAL;
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum number of virtual functions supported.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+ struct lpfc_rsrc_desc_pcie *desc;
+ uint32_t max_nr_virtfn;
+ uint32_t desc_count;
+ int length, rc, i;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ if (!pdev->is_physfn)
+ return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* get the maximum number of virtfn support by physfn */
+ length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+ phba->sli4_hba.iov.pf_number + 1);
+
+ get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+ bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+ LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+ if (rc != MBX_TIMEOUT) {
+ /* check return status */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc)
+ goto error_out;
+
+ } else
+ goto error_out;
+
+ desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_pcie *)
+ &get_prof_cfg->u.response.prof_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_PCIE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+ desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ }
+
+error_out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
* @count: unused variable.
*
* Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string with the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual function has been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If non-zero virtual functions have already been enabled on the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EINVAL will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual function has been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
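A compact model of the accept/reject policy this store routine implements: a write of 0 disables any enabled virtual functions, a write while functions are already enabled is rejected, and a request beyond the per-PF maximum is rejected rather than clamped (the maximum value below is illustrative only):

#include <errno.h>
#include <stdio.h>

#define MAX_VFN_PER_PFN 255     /* stand-in for LPFC_MAX_VFN_PER_PFN */

static int sriov_nr_virtfn_policy(int requested, int currently_enabled)
{
        if (requested == 0)
                return 0;                 /* disable all VFs              */
        if (currently_enabled > 0)
                return -EEXIST;           /* VFs already enabled: reject  */
        if (requested > MAX_VFN_PER_PFN)
                return -EINVAL;           /* beyond the supported maximum */
        return requested;                 /* number of VFs to enable      */
}

int main(void)
{
        printf("%d %d %d\n",
               sriov_nr_virtfn_policy(4, 0),   /* enable four VFs     */
               sriov_nr_virtfn_policy(4, 2),   /* -EEXIST             */
               sriov_nr_virtfn_policy(0, 2));  /* disable them all    */
        return 0;
}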
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of virtual functions to enable.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
NULL,
};
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
- .size = MAILBOX_CMD_SIZE,
+ .size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e504..7fb0ba4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
- struct lpfc_dmabuf *rxbmp; /* for BIU diags */
- struct lpfc_dmabufext *dmp; /* for BIU diags */
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
}
/**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - prepare driver to enter device diag loopback mode
+ * @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver's exit processing after setting
+ * up diag loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete then the link is brought down. If the link
* is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
* All of this is done in-line.
*/
static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct diag_mode_set *loopback_mode;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
uint32_t link_flags;
uint32_t timeout;
- struct lpfc_vport **vports;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus;
int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
- if (job->request_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2738 Received DIAG MODE request below minimum "
- "size\n");
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
- rc = -EACCES;
- goto job_error;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
- goto job_error;
- }
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_block_requests(shost);
- }
-
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_block_requests(shost);
+ goto loopback_mode_exit;
}
-
- while (pring->txcmplq_cnt) {
- if (i++ > 500) /* wait up to 5 seconds */
- break;
-
- msleep(10);
- }
-
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
rc = -ENODEV;
loopback_mode_exit:
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_unblock_requests(shost);
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.type);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout, req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus, i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc)
+ goto loopback_mode_exit;
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+ /* set up loopback mode */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+ if (link_flags == INTERNAL_LOOP_BACK)
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ else
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ msleep(10);
}
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_unblock_requests(shost);
}
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1622,6 +1846,234 @@ job_error:
}
/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (!rc)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ uint32_t timeout;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = link_diag_test_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.type);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
}
/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+ memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+ return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
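The three page helpers above are meant to be composed: allocate one BSG_MBOX_SIZE page per external buffer, chain the descriptors on a list, and tear the whole list down in one call. A minimal in-driver usage sketch follows; it assumes driver context (a valid phba) and restates the pattern the non-embedded read path later in this patch uses for its ext_dmabuf_list.

static int bsg_mbox_pages_example(struct lpfc_hba *phba, int nr_pages)
{
	LIST_HEAD(page_list);
	struct lpfc_dmabuf *dmabuf;
	int i;

	for (i = 0; i < nr_pages; i++) {
		dmabuf = lpfc_bsg_dma_page_alloc(phba);
		if (!dmabuf) {
			/* free whatever was already allocated */
			lpfc_bsg_dma_page_list_free(phba, &page_list);
			return -ENOMEM;
		}
		list_add_tail(&dmabuf->list, &page_list);
	}

	/* ... use the BSG_MBOX_SIZE pages ... */

	lpfc_bsg_dma_page_list_free(phba, &page_list);
	return 0;
}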
+
+/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
}
/**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
}
/**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
@@ -2422,15 +2954,13 @@ job_error:
* of the mailbox.
**/
void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
- struct lpfc_mbx_nembed_cmd *nembed_sge;
uint32_t size;
unsigned long flags;
- uint8_t *to;
- uint8_t *from;
+ uint8_t *pmb, *pmb_buf;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
- /* build the outgoing buffer to do an sg copy
- * the format is the response mailbox followed by any extended
- * mailbox data
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * only the mailbox header needs to be copied from the mailboxq structure.
*/
- from = (uint8_t *)&pmboxq->u.mb;
- to = (uint8_t *)dd_data->context_un.mbox.mb;
- memcpy(to, from, sizeof(MAILBOX_t));
- if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
- /* copy the extended data if any, count is in words */
- if (dd_data->context_un.mbox.outExtWLen) {
- from = (uint8_t *)dd_data->context_un.mbox.ext;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.outExtWLen *
- sizeof(uint32_t);
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
- from = (uint8_t *)dd_data->context_un.mbox.
- dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = pmboxq->u.mb.un.varWords[5];
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmboxq->u.mb.un.varWords[0];
-
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = nembed_sge->sge[0].length;
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
- from = (uint8_t *)dd_data->context_un.
- mbox.dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- }
- }
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
/* need to hold the lock until we set job->dd_data to NULL
* to hold off the timeout handler returning to the mid-layer
* while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
job->dd_data = NULL;
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
} else {
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
- kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(dd_data->context_un.mbox.ext);
- if (dd_data->context_un.mbox.dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dd_data->context_un.mbox.dmp->size,
- dd_data->context_un.mbox.dmp->dma.virt,
- dd_data->context_un.mbox.dmp->dma.phys);
- kfree(dd_data->context_un.mbox.dmp);
- }
- if (dd_data->context_un.mbox.rxbmp) {
- lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
- dd_data->context_un.mbox.rxbmp->phys);
- kfree(dd_data->context_un.mbox.rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
return;
}
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer write mailbox command pass-through complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles BSG job completion for mailbox commands with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ /* has the job already timed out? */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job = NULL;
+ goto job_done_out;
+ }
+
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * only the mailbox header needs to be copied from the mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ job = dd_data->context_un.mbox.set_job;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+ /* result for successful completion */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* need to hold the lock until we set job->dd_data to NULL
+ * to hold off the timeout handler in the mid-layer from
+ * taking any action.
+ */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ } else
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+ if (!job)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with the mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
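The descriptor setup above applies one addressing rule for both MSE and HBD layouts: external buffer 0 lives in the same DMA page as the mailbox and starts right after the MAILBOX_t header, while every later buffer points at the base of its own page, with the 64-bit address split into high and low words. The standalone sketch below restates that rule; the MAILBOX_T_SIZE value is a placeholder for sizeof(MAILBOX_t), and the high/low split reflects the presumed semantics of putPaddrHigh()/putPaddrLow() as they are used here.

#include <stdint.h>

#define MAILBOX_T_SIZE 256u          /* placeholder for sizeof(MAILBOX_t) */

static void split_ext_buf_addr(uint32_t index, uint64_t mbx_phys,
			       uint64_t ext_phys, uint32_t *pa_hi,
			       uint32_t *pa_lo)
{
	/* buffer 0 follows the mailbox header; later buffers get own pages */
	uint64_t pa = (index == 0) ? mbx_phys + MAILBOX_T_SIZE : ext_phys;

	*pa_hi = (uint32_t)(pa >> 32);
	*pa_lo = (uint32_t)(pa & 0xffffffffu);
}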
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerated non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* reject a non-embedded mailbox command with no external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerated non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = 0, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* log the lengths of the external buffers expected to follow */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ }
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, covering both 0x9B with non-embedded MSEs and 0x9B with
+ * embedded subsystem 0x1 opcodes using external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Not handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort a mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests the abort of a pass-through mailbox command with
+ * multiple external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine returns the next mailbox read external buffer to user space
+ * through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return SLI_CONFIG_NOT_HANDLED;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
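The broken-pipe checks above boil down to three conditions on a follow-on buffer: it must carry the tag of the open session, stay within the advertised buffer count, and arrive strictly in order. The standalone predicate below restates those rules for clarity; the parameter names echo the mbox_ext_buf_ctx and dfc_mbox_req fields but this is an illustration, not driver code.

#include <stdbool.h>
#include <stdint.h>

static bool ext_buf_continues_session(uint32_t ctx_tag, uint32_t ctx_num_buf,
				      uint32_t ctx_seq, uint32_t req_tag,
				      uint32_t req_seq)
{
	if (req_tag != ctx_tag)
		return false;            /* wrong session tag: broken pipe */
	if (req_seq > ctx_num_buf)
		return false;            /* beyond advertised buffer count */
	return req_seq == ctx_seq + 1;   /* must be the next buffer in order */
}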
+
+/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
- MAILBOX_t *mb = NULL;
+ uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
- uint32_t size;
- struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
- struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
- struct ulp_bde64 *rxbpl = NULL;
- struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
+ uint32_t size;
+
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
- mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
- if (!mb) {
- rc = -ENOMEM;
- goto job_done;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- size = job->request_payload.payload_len;
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
-
- rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0)
- goto job_done; /* must be negative */
-
pmb = &pmboxq->u.mb;
- memcpy(pmb, mb, sizeof(*pmb));
+ memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
"0x%x while in stopped state.\n",
pmb->mbxCommand);
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_done;
- }
-
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
- if (!ext) {
- rc = -ENOMEM;
- goto job_done;
- }
-
/* any data for the device? */
if (mbox_req->inExtWLen) {
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)ext, from,
- mbox_req->inExtWLen * sizeof(uint32_t));
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
}
-
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
- putPaddrLow(dmp->dma.phys);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
- putPaddrLow(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
-
- /* copy the transmit data found in the mailbox extension area */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
-
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- if (rxbpl) {
- INIT_LIST_HEAD(&rxbmp->list);
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- receive_length, 0);
- }
-
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- bde->tus.f.bdeSize, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- bde->addrHigh = putPaddrHigh(dmp->dma.phys);
- bde->addrLow = putPaddrLow(dmp->dma.phys);
-
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- bde->tus.f.bdeSize);
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
- header = (struct mbox_header *)&pmb->un.varWords[0];
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmb->un.varWords[0];
- receive_length = nembed_sge->sge[0].length;
-
- /* receive length cannot be greater than mailbox
- * extension size
- */
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
- rc = -ERANGE;
- goto job_done;
- }
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
-
- INIT_LIST_HEAD(&dmp->dma.list);
- nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
- nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- header->cfg_mhdr.payload_length);
}
}
- dd_data->context_un.mbox.rxbmp = rxbmp;
- dd_data->context_un.mbox.dmp = dmp;
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* job finished, copy the data */
- memcpy(mb, pmb, sizeof(*pmb));
+ memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
+ job->reply_payload.sg_cnt,
+ pmbx, size);
/* not waiting mbox already done */
rc = 0;
goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
job_done:
/* common exit for error or job completed inline */
- kfree(mb);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- kfree(ext);
- if (dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dmp->size, dmp->dma.virt,
- dmp->dma.phys);
- kfree(dmp);
- }
- if (rxbmp) {
- lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
- kfree(rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
+job_cont:
return rc;
}
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
int rc = 0;
- /* in case no data is transferred */
+ /* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2737 Received MBOX_REQ request below "
- "minimum size\n");
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_error;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compability "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
-job_error:
if (rc == 0) {
/* job done */
job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
- rc = lpfc_bsg_diag_mode(job);
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
break;
- case LPFC_BSG_VENDOR_DIAG_TEST:
- rc = lpfc_bsg_diag_test(job);
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
/* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
break;
case TYPE_MENLO:
menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca..c8c2b47 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE 4
-#define LPFC_BSG_VENDOR_DIAG_TEST 5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
-#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
struct set_ct_event {
uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
uint32_t timeout;
};
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
struct diag_mode_test {
uint32_t command;
};
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
};
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
struct dfc_mbox_req {
uint32_t command;
uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
};
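The two new fields turn dfc_mbox_req into a session header for multi-buffer pass-through: extMboxTag stays constant for the whole session and extSeqNum runs 1..N, which is exactly what lpfc_bsg_handle_sli_cfg_ext() checks, while a plain single-buffer command leaves both at zero. The sketch below shows the application-side numbering under that assumption; the mirrored struct carries only the tag/seq portion and the tag value is arbitrary.

#include <stdint.h>

struct dfc_mbox_req_ext {            /* tag/seq portion of dfc_mbox_req */
	uint32_t extMboxTag;
	uint32_t extSeqNum;
};

static void number_session(struct dfc_mbox_req_ext *reqs, uint32_t nbuf,
			   uint32_t tag)
{
	uint32_t i;

	for (i = 0; i < nbuf; i++) {
		/* single-buffer commands keep both fields at zero */
		reqs[i].extMboxTag = (nbuf > 1) ? tag : 0;
		reqs[i].extSeqNum = (nbuf > 1) ? i + 1 : 0;
	}
}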
/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
};
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
uint32_t buf_len;
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
};
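The word64 subcommand field above follows the driver's usual name_SHIFT/name_MASK/name_WORD convention, which is what the bsg_bf_get()/bf_get() accessors key off. The self-contained demo below mirrors the emb0 defines and uses an accessor of the assumed shape (the real macros live elsewhere in the lpfc headers and are not part of this hunk); for a READ_FCF request it prints subsys 0x0c and opcode 0x08.

#include <stdint.h>
#include <stdio.h>

/* mirrored from the emb0 word64 definitions above */
#define lpfc_emb0_subcmnd_opcode_SHIFT	0
#define lpfc_emb0_subcmnd_opcode_MASK	0xff
#define lpfc_emb0_subcmnd_opcode_WORD	word64
#define lpfc_emb0_subcmnd_subsys_SHIFT	8
#define lpfc_emb0_subcmnd_subsys_MASK	0xff
#define lpfc_emb0_subcmnd_subsys_WORD	word64

/* assumed shape of the bsg_bf_get()/bf_get() token-pasting accessor */
#define example_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct emb0_tail { uint32_t word64; };

int main(void)
{
	/* SLI_CONFIG_SUBSYS_FCOE (0x0C) / FCOE_OPCODE_READ_FCF (0x08) */
	struct emb0_tail t = { .word64 = (0x0C << 8) | 0x08 };

	printf("subsys=0x%02x opcode=0x%02x\n",
	       (unsigned)example_bf_get(lpfc_emb0_subcmnd_subsys, &t),
	       (unsigned)example_bf_get(lpfc_emb0_subcmnd_opcode, &t));
	return 0;
}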
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT 0
-#define lpfc_subcmnd_opcode_MASK 0xff
-#define lpfc_subcmnd_opcode_WORD word6
-#define lpfc_subcmnd_subsys_SHIFT 8
-#define lpfc_subcmnd_subsys_MASK 0xff
-#define lpfc_subcmnd_subsys_WORD word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
uint32_t rd_offset;
uint32_t obj_name[26];
uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
- struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
};
struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
union {
- struct lpfc_sli_config_generic sli_config_generic;
- struct lpfc_sli_config_subcmnd sli_config_subcmnd;
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
} un;
};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
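
The *_SHIFT/*_MASK/*_WORD triples added above (lpfc_emb0_subcmnd_opcode, lpfc_emb0_subcmnd_subsys, and friends) follow the lpfc bitfield-accessor convention: they are not read directly but fed to the driver's bf_set()/bf_get() token-pasting macros. The following standalone sketch shows the pattern; the macro bodies are assumed to mirror the driver's usual definitions and the struct/field names here are illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the driver's accessor convention; not taken from the patch. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			       ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Illustrative stand-in for a word carrying an opcode/subsystem pair. */
struct emb0_example {
	uint32_t word64;
#define ex_subcmnd_opcode_SHIFT	0
#define ex_subcmnd_opcode_MASK	0xff
#define ex_subcmnd_opcode_WORD	word64
#define ex_subcmnd_subsys_SHIFT	8
#define ex_subcmnd_subsys_MASK	0xff
#define ex_subcmnd_subsys_WORD	word64
};

int main(void)
{
	struct emb0_example e = { .word64 = 0 };

	/* Pack subsystem FCOE (0x0C) and the READ_FCF opcode (0x08). */
	bf_set(ex_subcmnd_subsys, &e, 0x0C);
	bf_set(ex_subcmnd_opcode, &e, 0x08);

	printf("word=0x%08x subsys=0x%02x opcode=0x%02x\n", e.word64,
	       bf_get(ex_subcmnd_subsys, &e), bf_get(ex_subcmnd_opcode, &e));
	return 0;
}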
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f..fc20c24 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd9..779b88e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca0..ffe82d1 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ fcp_qidx = 0;
+ do {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ qidx = 0;
+ do {
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- }
+ } while (++qidx < phba->cfg_fcp_eq_count);
goto error_out;
break;
case LPFC_IDIAG_MQ:
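
The two debugfs hunks above replace for loops with do/while loops so that the FCP completion queue at index 0 is still walked when phba->cfg_fcp_eq_count is 0 (the single-EQ case handled by later hunks in this series). A minimal sketch of the control-flow difference, using hypothetical names:

#include <stdio.h>

/* Counts how many indexes a "for" loop visits for a given queue count. */
static int visits_for(int count)
{
	int visited = 0, idx;

	for (idx = 0; idx < count; idx++)
		visited++;		/* body never runs when count == 0 */
	return visited;
}

/* Counts how many indexes the patched do/while shape visits. */
static int visits_do_while(int count)
{
	int visited = 0, idx = 0;

	do {
		visited++;		/* index 0 is always processed */
	} while (++idx < count);
	return visited;
}

int main(void)
{
	printf("count=0: for=%d, do/while=%d\n", visits_for(0), visits_do_while(0));
	printf("count=4: for=%d, do/while=%d\n", visits_for(4), visits_do_while(4));
	return 0;
}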
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c4524..32a0845 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi + phba->vpi_base;
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = -ENOMEM;
goto fail_free_dmabuf;
}
+
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
+ int i;
+
+	/* The physical port is always vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = phba->pport;
else
vport = lpfc_find_vport_by_vpid(phba,
- icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+ icmd->unsli3.rcvsli3.vpi);
}
+
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
/* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
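
The ELS hunks above stop computing "vport->vpi + phba->vpi_base" and instead index a vpi_ids[] table that maps the vport's logical VPI to the physical VPI assigned by the SLI4 port, while lpfc_find_vport_by_vpid() now performs the reverse translation before searching the port list. A simplified sketch of that reverse lookup; the helper name and table contents are illustrative only:

#include <stdint.h>
#include <stdio.h>

/*
 * Return the logical index whose vpi_ids[] entry equals the physical VPI,
 * or -1 when no mapping exists (mirrors the loop added to
 * lpfc_find_vport_by_vpid() above).
 */
static int physical_to_logical_vpi(const uint16_t *vpi_ids, int max_vpi,
				   uint16_t phys_vpi)
{
	int i;

	if (phys_vpi == 0)	/* the physical port is always vpi 0 */
		return 0;
	for (i = 0; i < max_vpi; i++)
		if (vpi_ids[i] == phys_vpi)
			return i;
	return -1;
}

int main(void)
{
	uint16_t vpi_ids[] = { 512, 513, 514, 515 };	/* example port-assigned VPIs */

	printf("physical 514 -> logical %d\n",
	       physical_to_logical_vpi(vpi_ids, 4, 514));
	printf("physical 999 -> logical %d\n",
	       physical_to_logical_vpi(vpi_ids, 4, 999));
	return 0;
}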
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5..18d0dbf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
- lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(vport->fc_nodename));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(vport->fc_portname));
+ lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
+ uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- /*
- * For SLI3, cmpl_reg_vpi will set port_state to READY, and
- * continue discovery.
- */
+	/* Register the VPI for SLI3, NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
if (phba->sli_rev < LPFC_SLI_REV4) {
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
+ else {
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
+		/*
+		 * If the CVL_RCVD bit is not set then we have sent the
+		 * FLOGI.  If dev_loss fires while we are waiting we do
+		 * not want to unregister the FCF.
+		 */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
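
Several completion handlers above now call lpfc_update_vport_wwn() instead of open-coding the soft-WWN handling; the helper itself is added in the lpfc_init.c part of this diff. Below is a standalone sketch of the node-name precedence it applies (a soft-configured WWNN overrides the service-parameter name, and an empty vport name is seeded from the service parameters); the struct and names are illustrative, not driver types:

#include <stdint.h>
#include <stdio.h>

struct wwn_example {
	uint64_t soft_wwnn;	/* user-configured override, 0 when unset */
	uint64_t sparam_wwnn;	/* node name read back in the service parameters */
	uint64_t fc_nodename;	/* node name the vport currently advertises */
};

static void update_nodename(struct wwn_example *v)
{
	if (v->soft_wwnn)
		v->sparam_wwnn = v->soft_wwnn;		/* soft name wins */

	if (v->fc_nodename == 0 || v->soft_wwnn)
		v->fc_nodename = v->sparam_wwnn;	/* adopt service-param name */
	else
		v->sparam_wwnn = v->fc_nodename;	/* keep the existing name */
}

int main(void)
{
	struct wwn_example v = {
		.soft_wwnn   = 0x10000000c9000001ULL,
		.sparam_wwnn = 0x20000000c9abcdefULL,
		.fc_nodename = 0,
	};

	update_nodename(&v);
	printf("node name in use: 0x%016llx\n", (unsigned long long)v.fc_nodename);
	return 0;
}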
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e..9059524 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
#define rrq_rxid_WORD rrq_exchg
};
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation */
struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
#define PCI_DEVICE_ID_BALIUS 0xe131
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_HBA_EXT_OFFSET 0x100
/* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
+#define MAILBOX_SYSFS_MAX 4096
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668..11e26a2 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
@@ -210,9 +229,26 @@ struct ulp_bde64 {
struct lpfc_sli4_flags {
uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_xri_rsrc_rdy_SHIFT 1
+#define lpfc_xri_rsrc_rdy_MASK 0x00000001
+#define lpfc_xri_rsrc_rdy_WORD word0
+#define LPFC_XRI_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
};
struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_version_SHIFT 0
#define lpfc_mbox_hdr_version_MASK 0x000000FF
#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
} response;
};
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
struct mbox_header {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
union lpfc_sli4_cfg_shdr cfg_shdr;
};
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
/* Mailbox command structures */
struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
} u;
};
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mailbox is generally 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * available for the extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53 words.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in
+ * this structure shares the same SHIFT/MASK/WORD defines as word4 of the
+ * alloc/get-allocated resource extent structures defined above. This
+ * non-embedded structure provides for the maximum number of extents
+ * supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
struct lpfc_mbx_post_hdr_tmpl {
struct mbox_header header;
uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
#define lpfc_sli4_sge_offset_WORD word2
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
struct lpfc_mbx_read_config {
uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
-#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
-#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
-#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
- uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT 0
-#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
-#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
-#define lpfc_mbx_rd_conf_mc_SHIFT 29
-#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t rsvd_3;
uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
- uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t rsvd_5;
uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
- uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
- uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD word9
- uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
-#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD word10
- uint32_t word11_rsvd;
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
#define cfg_fcoe_SHIFT 0
#define cfg_fcoe_MASK 0x00000001
#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 18
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
+
/* mailbox queue entry structure */
struct lpfc_mqe {
uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
struct lpfc_mbx_cq_destroy cq_destroy;
struct lpfc_mbx_wq_destroy wq_destroy;
struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
struct lpfc_mbx_nembed_cmd nembed_cmd;
struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_wr_object wr_object;
} un;
};
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
#define SGL_ALIGN_SZ 64
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI 0xffff
struct wqe_common {
uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+};
+
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
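
The sizing comment on struct lpfc_mbx_alloc_rsrc_extents earlier in this file explains why the embedded response carries a 53-entry id[] array. The same arithmetic, spelled out as a runnable check (all constants are taken from that comment):

#include <stdio.h>

int main(void)
{
	const int mbox_bytes = 256;		/* whole mailbox */
	const int hdr_words  = 6 + 4 + 1;	/* mbox + shared + opcode headers */
	const int hdr_bytes  = hdr_words * 4;	/* 44 bytes */
	const int payload    = mbox_bytes - hdr_bytes;	/* 212 bytes for extents */
	const int extents    = payload / 2;	/* 2 bytes per extent id -> 106 */
	const int id_words   = extents / 2;	/* 2 extent ids per word -> 53 */

	printf("header=%d bytes, payload=%d bytes, extents=%d, id[] words=%d\n",
	       hdr_bytes, payload, extents, id_words);
	return 0;
}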
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036..148b98d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
goto out_free_mbox;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
}
/**
+ * lpfc_update_vport_wwn - Update the vport's fc_nodename and fc_portname
+ * from the service parameters, honoring any cfg_soft_wwnn/cfg_soft_wwpn
+ * overrides.
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * Return codes
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
+/**
* lpfc_config_port_post - Perform lpfc initialization after config port
* @phba: pointer to lpfc hba data structure.
*
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
-
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
-
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_10Gb)
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
max_speed = 10;
else if (phba->lmt & LMT_8Gb)
max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
"Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- /* oneConnect hba requires special processing, they are all initiators
+ /*
+	 * oneConnect HBAs require special processing; they are all initiators
* and we put the port number on the end
*/
if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
kfree(io);
phba->total_iocbq_bufs--;
}
+
spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
vport = lpfc_find_vport_by_vpid(phba,
acqe_fip->index - phba->vpi_base);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_try_set_mwi(pdev);
pci_save_state(pdev);
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ pdev->needs_freset = 1;
+
return 0;
out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables PCI SR-IOV virtual functions on a physical
+ * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
+ * enable that number of virtual functions on the physical function. As
+ * not all devices support SR-IOV, a failure return from the
+ * pci_enable_sriov() API call is not treated as an error condition for
+ * most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ int rc;
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2807 Successful enable sriov on this device "
+ "with vfn number nr_vf:%d\n", nr_vfn);
+ return rc;
+}
+
+/**
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
* @phba: pointer to lpfc hba data structure.
*
@@ -4011,6 +4079,7 @@ static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
+ int rc;
/*
* Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return 0;
}
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
/*
- * Initialize dirver internal slow-path work queues
+ * Initialize driver internal slow-path work queues
*/
/* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Receive queue CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
/* Initialize the driver internal SLI layer lists. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
/*
* Get sli4 parameters that override parameters from Port capabilities.
- * If this call fails it is not a critical error so continue loading.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
*/
- lpfc_get_sli4_parameters(phba, mboxq);
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"1430 Failed to initialize sgl list.\n");
goto out_free_sgl_list;
}
-
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
+ rc = -ENOMEM;
goto out_remove_rpi_hdrs;
}
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
+ rc = -ENOMEM;
goto out_free_fcf_rr_bmask;
}
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
"interrupt vector entries\n");
+ rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return rc;
out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
/* Free the bsmbx region. */
lpfc_destroy_bootstrap_mbox(phba);
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
"Unloading driver.\n", __func__);
goto out_free_iocbq;
}
+ iocbq_entry->sli4_lxritag = NO_XRI;
iocbq_entry->sli4_xritag = NO_XRI;
spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 lpfc_init_sgl_list els %d.\n",
+ "2400 ELS XRI count %d.\n",
els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max =
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
phba->sli4_hba.scsi_xri_cnt = 0;
-
phba->sli4_hba.lpfc_scsi_psb_array =
kzalloc((sizeof(struct lpfc_scsi_buf *) *
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
goto out_free_mem;
}
- sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (sglq_entry->sli4_xritag == NO_XRI) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate XRI.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
sglq_entry->buff_type = GEN_BUFF_TYPE;
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
int rc = 0;
- int longs;
- uint16_t rpi_count;
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
/*
- * Provision an rpi bitmask range for discovery. The total count
- * is the difference between max and base + 1.
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
*/
- rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
- longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
- GFP_KERNEL);
- if (!phba->sli4_hba.rpi_bmask)
- return -ENOMEM;
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ return rc;
+ }
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
uint32_t rpi_count;
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
spin_lock_irq(&phba->hbalock);
- curr_rpi_range = phba->sli4_hba.next_rpi;
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi -
+ phba->sli4_hba.max_cfg_param.rpi_base;
spin_unlock_irq(&phba->hbalock);
/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
else
rpi_count = LPFC_RPI_HDR_COUNT;
+ if (!rpi_count)
+ return NULL;
/*
* First allocate the protocol header region for the port. The
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
rpi_hdr->page_count = 1;
spin_lock_irq(&phba->hbalock);
- rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
/*
- * The next_rpi stores the next module-64 rpi value to post
- * in any subsequent rpi memory region postings.
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
*/
phba->sli4_hba.next_rpi += rpi_count;
spin_unlock_irq(&phba->hbalock);
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
**/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
kfree(rpi_hdr->dmabuf);
kfree(rpi_hdr);
}
-
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
- memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
}
/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- bf_get(lpfc_sliport_status_err, &reg_data)) {
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
struct lpfc_mbx_read_config *rd_config;
- uint32_t rc = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ uint32_t desc_count;
+ int length, i, rc = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_fcfi =
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
- phba->sli4_hba.max_cfg_param.fcfi_base =
- bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_eq =
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2003 cfg params XRI(B:%d M:%d), "
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(B:%d M:%d)\n",
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.fcfi_base,
phba->sli4_hba.max_cfg_param.max_fcfi);
}
- mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (rc)
+ goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->cfg_hba_queue_depth =
phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ goto read_cfg_out;
+ }
+
+	/* Search for the fc_fcoe resource descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+ desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)
+ &get_func_cfg->func_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM)
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+ "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+ phba->sli4_hba.iov.vf_number);
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+ rc = -EIO;
+ }
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = NULL;
/* Release FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do
lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
goto out_destroy_fcp_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
+ if (phba->cfg_fcp_eq_count)
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ else
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_FCP);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent eq[%d]-id=%d\n",
+ "parent %seq[%d]-id=%d\n",
fcp_cqidx,
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ (phba->cfg_fcp_eq_count) ? "" : "sp_",
fcp_cqidx,
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
- }
+ (phba->cfg_fcp_eq_count) ?
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+ phba->sli4_hba.sp_eq->queue_id);
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/*
* Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
fcp_cq_index,
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
/* Round robin FCP Work Queue's Completion Queue assignment */
- fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ if (phba->cfg_fcp_eq_count)
+ fcp_cq_index = ((fcp_cq_index + 1) %
+ phba->cfg_fcp_eq_count);
}
/*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
if (rdy_chk < 1000)
break;
}
+ /* delay driver action following IF_TYPE_2 function reset */
+ msleep(100);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
/*
* Assign MSI-X vectors to interrupt handlers
*/
-
- /* The first vector must associated to slow-path handler for MQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (vectors > 1)
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ else
+ /* All Interrupts need to be handled by one EQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_intr_handler, IRQF_SHARED,
+ LPFC_DRIVER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
int wait_cnt = 0;
LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return rc;
}
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
/* Read the port's SLI4 Config Parameters */
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return 0;
}
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+ char fwrev[32];
+ struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+ (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (image->size != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%d Type:%x ID:%x\n",
+ image->magic_number,
+ bf_get(lpfc_grp_hdr_file_type, image),
+ bf_get(lpfc_grp_hdr_id, image));
+ return -EINVAL;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware. Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->rev_name);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+ if (offset + SLI4_PAGE_SIZE > fw->size) {
+ temp_offset += fw->size - offset;
+ memcpy(dmabuf->virt,
+ fw->data + temp_offset,
+ fw->size - offset);
+ break;
+ }
+ temp_offset += SLI4_PAGE_SIZE;
+ memcpy(dmabuf->virt, fw->data + temp_offset,
+ SLI4_PAGE_SIZE);
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update failed. "
+ "%d\n", rc);
+ goto out;
+ }
+ }
+ rc = offset;
+ }
+out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ return rc;
+}
+
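For reference, the probe path added later in this patch drives lpfc_write_firmware() with the usual request/write/release firmware sequence; a trimmed sketch of that flow (taken from lpfc_pci_probe_one_s4() below, error handling omitted):

	const struct firmware *fw;
	uint8_t file_name[16];

	/* the port expects a "<ModelName>.grp" group-object image */
	snprintf(file_name, 16, "%s.grp", phba->ModelName);
	if (!request_firmware(&fw, file_name, &phba->pcidev->dev)) {
		/* >0: bytes written, 0: image matches active firmware, <0: error */
		lpfc_write_firmware(phba, fw);
		release_firmware(fw);
	}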
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
+ int adjusted_fcp_eq_count;
+ int fcp_qidx;
+ const struct firmware *fw;
+ uint8_t file_name[16];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* Default to single FCP EQ for non-MSI-X */
+ /* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- phba->cfg_fcp_eq_count = 1;
- else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
- phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ adjusted_fcp_eq_count = 0;
+ else if (phba->sli4_hba.msix_vec_nr <
+ phba->cfg_fcp_eq_count + 1)
+ adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ else
+ adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+ /* Free unused EQs */
+ for (fcp_qidx = adjusted_fcp_eq_count;
+ fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* do not delete the first fcp_cq */
+ if (fcp_qidx)
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* check for firmware upgrade or downgrade */
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
+
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce903..5567670 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
- if (vpi != 0xffff)
- vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- mb->un.varRegLogin.rpi = rpi;
- if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
- return 1;
- }
- mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"rpi x%x\n", vpi, did, rpi);
- return (1);
+ return 1;
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
- return (0);
+ return 0;
}
/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
*
* This routine prepares the mailbox command for unregistering remote port
* login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rpi = rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi,
- vport->vpi + phba->vpi_base, mbox);
- mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+ /*
+ * For SLI4 functions, the rpi field of lpfc_unreg_login is
+ * overloaded for the vport context unreg all: the physical
+ * vpi id is passed in the rpi position so the mailbox
+ * carries the value the SLI4 port expects.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
- mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
mb->un.varRegVpi.sid = vport->fc_myDID;
- mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
sizeof(struct lpfc_name));
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- if (phba->sli_rev < LPFC_SLI_REV4)
- mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
- else
- mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
return length;
}
- /* Setup for the none-embedded mbox command */
+ /* Setup for the non-embedded mbox command */
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* The sub-header is in DMA memory, which needs endian conversion */
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
- sizeof(union lpfc_sli4_cfg_shdr));
-
+ sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the resource extent mailbox fields.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Returns 0 if successful, 1 if the opcode is unsupported or the SGE is missing.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+ /* Initialization is complete.*/
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
+
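For context, the intended call sequence is the one the extent helpers added in lpfc_sli.c follow: build the SLI4_CONFIG header with lpfc_sli4_config(), complete the extent-specific words with lpfc_sli4_mbox_rsrc_extent(), then issue the mailbox and check the cfg_shdr status. A condensed sketch, mirroring lpfc_sli4_get_avail_extnt_rsrc() later in this patch (mailbox allocation, type selection, and error handling omitted):

	uint32_t length = sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
			  sizeof(struct lpfc_sli4_cfg_mhdr);

	/* 1. Common SLI4_CONFIG header, embedded form */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);
	/* 2. Extent-specific words; the count is unused for the GET opcode */
	lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, LPFC_SLI4_MBX_EMBED);
	/* 3. Issue and then inspect lpfc_mbox_hdr_status in the response */
	lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);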
+/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
- bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
- bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
- bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vpi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
}
/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
- bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
- vpi + phba->vpi_base);
+ phba->vpi_ids[vpi]);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
- phba->pport->vfi + phba->vfi_base);
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}
/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
- vport->vfi + vport->phba->vfi_base);
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}
/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee..10d5b5e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- int longs;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
- /* vpi zero is reserved for the physical port so add 1 to max */
- longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
- if (!phba->vpi_bmask)
- goto fail_free_dbq_pool;
return 0;
-
- fail_free_dbq_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
- phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- /* Free VPI bitmask memory */
- kfree(phba->vpi_bmask);
-
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d42..2ddd02f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /*
- * Need to unreg_login if we are already in one of these states and
- * change to NPR state. This will block the port until after the ACC
- * completes and the reg_login is issued and completed.
- */
+ /* no need to reg_login if we are already in one of these states */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_unreg_rpi(vport, ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ return 1;
}
if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+
/**
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x\n",
- did, mb->mbxStatus, vport->port_state);
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus) {
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481..3ccc974 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
if (bcnt == 0)
continue;
/* Now, post the SCSI buffer list sgls as a block */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
/* Reset SCSI buffer count for next round of posting */
bcnt = 0;
while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI;
+ uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
int status = 0, index;
int bcnt;
int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
if (last_xritag != NO_XRI
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
}
if (bcnt) {
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
+
+ if (status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3021 SCSI SGL post error %d\n",
+ status);
+ bcnt = 0;
+ }
/* Reset SCSI buffer count for next round of posting */
while (!list_empty(&sblist)) {
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_len = sg_dma_len(sgel);
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
}
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n",
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- pnode->nlp_rpi, pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4]);
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
} else if (status == IOCB_BUSY)
ret = FAILED;
else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e..98999bb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
if (iocbq)
phba->iocb_cnt++;
if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
return sglq;
}
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
return sglq;
}
@@ -532,7 +528,6 @@ static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/*
* set the active bit even if there is no mem available.
*/
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
+ rrq->xritag = phba->sli4_hba.xri_ids[xritag];
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
uint16_t xritag,
struct lpfc_node_rrq *rrq)
{
- uint16_t adj_xri;
struct lpfc_nodelist *ndlp = NULL;
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- uint16_t adj_xri;
-
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (!ndlp)
return 0;
- if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
return 1;
else
return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @piocb: Pointer to the iocbq.
*
* This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
* list is not empty then it is successful, it returns pointer to the newly
* allocated sglq object else it returns NULL.
**/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- uint16_t adj_xri;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- adj_xri = sglq->sli4_xritag -
- phba->sli4_hba.max_cfg_param.xri_base;
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
sglq->ndlp = ndlp;
found = 1;
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
- sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
list_del_init(&phba->sli4_hba.els_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+ qindx = 0;
+ do
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
continue;
} else if (rc)
break;
+
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3;
+ int mode = 3, i;
+ int longs;
switch (lpfc_sli_mode) {
case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
+
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ fcp_eqidx = 0;
+ do
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold the port-available extent count.
+ * @extnt_size: buffer to hold the element count per extent.
+ *
+ * This function reads the number of available resource extents and the size
+ * of each extent for the given resource type from the port.
+ **/
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+ err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine if there is an
+ * extent reprovisioning requirement.
+ *
+ * Returns:
+ * -Error: error indicates problem.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post the resource extents allocation request.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of extents to request.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request. It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = *extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for the extent-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = *extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "9000 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+ "2903 Available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port is not guaranteed to grant the full
+ * extent count requested, so update the local variable with the
+ * count actually allocated by the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, allocate the id bitmask and
+ * id array used to track this resource type.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported Opcode. Fail call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word.*/
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
+
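A worked example of the id-unpacking loop above, with purely hypothetical numbers (a port may return any count/size combination): suppose the port grants rsrc_cnt = 2 extents of rsrc_size = 64 ids each, packed two 16-bit base ids per 32-bit response word.

	/*
	 * Hypothetical response: id_array[0] holds word4_0 = 0 and
	 * word4_1 = 64.
	 *
	 *   i = 0: rsrc_id = 0  -> ids[0..63]   = 0..63    (k stays 0)
	 *   i = 1: rsrc_id = 64 -> ids[64..127] = 64..127  (k becomes 1)
	 *
	 * The bitmask is sized for rsrc_cnt * rsrc_size bits and tracks
	 * which of these logical indexes are currently allocated; the
	 * ids[] array maps a logical index back to the physical id the
	 * port expects in WQEs and mailbox commands.
	 */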
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends
+ * the resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY)
+ return 0;
+
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. An HBA reset does not need to
+ * repeat this initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
+
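As a quick editorial aside (not part of the patch), the bitmask sizing used throughout the provisioning above is just a round-up to whole machine words; the helper below is a hypothetical restatement of the same arithmetic:

	/* Illustrative only: number of unsigned longs backing a bitmap of 'count' ids. */
	static inline unsigned long lpfc_id_bmask_longs(unsigned int count)
	{
		return (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	}

	/* On a 64-bit build: count = 1 -> 1 long, count = 64 -> 1 long,
	 * count = 65 -> 2 longs, i.e. the bitmap always covers at least 'count' bits.
	 */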
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers (XRI, VPI, VFI and
+ * RPI) held by the driver, whether they were allocated as resource extents
+ * or provisioned directly from the ranges reported by READ_CONFIG.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
- /*
- * TODO: Why does this routine execute these task in a different
- * order from probe?
- */
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /*
+ * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
+ * calls depend on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
- rc = lpfc_sli4_post_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation\n",
- rc);
- rc = -ENODEV;
- goto out_free_mbox;
+ if (!phba->sli4_hba.extents_in_use) {
+ rc = lpfc_sli4_post_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ } else {
+ rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2560 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
- /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
- bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
rc = MBXERR_ERROR;
} else
lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
else
rc = -EIO;
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- iocbq->vport->vpi + phba->vpi_base);
+ phba->vpi_ids[iocbq->vport->vpi]);
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
return IOCB_ERROR;
break;
}
+
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
- sglq = NULL; /* These IO's already have an XRI and
- * a mapped sgl.
- */
+ /* These IOs already have an XRI and a mapped sgl. */
+ sglq = NULL;
} else {
- /* This is a continuation of a commandi,(CX) so this
+ /*
+ * This is a continuation of a command (CX), so this
+ * sglq is already on the active list.
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
-
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_alloc_xri - Allocate an available xri from the driver's range.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine fetches the next free logical xri from the driver's xri
+ * bitmask and marks it in use. The index is logical, so the search starts
+ * at zero every time; callers map it to a physical xri through the
+ * xri_ids array.
+ *
+ * Return codes
+ * 	xri - the logical xri that was reserved
+ * 	NO_XRI - no xri is available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse (hbalock held).
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: logical xri to release.
+ *
+ * This routine releases an xri back to the pool of available xris
+ * maintained by the driver. The caller must hold the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.xri_count--;
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: logical xri to release.
+ *
+ * This routine releases an xri back to the pool of available xris
+ * maintained by the driver, taking the hbalock itself.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
+
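For orientation only (an editorial sketch, not taken from the patch), a caller would pair these helpers roughly as follows; the translation from the returned logical index to the on-wire XRI goes through the xri_ids array provisioned earlier:

	uint16_t lxri, xri;

	lxri = lpfc_sli4_alloc_xri(phba);	/* 0-based logical index */
	if (lxri == NO_XRI)
		return -ENOMEM;			/* xri pool exhausted */
	xri = phba->sli4_hba.xri_ids[lxri];	/* physical XRI known to the port */
	/* ... build and issue the request using 'xri' ... */
	lpfc_sli4_free_xri(phba, lxri);		/* release the logical index, not the physical id */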
+/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
- uint16_t xritag;
+ uint16_t xri_index;
- spin_lock_irq(&phba->hbalock);
- xritag = phba->sli4_hba.next_xri;
- if ((xritag != (uint16_t) -1) && xritag <
- (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base)) {
- phba->sli4_hba.next_xri++;
- phba->sli4_hba.max_cfg_param.xri_used++;
- spin_unlock_irq(&phba->hbalock);
- return xritag;
- }
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index != NO_XRI)
+ return xri_index;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
- phba->sli4_hba.next_xri,
+ xri_index,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
- return -1;
+ return NO_XRI;
}
/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
+ uint16_t xritag_start = 0, lxri = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2560 Failed to allocate mbox cmd memory\n");
+ if (!mbox)
return -ENOMEM;
- }
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
/* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+ /*
+ * Assign the sglq a physical xri only if the driver has not
+ * initialized those resources. A port reset only needs
+ * the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
+
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
+
+ /* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
- /* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
-
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
+
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of the driver's ELS sgl pages to
+ * the HBA using non-embedded mailbox commands, one per XRI resource extent.
+ * No lock is held. This routine is only called when the driver is loading
+ * and after all IO has been stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, index;
+ uint32_t mbox_tmo;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t xritag_start = 0, lxri = 0;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ int cnt, ttl_cnt, rc = 0;
+ int loop_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2989 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ ttl_cnt = 0;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_start = rsrc_blk->rsrc_start;
+ rsrc_size = rsrc_blk->rsrc_size;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3014 Working ELS Extent start %d, cnt %d\n",
+ rsrc_start, rsrc_size);
+
+ loop_cnt = min(els_xri_cnt, rsrc_size);
+ if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+ loop_cnt = els_xri_cnt - ttl_cnt;
+ ttl_cnt = els_xri_cnt;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command.
+ */
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2987 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /*
+ * The starting resource may not begin at zero. Control
+ * the loop bounds via the block resource parameters,
+ * but handle the sge pointers with a zero-based index
+ * that is not reset per loop pass.
+ */
+ for (index = rsrc_start;
+ index < rsrc_start + loop_cnt;
+ index++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+ /*
+ * Assign the sglq a physical xri only if the driver
+ * has not initialized those resources. A port reset
+ * only needs the sglqs posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy,
+ &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag =
+ phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Track the starting physical XRI for the mailbox. */
+ if (index == rsrc_start)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ cnt++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ rsrc_blk->rsrc_used += loop_cnt;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3015 Post ELS Extent SGL, start %d, "
+ "cnt %d, used %d\n",
+ xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2988 POST_SGL_BLOCK mailbox "
+ "command failed status x%x "
+ "add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ goto err_exit;
+ }
+ if (ttl_cnt >= els_xri_cnt)
+ break;
+ }
+
+ err_exit:
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
return rc;
}
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
+
/* Get the first SGE entry from the non-embedded DMA memory */
viraddr = mbox->sge_array->addr[0];
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox commands,
+ * walking the XRI resource extents as needed. No lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xri_start = 0, scsi_xri_start;
+ uint16_t rsrc_range;
+ int rc = 0, avail_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ uint32_t xri_cnt = 0;
+
+ /* Calculate the total requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2932 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ /*
+ * The use of extents requires the driver to post the sgl pages
+ * in multiple postings so that each posting stays within one
+ * contiguous block of resource ids.
+ */
+ psb = list_prepare_entry(psb, sblist, list);
+ scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+ if (rsrc_range < scsi_xri_start)
+ continue;
+ else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+ continue;
+ else
+ avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+ reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command. The mbox posts a block of SGL pages per loop pass,
+ * but the DMA memory has a use-once semantic, so the mailbox
+ * is allocated and freed on every pass.
+ */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2933 Failed to allocate mbox cmd "
+ "memory\n");
+ return -ENOMEM;
+ }
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2934 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /* pg_pairs tracks posted SGEs per loop iteration. */
+ pg_pairs = 0;
+ list_for_each_entry_continue(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xri for this extent. */
+ if (pg_pairs == 0)
+ xri_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ xri_cnt++;
+
+ /*
+ * Track two exit conditions - the loop has constructed
+ * all of the caller's SGE pairs or all available
+ * resource IDs in this extent are consumed.
+ */
+ if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+ break;
+ }
+ rsrc_blk->rsrc_used += pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3016 Post SCSI Extent SGL, start %d, cnt %d "
+ "blk use %d\n",
+ xri_start, pg_pairs, rsrc_blk->rsrc_used);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2935 POST_SGL_BLOCK mailbox command "
+ "failed status x%x add_status x%x "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ return -ENXIO;
+ }
+
+ /* Post only what is requested. */
+ if (xri_cnt >= cnt)
+ break;
+ }
+ return rc;
+}
+
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_xri_inrange - check that an xri is within the driver's xri range.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps into the known range of XRIs
+ * allocated and used by the driver, returning the matching logical index
+ * or NO_XRI if the xri is unknown.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ int i;
+
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
+
+
+/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
- && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base))
+ if (lpfc_sli4_xri_inrange(phba, rxid))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpBdeCount = 0;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = ndlp;
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
/* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->vpi + vport->phba->vpi_base;
+ /* iocbq is prepped for internal consumption. Logical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
- /* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
}
}
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
return rc;
}
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
- uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* Post all rpi memory regions to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct lpfc_sli4_cfg_mhdr),
LPFC_SLI4_MBX_EMBED);
- bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
- hdr_tmpl, rpi_page->page_count);
+
+
+ /* Post the physical rpi to the port for this rpi header. */
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
- int rpi;
- uint16_t max_rpi, rpi_base, rpi_limit;
- uint16_t rpi_remaining;
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
- * The valid rpi range is not guaranteed to be zero-based. Start
- * the search at the rpi_base as reported by the port.
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
- if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
/*
* Don't try to allocate more rpi header regions if the device limit
- * on available rpis max has been exhausted.
+ * has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/*
+ * RPI header postings are not required for SLI4 ports capable of
+ * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use whereas max_rpi notes
* how many are supported max by the device.
*/
- rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
- phba->sli4_hba.rpi_count;
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
"2002 Error Could not grow rpi "
"count\n");
} else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
@@ -12751,6 +14069,8 @@ void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
@@ -13490,6 +14810,96 @@ out:
}
/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine creates a wr_object mailbox command to send to the port.
+ * The mailbox command is constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine fills in as many
+ * BDEs as the embedded mailbox can support. The @offset variable indicates
+ * the starting offset of the transfer and also returns the offset after
+ * the write object mailbox has completed. @size is used to determine the
+ * end of the object and whether the eof bit should be set.
+ *
+ * Return 0 if successful; @offset will contain the new offset to use
+ * for the next write.
+ * Return a negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
+
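A rough usage sketch (editorial, with assumed names such as 'remaining' and 'dmabuf_list'; the real caller is not shown in this hunk): the routine is driven with a running offset, one mailbox worth of BDEs at a time:

	uint32_t offset = 0;
	int rc;

	/* dmabuf_list describes up to LPFC_MBX_WR_CONFIG_MAX_BDE pages of the
	 * object; 'remaining' is how many bytes of the object are left to send.
	 */
	rc = lpfc_wr_object(phba, &dmabuf_list, remaining, &offset);
	if (rc)
		return rc;
	/* 'offset' now points just past the bytes the port accepted; repeat
	 * with the next chunk until the whole object has been written.
	 */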
+/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
* never happen
*/
sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_xritag);
+ sglq->sli4_lxritag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/* The xri and iocb resources secured,
* attempt to issue request
*/
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c..a0075b0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
struct list_head clist;
struct list_head dlist;
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct lpfc_cq_event cq_event;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf8..4b17035 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
- uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
uint8_t rqv;
};
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ uint16_t xri_count;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
enum lpfc_sgl_state state;
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
uint32_t start_rpi;
};
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
/*
* SLI4 specific function prototypes
*/
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+ int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba544..1feb551 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
- int vpi;
+ unsigned long vpi;
spin_lock_irq(&phba->hbalock);
/* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc6..7370c08 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.34-rc1"
-#define MEGASAS_RELDATE "Feb. 24, 2011"
-#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.38-rc1"
+#define MEGASAS_RELDATE "May. 11, 2011"
+#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
/*
* Device IDs
@@ -76,8 +76,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
-#define MFI_RESET_REQUIRED 0x00000001
-
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623e..2d8cdce 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.34-rc1
+ * Version : v00.00.05.38-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
- u32 status;
+ u32 status, mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
status = readl(&regs->outbound_intr_status);
- if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
- return 0;
- }
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
/*
* Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
/* Dummy readl to force pci flush */
readl(&regs->outbound_doorbell_clear);
- return 1;
+ return mfiStatus;
}
+
/**
* megasas_fire_cmd_ppc - Sends command to the FW
* @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_ppc - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_ppc - For controller reset check
* @regs: MFI register set
*/
@@ -495,8 +488,12 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
+
static struct megasas_instance_template megasas_instance_template_ppc = {
.fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
.disable_intr = megasas_disable_intr_ppc,
.clear_intr = megasas_clear_intr_ppc,
.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
- .adp_reset = megasas_adp_reset_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
.check_reset = megasas_check_reset_ppc,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
u32 max_sectors_2;
- u32 tmp_sectors;
+ u32 tmp_sectors, msix_enable;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable &&
+ !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
+ instance->msi_flag = 1;
+
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /* Try to enable MSI-X */
- if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
- !msix_disable && !pci_enable_msix(instance->pdev,
- &instance->msixentry, 1))
- instance->msi_flag = 1;
-
/*
* Initialize MFI Firmware
*/
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
megasas_mgmt_info.max_index++;
/*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+
+ /*
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_start_aen;
}
- /*
- * Register with SCSI mid-layer
- */
- if (megasas_io_attach(instance))
- goto fail_io_attach;
-
- instance->unload = 0;
return 0;
fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Initialize MFI Firmware
*/
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
{
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
instance->instancet->enable_intr(instance->reg_set);
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
-
/* Initialize the cmd completion timer */
if (poll_mode_io)
megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
return 0;
fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance->reg_set);
+ free_irq(instance->msi_flag ? instance->msixentry.vector :
+ instance->pdev->irq, instance);
+ if (instance->msi_flag)
+ pci_disable_msix(instance->pdev);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cf..f13e7ab 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
}
/*
- * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
- * @instance: Adapter soft state
- *
- */
-void
-megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
-{
- struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
-
- fusion = instance->ctrl_context;
- cmd = fusion->cmd_list[smid - 1];
- megasas_return_cmd_fusion(instance, cmd);
-}
-
-/*
* megasas_get_ld_map_info - Returns FW's ld_map structure
* @instance: Adapter soft state
* @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
u64 start_blk = io_info->pdBlock;
u8 *cdb = io_request->CDB.CDB32;
u32 num_blocks = io_info->numBlocks;
- u8 opcode, flagvals, groupnum, control;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
/* Check if T10 PI (DIF) is enabled for this LD */
ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
}
/* Normal case, just load LBA here */
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- u32 host_diag, abs_state;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
- mutex_lock(&instance->reset_mutex);
- set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
- instance->instancet->disable_intr(instance->reg_set);
- msleep(1000);
-
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
goto out;
}
+ mutex_lock(&instance->reset_mutex);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->instancet->disable_intr(instance->reg_set);
+ msleep(1000);
+
/* First try waiting for commands to complete */
if (megasas_wait_for_outstanding_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
- if (instance->disableOnlineCtrlReset == 1) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
/* Check that the diag write enable (DRWE) bit is on */
host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
msleep(100);
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f..dcc289c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
+#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
-#define MPT2SAS_RELEASE_VERSION 01
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e023..a7dbc68 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
};
+#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
@@ -121,6 +122,7 @@ struct sense_info {
* @work: work object (ioc->fault_reset_work_q)
* @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
+ * @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
u8 cancel_pending_work;
struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
+ u16 device_handle;
u8 VF_ID;
u8 VP_ID;
u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
switch (prot_type) {
case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
/*
* enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
cpu_to_be32(scsi_get_lba(scmd));
break;
- case SCSI_PROT_DIF_TYPE2:
-
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- break;
-
case SCSI_PROT_DIF_TYPE3:
/*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_smart_predicted_fault - illuminate Fault LED
+ * _scsih_turn_on_fault_led - illuminate Fault LED
* @ioc: per adapter object
* @handle: device handle
+ * Context: process
*
* Return nothing.
*/
static void
-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
struct scsi_target *starget;
struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
starget_printk(KERN_WARNING, starget, "predicted fault\n");
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
- memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
- mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus =
- cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
- mpi_request.DevHandle = cpu_to_le16(handle);
- mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
- if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
-
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "enclosure_processor: ioc_status (0x%04x), "
- "loginfo(0x%08x)\n", ioc->name,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
- }
- }
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
}
switch (fw_event->event) {
+ case MPT2SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
break;
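The net effect of the hunks above: the SEP (SCSI Enclosure Processor) request that lights the fault LED needs process context, so the SMART predicted-fault handler, which runs in interrupt context, now only queues a private MPT2SAS_TURN_ON_FAULT_LED firmware event and the worker issues the request later. A generic sketch of that defer-to-worker pattern (names are illustrative, not the driver's own fw_event machinery):

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct led_event {
		struct work_struct work;
		u16 handle;
	};

	static void led_worker(struct work_struct *work)
	{
		struct led_event *ev = container_of(work, struct led_event, work);

		/* process context: safe to sleep while talking to the firmware */
		kfree(ev);
	}

	/* callable from interrupt context */
	static void queue_led_event(u16 handle)
	{
		struct led_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

		if (!ev)
			return;
		ev->handle = handle;
		INIT_WORK(&ev->work, led_worker);
		schedule_work(&ev->work);
	}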
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4..de0b1a7 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
break;
if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+ int j;
+
STp->pos_unknown = 0;
STp->partition = STp->new_partition = 0;
if (STp->can_partitions)
STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
+ for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+ STps = &(STp->ps[j]);
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff0..252523d 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 0000000..864d018
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla8022(ha))
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
+ ha->iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->bootload_major, ha->bootload_minor,
+ ha->bootload_patch, ha->bootload_build);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_iscsi_version,
+ &dev_attr_optrom_version,
+ NULL,
+};
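Each DEVICE_ATTR() line above defines a read-only sysfs attribute; roughly (modulo the exact kernel version of the macro) it expands to something like the sketch below, and once the array is wired into the host template via .shost_attrs (see the ql4_os.c hunk further down) the files typically appear under /sys/class/scsi_host/host<N>/:

	/* approximate expansion of DEVICE_ATTR(fw_version, S_IRUGO, ...) */
	static struct device_attribute dev_attr_fw_version = {
		.attr  = { .name = "fw_version", .mode = S_IRUGO },
		.show  = qla4xxx_fw_version_show,
		.store = NULL,
	};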
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878..473c5c8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
-#define MBOX_AEN_REG_COUNT 5
+#define MBOX_AEN_REG_COUNT 8
#define MAX_INIT_RETRIES 5
/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
uint32_t nx_reset_timeout;
struct completion mbx_intr_comp;
+
+ /* --- From About Firmware --- */
+ uint16_t iscsi_major;
+ uint16_t iscsi_minor;
+ uint16_t bootload_major;
+ uint16_t bootload_minor;
+ uint16_t bootload_patch;
+ uint16_t bootload_build;
};
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf9..01082aa 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
uint8_t reserved[12]; /* 34-3f */
};
+struct about_fw_info {
+ uint16_t fw_major; /* 00 - 01 */
+ uint16_t fw_minor; /* 02 - 03 */
+ uint16_t fw_patch; /* 04 - 05 */
+ uint16_t fw_build; /* 06 - 07 */
+ uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
+ uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
+ uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
+ uint16_t fw_load_source; /* 38 - 39 */
+ /* 1 = Flash Primary,
+ 2 = Flash Secondary,
+ 3 = Host Download
+ */
+ uint8_t reserved1[6]; /* 3A - 3F */
+ uint16_t iscsi_major; /* 40 - 41 */
+ uint16_t iscsi_minor; /* 42 - 43 */
+ uint16_t bootload_major; /* 44 - 45 */
+ uint16_t bootload_minor; /* 46 - 47 */
+ uint16_t bootload_patch; /* 48 - 49 */
+ uint16_t bootload_build; /* 4A - 4B */
+ uint8_t reserved2[180]; /* 4C - FF */
+};
+
struct crash_record {
uint16_t fw_major_version; /* 00 - 01 */
uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3f..a53a256 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241..42ed5db 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
+ if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac7..0e72921 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
- if (sense_len == 0)
+ if (sense_len == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+ " sense len 0\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+ ha->status_srb = NULL;
return;
-
+ }
/* Save total available sense length,
* not to exceed cmd's sense buffer size */
sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
case MBOX_ASTS_SUBNET_STATE_CHANGE:
+ case MBOX_ASTS_DUPLICATE_IP:
/* No action */
DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[i];
/* print debug message */
- DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
- " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
- ha->host_no, ha->aen_in, mbox_sts[0],
- mbox_sts[1], mbox_sts[2], mbox_sts[3],
- mbox_sts[4]));
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+ "mb1:0x%x mb2:0x%x mb3:0x%x "
+ "mb4:0x%x mb5:0x%x\n",
+ ha->host_no, ha->aen_in,
+ mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5]));
/* advance pointer */
ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58d..fce8289 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
- /* To prevent overwriting mailbox registers for a command that has
- * not yet been serviced, check to see if an active command
- * (AEN, IOCB, etc.) is interrupting, then service it.
- * -----------------------------------------------------------------
- */
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!is_qla8022(ha)) {
- intr_status = readl(&ha->reg->ctrl_status);
- if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
- /* Service existing interrupt */
- ha->isp_ops->interrupt_service_routine(ha, intr_status);
- clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
- }
- }
-
ha->mbox_status_count = outCount;
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
}
/**
- * qla4xxx_get_fw_version - gets firmware version
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
* @ha: Pointer to host adapter structure.
*
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
- * if unused.
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
**/
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
+ struct about_fw_info *about_fw = NULL;
+ dma_addr_t about_fw_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
+ if (!about_fw) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+ "for about_fw\n", __func__));
+ return status;
+ }
- /* Get firmware version. */
+ memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
- "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
- return QLA_ERROR;
+ mbox_cmd[2] = LSDW(about_fw_dma);
+ mbox_cmd[3] = MSDW(about_fw_dma);
+ mbox_cmd[4] = sizeof(struct about_fw_info);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ goto exit_about_fw;
}
- /* Save firmware version information. */
- ha->firmware_version[0] = mbox_sts[1];
- ha->firmware_version[1] = mbox_sts[2];
- ha->patch_number = mbox_sts[3];
- ha->build_number = mbox_sts[4];
+ /* Save version information. */
+ ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
+ ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
+ ha->patch_number = le16_to_cpu(about_fw->fw_patch);
+ ha->build_number = le16_to_cpu(about_fw->fw_build);
+ ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ status = QLA_SUCCESS;
- return QLA_SUCCESS;
+exit_about_fw:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+ about_fw, about_fw_dma);
+ return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
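The reworked command returns its data through a DMA buffer instead of reading the version words back from the mailboxes, so mailboxes 2 and 3 carry the buffer address split into 32-bit halves. LSDW()/MSDW() are the driver's usual low/high dword helpers; conceptually (the real macros live in the qla4xxx headers) they amount to:

	#define LSDW(x)	((u32)((u64)(x) & 0xffffffff))
	#define MSDW(x)	((u32)((u64)(x) >> 32))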
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b..fdfe27b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
	/* Halt all the individual PEGs and other blocks of the ISP */
qla4_8xxx_rom_lock(ha);
- /* mask all niu interrupts */
+ /* disable all I2Q */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
/* disable xge rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
/* disable xg1 rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
/* halt pegs */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(5);
/* big hammer */
- msleep(1000);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
/* don't reset CAM block on reset */
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a7..f2364ec 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
+ .shost_attrs = qla4xxx_host_attrs,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
struct srb *srb;
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->cmd = cmd;
srb->flags = 0;
CMD_SP(cmd) = (void *)srb;
- cmd->scsi_done = done;
return srb;
}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
/**
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
* @cmd: Pointer to Linux's SCSI command structure
- * @done_fn: Function that the driver calls to notify the SCSI mid-layer
- * that the command has been processed.
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
 * completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
-static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct scsi_qla_host *ha = to_qla_host(host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
- spin_unlock_irq(ha->host->host_lock);
-
- srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+ srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
if (!srb)
- goto qc_host_busy_lock;
+ goto qc_host_busy;
rval = qla4xxx_send_command_to_isp(ha, srb);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
return 0;
qc_host_busy_free_sp:
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
-qc_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla4xxx_queuecommand)
-
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
+ QLA82XX_PEG_HALT_STATUS1);
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, Dumping hw/fw registers:\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
+ " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
+ " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
+ " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
+ ha->host_no, __func__, halt_status,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS2),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
+ 0x3c));
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
}
/* Wakeup the dpc routine for this adapter, if needed. */
- if ((start_dpc ||
+ if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
- test_bit(DPC_AEN, &ha->dpc_flags)) &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
- ha->dpc_thread) {
+ test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
- if (ha->dpc_thread &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
- set_bit(AF_DPC_SCHEDULED, &ha->flags);
+ if (ha->dpc_thread)
queue_work(ha->dpc_thread, &ha->dpc_work);
- }
}
/**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
- goto do_dpc_exit;
+ return;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
- goto do_dpc_exit;
+ return;
}
if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
}
}
-do_dpc_exit:
- clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6031557..6104928 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf..a4b9cdb 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -947,6 +949,48 @@ retry_tur:
}
/**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list: scsi commands in error recovery.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: boolean on whether a STU (START STOP UNIT) command should be tried in addition to TUR.
+ *
+ * Description:
+ * Tests if devices are in a working state. Commands to devices now in
+ * a working state are sent to the done_q while commands to devices which
+ * are still failing to respond are returned to the work_q for more
+ * processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+ struct list_head *work_q,
+ struct list_head *done_q, int try_stu)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
+ int finish_cmds;
+
+ while (!list_empty(cmd_list)) {
+ scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+ sdev = scmd->device;
+
+ finish_cmds = !scsi_device_online(scmd->device) ||
+ (try_stu && !scsi_eh_try_stu(scmd) &&
+ !scsi_eh_tur(scmd)) ||
+ !scsi_eh_tur(scmd);
+
+ list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+ if (scmd->device == sdev) {
+ if (finish_cmds)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, work_q);
+ }
+ }
+ return list_empty(work_q);
+}
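Every error-recovery stage below now follows the same shape: commands whose abort/reset succeeded are parked on a local check_list, and this helper decides, once per device, whether they are finished or must go back on work_q for further escalation. A condensed sketch of the calling pattern (example_eh_stage and recovery_action_succeeded are placeholders, not functions in this file):

	static int example_eh_stage(struct list_head *work_q,
				    struct list_head *done_q)
	{
		LIST_HEAD(check_list);
		struct scsi_cmnd *scmd, *next;

		list_for_each_entry_safe(scmd, next, work_q, eh_entry)
			if (recovery_action_succeeded(scmd))
				list_move_tail(&scmd->eh_entry, &check_list);

		/* last argument: 1 = also try START STOP UNIT before the TUR */
		return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
	}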
+
+
+/**
* scsi_eh_abort_cmds - abort pending commands.
* @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
- }
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
scmd));
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
LIST_HEAD(tmp_list);
+ LIST_HEAD(check_list);
list_splice_init(work_q, &tmp_list);
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
if (scmd_id(scmd) != id)
continue;
- if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
- && (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
+ if (rtn == SUCCESS)
+ list_move_tail(&scmd->eh_entry, &check_list);
+ else if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
/* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *chan_scmd, *next;
+ LIST_HEAD(check_list);
unsigned int channel;
int rtn;
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
rtn = scsi_try_bus_reset(chan_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (channel == scmd_channel(scmd))
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd))
+ if (channel == scmd_channel(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd,
done_q);
+ else
+ list_move_tail(&scmd->eh_entry,
+ &check_list);
+ }
}
} else {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
channel));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ if (rtn == SUCCESS) {
+ list_splice_init(work_q, &check_list);
+ } else if (rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
- !scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
}
} else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
current->comm));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
/**
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index f46855c..ad747dc 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -381,11 +381,6 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
-/**
- * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
- * @s: output goes here
- * @p: not used
- */
static int always_match(struct device *dev, void *data)
{
return 1;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289..2bea4f0 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
trace_seq_putc(p, 0);
return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e..953773c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = sdkp->lbprz;
- q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
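The change above matters because sdkp->unmap_alignment is reported by the device in logical blocks, while q->limits.discard_alignment is expressed in bytes. A quick worked example, assuming 512-byte logical blocks:

	unsigned int unmap_alignment    = 8;	/* from the device, in logical blocks */
	unsigned int logical_block_size = 512;

	/* before: 8 (misread by the block layer as 8 bytes)        */
	/* after:  8 * 512 = 4096, i.e. discards aligned to 4 KiB   */
	unsigned int discard_alignment  = unmap_alignment * logical_block_size;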
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int dbd;
int modepage;
+ int first_len;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
int old_wce = sdkp->WCE;
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
- if (sdp->skip_ms_page_8)
- goto defaults;
-
- if (sdp->type == TYPE_RBC) {
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+ goto defaults;
+ else {
+ if (sdp->skip_ms_page_3f)
+ goto defaults;
+ modepage = 0x3F;
+ if (sdp->use_192_bytes_for_3f)
+ first_len = 192;
+ dbd = 0;
+ }
+ } else if (sdp->type == TYPE_RBC) {
modepage = 6;
dbd = 8;
} else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ &data, &sshdr);
if (!scsi_status_is_good(res))
goto bad_sense;
if (!data.header_length) {
modepage = 6;
+ first_len = 0;
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
}
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
*/
if (len < 3)
goto bad_sense;
- if (len > 20)
- len = 20;
-
- /* Take headers and block descriptors into account */
- len += data.header_length + data.block_descriptor_length;
- if (len > SD_BUF_SIZE)
- goto bad_sense;
+ else if (len > SD_BUF_SIZE) {
+ sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+ len = SD_BUF_SIZE;
+ }
+ if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+ len = 192;
/* Get the data */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
+ if (len > first_len)
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ &data, &sshdr);
if (scsi_status_is_good(res)) {
int offset = data.header_length + data.block_descriptor_length;
- if (offset >= SD_BUF_SIZE - 2) {
- sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
- goto defaults;
+ while (offset < len) {
+ u8 page_code = buffer[offset] & 0x3F;
+ u8 spf = buffer[offset] & 0x40;
+
+ if (page_code == 8 || page_code == 6) {
+ /* We're interested only in the first 3 bytes.
+ */
+ if (len - offset <= 2) {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ } else {
+ modepage = page_code;
+ goto Page_found;
+ }
+ } else {
+ /* Go to the next page */
+ if (spf && len - offset > 3)
+ offset += 4 + (buffer[offset+2] << 8) +
+ buffer[offset+3];
+ else if (!spf && len - offset > 1)
+ offset += 2 + buffer[offset+1];
+ else {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ }
+ }
}
- if ((buffer[offset] & 0x3f) != modepage) {
+ if (modepage == 0x3F) {
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+ "present\n");
+ goto defaults;
+ } else if ((buffer[offset] & 0x3f) != modepage) {
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
goto defaults;
}
-
+ Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
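The page-walking loop above follows the standard MODE SENSE parameter layout: bits 5:0 of a descriptor's first byte carry the page code, bit 6 (0x40) is the SPF (sub-page format) flag, and the descriptor length is in byte 1 for page_0 format pages or in bytes 2-3 (big-endian) for sub-page format pages. A condensed sketch of the stepping logic, using kernel types:

	/* return the buffer offset of the next mode page descriptor */
	static int next_mode_page(const u8 *buffer, int offset)
	{
		bool spf = buffer[offset] & 0x40;

		if (spf)	/* sub-page format: 4-byte header, 16-bit length */
			return offset + 4 +
			       (buffer[offset + 2] << 8) + buffer[offset + 3];
		/* page_0 format: 2-byte header, 8-bit length */
		return offset + 2 + buffer[offset + 1];
	}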
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b..7e22b73 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
"0: bsfw %1,%w0\n\t"
"btr %0,%1\n\t"
"jnc 0b"
- : "=&r" (rv), "=m" (*field) :);
+ : "=&r" (rv), "+m" (*field) :);
return rv;
}
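The constraint change is a correctness fix: "=m" declares the memory operand write-only, letting the compiler assume its previous contents are dead, but the asm reads *field (bsfw) before clearing a bit in it (btr), so the operand must be marked read-write with "+m". A standalone illustration of the same idiom (behaviour is undefined if the mask is zero, as in the original):

	/* find the lowest set bit of *mask and clear it */
	static inline unsigned long pop_lowest_bit(unsigned long *mask)
	{
		unsigned long bit;

		asm("bsf %1, %0\n\t"
		    "btr %0, %1"
		    : "=&r" (bit), "+m" (*mask));
		return bit;
	}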
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fbd96b2..de35c3a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,15 @@ config SPI_BFIN
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_BFIN_SPORT
+ tristate "SPI bus via Blackfin SPORT"
+ depends on BLACKFIN
+ help
+ Enable support for a SPI bus via the Blackfin SPORT peripheral.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_bfin_sport.
+
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fd2fc5f..0f8c69b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c
new file mode 100644
index 0000000..e557ff6
--- /dev/null
+++ b/drivers/spi/spi_bfin_sport.c
@@ -0,0 +1,952 @@
+/*
+ * SPI bus via the Blackfin SPORT peripheral
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_sport.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-sport-spi"
+#define DRV_DESC "SPI bus via the Blackfin SPORT"
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bfin-sport-spi");
+
+enum bfin_sport_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE,
+};
+
+struct bfin_sport_spi_master_data;
+
+struct bfin_sport_transfer_ops {
+ void (*write) (struct bfin_sport_spi_master_data *);
+ void (*read) (struct bfin_sport_spi_master_data *);
+ void (*duplex) (struct bfin_sport_spi_master_data *);
+};
+
+struct bfin_sport_spi_master_data {
+ /* Driver model hookup */
+ struct device *dev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct sport_register __iomem *regs;
+ int err_irq;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ enum bfin_sport_spi_state state;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_sport_spi_slave_data *cur_chip;
+ union {
+ void *tx;
+ u8 *tx8;
+ u16 *tx16;
+ };
+ void *tx_end;
+ union {
+ void *rx;
+ u8 *rx8;
+ u16 *rx16;
+ };
+ void *rx_end;
+
+ int cs_change;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+struct bfin_sport_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+static void
+bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->tcr1, TSPEN);
+ bfin_write_or(&drv_data->regs->rcr1, TSPEN);
+ SSYNC();
+}
+
+static void
+bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
+ bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
+ SSYNC();
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16
+bfin_sport_hz_to_spi_baud(u32 speed_hz)
+{
+ u_long clk, sclk = get_sclk();
+ int div = (sclk / (2 * speed_hz)) - 1;
+
+ if (div < 0)
+ div = 0;
+
+ clk = sclk / (2 * (div + 1));
+
+ if (clk > speed_hz)
+ div++;
+
+ return div;
+}
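For intuition, a worked example of the divider above, assuming the resulting SPORT serial clock is sclk / (2 * (TCLKDIV + 1)) with sclk = 100 MHz and a requested speed of 4 MHz:

	/*
	 * div = 100000000 / (2 * 4000000) - 1 = 11         (integer division)
	 * clk = 100000000 / (2 * (11 + 1))   = 4166666 Hz  > 4 MHz, so div++
	 * final clock = 100000000 / (2 * (12 + 1)) = ~3.85 MHz
	 *
	 * i.e. the divider is always rounded so that the bus never runs
	 * faster than the requested rate.
	 */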
+
+/* Chip select operation functions for cs_change flag */
+static void
+bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 0);
+}
+
+static void
+bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 1);
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+static void
+bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
+ if (!time_before(jiffies, timeout))
+ break;
+ }
+}
+
+static void
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
+ .write = bfin_sport_spi_u8_writer,
+ .read = bfin_sport_spi_u8_reader,
+ .duplex = bfin_sport_spi_u8_duplex,
+};
+
+static void
+bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
+ .write = bfin_sport_spi_u16_writer,
+ .read = bfin_sport_spi_u16_reader,
+ .duplex = bfin_sport_spi_u16_duplex,
+};
+
+/* stop controller and re-config current chip */
+static void
+bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
+
+ bfin_sport_spi_disable(drv_data);
+ dev_dbg(drv_data->dev, "restoring spi ctl state\n");
+
+ bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
+ bfin_write(&drv_data->regs->tcr2, bits);
+ bfin_write(&drv_data->regs->tclkdiv, chip->baud);
+ bfin_write(&drv_data->regs->tfsdiv, bits);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
+ bfin_write(&drv_data->regs->rcr2, bits);
+ SSYNC();
+
+ bfin_sport_spi_cs_active(chip);
+}
+
+/* test if there are more transfers to be done */
+static enum bfin_sport_spi_state
+bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return RUNNING_STATE;
+ }
+
+ return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * dma and pio irqs are blocked; give the finished message back
+ */
+static void
+bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->state = START_STATE;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ if (!drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static irqreturn_t
+sport_err_handler(int irq, void *dev_id)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_id;
+ u16 status;
+
+ dev_dbg(drv_data->dev, "%s enter\n", __func__);
+ status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
+
+ if (status) {
+ bfin_write(&drv_data->regs->stat, status);
+ SSYNC();
+
+ bfin_sport_spi_disable(drv_data);
+ dev_err(drv_data->dev, "status error:%s%s%s%s\n",
+ status & TOVF ? " TOVF" : "",
+ status & TUVF ? " TUVF" : "",
+ status & ROVF ? " ROVF" : "",
+ status & RUVF ? " RUVF" : "");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+bfin_sport_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_sport_spi_master_data *drv_data = (void *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_sport_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u32 tranf_success = 1;
+ u32 transfer_speed;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ if (transfer->speed_hz)
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
+ else
+ transfer_speed = chip->baud;
+ bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
+ SSYNC();
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+	/* Handle abort */
+ if (drv_data->state == ERROR_STATE) {
+ dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ dev_dbg(drv_data->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (drv_data->state == RUNNING_STATE) {
+ dev_dbg(drv_data->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ if (transfer->len == 0) {
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else
+ drv_data->tx = NULL;
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else
+ drv_data->rx = NULL;
+
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
+ if (bits_per_word == 8)
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u16;
+
+ drv_data->state = RUNNING_STATE;
+
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_active(chip);
+
+ dev_dbg(drv_data->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ bits_per_word, transfer->len);
+
+ /* PIO mode write then read */
+ dev_dbg(drv_data->dev, "doing IO transfer\n");
+
+ bfin_sport_spi_enable(drv_data);
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ tranf_success = 0;
+ }
+ bfin_sport_spi_disable(drv_data);
+
+ if (!tranf_success) {
+ dev_dbg(drv_data->dev, "IO write error!\n");
+ drv_data->state = ERROR_STATE;
+ } else {
+		/* Update total bytes transferred */
+ message->actual_length += transfer->len;
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void
+bfin_sport_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_sport_spi_master_data *drv_data;
+ unsigned long flags;
+ struct spi_message *next_msg;
+
+ drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->run) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ next_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+ drv_data->cur_msg = next_msg;
+
+ /* Setup the SSP using the per chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initialize message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ bfin_sport_spi_restore_state(drv_data);
+ dev_dbg(drv_data->dev, "got a message to pump, "
+ "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(drv_data->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue.
+ * And kick off message pumper
+ */
+static int
+bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->run) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+/* Called every time common spi devices change state */
+static int
+bfin_sport_spi_setup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip, *first = NULL;
+ int ret;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ struct bfin5xx_spi_chip *chip_info;
+
+ chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* platform chip_info isn't required */
+ chip_info = spi->controller_data;
+ if (chip_info) {
+ /*
+			 * DITFS and TDTYPE are the only things we don't set, but
+ * they probably shouldn't be changed by people.
+ */
+ if (chip_info->ctl_reg || chip_info->enable_dma) {
+ ret = -EINVAL;
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ goto error;
+ }
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ spi->bits_per_word = chip_info->bits_per_word;
+ }
+ }
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* Translate the common SPI framework settings into our registers;
+	 * the following configuration is the same for tx and rx.
+ */
+
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg &= ~TCKFE;
+ else
+ chip->ctl_reg |= TCKFE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= TLSBIT;
+ else
+ chip->ctl_reg &= ~TLSBIT;
+
+ /* Sport in master mode */
+ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
+
+ chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
+
+ chip->cs_gpio = spi->chip_select;
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret)
+ goto error;
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
+ spi->modalias, spi->bits_per_word);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
+ chip->ctl_reg, spi->chip_select);
+
+ spi_set_ctldata(spi, chip);
+
+ bfin_sport_spi_cs_deactive(chip);
+
+ return ret;
+
+ error:
+ kfree(first);
+ return ret;
+}
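
The cs_chg_udelay, idle_tx_val and bits_per_word fields consumed above arrive through spi->controller_data. Below is a hedged board-file sketch using the usual spi_board_info registration; the device name, speed, bus number, GPIO and header path are placeholders, not values taken from a real board.

#include <linux/spi/spi.h>
#include <asm/bfin5xx_spi.h>	/* assumed location of struct bfin5xx_spi_chip */

static struct bfin5xx_spi_chip example_chip_info = {
	.cs_chg_udelay = 1,	/* extra delay around chip-select changes */
	.idle_tx_val   = 0xffff,	/* value shifted out during rx-only transfers */
	.bits_per_word = 16,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias        = "example-spi-dev",	/* hypothetical slave driver */
		.max_speed_hz    = 5000000,	/* fed to bfin_sport_hz_to_spi_baud() */
		.bus_num         = 1,		/* must match the controller's pdev->id */
		.chip_select     = 10,		/* a GPIO number; CS is a plain GPIO here */
		.controller_data = &example_chip_info,
	},
};

/* From the board init code:
 *	spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
 */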
+
+/*
+ * Callback for the SPI framework:
+ * clean up the driver-specific per-chip data.
+ */
+static void
+bfin_sport_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
+
+ if (!chip)
+ return;
+
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+}
+
+static int
+bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
+ drv_data->workqueue =
+ create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static inline int
+bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message, so poll here instead.
+ */
+ drv_data->run = false;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
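
The comment above names the alternative it rejects. Purely as an illustration of that trade-off (this is not what the driver does), a wait-queue based stop might look roughly like the sketch below; it assumes a hypothetical wait_queue_head_t added to the master data and a matching wake_up() at the point where pump_messages() goes idle.

#include <linux/wait.h>
#include <linux/jiffies.h>

/* Hypothetical alternative to the polling loop above: trade a wake_up()
 * on every pumped message for an event-driven wait here. */
static int example_stop_queue_waitq(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);
	drv_data->run = false;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* assumes a wait_queue_head_t queue_idle member, woken by
	 * pump_messages() once busy == 0 and the list is empty */
	if (!wait_event_timeout(drv_data->queue_idle,
				list_empty(&drv_data->queue) && !drv_data->busy,
				msecs_to_jiffies(5000)))
		return -EBUSY;

	return 0;
}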
+
+static inline int
+bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int __devinit
+bfin_sport_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct resource *res, *ires;
+ struct bfin_sport_spi_master_data *drv_data;
+ int status;
+
+ platform_info = dev->platform_data;
+
+ /* Allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*master) + 16);
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->dev = dev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_sport_spi_cleanup;
+ master->setup = bfin_sport_spi_setup;
+ master->transfer = bfin_sport_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "cannot map registers\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ dev_err(dev, "cannot get IORESOURCE_IRQ\n");
+ status = -ENODEV;
+ goto out_error_get_ires;
+ }
+ drv_data->err_irq = ires->start;
+
+	/* Initialize and start the message queue */
+ status = bfin_sport_spi_init_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = request_irq(drv_data->err_irq, sport_err_handler,
+ 0, "sport_spi_err", drv_data);
+ if (status) {
+ dev_err(dev, "unable to request sport err irq\n");
+ goto out_error_irq;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status) {
+ dev_err(dev, "requesting peripherals failed\n");
+ goto out_error_peripheral;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_master;
+ }
+
+ dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
+ return 0;
+
+ out_error_master:
+ peripheral_free_list(drv_data->pin_req);
+ out_error_peripheral:
+ free_irq(drv_data->err_irq, drv_data);
+ out_error_irq:
+ out_error_queue_alloc:
+ bfin_sport_spi_destroy_queue(drv_data);
+ out_error_get_ires:
+ iounmap(drv_data->regs);
+ out_error_ioremap:
+ out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
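
For completeness, a sketch of the platform device a board file would register for the probe() above to succeed: one memory resource for the SPORT MMRs, one IRQ resource for the error interrupt, and bfin5xx_spi_master platform data. All addresses, IRQ numbers and the device name are placeholders and only assumed to line up with DRV_NAME and the resources requested above.

#include <linux/platform_device.h>

static struct resource example_sport_spi_resources[] = {
	{
		.start = 0xffc00800,	/* placeholder SPORT MMR base; ioremap()ed as drv_data->regs */
		.end   = 0xffc008ff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 10,		/* placeholder error IRQ; becomes drv_data->err_irq */
		.end   = 10,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device example_sport_spi_device = {
	.name          = "bfin-sport-spi",	/* assumed to match DRV_NAME */
	.id            = 1,			/* becomes master->bus_num */
	.num_resources = ARRAY_SIZE(example_sport_spi_resources),
	.resource      = example_sport_spi_resources,
	/* .dev.platform_data would point at a struct bfin5xx_spi_master
	 * supplying num_chipselect and the pin_req list used above */
};

/* Registered from the board setup with
 *	platform_device_register(&example_sport_spi_device);
 */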
+
+/* stop hardware and remove the driver */
+static int __devexit
+bfin_sport_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_sport_spi_destroy_queue(drv_data);
+ if (status)
+ return status;
+
+	/* Disable the SPORT at the peripheral and SoC level */
+ bfin_sport_spi_disable(drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ /* Prevent double remove */
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ /* stop hardware */
+ bfin_sport_spi_disable(drv_data);
+
+ return status;
+}
+
+static int
+bfin_sport_spi_resume(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ /* Enable the SPI interface */
+ bfin_sport_spi_enable(drv_data);
+
+ /* Start the queue running */
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status)
+ dev_err(drv_data->dev, "problem resuming queue\n");
+
+ return status;
+}
+#else
+# define bfin_sport_spi_suspend NULL
+# define bfin_sport_spi_resume NULL
+#endif
+
+static struct platform_driver bfin_sport_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = bfin_sport_spi_probe,
+ .remove = __devexit_p(bfin_sport_spi_remove),
+ .suspend = bfin_sport_spi_suspend,
+ .resume = bfin_sport_spi_resume,
+};
+
+static int __init bfin_sport_spi_init(void)
+{
+ return platform_driver_register(&bfin_sport_spi_driver);
+}
+module_init(bfin_sport_spi_init);
+
+static void __exit bfin_sport_spi_exit(void)
+{
+ platform_driver_unregister(&bfin_sport_spi_driver);
+}
+module_exit(bfin_sport_spi_exit);
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index a393895..32a4087 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return 0;
err_gpios:
- for (; ptr > 0; ptr--)
+ while (--ptr >= 0)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi)
for (ptr = 0; ptr < st->nr_gpio; ptr++)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
+ device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
return 0;
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e46..dee2a2c 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
if (sc->device->tagged_supported) {
switch (sc->tag) {
case HEAD_OF_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_HOQ;
+ sam_task_attr = MSG_HEAD_TAG;
break;
case ORDERED_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_ORDERED;
+ sam_task_attr = MSG_ORDERED_TAG;
break;
default:
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
break;
}
} else
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, TASK_ATTR_SIMPLE,
+ DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
/*
* Allocate the LUN_RESET TMR
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
return 0;
}
-static u64 tcm_loop_pack_lun(unsigned int lun)
-{
- u64 result;
-
- /* LSB of lun into byte 1 big-endian */
- result = ((lun & 0xff) << 8);
- /* use flat space addressing method */
- result |= 0x40 | ((lun >> 8) & 0x3f);
-
- return cpu_to_le64(result);
-}
-
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
@@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
- fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
tf_cg = &fabric->tf_group;
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6..ee6fad9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
return -EINVAL;
}
- if (!(tfo->pack_lun)) {
- printk(KERN_ERR "Missing tfo->pack_lun()\n");
- return -EINVAL;
- }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e208..8407f9c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
{
struct se_device *dev = se_lun->lun_se_dev;
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
}
/*
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
- u32 cdb_offset = 0, lun_count = 0, offset = 8;
- u64 i, lun;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
- lun = 0;
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;
- lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35..331d423 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a10983..59b8b9c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501..4dafeb8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- transport_free_dev_tasks(cmd);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
* head of the struct se_device->execute_task_list, and task_prev
* after that for each subsequent task
*/
- if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
list_add(&task->t_execute_list,
(task_prev != NULL) ?
&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
break;
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
{
unsigned long flags;
+ if (atomic_read(&task->task_execute_queue) == 0) {
+ dump_stack();
+ return;
+ }
+
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
- if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ if (cmd->sam_task_attr == MSG_ACA_TAG) {
DEBUG_STA("SAM Task Attribute ACA"
" emulation is not supported\n");
return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+void transport_generic_free_cmd_intr(
+ struct se_cmd *cmd)
+{
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+}
+EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
- if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
smp_mb__after_atomic_inc();
DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
T_TASK(cmd)->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_list,
&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
- if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_list);
atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
new_active_tasks++;
spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_end_cur->page_link &= ~0x02;
sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
sg_head = sg_head_cur;
sg_link = sg_link_cur;
- task_sg_num = task->task_sg_num;
continue;
}
sg_head = sg_first = &task->task_sg[0];
sg_link = &task->task_sg[task->task_sg_num];
- task_sg_num = task->task_sg_num;
/*
* Check for single task..
*/
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
*/
sg_end = &task->task_sg[task->task_sg_num - 1];
sg_end->page_link &= ~0x02;
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
T_TASK(cmd)->t_tasks_sg_chained = sg_first;
T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
- DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
T_TASK(cmd)->t_tasks_sg_chained_no);
for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
T_TASK(cmd)->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
- sg, sg_page(sg), sg->length, sg->offset);
+ DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
+ i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
if (sg_is_chain(sg))
DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
}
-
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0, 0);
+ transport_free_dev_tasks(cmd);
+
transport_generic_remove(cmd, release_to_pool,
session_reinstatement);
}
@@ -6132,6 +6151,9 @@ get_cmd:
case TRANSPORT_REMOVE:
transport_generic_remove(cmd, 1, 0);
break;
+ case TRANSPORT_FREE_CMD_INTR:
+ transport_generic_free_cmd(cmd, 0, 1, 0);
+ break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e5177..c056a11 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
+ /*
+ * Locate the SAM Task Attr from fc_pri_ta
+ */
+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_HEADQ:
+ task_attr = MSG_HEAD_TAG;
+ break;
+ case FCP_PTA_ORDERED:
+ task_attr = MSG_ORDERED_TAG;
+ break;
+ case FCP_PTA_ACA:
+ task_attr = MSG_ACA_TAG;
+ break;
+ case FCP_PTA_SIMPLE: /* Fallthrough */
+ default:
+ task_attr = MSG_SIMPLE_TAG;
+ }
+
- /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbff..84e868c 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static u64 ft_pack_lun(unsigned int index)
-{
- WARN_ON(index >= 256);
- /* Caller wants this byte-swapped */
- return cpu_to_le64((index & 0xff) << 8);
-}
-
static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_sense_len = ft_get_fabric_sense_len,
.set_fabric_sense_len = ft_set_fabric_sense_len,
.is_state_remove = ft_is_state_remove,
- .pack_lun = ft_pack_lun,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5..0b1c82a 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
- goto unregister_hwmon_device;
+ goto free_mem;
register_sys_interface:
tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
sysfs_attr_init(&tz->temp_input.attr.attr);
result = device_create_file(hwmon->device, &tz->temp_input.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_name;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = device_create_file(hwmon->device,
&tz->temp_crit.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_input;
}
}
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
- unregister_hwmon_device:
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ unregister_input:
device_remove_file(hwmon->device, &tz->temp_input.attr);
+ unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
hwmon_device_unregister(hwmon->device);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 652bdac..6d5d6e6 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1420,7 +1420,7 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
- port->line = pdev->id;
+ port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a..1102ce6 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
return rc;
}
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+ struct pci_dev *companion = NULL;
+
+ /* The xHCI and EHCI controllers are not on the same PCI slot */
+ for_each_pci_dev(companion) {
+ if (!usb_is_intel_switchable_xhci(companion))
+ continue;
+ usb_enable_xhci_ports(companion);
+ return;
+ }
+}
+
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_ehci(pdev))
+ ehci_enable_xhci_companion();
+
// maybe restore FLADJ
if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d..fd93061 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB3_PSSEN 0xD8
+
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ u32 ports_available;
+
+ ports_available = 0xffffffff;
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for all
+ * available ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+ ports_available = 0xffffffff;
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+ "to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
/**
* PCI Quirks for xHCI.
*
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
writel(XHCI_LEGACY_DISABLE_SMI,
base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
hc_init:
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78..b1002a8 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d49..c408e9f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+
retval = xhci_resume(xhci, hibernated);
return retval;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765..cc1485b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
next = ring->dequeue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
next = ring->enqueue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
}
}
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id])
+ if (xhci->devs[slot_id]) {
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci,
+ xhci->devs[slot_id], true);
xhci_free_virt_device(xhci, slot_id);
+ }
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
- xhci_dbg(xhci, "Successful control transfer!\n");
*status = 0;
}
break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
else
*status = 0;
} else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
*status = 0;
}
break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(le32_to_cpu(event->transfer_len)));
+ if (trb_comp_code == COMP_SHORT_TX)
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* Some host controllers give a spurious
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+ goto cleanup;
+ }
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
+ if (trb_comp_code == COMP_SHORT_TX)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
@@ -2149,9 +2148,15 @@ cleanup:
xhci_urb_free_priv(xhci, urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "status = %d\n",
- urb, urb->actual_length, status);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags &
+ URB_SHORT_NOT_OK)) ||
+ status != 0)
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "expected = %x, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
- xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
xhci->error_bitmask |= 1 << 2;
return 0;
}
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
/* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd);
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
break;
case TRB_TYPE(TRB_PORT_STATUS):
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
handle_port_status(xhci, event);
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
ret = handle_tx_event(xhci, &event->trans_event);
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_unlock(&xhci->lock);
return IRQ_NONE;
}
- xhci_dbg(xhci, "op reg status = %08x\n", status);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (unsigned int) le32_to_cpu(trb->link.intr_target),
- (unsigned int) le32_to_cpu(trb->link.control));
-
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
- xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
switch (ep_state) {
case EP_STATE_DISABLED:
/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
- xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56e..d9660eb 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
return ret;
}
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ /* Ignore the slot flag (bit 0), and the default control endpoint flag
+ * (bit 1). The default control endpoint is added during the Address
+ * Device command and is never removed until the slot is disabled.
+ */
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ /* Use hweight32 to count the number of ones in the add flags, or
+ * number of endpoints added. Don't count endpoints that are changed
+ * (both added and dropped).
+ */
+ return hweight32(valid_add_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ return hweight32(valid_drop_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
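
A quick worked example of the counting done by the two helpers above, with invented flag values:

/* Illustrative values only: after the >> 2 shift, suppose
 *	valid_add_flags  = 0x5	(endpoints A and C added)
 *	valid_drop_flags = 0x3	(endpoints A and B dropped)
 * Endpoint A is both added and dropped, i.e. merely changed, and must not
 * be counted in either direction:
 *	new endpoints     = hweight32(0x5) - hweight32(0x5 & 0x3) = 2 - 1 = 1
 *	dropped endpoints = hweight32(0x3) - hweight32(0x5 & 0x3) = 2 - 1 = 1
 */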
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 added_eps;
+
+ added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.\n",
+ xhci->num_active_eps, added_eps,
+ xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += added_eps;
+ xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+ xhci->num_active_eps);
+ return 0;
+}
+
+/*
+ * The xHC failed the configure endpoint command for some other reason, so we
+ * need to revert the resources that the failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_failed_eps;
+
+ num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_failed_eps;
+ xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+ num_failed_eps,
+ xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_dropped_eps;
+
+ num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+ num_dropped_eps,
+ xhci->num_active_eps);
+}
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
virt_dev = xhci->devs[udev->slot_id];
if (command) {
in_ctx = command->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (ret < 0) {
if (command)
list_del(&command->cmd_list);
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, in_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
if (!ctx_change)
- return xhci_configure_endpoint_result(xhci, udev, cmd_status);
- return xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ else
+ ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* If the command failed, remove the reserved resources.
+ * Otherwise, clean up the estimate to include dropped eps.
+ */
+ if (ret)
+ xhci_free_host_resources(xhci, in_ctx);
+ else
+ xhci_finish_resource_reservation(xhci, in_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
@@ -2266,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+ int i;
+ unsigned int num_dropped_eps = 0;
+ unsigned int drop_flags = 0;
+
+ for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+ if (virt_dev->eps[i].ring) {
+ drop_flags |= 1 << i;
+ num_dropped_eps++;
+ }
+ }
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.\n",
+ num_dropped_eps, drop_flags,
+ xhci->num_active_eps);
+}
+
+/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
goto command_cleanup;
}
+ /* Free up host controller endpoint resources */
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Don't delete the default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
@@ -2479,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+ if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.\n",
+ xhci->num_active_eps, xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += 1;
+ xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+ xhci->num_active_eps);
+ return 0;
+}
+
+
+/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock.
- * Use GFP_NOIO, since this function can be called from
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_reserve_host_control_ep_resources(xhci);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ goto disable_slot;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ /* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
- /* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
- spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ goto disable_slot;
}
udev->slot_id = xhci->slot_id;
/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
return 1;
+
+disable_slot:
+ /* Disable slot, if we can do it without mem alloc */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
}
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cf..ac0196e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1123,6 +1123,7 @@ struct xhci_ring {
*/
u32 cycle_state;
unsigned int stream_id;
+ bool last_td_was_short;
};
struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd {
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * any more endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
static inline void xhci_writel(struct xhci_hcd *xhci,
const unsigned int val, __le32 __iomem *regs)
{
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
- regs, val);
writel(val, regs);
}
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
- regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2f7c76a..e224a92 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net)
}
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
if (wmem < sock->sk->sk_sndbuf / 2)
tx_poll_stop(net);
@@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
continue;
}
break;
@@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
vhost_hlen = vq->vhost_hlen;
sock_hlen = vq->sock_hlen;
@@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net)
break;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(vq))) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f302..734e1d7 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&n->dev, vq);
for (;;) {
head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
@@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&n->dev, vq))) {
+ vhost_disable_notify(&n->dev, vq);
continue;
}
break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea..ea966b3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
+#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+
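These two macros work because, with VIRTIO_RING_F_EVENT_IDX negotiated, a 16-bit used_event word sits immediately after the available ring and a 16-bit avail_event word immediately after the used ring, i.e. at index vq->num of each ring. The vring_need_event() helper relied on by the vhost and virtio_ring hunks below decides whether an index advance crossed the event value; it is not part of this hunk, but is expected to look like the sketch below (all arithmetic mod 2^16).

/*
 * For reference: the event-index helper used by the code below.
 */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Signal only if new_idx has just moved past event_idx. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}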
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->last_avail_idx = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
+ vq->signalled_used = 0;
+ vq->signalled_used_valid = false;
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
-static int vq_access_ok(unsigned int num,
+static int vq_access_ok(struct vhost_dev *d, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
- sizeof *avail + num * sizeof *avail->ring) &&
+ sizeof *avail + num * sizeof *avail->ring + s) &&
access_ok(VERIFY_WRITE, used,
- sizeof *used + num * sizeof *used->ring);
+ sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+ void __user *log_base)
{
struct vhost_memory *mp;
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
mp = rcu_dereference_protected(vq->dev->memory,
lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
- vq->num * sizeof *vq->used->ring));
+ vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
+ return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq->dev, vq, vq->log_base);
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
if (r)
return r;
+ vq->signalled_used_valid = false;
return get_user(vq->last_used_idx, &used->idx);
}
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
- if (!vq_access_ok(vq->num,
+ if (!vq_access_ok(d, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
vq = d->vqs + i;
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (vq->private_data && !vq_log_access_ok(vq, base))
+ if (vq->private_data && !vq_log_access_ok(d, vq, base))
r = -EFAULT;
else
vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
/* On success, increment avail index. */
vq->last_avail_idx++;
+
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
eventfd_signal(vq->log_ctx, 1);
}
vq->last_used_idx++;
+	/* If the driver never bothers to signal for a very long while,
+	 * the used index might wrap around. If that happens, invalidate
+	 * the signalled_used index we stored. TODO: make sure the driver
+	 * signals at least once in 2^16 and remove this. */
+ if (unlikely(vq->last_used_idx == vq->signalled_used))
+ vq->signalled_used_valid = false;
return 0;
}
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
unsigned count)
{
struct vring_used_elem __user *used;
+ u16 old, new;
int start;
start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
((void __user *)used - (void __user *)vq->used),
count * sizeof *used);
}
- vq->last_used_idx += count;
+ old = vq->last_used_idx;
+ new = (vq->last_used_idx += count);
+	/* If the driver never bothers to signal for a very long while,
+	 * the used index might wrap around. If that happens, invalidate
+	 * the signalled_used index we stored. TODO: make sure the driver
+	 * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
return 0;
}
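A worked example of the unsigned 16-bit test above, with illustrative values:

/*
 *   old            = 0xfff0, count = 0x20  ->  new = 0x0010 (mod 2^16)
 *   signalled_used = 0xfff8
 *   (u16)(new - signalled_used) = 0x0018
 *   (u16)(new - old)            = 0x0020
 * 0x0018 < 0x0020, so signalled_used falls inside the span the used index
 * just advanced across and must be treated as stale.
 */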
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return r;
}
-/* This actually signals the guest, using eventfd. */
-void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 flags;
-
+ __u16 old, new, event;
+ bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
- if (__get_user(flags, &vq->avail->flags)) {
- vq_err(vq, "Failed to get flags");
- return;
+ if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ unlikely(vq->avail_idx == vq->last_avail_idx))
+ return true;
+
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ __u16 flags;
+ if (__get_user(flags, &vq->avail->flags)) {
+ vq_err(vq, "Failed to get flags");
+ return true;
+ }
+ return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
}
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq->last_used_idx;
+ vq->signalled_used_valid = true;
- /* If they don't want an interrupt, don't signal, unless empty. */
- if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
- (vq->avail_idx != vq->last_avail_idx ||
- !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
- return;
+ if (unlikely(!v))
+ return true;
+ if (get_user(event, vhost_used_event(vq))) {
+ vq_err(vq, "Failed to get used event idx");
+ return true;
+ }
+ return vring_need_event(event, new, old);
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
/* Signal the Guest tell them we used something up. */
- if (vq->call_ctx)
+ if (vq->call_ctx && vhost_notify(dev, vq))
eventfd_signal(vq->call_ctx, 1);
}
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
/* OK, now we need to know about added descriptors. */
-bool vhost_enable_notify(struct vhost_virtqueue *vq)
+bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
u16 avail_idx;
int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r) {
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
- return false;
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ } else {
+ r = put_user(vq->avail_idx, vhost_avail_event(vq));
+ if (r) {
+ vq_err(vq, "Failed to update avail event index at %p: %d\n",
+ vhost_avail_event(vq), r);
+ return false;
+ }
+ }
+ if (unlikely(vq->log_used)) {
+ void __user *used;
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
+ &vq->used->flags : vhost_avail_event(vq);
+ /* Log used flags or event index entry write. Both are 16 bit
+ * fields. */
+ log_write(vq->log_base, vq->log_addr +
+ (used - (void __user *)vq->used),
+ sizeof(u16));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
}
/* We don't need to be notified again. */
-void vhost_disable_notify(struct vhost_virtqueue *vq)
+void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r)
+			vq_err(vq, "Failed to disable notification at %p: %d\n",
+ &vq->used->flags, r);
+ }
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3363ae..8e03379 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -84,6 +84,12 @@ struct vhost_virtqueue {
/* Used flags */
u16 used_flags;
+ /* Last used index value we have signalled on */
+ u16 signalled_used;
+
+	/* Whether the signalled_used value above is valid */
+ bool signalled_used_valid;
+
/* Log writes to used structure. */
bool log_used;
u64 log_addr;
@@ -149,8 +155,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
-void vhost_disable_notify(struct vhost_virtqueue *);
-bool vhost_enable_notify(struct vhost_virtqueue *);
+void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
@@ -162,11 +168,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
} while (0)
enum {
- VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1 << VIRTIO_RING_F_INDIRECT_DESC) |
- (1 << VHOST_F_LOG_ALL) |
- (1 << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1 << VIRTIO_NET_F_MRG_RXBUF),
+ VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) |
+ (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0f1da45..e058ace 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -40,9 +40,6 @@ struct virtio_balloon
/* Waiting for host to ack the pages we released. */
struct completion acked;
- /* Do we have to tell Host *before* we reuse pages? */
- bool tell_host_first;
-
/* The pages we've told the Host we're not using. */
unsigned int num_pages;
struct list_head pages;
@@ -151,13 +148,14 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages--;
}
- if (vb->tell_host_first) {
- tell_host(vb, vb->deflate_vq);
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- } else {
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- tell_host(vb, vb->deflate_vq);
- }
+
+ /*
+	 * Note that if
+	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST)
+	 * is true, we *have* to do it in this order.
+ */
+ tell_host(vb, vb->deflate_vq);
+ release_pages_by_pfn(vb->pfns, vb->num_pfns);
}
static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -325,9 +323,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
- vb->tell_host_first
- = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
-
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb..68b91368 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
/* Host supports indirect buffers */
bool indirect;
+ /* Host publishes avail event idx */
+ bool event;
+
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 new, old;
START_USE(vq);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb();
- vq->vring.avail->idx += vq->num_added;
+ old = vq->vring.avail->idx;
+ new = vq->vring.avail->idx = old + vq->num_added;
vq->num_added = 0;
/* Need to update avail index before checking if we should notify */
virtio_mb();
- if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
+ if (vq->event ?
+ vring_need_event(vring_avail_event(&vq->vring), new, old) :
+ !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
/* Prod other side to tell it about changes. */
vq->notify(&vq->vq);
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
ret = vq->data[i];
detach_buf(vq, i);
vq->last_used_idx++;
+ /* If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call. */
+ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vring_used_event(&vq->vring) = vq->last_used_idx;
+ virtio_mb();
+ }
+
END_USE(vq);
return ret;
}
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vring_used_event(&vq->vring) = vq->last_used_idx;
virtio_mb();
if (unlikely(more_used(vq))) {
END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 bufs;
+
+ START_USE(vq);
+
+ /* We optimistically turn back on interrupts, then check if there was
+ * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ /* TODO: tune this threshold */
+ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
+ vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+ virtio_mb();
+ if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+ END_USE(vq);
+ return false;
+ }
+
+ END_USE(vq);
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
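A minimal sketch of how a driver might use the new helper to batch completion interrupts; the reclaim loop and free_buf() are illustrative, not part of this patch.

/*
 * Illustrative only: reap finished buffers, then ask for an interrupt
 * only after roughly 3/4 of the outstanding buffers have been used.
 */
static void reclaim_used_bufs_sketch(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			free_buf(buf);	/* hypothetical per-driver cleanup */
		/* enable_cb_delayed() returns false when too much is already
		 * pending, in which case we go around and reap again. */
	} while (!virtqueue_enable_cb_delayed(vq));
}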
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
/* No callback? Tell other side not to bother us. */
if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
+ case VIRTIO_RING_F_EVENT_IDX:
+ break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8d7f3e6..7f6c677 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -814,7 +814,6 @@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
{
- dentry_unhash(d);
return v9fs_remove(i, d, 1);
}
@@ -840,9 +839,6 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct p9_fid *newdirfid;
struct p9_wstat wstat;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
P9_DPRINTK(P9_DEBUG_VFS, "\n");
retval = 0;
old_inode = old_dentry->d_inode;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 03330e2..e3e9efc 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -320,8 +320,6 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_ino,
(int)dentry->d_name.len, dentry->d_name.name);
- dentry_unhash(dentry);
-
return affs_remove_header(dentry);
}
@@ -419,9 +417,6 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct buffer_head *bh = NULL;
int retval;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
(u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
(u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 2c4e051..20c106f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -845,8 +845,6 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
_enter("{%x:%u},{%s}",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
- dentry_unhash(dentry);
-
ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
@@ -1148,9 +1146,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct key *key;
int ret;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
vnode = AFS_FS_I(old_dentry->d_inode);
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
diff --git a/fs/attr.c b/fs/attr.c
index 91dbe2a..caf2aa5 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -175,6 +175,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
return -EPERM;
}
+ if ((ia_valid & ATTR_MODE)) {
+ mode_t amode = attr->ia_mode;
+ /* Flag setting protected by i_mutex */
+ if (is_sxid(amode))
+ inode->i_flags &= ~S_NOSEC;
+ }
+
now = current_fs_time(inode->i_sb);
attr->ia_ctime = now;
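The is_sxid() check used above is expected to be the small mode test below, introduced with the S_NOSEC infrastructure rather than in this hunk; it is shown only to make the condition concrete.

/* Expected shape of the helper used above: true for setuid modes, or for
 * setgid modes that also carry group execute. */
static inline int is_sxid(mode_t mode)
{
	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}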
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 87d95a8..f55ae23 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -583,8 +583,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
return -EACCES;
- dentry_unhash(dentry);
-
if (atomic_dec_and_test(&ino->count)) {
p_ino = autofs4_dentry_ino(dentry->d_parent);
if (p_ino && dentry->d_parent != dentry)
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index c7d1d06..b14cebf 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -224,9 +224,6 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct bfs_sb_info *info;
int error = -ENOENT;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_bh = new_bh = NULL;
old_inode = old_dentry->d_inode;
if (S_ISDIR(old_inode->i_mode))
diff --git a/fs/bio.c b/fs/bio.c
index 840a0d7..9bfade8 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -638,10 +638,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block
- * device limitations. The target block device must allow bio's
- * smaller than PAGE_SIZE, so it is always possible to add a single
- * page to an empty bio. This should only be used by REQ_PC bios.
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bio's up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
+ *
+ * This should only be used by REQ_PC bios.
*/
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
@@ -659,10 +660,9 @@ EXPORT_SYMBOL(bio_add_pc_page);
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block
- * device limitations. The target block device must allow bio's
- * smaller than PAGE_SIZE, so it is always possible to add a single
- * page to an empty bio.
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bio's up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
*/
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 332323e..6c093fa9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2524,7 +2524,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode);
+void btrfs_dirty_inode(struct inode *inode, int flags);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bb51bb1..39a9d57 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4294,7 +4294,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode)
+void btrfs_dirty_inode(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
diff --git a/fs/buffer.c b/fs/buffer.c
index 698c6b2..49c9aad 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2382,6 +2382,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
ret = -EAGAIN;
goto out_unlock;
}
+ wait_on_page_writeback(page);
return 0;
out_unlock:
unlock_page(page);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f17006..21de1d6 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -74,8 +74,9 @@ shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
* Run idmap cache shrinker.
*/
static int
-cifs_idmap_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
+ int nr_to_scan = sc->nr_to_scan;
int nr_del = 0;
int nr_rem = 0;
struct rb_root *root;
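The shrinker callback now receives a struct shrink_control carrying nr_to_scan and gfp_mask instead of separate arguments. Below is a minimal sketch written against the new prototype; everything named example_* is hypothetical.

/* Illustrative shrinker using the new struct shrink_control interface. */
static int example_cache_shrink(struct shrinker *shrink,
				struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		example_cache_trim(sc->nr_to_scan, sc->gfp_mask);

	/* Return an estimate of objects still cached. */
	return example_cache_count();
}

static struct shrinker example_shrinker = {
	.shrink	= example_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};
/* register_shrinker(&example_shrinker) at init; unregister_shrinker() on exit. */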
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index a46126f..2b8dae4 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -336,8 +336,6 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
int len = de->d_name.len;
int error;
- dentry_unhash(de);
-
error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
if (!error) {
/* VFS may delete the child */
@@ -361,9 +359,6 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
int new_length = new_dentry->d_name.len;
int error;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
coda_i2f(new_dir), old_length, new_length,
(const char *) old_name, (const char *)new_name);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9d17d35..9a37a9b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1359,8 +1359,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
- dentry_unhash(dentry);
-
if (dentry->d_parent == configfs_sb->s_root)
return -EPERM;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index b8d5c80..58609bd 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1024,25 +1024,25 @@ out:
}
/**
- * contains_ecryptfs_marker - check for the ecryptfs marker
+ * ecryptfs_validate_marker - check for the ecryptfs marker
* @data: The data block in which to check
*
- * Returns one if marker found; zero if not found
+ * Returns zero if marker found; -EINVAL if not found
*/
-static int contains_ecryptfs_marker(char *data)
+static int ecryptfs_validate_marker(char *data)
{
u32 m_1, m_2;
m_1 = get_unaligned_be32(data);
m_2 = get_unaligned_be32(data + 4);
if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2)
- return 1;
+ return 0;
ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; "
"MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2,
MAGIC_ECRYPTFS_MARKER);
ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = "
"[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER));
- return 0;
+ return -EINVAL;
}
struct ecryptfs_flag_map_elem {
@@ -1201,27 +1201,19 @@ int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code)
return rc;
}
-int ecryptfs_read_and_validate_header_region(char *data,
- struct inode *ecryptfs_inode)
+int ecryptfs_read_and_validate_header_region(struct inode *inode)
{
- struct ecryptfs_crypt_stat *crypt_stat =
- &(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
+ u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
+ u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
int rc;
- if (crypt_stat->extent_size == 0)
- crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
- rc = ecryptfs_read_lower(data, 0, crypt_stat->extent_size,
- ecryptfs_inode);
- if (rc < 0) {
- printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n",
- __func__, rc);
- goto out;
- }
- if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) {
- rc = -EINVAL;
- } else
- rc = 0;
-out:
+ rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
+ inode);
+ if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return rc >= 0 ? -EINVAL : rc;
+ rc = ecryptfs_validate_marker(marker);
+ if (!rc)
+ ecryptfs_i_size_init(file_size, inode);
return rc;
}
@@ -1242,8 +1234,7 @@ ecryptfs_write_header_metadata(char *virt,
(*written) = 6;
}
-struct kmem_cache *ecryptfs_header_cache_1;
-struct kmem_cache *ecryptfs_header_cache_2;
+struct kmem_cache *ecryptfs_header_cache;
/**
* ecryptfs_write_headers_virt
@@ -1496,11 +1487,9 @@ static int ecryptfs_read_headers_virt(char *page_virt,
crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
offset = ECRYPTFS_FILE_SIZE_BYTES;
- rc = contains_ecryptfs_marker(page_virt + offset);
- if (rc == 0) {
- rc = -EINVAL;
+ rc = ecryptfs_validate_marker(page_virt + offset);
+ if (rc)
goto out;
- }
if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
@@ -1567,20 +1556,21 @@ out:
return rc;
}
-int ecryptfs_read_and_validate_xattr_region(char *page_virt,
- struct dentry *ecryptfs_dentry)
+int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
+ struct inode *inode)
{
+ u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
+ u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
int rc;
- rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_dentry->d_inode);
- if (rc)
- goto out;
- if (!contains_ecryptfs_marker(page_virt + ECRYPTFS_FILE_SIZE_BYTES)) {
- printk(KERN_WARNING "Valid data found in [%s] xattr, but "
- "the marker is invalid\n", ECRYPTFS_XATTR_NAME);
- rc = -EINVAL;
- }
-out:
+ rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
+ ECRYPTFS_XATTR_NAME, file_size,
+ ECRYPTFS_SIZE_AND_MARKER_BYTES);
+ if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return rc >= 0 ? -EINVAL : rc;
+ rc = ecryptfs_validate_marker(marker);
+ if (!rc)
+ ecryptfs_i_size_init(file_size, inode);
return rc;
}
@@ -1610,7 +1600,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
mount_crypt_stat);
/* Read the first page from the underlying file */
- page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
+ page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER);
if (!page_virt) {
rc = -ENOMEM;
printk(KERN_ERR "%s: Unable to allocate page_virt\n",
@@ -1655,7 +1645,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
out:
if (page_virt) {
memset(page_virt, 0, PAGE_CACHE_SIZE);
- kmem_cache_free(ecryptfs_header_cache_1, page_virt);
+ kmem_cache_free(ecryptfs_header_cache, page_virt);
}
return rc;
}
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index e702827..43c7c43 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -200,6 +200,8 @@ ecryptfs_get_key_payload_data(struct key *key)
#define MAGIC_ECRYPTFS_MARKER 0x3c81b7f5
#define MAGIC_ECRYPTFS_MARKER_SIZE_BYTES 8 /* 4*2 */
#define ECRYPTFS_FILE_SIZE_BYTES (sizeof(u64))
+#define ECRYPTFS_SIZE_AND_MARKER_BYTES (ECRYPTFS_FILE_SIZE_BYTES \
+ + MAGIC_ECRYPTFS_MARKER_SIZE_BYTES)
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
#define ECRYPTFS_DEFAULT_HASH "md5"
@@ -603,8 +605,7 @@ extern struct kmem_cache *ecryptfs_file_info_cache;
extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
-extern struct kmem_cache *ecryptfs_header_cache_1;
-extern struct kmem_cache *ecryptfs_header_cache_2;
+extern struct kmem_cache *ecryptfs_header_cache;
extern struct kmem_cache *ecryptfs_xattr_cache;
extern struct kmem_cache *ecryptfs_key_record_cache;
extern struct kmem_cache *ecryptfs_key_sig_cache;
@@ -625,14 +626,9 @@ struct ecryptfs_open_req {
struct list_head kthread_ctl_list;
};
-#define ECRYPTFS_INTERPOSE_FLAG_D_ADD 0x00000001
-int ecryptfs_interpose(struct dentry *hidden_dentry,
- struct dentry *this_dentry, struct super_block *sb,
- u32 flags);
+struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb);
void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
-int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
- struct dentry *lower_dentry,
- struct inode *ecryptfs_dir_inode);
int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
size_t *decrypted_name_size,
struct dentry *ecryptfs_dentry,
@@ -664,10 +660,9 @@ int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written);
-int ecryptfs_read_and_validate_header_region(char *data,
- struct inode *ecryptfs_inode);
-int ecryptfs_read_and_validate_xattr_region(char *page_virt,
- struct dentry *ecryptfs_dentry);
+int ecryptfs_read_and_validate_header_region(struct inode *inode);
+int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
+ struct inode *inode);
u8 ecryptfs_code_for_cipher_string(char *cipher_name, size_t key_bytes);
int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code);
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat);
@@ -679,9 +674,6 @@ int
ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *src, struct dentry *ecryptfs_dentry);
int ecryptfs_truncate(struct dentry *dentry, loff_t new_length);
-int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode);
-int ecryptfs_inode_set(struct inode *inode, void *lower_inode);
-void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode);
ssize_t
ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
void *value, size_t size);
@@ -761,7 +753,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
struct dentry *lower_dentry,
struct vfsmount *lower_mnt,
const struct cred *cred);
-int ecryptfs_get_lower_file(struct dentry *ecryptfs_dentry);
+int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode);
void ecryptfs_put_lower_file(struct inode *inode);
int
ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 566e547..4ec9eb0 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -191,7 +191,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- rc = ecryptfs_get_lower_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry, inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index bc116b9..7349ade 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -51,6 +51,97 @@ static void unlock_dir(struct dentry *dir)
dput(dir);
}
+static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
+{
+ if (ecryptfs_inode_to_lower(inode) == (struct inode *)lower_inode)
+ return 1;
+ return 0;
+}
+
+static int ecryptfs_inode_set(struct inode *inode, void *opaque)
+{
+ struct inode *lower_inode = opaque;
+
+ ecryptfs_set_inode_lower(inode, lower_inode);
+ fsstack_copy_attr_all(inode, lower_inode);
+ /* i_size will be overwritten for encrypted regular files */
+ fsstack_copy_inode_size(inode, lower_inode);
+ inode->i_ino = lower_inode->i_ino;
+ inode->i_version++;
+ inode->i_mapping->a_ops = &ecryptfs_aops;
+
+ if (S_ISLNK(inode->i_mode))
+ inode->i_op = &ecryptfs_symlink_iops;
+ else if (S_ISDIR(inode->i_mode))
+ inode->i_op = &ecryptfs_dir_iops;
+ else
+ inode->i_op = &ecryptfs_main_iops;
+
+ if (S_ISDIR(inode->i_mode))
+ inode->i_fop = &ecryptfs_dir_fops;
+ else if (special_file(inode->i_mode))
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ else
+ inode->i_fop = &ecryptfs_main_fops;
+
+ return 0;
+}
+
+static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb)
+{
+ struct inode *inode;
+
+ if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
+ return ERR_PTR(-EXDEV);
+ if (!igrab(lower_inode))
+ return ERR_PTR(-ESTALE);
+ inode = iget5_locked(sb, (unsigned long)lower_inode,
+ ecryptfs_inode_test, ecryptfs_inode_set,
+ lower_inode);
+ if (!inode) {
+ iput(lower_inode);
+ return ERR_PTR(-EACCES);
+ }
+ if (!(inode->i_state & I_NEW))
+ iput(lower_inode);
+
+ return inode;
+}
+
+struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb)
+{
+ struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
+
+ if (!IS_ERR(inode) && (inode->i_state & I_NEW))
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+/**
+ * ecryptfs_interpose
+ * @lower_dentry: Existing dentry in the lower filesystem
+ * @dentry: ecryptfs' dentry
+ * @sb: ecryptfs's super_block
+ *
+ * Interposes upper and lower dentries.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_interpose(struct dentry *lower_dentry,
+ struct dentry *dentry, struct super_block *sb)
+{
+ struct inode *inode = ecryptfs_get_inode(lower_dentry->d_inode, sb);
+
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ d_instantiate(dentry, inode);
+
+ return 0;
+}
+
/**
* ecryptfs_create_underlying_file
* @lower_dir_inode: inode of the parent in the lower fs of the new file
@@ -129,7 +220,7 @@ ecryptfs_do_create(struct inode *directory_inode,
goto out_lock;
}
rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- directory_inode->i_sb, 0);
+ directory_inode->i_sb);
if (rc) {
ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
goto out_lock;
@@ -168,7 +259,8 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
"context; rc = [%d]\n", rc);
goto out;
}
- rc = ecryptfs_get_lower_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry,
+ ecryptfs_dentry->d_inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
@@ -215,102 +307,90 @@ out:
return rc;
}
+static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
+{
+ struct ecryptfs_crypt_stat *crypt_stat;
+ int rc;
+
+ rc = ecryptfs_get_lower_file(dentry, inode);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to initialize "
+ "the lower file for the dentry with name "
+ "[%s]; rc = [%d]\n", __func__,
+ dentry->d_name.name, rc);
+ return rc;
+ }
+
+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+ /* TODO: lock for crypt_stat comparison */
+ if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
+ ecryptfs_set_default_sizes(crypt_stat);
+
+ rc = ecryptfs_read_and_validate_header_region(inode);
+ ecryptfs_put_lower_file(inode);
+ if (rc) {
+ rc = ecryptfs_read_and_validate_xattr_region(dentry, inode);
+ if (!rc)
+ crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
+ }
+
+ /* Must return 0 to allow non-eCryptfs files to be looked up, too */
+ return 0;
+}
+
/**
- * ecryptfs_lookup_and_interpose_lower - Perform a lookup
+ * ecryptfs_lookup_interpose - Dentry interposition for a lookup
*/
-int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
- struct dentry *lower_dentry,
- struct inode *ecryptfs_dir_inode)
+static int ecryptfs_lookup_interpose(struct dentry *dentry,
+ struct dentry *lower_dentry,
+ struct inode *dir_inode)
{
- struct dentry *lower_dir_dentry;
+ struct inode *inode, *lower_inode = lower_dentry->d_inode;
+ struct ecryptfs_dentry_info *dentry_info;
struct vfsmount *lower_mnt;
- struct inode *lower_inode;
- struct ecryptfs_crypt_stat *crypt_stat;
- char *page_virt = NULL;
- int put_lower = 0, rc = 0;
-
- lower_dir_dentry = lower_dentry->d_parent;
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
- ecryptfs_dentry->d_parent));
- lower_inode = lower_dentry->d_inode;
- fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode);
+ int rc = 0;
+
+ lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
+ fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
BUG_ON(!lower_dentry->d_count);
- ecryptfs_set_dentry_private(ecryptfs_dentry,
- kmem_cache_alloc(ecryptfs_dentry_info_cache,
- GFP_KERNEL));
- if (!ecryptfs_dentry_to_private(ecryptfs_dentry)) {
- rc = -ENOMEM;
+
+ dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
+ ecryptfs_set_dentry_private(dentry, dentry_info);
+ if (!dentry_info) {
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to allocate ecryptfs_dentry_info struct\n",
__func__);
- goto out_put;
+ dput(lower_dentry);
+ mntput(lower_mnt);
+ d_drop(dentry);
+ return -ENOMEM;
}
- ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
- ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
+ ecryptfs_set_dentry_lower(dentry, lower_dentry);
+ ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
+
if (!lower_dentry->d_inode) {
/* We want to add because we couldn't find in lower */
- d_add(ecryptfs_dentry, NULL);
- goto out;
- }
- rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- ecryptfs_dir_inode->i_sb,
- ECRYPTFS_INTERPOSE_FLAG_D_ADD);
- if (rc) {
- printk(KERN_ERR "%s: Error interposing; rc = [%d]\n",
- __func__, rc);
- goto out;
- }
- if (S_ISDIR(lower_inode->i_mode))
- goto out;
- if (S_ISLNK(lower_inode->i_mode))
- goto out;
- if (special_file(lower_inode->i_mode))
- goto out;
- /* Released in this function */
- page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
- if (!page_virt) {
- printk(KERN_ERR "%s: Cannot kmem_cache_zalloc() a page\n",
- __func__);
- rc = -ENOMEM;
- goto out;
+ d_add(dentry, NULL);
+ return 0;
}
- rc = ecryptfs_get_lower_file(ecryptfs_dentry);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to initialize "
- "the lower file for the dentry with name "
- "[%s]; rc = [%d]\n", __func__,
- ecryptfs_dentry->d_name.name, rc);
- goto out_free_kmem;
+ inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb);
+ if (IS_ERR(inode)) {
+ printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n",
+ __func__, PTR_ERR(inode));
+ return PTR_ERR(inode);
}
- put_lower = 1;
- crypt_stat = &ecryptfs_inode_to_private(
- ecryptfs_dentry->d_inode)->crypt_stat;
- /* TODO: lock for crypt_stat comparison */
- if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
- ecryptfs_set_default_sizes(crypt_stat);
- rc = ecryptfs_read_and_validate_header_region(page_virt,
- ecryptfs_dentry->d_inode);
- if (rc) {
- memset(page_virt, 0, PAGE_CACHE_SIZE);
- rc = ecryptfs_read_and_validate_xattr_region(page_virt,
- ecryptfs_dentry);
+ if (S_ISREG(inode->i_mode)) {
+ rc = ecryptfs_i_size_read(dentry, inode);
if (rc) {
- rc = 0;
- goto out_free_kmem;
+ make_bad_inode(inode);
+ return rc;
}
- crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
- ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
-out_free_kmem:
- kmem_cache_free(ecryptfs_header_cache_2, page_virt);
- goto out;
-out_put:
- dput(lower_dentry);
- mntput(lower_mnt);
- d_drop(ecryptfs_dentry);
-out:
- if (put_lower)
- ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
+
+ if (inode->i_state & I_NEW)
+ unlock_new_inode(inode);
+ d_add(dentry, inode);
+
return rc;
}
@@ -353,12 +433,12 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
goto out_d_drop;
}
if (lower_dentry->d_inode)
- goto lookup_and_interpose;
+ goto interpose;
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (!(mount_crypt_stat
&& (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)))
- goto lookup_and_interpose;
+ goto interpose;
dput(lower_dentry);
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &encrypted_and_encoded_name_size,
@@ -381,9 +461,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
encrypted_and_encoded_name);
goto out_d_drop;
}
-lookup_and_interpose:
- rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
- ecryptfs_dir_inode);
+interpose:
+ rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry,
+ ecryptfs_dir_inode);
goto out;
out_d_drop:
d_drop(ecryptfs_dentry);
@@ -411,7 +491,7 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
lower_new_dentry);
if (rc || !lower_new_dentry->d_inode)
goto out_lock;
- rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
+ rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
@@ -478,7 +558,7 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
kfree(encoded_symname);
if (rc || !lower_dentry->d_inode)
goto out_lock;
- rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
+ rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
@@ -502,7 +582,7 @@ static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
if (rc || !lower_dentry->d_inode)
goto out;
- rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
+ rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
@@ -521,8 +601,6 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
struct dentry *lower_dir_dentry;
int rc;
- dentry_unhash(dentry);
-
lower_dentry = ecryptfs_dentry_to_lower(dentry);
dget(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
@@ -552,7 +630,7 @@ ecryptfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
if (rc || !lower_dentry->d_inode)
goto out;
- rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
+ rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
@@ -575,9 +653,6 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dir_dentry;
struct dentry *trap = NULL;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
dget(lower_old_dentry);
@@ -755,7 +830,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
lower_ia->ia_valid &= ~ATTR_SIZE;
return 0;
}
- rc = ecryptfs_get_lower_file(dentry);
+ rc = ecryptfs_get_lower_file(dentry, inode);
if (rc)
return rc;
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
@@ -911,7 +986,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
- rc = ecryptfs_get_lower_file(dentry);
+ rc = ecryptfs_get_lower_file(dentry, inode);
if (rc) {
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
@@ -1084,21 +1159,6 @@ out:
return rc;
}
-int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode)
-{
- if ((ecryptfs_inode_to_lower(inode)
- == (struct inode *)candidate_lower_inode))
- return 1;
- else
- return 0;
-}
-
-int ecryptfs_inode_set(struct inode *inode, void *lower_inode)
-{
- ecryptfs_init_inode(inode, (struct inode *)lower_inode);
- return 0;
-}
-
const struct inode_operations ecryptfs_symlink_iops = {
.readlink = ecryptfs_readlink,
.follow_link = ecryptfs_follow_link,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 89b9338..9f1bb74 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -135,12 +135,12 @@ static int ecryptfs_init_lower_file(struct dentry *dentry,
return rc;
}
-int ecryptfs_get_lower_file(struct dentry *dentry)
+int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode)
{
- struct ecryptfs_inode_info *inode_info =
- ecryptfs_inode_to_private(dentry->d_inode);
+ struct ecryptfs_inode_info *inode_info;
int count, rc = 0;
+ inode_info = ecryptfs_inode_to_private(inode);
mutex_lock(&inode_info->lower_file_mutex);
count = atomic_inc_return(&inode_info->lower_file_count);
if (WARN_ON_ONCE(count < 1))
@@ -168,75 +168,6 @@ void ecryptfs_put_lower_file(struct inode *inode)
}
}
-static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
- struct super_block *sb)
-{
- struct inode *inode;
- int rc = 0;
-
- if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) {
- rc = -EXDEV;
- goto out;
- }
- if (!igrab(lower_inode)) {
- rc = -ESTALE;
- goto out;
- }
- inode = iget5_locked(sb, (unsigned long)lower_inode,
- ecryptfs_inode_test, ecryptfs_inode_set,
- lower_inode);
- if (!inode) {
- rc = -EACCES;
- iput(lower_inode);
- goto out;
- }
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
- else
- iput(lower_inode);
- if (S_ISLNK(lower_inode->i_mode))
- inode->i_op = &ecryptfs_symlink_iops;
- else if (S_ISDIR(lower_inode->i_mode))
- inode->i_op = &ecryptfs_dir_iops;
- if (S_ISDIR(lower_inode->i_mode))
- inode->i_fop = &ecryptfs_dir_fops;
- if (special_file(lower_inode->i_mode))
- init_special_inode(inode, lower_inode->i_mode,
- lower_inode->i_rdev);
- fsstack_copy_attr_all(inode, lower_inode);
- /* This size will be overwritten for real files w/ headers and
- * other metadata */
- fsstack_copy_inode_size(inode, lower_inode);
- return inode;
-out:
- return ERR_PTR(rc);
-}
-
-/**
- * ecryptfs_interpose
- * @lower_dentry: Existing dentry in the lower filesystem
- * @dentry: ecryptfs' dentry
- * @sb: ecryptfs's super_block
- * @flags: flags to govern behavior of interpose procedure
- *
- * Interposes upper and lower dentries.
- *
- * Returns zero on success; non-zero otherwise
- */
-int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
- struct super_block *sb, u32 flags)
-{
- struct inode *lower_inode = lower_dentry->d_inode;
- struct inode *inode = ecryptfs_get_inode(lower_inode, sb);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
- if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD)
- d_add(dentry, inode);
- else
- d_instantiate(dentry, inode);
- return 0;
-}
-
enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
ecryptfs_opt_ecryptfs_key_bytes,
@@ -704,13 +635,8 @@ static struct ecryptfs_cache_info {
.size = sizeof(struct ecryptfs_sb_info),
},
{
- .cache = &ecryptfs_header_cache_1,
- .name = "ecryptfs_headers_1",
- .size = PAGE_CACHE_SIZE,
- },
- {
- .cache = &ecryptfs_header_cache_2,
- .name = "ecryptfs_headers_2",
+ .cache = &ecryptfs_header_cache,
+ .name = "ecryptfs_headers",
.size = PAGE_CACHE_SIZE,
},
{
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 245b517..dbd52d40 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -93,22 +93,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
}
/**
- * ecryptfs_init_inode
- * @inode: The ecryptfs inode
- *
- * Set up the ecryptfs inode.
- */
-void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode)
-{
- ecryptfs_set_inode_lower(inode, lower_inode);
- inode->i_ino = lower_inode->i_ino;
- inode->i_version++;
- inode->i_op = &ecryptfs_main_iops;
- inode->i_fop = &ecryptfs_main_fops;
- inode->i_mapping->a_ops = &ecryptfs_aops;
-}
-
-/**
* ecryptfs_statfs
* @sb: The ecryptfs super block
* @buf: The struct kstatfs to fill in with stats
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 68b2e43..3451d23 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3392,7 +3392,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*/
-void ext3_dirty_inode(struct inode *inode)
+void ext3_dirty_inode(struct inode *inode, int flags)
{
handle_t *current_handle = ext3_journal_current_handle();
handle_t *handle;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a74b89c..1921392 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1813,7 +1813,7 @@ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
extern int ext4_sync_inode(handle_t *, struct inode *);
-extern void ext4_dirty_inode(struct inode *);
+extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_can_truncate(struct inode *inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 50d0e9c..a5763e3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5733,7 +5733,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*/
-void ext4_dirty_inode(struct inode *inode)
+void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index be15437..3b222da 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -326,8 +326,6 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- dentry_unhash(dentry);
-
lock_super(sb);
/*
* Check whether the directory is not in use, then check
@@ -459,9 +457,6 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
err = fat_scan(old_dir, old_name, &old_sinfo);
if (err) {
err = -EIO;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c61a678..20b4ea5 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -824,8 +824,6 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- dentry_unhash(dentry);
-
lock_super(sb);
err = fat_dir_empty(inode);
@@ -933,9 +931,6 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 34591ee8..0f015a0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1007,9 +1007,6 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
* In short, make sure you hash any inodes _before_ you start marking
* them dirty.
*
- * This function *must* be atomic for the I_DIRTY_PAGES case -
- * set_page_dirty() is called under spinlock in several places.
- *
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
* the kernel-internal blockdev inode represents the dirtying time of the
@@ -1028,7 +1025,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode);
+ sb->s_op->dirty_inode(inode, flags);
}
/*
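With this change every ->dirty_inode implementation gains the I_DIRTY_* flags that triggered the call, as the ext3/ext4/btrfs hunks above reflect. A minimal sketch of an implementation using the new signature; the examplefs_* helpers are hypothetical.

/* Illustrative ->dirty_inode with the new flags argument. */
static void examplefs_dirty_inode(struct inode *inode, int flags)
{
	/* 'flags' is the I_DIRTY_* mask passed to __mark_inode_dirty(). */
	if (flags & I_DIRTY_DATASYNC)
		examplefs_force_commit(inode->i_sb);	/* hypothetical */
	examplefs_mark_inode_dirty(inode);		/* hypothetical */
}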
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0d0e3fa..d5016071 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -667,8 +667,6 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
if (IS_ERR(req))
return PTR_ERR(req);
- dentry_unhash(entry);
-
req->in.h.opcode = FUSE_RMDIR;
req->in.h.nodeid = get_node_id(dir);
req->in.numargs = 1;
@@ -694,9 +692,6 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
struct fuse_conn *fc = get_fuse_conn(olddir);
struct fuse_req *req = fuse_get_req(fc);
- if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode))
- dentry_unhash(newent);
-
if (IS_ERR(req))
return PTR_ERR(req);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 1cb70cd..b4d70b1 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -253,9 +253,6 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int res;
- if (S_ISDIR(inode->i_mode))
- dentry_unhash(dentry);
-
if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
return -ENOTEMPTY;
res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
@@ -286,9 +283,6 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
- if (S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
res = hfs_remove(new_dir, new_dentry);
if (res)
return res;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index b288350..4df5059 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -370,8 +370,6 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int res;
- dentry_unhash(dentry);
-
if (inode->i_size != 2)
return -ENOTEMPTY;
@@ -469,12 +467,10 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
- if (S_ISDIR(new_dentry->d_inode->i_mode)) {
- dentry_unhash(new_dentry);
+ if (S_ISDIR(new_dentry->d_inode->i_mode))
res = hfsplus_rmdir(new_dir, new_dentry);
- } else {
+ else
res = hfsplus_unlink(new_dir, new_dentry);
- }
if (res)
return res;
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e6816b9..2638c834e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -683,8 +683,6 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
char *file;
int err;
- dentry_unhash(dentry);
-
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = do_rmdir(file);
@@ -738,9 +736,6 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
char *from_name, *to_name;
int err;
- if (to->d_inode && S_ISDIR(to->d_inode->i_mode))
- dentry_unhash(to);
-
if ((from_name = dentry_name(from)) == NULL)
return -ENOMEM;
if ((to_name = dentry_name(to)) == NULL) {
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index ff0ce21..acf95da 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -439,8 +439,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
int err;
int r;
- dentry_unhash(dentry);
-
hpfs_adjust_length(name, &len);
hpfs_lock(dir->i_sb);
err = -ENOENT;
@@ -535,9 +533,6 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct fnode *fnode;
int err;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
if ((err = hpfs_chk_name(new_name, &new_len))) return err;
err = 0;
hpfs_adjust_length(old_name, &old_len);
diff --git a/fs/inode.c b/fs/inode.c
index 990d284..0f7e88a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1,9 +1,7 @@
/*
- * linux/fs/inode.c
- *
* (C) 1997 Linus Torvalds
+ * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
*/
-
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
@@ -27,10 +25,11 @@
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
+#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"
/*
- * inode locking rules.
+ * Inode locking rules:
*
* inode->i_lock protects:
* inode->i_state, inode->i_hash, __iget()
@@ -60,54 +59,11 @@
* inode_hash_lock
*/
-/*
- * This is needed for the following functions:
- * - inode_has_buffers
- * - invalidate_bdev
- *
- * FIXME: remove all knowledge of the buffer layer from this file
- */
-#include <linux/buffer_head.h>
-
-/*
- * New inode.c implementation.
- *
- * This implementation has the basic premise of trying
- * to be extremely low-overhead and SMP-safe, yet be
- * simple enough to be "obviously correct".
- *
- * Famous last words.
- */
-
-/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
-
-/* #define INODE_PARANOIA 1 */
-/* #define INODE_DEBUG 1 */
-
-/*
- * Inode lookup is no longer as critical as it used to be:
- * most of the lookups are going to be through the dcache.
- */
-#define I_HASHBITS i_hash_shift
-#define I_HASHMASK i_hash_mask
-
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
-/*
- * Each inode can be on two separate lists. One is
- * the hash list of the inode, used for lookups. The
- * other linked list is the "type" list:
- * "in_use" - valid inode, i_count > 0, i_nlink > 0
- * "dirty" - as "in_use" but also dirty
- * "unused" - valid inode, i_count = 0
- *
- * A "dirty" list is maintained for each super block,
- * allowing for low-overhead inode sync() operations.
- */
-
static LIST_HEAD(inode_lru);
static DEFINE_SPINLOCK(inode_lru_lock);
@@ -424,8 +380,8 @@ static unsigned long hash(struct super_block *sb, unsigned long hashval)
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
- tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
- return tmp & I_HASHMASK;
+ tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
+ return tmp & i_hash_mask;
}
/**
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 05f7332..4bca6a2 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -75,7 +75,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
struct nameidata *nd)
{
struct jffs2_inode_info *dir_f;
- struct jffs2_sb_info *c;
struct jffs2_full_dirent *fd = NULL, *fd_list;
uint32_t ino = 0;
struct inode *inode = NULL;
@@ -86,7 +85,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
return ERR_PTR(-ENAMETOOLONG);
dir_f = JFFS2_INODE_INFO(dir_i);
- c = JFFS2_SB_INFO(dir_i->i_sb);
mutex_lock(&dir_f->sem);
@@ -119,7 +117,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct jffs2_inode_info *f;
- struct jffs2_sb_info *c;
struct inode *inode = filp->f_path.dentry->d_inode;
struct jffs2_full_dirent *fd;
unsigned long offset, curofs;
@@ -127,7 +124,6 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino));
f = JFFS2_INODE_INFO(inode);
- c = JFFS2_SB_INFO(inode->i_sb);
offset = filp->f_pos;
@@ -609,8 +605,6 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
int ret;
uint32_t now = get_seconds();
- dentry_unhash(dentry);
-
for (fd = f->dents ; fd; fd = fd->next) {
if (fd->ino)
return -ENOTEMPTY;
@@ -786,9 +780,6 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
uint8_t type;
uint32_t now;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* The VFS will check for us and prevent trying to rename a
* file over a directory and vice versa, but if it's a directory,
* the VFS can't check whether the victim is empty. The filesystem
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index e896e67..46ad619 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -357,7 +357,7 @@ error:
return ERR_PTR(ret);
}
-void jffs2_dirty_inode(struct inode *inode)
+void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 00bae7c..65c6c43 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -172,7 +172,7 @@ int jffs2_setattr (struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
-void jffs2_dirty_inode(struct inode *inode);
+void jffs2_dirty_inode(struct inode *inode, int flags);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b632ddd..8d8cd34 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -94,7 +94,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
uint32_t buf_size = 0;
struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
- size_t pointlen;
+ size_t pointlen, try_size;
if (c->mtd->point) {
ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
@@ -113,18 +113,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
/* For NAND it's quicker to read a whole eraseblock at a time,
apparently */
if (jffs2_cleanmarker_oob(c))
- buf_size = c->sector_size;
+ try_size = c->sector_size;
else
- buf_size = PAGE_SIZE;
+ try_size = PAGE_SIZE;
- /* Respect kmalloc limitations */
- if (buf_size > 128*1024)
- buf_size = 128*1024;
+ D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
+ "bytes\n", try_size));
- D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
- flashbuf = kmalloc(buf_size, GFP_KERNEL);
+ flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
if (!flashbuf)
return -ENOMEM;
+
+ D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
+ try_size));
+
+ buf_size = (uint32_t)try_size;
}
if (jffs2_sum_active()) {
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index eddbb37..1096559 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -173,7 +173,7 @@ void jfs_evict_inode(struct inode *inode)
dquot_drop(inode);
}
-void jfs_dirty_inode(struct inode *inode)
+void jfs_dirty_inode(struct inode *inode, int flags)
{
static int noisy = 5;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 155e91e..ec2fb8b 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -28,7 +28,7 @@ extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_write_inode(struct inode *, struct writeback_control *);
extern void jfs_evict_inode(struct inode *);
-extern void jfs_dirty_inode(struct inode *);
+extern void jfs_dirty_inode(struct inode *, int);
extern void jfs_truncate(struct inode *);
extern void jfs_truncate_nolock(struct inode *, loff_t);
extern void jfs_free_zero_link(struct inode *);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 865df16..eaaf2b5 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -360,8 +360,6 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
- dentry_unhash(dentry);
-
/* Init inode for quota operations. */
dquot_initialize(dip);
dquot_initialize(ip);
@@ -1097,9 +1095,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
new_dentry->d_name.name);
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
dquot_initialize(old_dir);
dquot_initialize(new_dir);
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index f34c9cd..9ed89d1 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -273,8 +273,6 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- dentry_unhash(dentry);
-
if (!logfs_empty_dir(inode))
return -ENOTEMPTY;
@@ -624,9 +622,6 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
loff_t pos;
int err;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* 1. locate source dd */
err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
if (err)
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index f60aed8..6e6777f 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -168,8 +168,6 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
struct inode * inode = dentry->d_inode;
int err = -ENOTEMPTY;
- dentry_unhash(dentry);
-
if (minix_empty_dir(inode)) {
err = minix_unlink(dir, dentry);
if (!err) {
@@ -192,9 +190,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
struct minix_dir_entry * old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = minix_find_entry(old_dentry, &old_page);
if (!old_de)
goto out;
diff --git a/fs/namei.c b/fs/namei.c
index 2358b32..e2e4e8d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -919,12 +919,11 @@ static inline bool managed_dentry_might_block(struct dentry *dentry)
}
/*
- * Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we
- * meet a managed dentry and we're not walking to "..". True is returned to
- * continue, false to abort.
+ * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
+ * we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
- struct inode **inode, bool reverse_transit)
+ struct inode **inode)
{
for (;;) {
struct vfsmount *mounted;
@@ -933,8 +932,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
* that wants to block transit.
*/
*inode = path->dentry->d_inode;
- if (!reverse_transit &&
- unlikely(managed_dentry_might_block(path->dentry)))
+ if (unlikely(managed_dentry_might_block(path->dentry)))
return false;
if (!d_mountpoint(path->dentry))
@@ -947,16 +945,24 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
path->dentry = mounted->mnt_root;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
}
-
- if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
- return reverse_transit;
return true;
}
-static int follow_dotdot_rcu(struct nameidata *nd)
+static void follow_mount_rcu(struct nameidata *nd)
{
- struct inode *inode = nd->inode;
+ while (d_mountpoint(nd->path.dentry)) {
+ struct vfsmount *mounted;
+ mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
+ if (!mounted)
+ break;
+ nd->path.mnt = mounted;
+ nd->path.dentry = mounted->mnt_root;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ }
+}
+static int follow_dotdot_rcu(struct nameidata *nd)
+{
set_root_rcu(nd);
while (1) {
@@ -972,7 +978,6 @@ static int follow_dotdot_rcu(struct nameidata *nd)
seq = read_seqcount_begin(&parent->d_seq);
if (read_seqcount_retry(&old->d_seq, nd->seq))
goto failed;
- inode = parent->d_inode;
nd->path.dentry = parent;
nd->seq = seq;
break;
@@ -980,10 +985,9 @@ static int follow_dotdot_rcu(struct nameidata *nd)
if (!follow_up_rcu(&nd->path))
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
- inode = nd->path.dentry->d_inode;
}
- __follow_mount_rcu(nd, &nd->path, &inode, true);
- nd->inode = inode;
+ follow_mount_rcu(nd);
+ nd->inode = nd->path.dentry->d_inode;
return 0;
failed:
@@ -1157,8 +1161,11 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
}
path->mnt = mnt;
path->dentry = dentry;
- if (likely(__follow_mount_rcu(nd, path, inode, false)))
- return 0;
+ if (unlikely(!__follow_mount_rcu(nd, path, inode)))
+ goto unlazy;
+ if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+ goto unlazy;
+ return 0;
unlazy:
if (unlazy_walk(nd, dentry))
return -ECHILD;
@@ -2572,6 +2579,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
if (error)
goto out;
+ shrink_dcache_parent(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
@@ -2986,6 +2994,8 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
goto out;
+ if (target)
+ shrink_dcache_parent(new_dentry);
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
goto out;
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index e3e646b..9c51f62 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1033,8 +1033,11 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
+ /*
+ * fail with EBUSY if there are still references to this
+ * directory.
+ */
dentry_unhash(dentry);
-
error = -EBUSY;
if (!d_unhashed(dentry))
goto out;
@@ -1141,8 +1144,16 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+ if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) {
+ /*
+ * fail with EBUSY if there are still references to this
+ * directory.
+ */
dentry_unhash(new_dentry);
+ error = -EBUSY;
+ if (!d_unhashed(new_dentry))
+ goto out;
+ }
ncp_age_dentry(server, old_dentry);
ncp_age_dentry(server, new_dentry);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index ba30665..8151554 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -87,6 +87,16 @@ config NFS_V4_1
config PNFS_FILE_LAYOUT
tristate
+config PNFS_OBJLAYOUT
+ tristate "Provide support for the pNFS Objects Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)"
+ depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD
+ help
+ Say M here if you want your pNFS client to support the Objects Layout Driver.
+ Requires the SCSI osd initiator library (SCSI_OSD_INITIATOR) and
+ upper level driver (SCSI_OSD_ULD).
+
+ If unsure, say N.
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 4776ff9..6a34f7d 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -15,9 +15,11 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
-nfs-$(CONFIG_NFS_V4_1) += pnfs.o
+nfs-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
+
+obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 46d93ce..b257383 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -167,6 +167,23 @@ extern unsigned nfs4_callback_layoutrecall(
extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
extern void nfs4_cb_take_slot(struct nfs_client *clp);
+
+struct cb_devicenotifyitem {
+ uint32_t cbd_notify_type;
+ uint32_t cbd_layout_type;
+ struct nfs4_deviceid cbd_dev_id;
+ uint32_t cbd_immediate;
+};
+
+struct cb_devicenotifyargs {
+ int ndevs;
+ struct cb_devicenotifyitem *devs;
+};
+
+extern __be32 nfs4_callback_devicenotify(
+ struct cb_devicenotifyargs *args,
+ void *dummy, struct cb_process_state *cps);
+
#endif /* CONFIG_NFS_V4_1 */
extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2f41dcce..d4d1954 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -139,7 +139,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
spin_lock(&ino->i_lock);
if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
mark_matching_lsegs_invalid(lo, &free_me_list,
- args->cbl_range.iomode))
+ &args->cbl_range))
rv = NFS4ERR_DELAY;
else
rv = NFS4ERR_NOMATCHING_LAYOUT;
@@ -184,7 +184,7 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
ino = lo->plh_inode;
spin_lock(&ino->i_lock);
set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
- if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
+ if (mark_matching_lsegs_invalid(lo, &free_me_list, &range))
rv = NFS4ERR_DELAY;
list_del_init(&lo->plh_bulk_recall);
spin_unlock(&ino->i_lock);
@@ -241,6 +241,53 @@ static void pnfs_recall_all_layouts(struct nfs_client *clp)
do_callback_layoutrecall(clp, &args);
}
+__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
+ void *dummy, struct cb_process_state *cps)
+{
+ int i;
+ __be32 res = 0;
+ struct nfs_client *clp = cps->clp;
+ struct nfs_server *server = NULL;
+
+ dprintk("%s: -->\n", __func__);
+
+ if (!clp) {
+ res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+ goto out;
+ }
+
+ for (i = 0; i < args->ndevs; i++) {
+ struct cb_devicenotifyitem *dev = &args->devs[i];
+
+ if (!server ||
+ server->pnfs_curr_ld->id != dev->cbd_layout_type) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ if (server->pnfs_curr_ld &&
+ server->pnfs_curr_ld->id == dev->cbd_layout_type) {
+ rcu_read_unlock();
+ goto found;
+ }
+ rcu_read_unlock();
+ dprintk("%s: layout type %u not found\n",
+ __func__, dev->cbd_layout_type);
+ continue;
+ }
+
+ found:
+ if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
+ dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
+ "deleting instead\n", __func__);
+ nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
+ }
+
+out:
+ kfree(args->devs);
+ dprintk("%s: exit with status = %u\n",
+ __func__, be32_to_cpu(res));
+ return res;
+}
+
int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
if (delegation == NULL)
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 00ecf62..c6c86a7 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -25,6 +25,7 @@
#if defined(CONFIG_NFS_V4_1)
#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
4 + 1 + 3)
#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
@@ -284,6 +285,93 @@ out:
return status;
}
+static
+__be32 decode_devicenotify_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_devicenotifyargs *args)
+{
+ __be32 *p;
+ __be32 status = 0;
+ u32 tmp;
+ int n, i;
+ args->ndevs = 0;
+
+ /* Num of device notifications */
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ n = ntohl(*p++);
+ if (n <= 0)
+ goto out;
+
+ args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+ if (!args->devs) {
+ status = htonl(NFS4ERR_DELAY);
+ goto out;
+ }
+
+ /* Decode each dev notification */
+ for (i = 0; i < n; i++) {
+ struct cb_devicenotifyitem *dev = &args->devs[i];
+
+ p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE);
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto err;
+ }
+
+ tmp = ntohl(*p++); /* bitmap size */
+ if (tmp != 1) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+ dev->cbd_notify_type = ntohl(*p++);
+ if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE &&
+ dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+
+ tmp = ntohl(*p++); /* opaque size */
+ if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) &&
+ (tmp != NFS4_DEVICEID4_SIZE + 8)) ||
+ ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) &&
+ (tmp != NFS4_DEVICEID4_SIZE + 4))) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+ dev->cbd_layout_type = ntohl(*p++);
+ memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE);
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+
+ if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) {
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto err;
+ }
+ dev->cbd_immediate = ntohl(*p++);
+ } else {
+ dev->cbd_immediate = 0;
+ }
+
+ args->ndevs++;
+
+ dprintk("%s: type %d layout 0x%x immediate %d\n",
+ __func__, dev->cbd_notify_type, dev->cbd_layout_type,
+ dev->cbd_immediate);
+ }
+out:
+ dprintk("%s: status %d ndevs %d\n",
+ __func__, ntohl(status), args->ndevs);
+ return status;
+err:
+ kfree(args->devs);
+ goto out;
+}
+
static __be32 decode_sessionid(struct xdr_stream *xdr,
struct nfs4_sessionid *sid)
{
@@ -639,10 +727,10 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_RECALL_ANY:
case OP_CB_RECALL_SLOT:
case OP_CB_LAYOUTRECALL:
+ case OP_CB_NOTIFY_DEVICEID:
*op = &callback_ops[op_nr];
break;
- case OP_CB_NOTIFY_DEVICEID:
case OP_CB_NOTIFY:
case OP_CB_PUSH_DELEG:
case OP_CB_RECALLABLE_OBJ_AVAIL:
@@ -849,6 +937,12 @@ static struct callback_op callback_ops[] = {
(callback_decode_arg_t)decode_layoutrecall_args,
.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
},
+ [OP_CB_NOTIFY_DEVICEID] = {
+ .process_op = (callback_process_op_t)nfs4_callback_devicenotify,
+ .decode_args =
+ (callback_decode_arg_t)decode_devicenotify_args,
+ .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
+ },
[OP_CB_SEQUENCE] = {
.process_op = (callback_process_op_t)nfs4_callback_sequence,
.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 139be96..b3dc2b8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -290,6 +290,8 @@ static void nfs_free_client(struct nfs_client *clp)
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
+ nfs4_deviceid_purge_client(clp);
+
kfree(clp->cl_hostname);
kfree(clp);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bbbc6bf..dd25c2a 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -21,25 +21,13 @@
#include "delegation.h"
#include "internal.h"
-static void nfs_do_free_delegation(struct nfs_delegation *delegation)
-{
- kfree(delegation);
-}
-
-static void nfs_free_delegation_callback(struct rcu_head *head)
-{
- struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
-
- nfs_do_free_delegation(delegation);
-}
-
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
if (delegation->cred) {
put_rpccred(delegation->cred);
delegation->cred = NULL;
}
- call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+ kfree_rcu(delegation, rcu);
}
/**
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 424e477..ededdbd 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -512,12 +512,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
struct page **xdr_pages, struct page *page, unsigned int buflen)
{
struct xdr_stream stream;
- struct xdr_buf buf = {
- .pages = xdr_pages,
- .page_len = buflen,
- .buflen = buflen,
- .len = buflen,
- };
+ struct xdr_buf buf;
struct page *scratch;
struct nfs_cache_array *array;
unsigned int count = 0;
@@ -527,7 +522,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
if (scratch == NULL)
return -ENOMEM;
- xdr_init_decode(&stream, &buf, NULL);
+ xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
do {
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 57bb31a..144f2a3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1298,8 +1298,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
i_size_write(inode, new_isize);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
}
- dprintk("NFS: isize change on server for file %s/%ld\n",
- inode->i_sb->s_id, inode->i_ino);
+ dprintk("NFS: isize change on server for file %s/%ld "
+ "(%Ld to %Ld)\n",
+ inode->i_sb->s_id,
+ inode->i_ino,
+ (long long)cur_isize,
+ (long long)new_isize);
}
} else
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
@@ -1424,9 +1428,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
void nfs4_evict_inode(struct inode *inode)
{
- pnfs_destroy_layout(NFS_I(inode));
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
+ pnfs_return_layout(inode);
+ pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 2df6ca7..b9056cb 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -310,6 +310,7 @@ extern int nfs_migrate_page(struct address_space *,
#endif
/* nfs4proc.c */
+extern void __nfs4_read_done_cb(struct nfs_read_data *);
extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
extern int nfs4_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index be79dc9..4269088 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -421,6 +421,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
struct nfs4_deviceid *id,
gfp_t gfp_flags)
{
+ struct nfs4_deviceid_node *d;
struct nfs4_file_layout_dsaddr *dsaddr;
int status = -EINVAL;
struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
@@ -428,7 +429,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
dprintk("--> %s\n", __func__);
if (fl->pattern_offset > lgr->range.offset) {
- dprintk("%s pattern_offset %lld to large\n",
+ dprintk("%s pattern_offset %lld too large\n",
__func__, fl->pattern_offset);
goto out;
}
@@ -440,12 +441,14 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
}
/* find and reference the deviceid */
- dsaddr = nfs4_fl_find_get_deviceid(id);
- if (dsaddr == NULL) {
+ d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
+ NFS_SERVER(lo->plh_inode)->nfs_client, id);
+ if (d == NULL) {
dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
if (dsaddr == NULL)
goto out;
- }
+ } else
+ dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
fl->dsaddr = dsaddr;
if (fl->first_stripe_index < 0 ||
@@ -507,12 +510,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
gfp_t gfp_flags)
{
struct xdr_stream stream;
- struct xdr_buf buf = {
- .pages = lgr->layoutp->pages,
- .page_len = lgr->layoutp->len,
- .buflen = lgr->layoutp->len,
- .len = lgr->layoutp->len,
- };
+ struct xdr_buf buf;
struct page *scratch;
__be32 *p;
uint32_t nfl_util;
@@ -524,7 +522,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
if (!scratch)
return -ENOMEM;
- xdr_init_decode(&stream, &buf, NULL);
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
/* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
@@ -535,7 +533,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
memcpy(id, p, sizeof(*id));
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
- print_deviceid(id);
+ nfs4_print_deviceid(id);
nfl_util = be32_to_cpup(p++);
if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -653,16 +651,19 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
/*
* filelayout_pg_test(). Called by nfs_can_coalesce_requests()
*
- * return 1 : coalesce page
- * return 0 : don't coalesce page
+ * return true : coalesce page
+ * return false : don't coalesce page
*/
-int
+bool
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
struct nfs_page *req)
{
u64 p_stripe, r_stripe;
u32 stripe_unit;
+ if (!pnfs_generic_pg_test(pgio, prev, req))
+ return 0;
+
if (!pgio->pg_lseg)
return 1;
p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
@@ -860,6 +861,12 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
return -ENOMEM;
}
+static void
+filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
+{
+ nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
@@ -872,6 +879,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
+ .free_deviceid_node = filelayout_free_deveiceid_node,
};
static int __init nfs4filelayout_init(void)
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 2b461d7..cebe01e 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -59,9 +59,7 @@ struct nfs4_pnfs_ds {
#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
struct nfs4_file_layout_dsaddr {
- struct hlist_node node;
- struct nfs4_deviceid deviceid;
- atomic_t ref;
+ struct nfs4_deviceid_node id_node;
unsigned long flags;
u32 stripe_count;
u8 *stripe_indices;
@@ -95,14 +93,12 @@ extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
extern void print_ds(struct nfs4_pnfs_ds *ds);
-extern void print_deviceid(struct nfs4_deviceid *dev_id);
u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset);
u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j);
struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
u32 ds_idx);
-extern struct nfs4_file_layout_dsaddr *
-nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
+extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index db07c7a..3b7bf13 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -37,30 +37,6 @@
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
/*
- * Device ID RCU cache. A device ID is unique per client ID and layout type.
- */
-#define NFS4_FL_DEVICE_ID_HASH_BITS 5
-#define NFS4_FL_DEVICE_ID_HASH_SIZE (1 << NFS4_FL_DEVICE_ID_HASH_BITS)
-#define NFS4_FL_DEVICE_ID_HASH_MASK (NFS4_FL_DEVICE_ID_HASH_SIZE - 1)
-
-static inline u32
-nfs4_fl_deviceid_hash(struct nfs4_deviceid *id)
-{
- unsigned char *cptr = (unsigned char *)id->data;
- unsigned int nbytes = NFS4_DEVICEID4_SIZE;
- u32 x = 0;
-
- while (nbytes--) {
- x *= 37;
- x += *cptr++;
- }
- return x & NFS4_FL_DEVICE_ID_HASH_MASK;
-}
-
-static struct hlist_head filelayout_deviceid_cache[NFS4_FL_DEVICE_ID_HASH_SIZE];
-static DEFINE_SPINLOCK(filelayout_deviceid_lock);
-
-/*
* Data server cache
*
* Data servers can be mapped to different device ids.
@@ -89,27 +65,6 @@ print_ds(struct nfs4_pnfs_ds *ds)
ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}
-void
-print_ds_list(struct nfs4_file_layout_dsaddr *dsaddr)
-{
- int i;
-
- ifdebug(FACILITY) {
- printk("%s dsaddr->ds_num %d\n", __func__,
- dsaddr->ds_num);
- for (i = 0; i < dsaddr->ds_num; i++)
- print_ds(dsaddr->ds_list[i]);
- }
-}
-
-void print_deviceid(struct nfs4_deviceid *id)
-{
- u32 *p = (u32 *)id;
-
- dprintk("%s: device id= [%x%x%x%x]\n", __func__,
- p[0], p[1], p[2], p[3]);
-}
-
/* nfs4_ds_cache_lock is held */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(u32 ip_addr, u32 port)
@@ -201,13 +156,13 @@ destroy_ds(struct nfs4_pnfs_ds *ds)
kfree(ds);
}
-static void
+void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
struct nfs4_pnfs_ds *ds;
int i;
- print_deviceid(&dsaddr->deviceid);
+ nfs4_print_deviceid(&dsaddr->id_node.deviceid);
for (i = 0; i < dsaddr->ds_num; i++) {
ds = dsaddr->ds_list[i];
@@ -353,12 +308,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
u8 max_stripe_index;
struct nfs4_file_layout_dsaddr *dsaddr = NULL;
struct xdr_stream stream;
- struct xdr_buf buf = {
- .pages = pdev->pages,
- .page_len = pdev->pglen,
- .buflen = pdev->pglen,
- .len = pdev->pglen,
- };
+ struct xdr_buf buf;
struct page *scratch;
/* set up xdr stream */
@@ -366,7 +316,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
if (!scratch)
goto out_err;
- xdr_init_decode(&stream, &buf, NULL);
+ xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
/* Get the stripe count (number of stripe index) */
@@ -431,8 +381,10 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
dsaddr->stripe_indices = stripe_indices;
stripe_indices = NULL;
dsaddr->ds_num = num;
-
- memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));
+ nfs4_init_deviceid_node(&dsaddr->id_node,
+ NFS_SERVER(ino)->pnfs_curr_ld,
+ NFS_SERVER(ino)->nfs_client,
+ &pdev->dev_id);
for (i = 0; i < dsaddr->ds_num; i++) {
int j;
@@ -505,8 +457,8 @@ out_err:
static struct nfs4_file_layout_dsaddr *
decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
{
- struct nfs4_file_layout_dsaddr *d, *new;
- long hash;
+ struct nfs4_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *n, *new;
new = decode_device(inode, dev, gfp_flags);
if (!new) {
@@ -515,20 +467,13 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_fl
return NULL;
}
- spin_lock(&filelayout_deviceid_lock);
- d = nfs4_fl_find_get_deviceid(&new->deviceid);
- if (d) {
- spin_unlock(&filelayout_deviceid_lock);
+ d = nfs4_insert_deviceid_node(&new->id_node);
+ n = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+ if (n != new) {
nfs4_fl_free_deviceid(new);
- return d;
+ return n;
}
- INIT_HLIST_NODE(&new->node);
- atomic_set(&new->ref, 1);
- hash = nfs4_fl_deviceid_hash(&new->deviceid);
- hlist_add_head_rcu(&new->node, &filelayout_deviceid_cache[hash]);
- spin_unlock(&filelayout_deviceid_lock);
-
return new;
}
@@ -600,35 +545,7 @@ out_free:
void
nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
- if (atomic_dec_and_lock(&dsaddr->ref, &filelayout_deviceid_lock)) {
- hlist_del_rcu(&dsaddr->node);
- spin_unlock(&filelayout_deviceid_lock);
-
- synchronize_rcu();
- nfs4_fl_free_deviceid(dsaddr);
- }
-}
-
-struct nfs4_file_layout_dsaddr *
-nfs4_fl_find_get_deviceid(struct nfs4_deviceid *id)
-{
- struct nfs4_file_layout_dsaddr *d;
- struct hlist_node *n;
- long hash = nfs4_fl_deviceid_hash(id);
-
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &filelayout_deviceid_cache[hash], node) {
- if (!memcmp(&d->deviceid, id, sizeof(*id))) {
- if (!atomic_inc_not_zero(&d->ref))
- goto fail;
- rcu_read_unlock();
- return d;
- }
- }
-fail:
- rcu_read_unlock();
- return NULL;
+ nfs4_put_deviceid_node(&dsaddr->id_node);
}
/*
@@ -676,15 +593,15 @@ static void
filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
int err, u32 ds_addr)
{
- u32 *p = (u32 *)&dsaddr->deviceid;
+ u32 *p = (u32 *)&dsaddr->id_node.deviceid;
printk(KERN_ERR "NFS: data server %x connection error %d."
" Deviceid [%x%x%x%x] marked out of use.\n",
ds_addr, err, p[0], p[1], p[2], p[3]);
- spin_lock(&filelayout_deviceid_lock);
+ spin_lock(&nfs4_ds_cache_lock);
dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
- spin_unlock(&filelayout_deviceid_lock);
+ spin_unlock(&nfs4_ds_cache_lock);
}
struct nfs4_pnfs_ds *
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf1b339..d2c4b59 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -267,9 +267,11 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(server, state);
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_EXPIRED:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
@@ -2361,6 +2363,9 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct nfs4_state *state = NULL;
int status;
+ if (pnfs_ld_layoutret_on_setattr(inode))
+ pnfs_return_layout(inode);
+
nfs_fattr_init(fattr);
/* Search for an existing open(O_WRITE) file */
@@ -3175,6 +3180,11 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return err;
}
+void __nfs4_read_done_cb(struct nfs_read_data *data)
+{
+ nfs_invalidate_atime(data->inode);
+}
+
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
@@ -3184,7 +3194,7 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
return -EAGAIN;
}
- nfs_invalidate_atime(data->inode);
+ __nfs4_read_done_cb(data);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
return 0;
@@ -3198,7 +3208,8 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
- return data->read_done_cb(task, data);
+ return data->read_done_cb ? data->read_done_cb(task, data) :
+ nfs4_read_done_cb(task, data);
}
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
@@ -3243,7 +3254,8 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
- return data->write_done_cb(task, data);
+ return data->write_done_cb ? data->write_done_cb(task, data) :
+ nfs4_write_done_cb(task, data);
}
/* Reset the the nfs_write_data to send the write to the MDS. */
@@ -3670,9 +3682,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(server, state);
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_EXPIRED:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
@@ -4543,6 +4557,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
case -ESTALE:
goto out;
case -NFS4ERR_EXPIRED:
+ nfs4_schedule_stateid_recovery(server, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
nfs4_schedule_lease_recovery(server->nfs_client);
@@ -5666,6 +5681,88 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
return status;
}
+static void
+nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
+ &lrp->res.seq_res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+ struct nfs_server *server;
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &lrp->res.seq_res))
+ return;
+
+ server = NFS_SERVER(lrp->args.inode);
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ nfs_restart_rpc(task, lrp->clp);
+ return;
+ }
+ if (task->tk_status == 0) {
+ struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
+
+ if (lrp->res.lrs_present) {
+ spin_lock(&lo->plh_inode->i_lock);
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ spin_unlock(&lo->plh_inode->i_lock);
+ } else
+ BUG_ON(!list_empty(&lo->plh_segs));
+ }
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_layoutreturn_release(void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ put_layout_hdr(NFS_I(lrp->args.inode)->layout);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
+ .rpc_call_prepare = nfs4_layoutreturn_prepare,
+ .rpc_call_done = nfs4_layoutreturn_done,
+ .rpc_release = nfs4_layoutreturn_release,
+};
+
+int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
+{
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
+ .rpc_argp = &lrp->args,
+ .rpc_resp = &lrp->res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = lrp->clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutreturn_call_ops,
+ .callback_data = lrp,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = task->tk_status;
+ dprintk("<-- %s status=%d\n", __func__, status);
+ rpc_put_task(task);
+ return status;
+}
+
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 036f5ad..e97dd21 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1466,7 +1466,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
#ifdef CONFIG_NFS_V4_1
void nfs4_schedule_session_recovery(struct nfs4_session *session)
{
- nfs4_schedule_lease_recovery(session->clp);
+ struct nfs_client *clp = session->clp;
+
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ nfs4_schedule_lease_recovery(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1549,6 +1552,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
status = nfs4_recovery_handle_error(clp, status);
goto out;
}
+ clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* create_session negotiated new slot table */
clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c3ccd2c..d869a5e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -338,7 +338,11 @@ static int nfs4_stat_to_errno(int);
1 /* layoutupdate4 layout type */ + \
1 /* NULL filelayout layoutupdate4 payload */)
#define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
-
+#define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + \
+ 1 /* FIXME: opaque lrf_body always empty at the moment */)
+#define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \
+ 1 + decode_stateid_maxsz)
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -760,7 +764,14 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_layoutcommit_maxsz + \
decode_getattr_maxsz)
-
+#define NFS4_enc_layoutreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_layoutreturn_maxsz)
+#define NFS4_dec_layoutreturn_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutreturn_maxsz)
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
@@ -1864,6 +1875,7 @@ encode_layoutget(struct xdr_stream *xdr,
static int
encode_layoutcommit(struct xdr_stream *xdr,
+ struct inode *inode,
const struct nfs4_layoutcommit_args *args,
struct compound_hdr *hdr)
{
@@ -1872,7 +1884,7 @@ encode_layoutcommit(struct xdr_stream *xdr,
dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten,
NFS_SERVER(args->inode)->pnfs_curr_ld->id);
- p = reserve_space(xdr, 48 + NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_LAYOUTCOMMIT);
/* Only whole file layouts */
p = xdr_encode_hyper(p, 0); /* offset */
@@ -1883,12 +1895,49 @@ encode_layoutcommit(struct xdr_stream *xdr,
p = xdr_encode_hyper(p, args->lastbytewritten);
*p++ = cpu_to_be32(0); /* Never send time_modify_changed */
*p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
- *p++ = cpu_to_be32(0); /* no file layout payload */
+
+ if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit)
+ NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit(
+ NFS_I(inode)->layout, xdr, args);
+ else {
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(0); /* no layout-type payload */
+ }
hdr->nops++;
hdr->replen += decode_layoutcommit_maxsz;
return 0;
}
+
+static void
+encode_layoutreturn(struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(OP_LAYOUTRETURN);
+ *p++ = cpu_to_be32(0); /* reclaim. always 0 for now */
+ *p++ = cpu_to_be32(args->layout_type);
+ *p++ = cpu_to_be32(IOMODE_ANY);
+ *p = cpu_to_be32(RETURN_FILE);
+ p = reserve_space(xdr, 16 + NFS4_STATEID_SIZE);
+ p = xdr_encode_hyper(p, 0);
+ p = xdr_encode_hyper(p, NFS4_MAX_UINT64);
+ spin_lock(&args->inode->i_lock);
+ xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
+ spin_unlock(&args->inode->i_lock);
+ if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) {
+ NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn(
+ NFS_I(args->inode)->layout, xdr, args);
+ } else {
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(0);
+ }
+ hdr->nops++;
+ hdr->replen += decode_layoutreturn_maxsz;
+}
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -2706,10 +2755,12 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
/*
* Encode LAYOUTCOMMIT request
*/
-static int nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
- struct xdr_stream *xdr,
- struct nfs4_layoutcommit_args *args)
+static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_args *args)
{
+ struct nfs4_layoutcommit_data *data =
+ container_of(args, struct nfs4_layoutcommit_data, args);
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2717,10 +2768,27 @@ static int nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
- encode_layoutcommit(xdr, args, &hdr);
+ encode_layoutcommit(xdr, data->args.inode, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
+}
+
+/*
+ * Encode LAYOUTRETURN request
+ */
+static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutreturn(xdr, args, &hdr);
+ encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_1 */
@@ -5203,6 +5271,27 @@ out_overflow:
return -EIO;
}
+static int decode_layoutreturn(struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_res *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTRETURN);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->lrs_present = be32_to_cpup(p);
+ if (res->lrs_present)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_layoutcommit(struct xdr_stream *xdr,
struct rpc_rqst *req,
struct nfs4_layoutcommit_res *res)
@@ -6320,6 +6409,30 @@ out:
}
/*
+ * Decode LAYOUTRETURN response
+ */
+static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutreturn(xdr, res);
+out:
+ return status;
+}
+
+/*
* Decode LAYOUTCOMMIT response
*/
static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
@@ -6547,6 +6660,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
+ PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index c541093..c4744e1 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -87,7 +87,7 @@
#define NFS_ROOT "/tftpboot/%s"
/* Default NFSROOT mount options. */
-#define NFS_DEF_OPTIONS "udp"
+#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
/* Parameters passed from the kernel command line */
static char nfs_root_parms[256] __initdata = "";
diff --git a/fs/nfs/objlayout/Kbuild b/fs/nfs/objlayout/Kbuild
new file mode 100644
index 0000000..ed30ea0
--- /dev/null
+++ b/fs/nfs/objlayout/Kbuild
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS Objects Layout Driver kernel module
+#
+objlayoutdriver-y := objio_osd.o pnfs_osd_xdr_cli.o objlayout.o
+obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayoutdriver.o
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
new file mode 100644
index 0000000..9cf208d
--- /dev/null
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -0,0 +1,1057 @@
+/*
+ * pNFS Objects layout implementation over open-osd initiator library
+ *
+ * Copyright (C) 2009 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <scsi/osd_initiator.h>
+
+#include "objlayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+#define _LLU(x) ((unsigned long long)x)
+
+enum { BIO_MAX_PAGES_KMALLOC =
+ (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
+};
+
+struct objio_dev_ent {
+ struct nfs4_deviceid_node id_node;
+ struct osd_dev *od;
+};
+
+static void
+objio_free_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ struct objio_dev_ent *de = container_of(d, struct objio_dev_ent, id_node);
+
+ dprintk("%s: free od=%p\n", __func__, de->od);
+ osduld_put_device(de->od);
+ kfree(de);
+}
+
+static struct objio_dev_ent *_dev_list_find(const struct nfs_server *nfss,
+ const struct nfs4_deviceid *d_id)
+{
+ struct nfs4_deviceid_node *d;
+ struct objio_dev_ent *de;
+
+ d = nfs4_find_get_deviceid(nfss->pnfs_curr_ld, nfss->nfs_client, d_id);
+ if (!d)
+ return NULL;
+
+ de = container_of(d, struct objio_dev_ent, id_node);
+ return de;
+}
+
+static struct objio_dev_ent *
+_dev_list_add(const struct nfs_server *nfss,
+ const struct nfs4_deviceid *d_id, struct osd_dev *od,
+ gfp_t gfp_flags)
+{
+ struct nfs4_deviceid_node *d;
+ struct objio_dev_ent *de = kzalloc(sizeof(*de), gfp_flags);
+ struct objio_dev_ent *n;
+
+ if (!de) {
+ dprintk("%s: -ENOMEM od=%p\n", __func__, od);
+ return NULL;
+ }
+
+ dprintk("%s: Adding od=%p\n", __func__, od);
+ nfs4_init_deviceid_node(&de->id_node,
+ nfss->pnfs_curr_ld,
+ nfss->nfs_client,
+ d_id);
+ de->od = od;
+
+ d = nfs4_insert_deviceid_node(&de->id_node);
+ n = container_of(d, struct objio_dev_ent, id_node);
+ if (n != de) {
+ dprintk("%s: Race with other n->od=%p\n", __func__, n->od);
+ objio_free_deviceid_node(&de->id_node);
+ de = n;
+ }
+
+ atomic_inc(&de->id_node.ref);
+ return de;
+}
+
+struct caps_buffers {
+ u8 caps_key[OSD_CRYPTO_KEYID_SIZE];
+ u8 creds[OSD_CAP_LEN];
+};
+
+struct objio_segment {
+ struct pnfs_layout_segment lseg;
+
+ struct pnfs_osd_object_cred *comps;
+
+ unsigned mirrors_p1;
+ unsigned stripe_unit;
+ unsigned group_width; /* Data stripe_units without integrity comps */
+ u64 group_depth;
+ unsigned group_count;
+
+ unsigned max_io_size;
+
+ unsigned comps_index;
+ unsigned num_comps;
+ /* variable length */
+ struct objio_dev_ent *ods[];
+};
+
+static inline struct objio_segment *
+OBJIO_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg, struct objio_segment, lseg);
+}
+
+struct objio_state;
+typedef ssize_t (*objio_done_fn)(struct objio_state *ios);
+
+struct objio_state {
+ /* Generic layer */
+ struct objlayout_io_state ol_state;
+
+ struct objio_segment *layout;
+
+ struct kref kref;
+ objio_done_fn done;
+ void *private;
+
+ unsigned long length;
+ unsigned numdevs; /* Actually used devs in this IO */
+ /* A per-device variable array of size numdevs */
+ struct _objio_per_comp {
+ struct bio *bio;
+ struct osd_request *or;
+ unsigned long length;
+ u64 offset;
+ unsigned dev;
+ } per_dev[];
+};
+
+/* Send and wait for a get_device_info of devices in the layout,
+ then look them up with the osd_initiator library */
+static struct objio_dev_ent *_device_lookup(struct pnfs_layout_hdr *pnfslay,
+ struct objio_segment *objio_seg, unsigned comp,
+ gfp_t gfp_flags)
+{
+ struct pnfs_osd_deviceaddr *deviceaddr;
+ struct nfs4_deviceid *d_id;
+ struct objio_dev_ent *ode;
+ struct osd_dev *od;
+ struct osd_dev_info odi;
+ int err;
+
+ d_id = &objio_seg->comps[comp].oc_object_id.oid_device_id;
+
+ ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id);
+ if (ode)
+ return ode;
+
+ err = objlayout_get_deviceinfo(pnfslay, d_id, &deviceaddr, gfp_flags);
+ if (unlikely(err)) {
+ dprintk("%s: objlayout_get_deviceinfo dev(%llx:%llx) =>%d\n",
+ __func__, _DEVID_LO(d_id), _DEVID_HI(d_id), err);
+ return ERR_PTR(err);
+ }
+
+ odi.systemid_len = deviceaddr->oda_systemid.len;
+ if (odi.systemid_len > sizeof(odi.systemid)) {
+ err = -EINVAL;
+ goto out;
+ } else if (odi.systemid_len)
+ memcpy(odi.systemid, deviceaddr->oda_systemid.data,
+ odi.systemid_len);
+ odi.osdname_len = deviceaddr->oda_osdname.len;
+ odi.osdname = (u8 *)deviceaddr->oda_osdname.data;
+
+ if (!odi.osdname_len && !odi.systemid_len) {
+ dprintk("%s: !odi.osdname_len && !odi.systemid_len\n",
+ __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+ od = osduld_info_lookup(&odi);
+ if (unlikely(IS_ERR(od))) {
+ err = PTR_ERR(od);
+ dprintk("%s: osduld_info_lookup => %d\n", __func__, err);
+ goto out;
+ }
+
+ ode = _dev_list_add(NFS_SERVER(pnfslay->plh_inode), d_id, od,
+ gfp_flags);
+
+out:
+ dprintk("%s: return=%d\n", __func__, err);
+ objlayout_put_deviceinfo(deviceaddr);
+ return err ? ERR_PTR(err) : ode;
+}
+
+static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay,
+ struct objio_segment *objio_seg,
+ gfp_t gfp_flags)
+{
+ unsigned i;
+ int err;
+
+ /* lookup all devices */
+ for (i = 0; i < objio_seg->num_comps; i++) {
+ struct objio_dev_ent *ode;
+
+ ode = _device_lookup(pnfslay, objio_seg, i, gfp_flags);
+ if (unlikely(IS_ERR(ode))) {
+ err = PTR_ERR(ode);
+ goto out;
+ }
+ objio_seg->ods[i] = ode;
+ }
+ err = 0;
+
+out:
+ dprintk("%s: return=%d\n", __func__, err);
+ return err;
+}
+
+static int _verify_data_map(struct pnfs_osd_layout *layout)
+{
+ struct pnfs_osd_data_map *data_map = &layout->olo_map;
+ u64 stripe_length;
+ u32 group_width;
+
+/* FIXME: Only RAID-0 is supported for now; otherwise I/O must go through the MDS */
+ if (data_map->odm_raid_algorithm != PNFS_OSD_RAID_0) {
+ printk(KERN_ERR "Only RAID_0 for now\n");
+ return -ENOTSUPP;
+ }
+ if (0 != (data_map->odm_num_comps % (data_map->odm_mirror_cnt + 1))) {
+ printk(KERN_ERR "Data Map wrong, num_comps=%u mirrors=%u\n",
+ data_map->odm_num_comps, data_map->odm_mirror_cnt);
+ return -EINVAL;
+ }
+
+ if (data_map->odm_group_width)
+ group_width = data_map->odm_group_width;
+ else
+ group_width = data_map->odm_num_comps /
+ (data_map->odm_mirror_cnt + 1);
+
+ stripe_length = (u64)data_map->odm_stripe_unit * group_width;
+ if (stripe_length >= (1ULL << 32)) {
+ printk(KERN_ERR "Total stripe length (0x%llx)"
+ " >= 32bit is not supported\n", _LLU(stripe_length));
+ return -ENOTSUPP;
+ }
+
+ if (0 != (data_map->odm_stripe_unit & ~PAGE_MASK)) {
+ printk(KERN_ERR "Stripe unit (0x%llx)"
+ " must be a multiple of PAGE_SIZE (0x%lx)\n",
+ _LLU(data_map->odm_stripe_unit), PAGE_SIZE);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
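+
+/*
+ * Worked example for the checks above (assumed values, 4KiB pages):
+ * odm_num_comps = 4 and odm_mirror_cnt = 1 give group_width = 4 / (1 + 1) = 2,
+ * and 4 % 2 == 0 so the component count is consistent.  With a 64KiB
+ * odm_stripe_unit the total stripe length is 2 * 64KiB = 128KiB, well under
+ * the 32-bit limit, and 64KiB is page aligned, so the map is accepted.  A
+ * stripe unit of, say, 0x1040 bytes would fail the PAGE_MASK check.
+ */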
+
+static void copy_single_comp(struct pnfs_osd_object_cred *cur_comp,
+ struct pnfs_osd_object_cred *src_comp,
+ struct caps_buffers *caps_p)
+{
+ WARN_ON(src_comp->oc_cap_key.cred_len > sizeof(caps_p->caps_key));
+ WARN_ON(src_comp->oc_cap.cred_len > sizeof(caps_p->creds));
+
+ *cur_comp = *src_comp;
+
+ memcpy(caps_p->caps_key, src_comp->oc_cap_key.cred,
+ sizeof(caps_p->caps_key));
+ cur_comp->oc_cap_key.cred = caps_p->caps_key;
+
+ memcpy(caps_p->creds, src_comp->oc_cap.cred,
+ sizeof(caps_p->creds));
+ cur_comp->oc_cap.cred = caps_p->creds;
+}
+
+int objio_alloc_lseg(struct pnfs_layout_segment **outp,
+ struct pnfs_layout_hdr *pnfslay,
+ struct pnfs_layout_range *range,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags)
+{
+ struct objio_segment *objio_seg;
+ struct pnfs_osd_xdr_decode_layout_iter iter;
+ struct pnfs_osd_layout layout;
+ struct pnfs_osd_object_cred *cur_comp, src_comp;
+ struct caps_buffers *caps_p;
+ int err;
+
+ err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
+ if (unlikely(err))
+ return err;
+
+ err = _verify_data_map(&layout);
+ if (unlikely(err))
+ return err;
+
+ objio_seg = kzalloc(sizeof(*objio_seg) +
+ sizeof(objio_seg->ods[0]) * layout.olo_num_comps +
+ sizeof(*objio_seg->comps) * layout.olo_num_comps +
+ sizeof(struct caps_buffers) * layout.olo_num_comps,
+ gfp_flags);
+ if (!objio_seg)
+ return -ENOMEM;
+
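+ /* A single allocation is carved into three consecutive arrays, one
+ * entry per component: the objio_segment header with its trailing
+ * ods[] pointer array, then the pnfs_osd_object_cred array, then the
+ * caps_buffers that back each credential (filled by copy_single_comp):
+ *
+ * [objio_segment|ods[0..n-1]] [comps[0..n-1]] [caps_buffers[0..n-1]]
+ */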
+ objio_seg->comps = (void *)(objio_seg->ods + layout.olo_num_comps);
+ cur_comp = objio_seg->comps;
+ caps_p = (void *)(cur_comp + layout.olo_num_comps);
+ while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err))
+ copy_single_comp(cur_comp++, &src_comp, caps_p++);
+ if (unlikely(err))
+ goto err;
+
+ objio_seg->num_comps = layout.olo_num_comps;
+ objio_seg->comps_index = layout.olo_comps_index;
+ err = objio_devices_lookup(pnfslay, objio_seg, gfp_flags);
+ if (err)
+ goto err;
+
+ objio_seg->mirrors_p1 = layout.olo_map.odm_mirror_cnt + 1;
+ objio_seg->stripe_unit = layout.olo_map.odm_stripe_unit;
+ if (layout.olo_map.odm_group_width) {
+ objio_seg->group_width = layout.olo_map.odm_group_width;
+ objio_seg->group_depth = layout.olo_map.odm_group_depth;
+ objio_seg->group_count = layout.olo_map.odm_num_comps /
+ objio_seg->mirrors_p1 /
+ objio_seg->group_width;
+ } else {
+ objio_seg->group_width = layout.olo_map.odm_num_comps /
+ objio_seg->mirrors_p1;
+ objio_seg->group_depth = -1;
+ objio_seg->group_count = 1;
+ }
+
+ /* Cache this calculation; it is consulted for every page */
+ objio_seg->max_io_size = (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE -
+ objio_seg->stripe_unit) *
+ objio_seg->group_width;
+
+ *outp = &objio_seg->lseg;
+ return 0;
+
+err:
+ kfree(objio_seg);
+ dprintk("%s: Error: return %d\n", __func__, err);
+ *outp = NULL;
+ return err;
+}
+
+void objio_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ int i;
+ struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
+
+ for (i = 0; i < objio_seg->num_comps; i++) {
+ if (!objio_seg->ods[i])
+ break;
+ nfs4_put_deviceid_node(&objio_seg->ods[i]->id_node);
+ }
+ kfree(objio_seg);
+}
+
+int objio_alloc_io_state(struct pnfs_layout_segment *lseg,
+ struct objlayout_io_state **outp,
+ gfp_t gfp_flags)
+{
+ struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
+ struct objio_state *ios;
+ const unsigned first_size = sizeof(*ios) +
+ objio_seg->num_comps * sizeof(ios->per_dev[0]);
+ const unsigned sec_size = objio_seg->num_comps *
+ sizeof(ios->ol_state.ioerrs[0]);
+
+ ios = kzalloc(first_size + sec_size, gfp_flags);
+ if (unlikely(!ios))
+ return -ENOMEM;
+
+ ios->layout = objio_seg;
+ ios->ol_state.ioerrs = ((void *)ios) + first_size;
+ ios->ol_state.num_comps = objio_seg->num_comps;
+
+ *outp = &ios->ol_state;
+ return 0;
+}
+
+void objio_free_io_state(struct objlayout_io_state *ol_state)
+{
+ struct objio_state *ios = container_of(ol_state, struct objio_state,
+ ol_state);
+
+ kfree(ios);
+}
+
+enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
+{
+ switch (oep) {
+ case OSD_ERR_PRI_NO_ERROR:
+ return (enum pnfs_osd_errno)0;
+
+ case OSD_ERR_PRI_CLEAR_PAGES:
+ BUG_ON(1);
+ return 0;
+
+ case OSD_ERR_PRI_RESOURCE:
+ return PNFS_OSD_ERR_RESOURCE;
+ case OSD_ERR_PRI_BAD_CRED:
+ return PNFS_OSD_ERR_BAD_CRED;
+ case OSD_ERR_PRI_NO_ACCESS:
+ return PNFS_OSD_ERR_NO_ACCESS;
+ case OSD_ERR_PRI_UNREACHABLE:
+ return PNFS_OSD_ERR_UNREACHABLE;
+ case OSD_ERR_PRI_NOT_FOUND:
+ return PNFS_OSD_ERR_NOT_FOUND;
+ case OSD_ERR_PRI_NO_SPACE:
+ return PNFS_OSD_ERR_NO_SPACE;
+ default:
+ WARN_ON(1);
+ /* fallthrough */
+ case OSD_ERR_PRI_EIO:
+ return PNFS_OSD_ERR_EIO;
+ }
+}
+
+static void _clear_bio(struct bio *bio)
+{
+ struct bio_vec *bv;
+ unsigned i;
+
+ __bio_for_each_segment(bv, bio, i, 0) {
+ unsigned this_count = bv->bv_len;
+
+ if (likely(PAGE_SIZE == this_count))
+ clear_highpage(bv->bv_page);
+ else
+ zero_user(bv->bv_page, bv->bv_offset, this_count);
+ }
+}
+
+static int _io_check(struct objio_state *ios, bool is_write)
+{
+ enum osd_err_priority oep = OSD_ERR_PRI_NO_ERROR;
+ int lin_ret = 0;
+ int i;
+
+ for (i = 0; i < ios->numdevs; i++) {
+ struct osd_sense_info osi;
+ struct osd_request *or = ios->per_dev[i].or;
+ unsigned dev;
+ int ret;
+
+ if (!or)
+ continue;
+
+ ret = osd_req_decode_sense(or, &osi);
+ if (likely(!ret))
+ continue;
+
+ if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
+ /* start read offset is past the end of the file */
+ BUG_ON(is_write);
+ _clear_bio(ios->per_dev[i].bio);
+ dprintk("%s: start read offset past end of file "
+ "offset=0x%llx, length=0x%lx\n", __func__,
+ _LLU(ios->per_dev[i].offset),
+ ios->per_dev[i].length);
+
+ continue; /* we recovered */
+ }
+ dev = ios->per_dev[i].dev;
+ objlayout_io_set_result(&ios->ol_state, dev,
+ &ios->layout->comps[dev].oc_object_id,
+ osd_pri_2_pnfs_err(osi.osd_err_pri),
+ ios->per_dev[i].offset,
+ ios->per_dev[i].length,
+ is_write);
+
+ if (osi.osd_err_pri >= oep) {
+ oep = osi.osd_err_pri;
+ lin_ret = ret;
+ }
+ }
+
+ return lin_ret;
+}
+
+/*
+ * Common IO state helpers.
+ */
+static void _io_free(struct objio_state *ios)
+{
+ unsigned i;
+
+ for (i = 0; i < ios->numdevs; i++) {
+ struct _objio_per_comp *per_dev = &ios->per_dev[i];
+
+ if (per_dev->or) {
+ osd_end_request(per_dev->or);
+ per_dev->or = NULL;
+ }
+
+ if (per_dev->bio) {
+ bio_put(per_dev->bio);
+ per_dev->bio = NULL;
+ }
+ }
+}
+
+struct osd_dev *_io_od(struct objio_state *ios, unsigned dev)
+{
+ unsigned min_dev = ios->layout->comps_index;
+ unsigned max_dev = min_dev + ios->layout->num_comps;
+
+ BUG_ON(dev < min_dev || max_dev <= dev);
+ return ios->layout->ods[dev - min_dev]->od;
+}
+
+struct _striping_info {
+ u64 obj_offset;
+ u64 group_length;
+ unsigned dev;
+ unsigned unit_off;
+};
+
+static void _calc_stripe_info(struct objio_state *ios, u64 file_offset,
+ struct _striping_info *si)
+{
+ u32 stripe_unit = ios->layout->stripe_unit;
+ u32 group_width = ios->layout->group_width;
+ u64 group_depth = ios->layout->group_depth;
+ u32 U = stripe_unit * group_width;
+
+ u64 T = U * group_depth;
+ u64 S = T * ios->layout->group_count;
+ u64 M = div64_u64(file_offset, S);
+
+ /*
+ G = (L - (M * S)) / T
+ H = (L - (M * S)) % T
+ */
+ u64 LmodU = file_offset - M * S;
+ u32 G = div64_u64(LmodU, T);
+ u64 H = LmodU - G * T;
+
+ u32 N = div_u64(H, U);
+
+ div_u64_rem(file_offset, stripe_unit, &si->unit_off);
+ si->obj_offset = si->unit_off + (N * stripe_unit) +
+ (M * group_depth * stripe_unit);
+
+ /* "H - (N * U)" is just "H % U", so it fits in a u32 */
+ si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+ si->dev *= ios->layout->mirrors_p1;
+
+ si->group_length = T - H;
+}
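+
+/*
+ * Illustrative example of the mapping above (assumed values, 4KiB pages):
+ * stripe_unit = 4096, group_width = 2, group_depth = 4, group_count = 1,
+ * mirrors_p1 = 1, so U = 8192, T = 32768, S = 32768.  For file_offset =
+ * 20480 (page 5): M = 0, G = 0, H = 20480, N = 2, unit_off = 0, giving
+ *   obj_offset   = 0 + 2 * 4096 + 0 = 8192
+ *   dev          = (20480 - 16384) / 4096 + 0 = 1
+ *   group_length = 32768 - 20480 = 12288
+ * i.e. page 5 of the file lands on component 1 at object offset 8192, with
+ * 12KiB left in the current group.
+ */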
+
+static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg,
+ unsigned pgbase, struct _objio_per_comp *per_dev, int cur_len,
+ gfp_t gfp_flags)
+{
+ unsigned pg = *cur_pg;
+ struct request_queue *q =
+ osd_request_queue(_io_od(ios, per_dev->dev));
+
+ per_dev->length += cur_len;
+
+ if (per_dev->bio == NULL) {
+ unsigned stripes = ios->layout->num_comps /
+ ios->layout->mirrors_p1;
+ unsigned pages_in_stripe = stripes *
+ (ios->layout->stripe_unit / PAGE_SIZE);
+ unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) /
+ stripes;
+
+ if (BIO_MAX_PAGES_KMALLOC < bio_size)
+ bio_size = BIO_MAX_PAGES_KMALLOC;
+
+ per_dev->bio = bio_kmalloc(gfp_flags, bio_size);
+ if (unlikely(!per_dev->bio)) {
+ dprintk("Failed to allocate BIO size=%u\n", bio_size);
+ return -ENOMEM;
+ }
+ }
+
+ while (cur_len > 0) {
+ unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
+ unsigned added_len;
+
+ BUG_ON(ios->ol_state.nr_pages <= pg);
+ cur_len -= pglen;
+
+ added_len = bio_add_pc_page(q, per_dev->bio,
+ ios->ol_state.pages[pg], pglen, pgbase);
+ if (unlikely(pglen != added_len))
+ return -ENOMEM;
+ pgbase = 0;
+ ++pg;
+ }
+ BUG_ON(cur_len);
+
+ *cur_pg = pg;
+ return 0;
+}
+
+static int _prepare_one_group(struct objio_state *ios, u64 length,
+ struct _striping_info *si, unsigned *last_pg,
+ gfp_t gfp_flags)
+{
+ unsigned stripe_unit = ios->layout->stripe_unit;
+ unsigned mirrors_p1 = ios->layout->mirrors_p1;
+ unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
+ unsigned dev = si->dev;
+ unsigned first_dev = dev - (dev % devs_in_group);
+ unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
+ unsigned cur_pg = *last_pg;
+ int ret = 0;
+
+ while (length) {
+ struct _objio_per_comp *per_dev = &ios->per_dev[dev];
+ unsigned cur_len, page_off = 0;
+
+ if (!per_dev->length) {
+ per_dev->dev = dev;
+ if (dev < si->dev) {
+ per_dev->offset = si->obj_offset + stripe_unit -
+ si->unit_off;
+ cur_len = stripe_unit;
+ } else if (dev == si->dev) {
+ per_dev->offset = si->obj_offset;
+ cur_len = stripe_unit - si->unit_off;
+ page_off = si->unit_off & ~PAGE_MASK;
+ BUG_ON(page_off &&
+ (page_off != ios->ol_state.pgbase));
+ } else { /* dev > si->dev */
+ per_dev->offset = si->obj_offset - si->unit_off;
+ cur_len = stripe_unit;
+ }
+
+ if (max_comp < dev)
+ max_comp = dev;
+ } else {
+ cur_len = stripe_unit;
+ }
+ if (cur_len >= length)
+ cur_len = length;
+
+ ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
+ cur_len, gfp_flags);
+ if (unlikely(ret))
+ goto out;
+
+ dev += mirrors_p1;
+ dev = (dev % devs_in_group) + first_dev;
+
+ length -= cur_len;
+ ios->length += cur_len;
+ }
+out:
+ ios->numdevs = max_comp + mirrors_p1;
+ *last_pg = cur_pg;
+ return ret;
+}
+
+static int _io_rw_pagelist(struct objio_state *ios, gfp_t gfp_flags)
+{
+ u64 length = ios->ol_state.count;
+ u64 offset = ios->ol_state.offset;
+ struct _striping_info si;
+ unsigned last_pg = 0;
+ int ret = 0;
+
+ while (length) {
+ _calc_stripe_info(ios, offset, &si);
+
+ if (length < si.group_length)
+ si.group_length = length;
+
+ ret = _prepare_one_group(ios, si.group_length, &si, &last_pg, gfp_flags);
+ if (unlikely(ret))
+ goto out;
+
+ offset += si.group_length;
+ length -= si.group_length;
+ }
+
+out:
+ if (!ios->length)
+ return ret;
+
+ return 0;
+}
+
+static ssize_t _sync_done(struct objio_state *ios)
+{
+ struct completion *waiting = ios->private;
+
+ complete(waiting);
+ return 0;
+}
+
+static void _last_io(struct kref *kref)
+{
+ struct objio_state *ios = container_of(kref, struct objio_state, kref);
+
+ ios->done(ios);
+}
+
+static void _done_io(struct osd_request *or, void *p)
+{
+ struct objio_state *ios = p;
+
+ kref_put(&ios->kref, _last_io);
+}
+
+static ssize_t _io_exec(struct objio_state *ios)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ ssize_t status = 0; /* sync status */
+ unsigned i;
+ objio_done_fn saved_done_fn = ios->done;
+ bool sync = ios->ol_state.sync;
+
+ if (sync) {
+ ios->done = _sync_done;
+ ios->private = &wait;
+ }
+
+ kref_init(&ios->kref);
+
+ for (i = 0; i < ios->numdevs; i++) {
+ struct osd_request *or = ios->per_dev[i].or;
+
+ if (!or)
+ continue;
+
+ kref_get(&ios->kref);
+ osd_execute_request_async(or, _done_io, ios);
+ }
+
+ kref_put(&ios->kref, _last_io);
+
+ if (sync) {
+ wait_for_completion(&wait);
+ status = saved_done_fn(ios);
+ }
+
+ return status;
+}
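+
+/*
+ * Note on the reference counting above: kref_init() starts the count at one
+ * (the submitter's reference), each osd_execute_request_async() takes an
+ * extra reference that is dropped by _done_io(), and the submitter drops its
+ * own reference after the submission loop.  ios->done() therefore runs
+ * exactly once, from whichever context drops the last reference -- the
+ * submitter itself if every request has already completed, or the last
+ * request's completion callback otherwise.
+ */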
+
+/*
+ * read
+ */
+static ssize_t _read_done(struct objio_state *ios)
+{
+ ssize_t status;
+ int ret = _io_check(ios, false);
+
+ _io_free(ios);
+
+ if (likely(!ret))
+ status = ios->length;
+ else
+ status = ret;
+
+ objlayout_read_done(&ios->ol_state, status, ios->ol_state.sync);
+ return status;
+}
+
+static int _read_mirrors(struct objio_state *ios, unsigned cur_comp)
+{
+ struct osd_request *or = NULL;
+ struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
+ unsigned dev = per_dev->dev;
+ struct pnfs_osd_object_cred *cred =
+ &ios->layout->comps[dev];
+ struct osd_obj_id obj = {
+ .partition = cred->oc_object_id.oid_partition_id,
+ .id = cred->oc_object_id.oid_object_id,
+ };
+ int ret;
+
+ or = osd_start_request(_io_od(ios, dev), GFP_KERNEL);
+ if (unlikely(!or)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ per_dev->or = or;
+
+ osd_req_read(or, &obj, per_dev->offset, per_dev->bio, per_dev->length);
+
+ ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
+ if (ret) {
+ dprintk("%s: Failed to osd_finalize_request() => %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ dprintk("%s:[%d] dev=%d obj=0x%llx start=0x%llx length=0x%lx\n",
+ __func__, cur_comp, dev, obj.id, _LLU(per_dev->offset),
+ per_dev->length);
+
+err:
+ return ret;
+}
+
+static ssize_t _read_exec(struct objio_state *ios)
+{
+ unsigned i;
+ int ret;
+
+ for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+ if (!ios->per_dev[i].length)
+ continue;
+ ret = _read_mirrors(ios, i);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ ios->done = _read_done;
+ return _io_exec(ios); /* In sync mode exec returns the io status */
+
+err:
+ _io_free(ios);
+ return ret;
+}
+
+ssize_t objio_read_pagelist(struct objlayout_io_state *ol_state)
+{
+ struct objio_state *ios = container_of(ol_state, struct objio_state,
+ ol_state);
+ int ret;
+
+ ret = _io_rw_pagelist(ios, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ return _read_exec(ios);
+}
+
+/*
+ * write
+ */
+static ssize_t _write_done(struct objio_state *ios)
+{
+ ssize_t status;
+ int ret = _io_check(ios, true);
+
+ _io_free(ios);
+
+ if (likely(!ret)) {
+ /* FIXME: should be based on the OSD's persistence model
+ * See OSD2r05 Section 4.13 Data persistence model */
+ ios->ol_state.committed = NFS_FILE_SYNC;
+ status = ios->length;
+ } else {
+ status = ret;
+ }
+
+ objlayout_write_done(&ios->ol_state, status, ios->ol_state.sync);
+ return status;
+}
+
+static int _write_mirrors(struct objio_state *ios, unsigned cur_comp)
+{
+ struct _objio_per_comp *master_dev = &ios->per_dev[cur_comp];
+ unsigned dev = ios->per_dev[cur_comp].dev;
+ unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
+ int ret;
+
+ for (; cur_comp < last_comp; ++cur_comp, ++dev) {
+ struct osd_request *or = NULL;
+ struct pnfs_osd_object_cred *cred =
+ &ios->layout->comps[dev];
+ struct osd_obj_id obj = {
+ .partition = cred->oc_object_id.oid_partition_id,
+ .id = cred->oc_object_id.oid_object_id,
+ };
+ struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
+ struct bio *bio;
+
+ or = osd_start_request(_io_od(ios, dev), GFP_NOFS);
+ if (unlikely(!or)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ per_dev->or = or;
+
+ if (per_dev != master_dev) {
+ bio = bio_kmalloc(GFP_NOFS,
+ master_dev->bio->bi_max_vecs);
+ if (unlikely(!bio)) {
+ dprintk("Failed to allocate BIO size=%u\n",
+ master_dev->bio->bi_max_vecs);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ __bio_clone(bio, master_dev->bio);
+ bio->bi_bdev = NULL;
+ bio->bi_next = NULL;
+ per_dev->bio = bio;
+ per_dev->dev = dev;
+ per_dev->length = master_dev->length;
+ per_dev->offset = master_dev->offset;
+ } else {
+ bio = master_dev->bio;
+ bio->bi_rw |= REQ_WRITE;
+ }
+
+ osd_req_write(or, &obj, per_dev->offset, bio, per_dev->length);
+
+ ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
+ if (ret) {
+ dprintk("%s: Failed to osd_finalize_request() => %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ dprintk("%s:[%d] dev=%d obj=0x%llx start=0x%llx length=0x%lx\n",
+ __func__, cur_comp, dev, obj.id, _LLU(per_dev->offset),
+ per_dev->length);
+ }
+
+err:
+ return ret;
+}
+
+static ssize_t _write_exec(struct objio_state *ios)
+{
+ unsigned i;
+ int ret;
+
+ for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+ if (!ios->per_dev[i].length)
+ continue;
+ ret = _write_mirrors(ios, i);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ ios->done = _write_done;
+ return _io_exec(ios); /* In sync mode exec returns the io status */
+
+err:
+ _io_free(ios);
+ return ret;
+}
+
+ssize_t objio_write_pagelist(struct objlayout_io_state *ol_state, bool stable)
+{
+ struct objio_state *ios = container_of(ol_state, struct objio_state,
+ ol_state);
+ int ret;
+
+ /* TODO: ios->stable = stable; */
+ ret = _io_rw_pagelist(ios, GFP_NOFS);
+ if (unlikely(ret))
+ return ret;
+
+ return _write_exec(ios);
+}
+
+static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev, struct nfs_page *req)
+{
+ if (!pnfs_generic_pg_test(pgio, prev, req))
+ return false;
+
+ return pgio->pg_count + req->wb_bytes <=
+ OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
+}
+
+static struct pnfs_layoutdriver_type objlayout_type = {
+ .id = LAYOUT_OSD2_OBJECTS,
+ .name = "LAYOUT_OSD2_OBJECTS",
+ .flags = PNFS_LAYOUTRET_ON_SETATTR,
+
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+
+ .alloc_lseg = objlayout_alloc_lseg,
+ .free_lseg = objlayout_free_lseg,
+
+ .read_pagelist = objlayout_read_pagelist,
+ .write_pagelist = objlayout_write_pagelist,
+ .pg_test = objio_pg_test,
+
+ .free_deviceid_node = objio_free_deviceid_node,
+
+ .encode_layoutcommit = objlayout_encode_layoutcommit,
+ .encode_layoutreturn = objlayout_encode_layoutreturn,
+};
+
+MODULE_DESCRIPTION("pNFS Layout Driver for OSD2 objects");
+MODULE_AUTHOR("Benny Halevy <bhalevy@panasas.com>");
+MODULE_LICENSE("GPL");
+
+static int __init
+objlayout_init(void)
+{
+ int ret = pnfs_register_layoutdriver(&objlayout_type);
+
+ if (ret)
+ printk(KERN_INFO
+ "%s: Registering OSD pNFS Layout Driver failed: error=%d\n",
+ __func__, ret);
+ else
+ printk(KERN_INFO "%s: Registered OSD pNFS Layout Driver\n",
+ __func__);
+ return ret;
+}
+
+static void __exit
+objlayout_exit(void)
+{
+ pnfs_unregister_layoutdriver(&objlayout_type);
+ printk(KERN_INFO "%s: Unregistered OSD pNFS Layout Driver\n",
+ __func__);
+}
+
+module_init(objlayout_init);
+module_exit(objlayout_exit);
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
new file mode 100644
index 0000000..dc3956c
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.c
@@ -0,0 +1,712 @@
+/*
+ * pNFS Objects layout driver high level definitions
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <scsi/osd_initiator.h>
+#include "objlayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+/*
+ * Create an objlayout layout structure for the given inode and return it.
+ */
+struct pnfs_layout_hdr *
+objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct objlayout *objlay;
+
+ objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
+ if (objlay) {
+ spin_lock_init(&objlay->lock);
+ INIT_LIST_HEAD(&objlay->err_list);
+ }
+ dprintk("%s: Return %p\n", __func__, objlay);
+ return &objlay->pnfs_layout;
+}
+
+/*
+ * Free an objlayout layout structure
+ */
+void
+objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct objlayout *objlay = OBJLAYOUT(lo);
+
+ dprintk("%s: objlay %p\n", __func__, objlay);
+
+ WARN_ON(!list_empty(&objlay->err_list));
+ kfree(objlay);
+}
+
+/*
+ * Unmarshal the layout and store it in pnfslay.
+ */
+struct pnfs_layout_segment *
+objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ int status = -ENOMEM;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = lgr->layoutp->pages,
+ .page_len = lgr->layoutp->len,
+ .buflen = lgr->layoutp->len,
+ .len = lgr->layoutp->len,
+ };
+ struct page *scratch;
+ struct pnfs_layout_segment *lseg;
+
+ dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);
+
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ goto err_nofree;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
+ if (unlikely(status)) {
+ dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
+ status);
+ goto err;
+ }
+
+ __free_page(scratch);
+
+ dprintk("%s: Return %p\n", __func__, lseg);
+ return lseg;
+
+err:
+ __free_page(scratch);
+err_nofree:
+ dprintk("%s: Err Return=>%d\n", __func__, status);
+ return ERR_PTR(status);
+}
+
+/*
+ * Free a layout segment
+ */
+void
+objlayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ dprintk("%s: freeing layout segment %p\n", __func__, lseg);
+
+ if (unlikely(!lseg))
+ return;
+
+ objio_free_lseg(lseg);
+}
+
+/*
+ * I/O Operations
+ */
+static inline u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+/* last octet in a range */
+static inline u64
+last_byte_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ BUG_ON(!len);
+ end = start + len;
+ return end > start ? end - 1 : NFS4_MAX_UINT64;
+}
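+
+/*
+ * For example, end_offset(0, 4096) = 4096 and last_byte_offset(0, 4096) =
+ * 4095, while a range that would wrap past the end of the u64 space, such as
+ * end_offset(0xfffffffffffff000ULL, 0x2000), is clamped to NFS4_MAX_UINT64.
+ */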
+
+static struct objlayout_io_state *
+objlayout_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type,
+ struct page **pages,
+ unsigned pgbase,
+ loff_t offset,
+ size_t count,
+ struct pnfs_layout_segment *lseg,
+ void *rpcdata,
+ gfp_t gfp_flags)
+{
+ struct objlayout_io_state *state;
+ u64 lseg_end_offset;
+
+ dprintk("%s: allocating io_state\n", __func__);
+ if (objio_alloc_io_state(lseg, &state, gfp_flags))
+ return NULL;
+
+ BUG_ON(offset < lseg->pls_range.offset);
+ lseg_end_offset = end_offset(lseg->pls_range.offset,
+ lseg->pls_range.length);
+ BUG_ON(offset >= lseg_end_offset);
+ if (offset + count > lseg_end_offset) {
+ count = lseg->pls_range.length -
+ (offset - lseg->pls_range.offset);
+ dprintk("%s: truncated count %Zd\n", __func__, count);
+ }
+
+ if (pgbase > PAGE_SIZE) {
+ pages += pgbase >> PAGE_SHIFT;
+ pgbase &= ~PAGE_MASK;
+ }
+
+ INIT_LIST_HEAD(&state->err_list);
+ state->lseg = lseg;
+ state->rpcdata = rpcdata;
+ state->pages = pages;
+ state->pgbase = pgbase;
+ state->nr_pages = (pgbase + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ state->offset = offset;
+ state->count = count;
+ state->sync = 0;
+
+ return state;
+}
+
+static void
+objlayout_free_io_state(struct objlayout_io_state *state)
+{
+ dprintk("%s: freeing io_state\n", __func__);
+ if (unlikely(!state))
+ return;
+
+ objio_free_io_state(state);
+}
+
+/*
+ * I/O done common code
+ */
+static void
+objlayout_iodone(struct objlayout_io_state *state)
+{
+ dprintk("%s: state %p status\n", __func__, state);
+
+ if (likely(state->status >= 0)) {
+ objlayout_free_io_state(state);
+ } else {
+ struct objlayout *objlay = OBJLAYOUT(state->lseg->pls_layout);
+
+ spin_lock(&objlay->lock);
+ objlay->delta_space_valid = OBJ_DSU_INVALID;
+ list_add(&objlay->err_list, &state->err_list);
+ spin_unlock(&objlay->lock);
+ }
+}
+
+/*
+ * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
+ *
+ * The @index component IO failed (error returned from target). Register
+ * the error for later reporting at layout-return.
+ */
+void
+objlayout_io_set_result(struct objlayout_io_state *state, unsigned index,
+ struct pnfs_osd_objid *pooid, int osd_error,
+ u64 offset, u64 length, bool is_write)
+{
+ struct pnfs_osd_ioerr *ioerr = &state->ioerrs[index];
+
+ BUG_ON(index >= state->num_comps);
+ if (osd_error) {
+ ioerr->oer_component = *pooid;
+ ioerr->oer_comp_offset = offset;
+ ioerr->oer_comp_length = length;
+ ioerr->oer_iswrite = is_write;
+ ioerr->oer_errno = osd_error;
+
+ dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
+ "par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
+ __func__, index, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+ } else {
+ /* User need not call if no error is reported */
+ ioerr->oer_errno = 0;
+ }
+}
+
+/* Function scheduled on rpc workqueue to call ->nfs_readlist_complete().
+ * This is needed because the OSD completion is called with interrupts
+ * disabled from the block layer.
+ */
+static void _rpc_read_complete(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_read_data *rdata;
+
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ rdata = container_of(task, struct nfs_read_data, task);
+
+ pnfs_ld_read_done(rdata);
+}
+
+void
+objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
+{
+ int eof = state->eof;
+ struct nfs_read_data *rdata;
+
+ state->status = status;
+ dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof);
+ rdata = state->rpcdata;
+ rdata->task.tk_status = status;
+ if (status >= 0) {
+ rdata->res.count = status;
+ rdata->res.eof = eof;
+ }
+ objlayout_iodone(state);
+ /* must not use state after this point */
+
+ if (sync)
+ pnfs_ld_read_done(rdata);
+ else {
+ INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
+ schedule_work(&rdata->task.u.tk_work);
+ }
+}
+
+/*
+ * Perform sync or async reads.
+ */
+enum pnfs_try_status
+objlayout_read_pagelist(struct nfs_read_data *rdata)
+{
+ loff_t offset = rdata->args.offset;
+ size_t count = rdata->args.count;
+ struct objlayout_io_state *state;
+ ssize_t status = 0;
+ loff_t eof;
+
+ dprintk("%s: Begin inode %p offset %llu count %d\n",
+ __func__, rdata->inode, offset, (int)count);
+
+ eof = i_size_read(rdata->inode);
+ if (unlikely(offset + count > eof)) {
+ if (offset >= eof) {
+ status = 0;
+ rdata->res.count = 0;
+ rdata->res.eof = 1;
+ goto out;
+ }
+ count = eof - offset;
+ }
+
+ state = objlayout_alloc_io_state(NFS_I(rdata->inode)->layout,
+ rdata->args.pages, rdata->args.pgbase,
+ offset, count,
+ rdata->lseg, rdata,
+ GFP_KERNEL);
+ if (unlikely(!state)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ state->eof = state->offset + state->count >= eof;
+
+ status = objio_read_pagelist(state);
+ out:
+ dprintk("%s: Return status %Zd\n", __func__, status);
+ rdata->pnfs_error = status;
+ return PNFS_ATTEMPTED;
+}
+
+/* Function scheduled on rpc workqueue to call ->nfs_writelist_complete().
+ * This is needed because the OSD completion is called with interrupts
+ * disabled from the block layer.
+ */
+static void _rpc_write_complete(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_write_data *wdata;
+
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ wdata = container_of(task, struct nfs_write_data, task);
+
+ pnfs_ld_write_done(wdata);
+}
+
+void
+objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
+ bool sync)
+{
+ struct nfs_write_data *wdata;
+
+ dprintk("%s: Begin\n", __func__);
+ wdata = state->rpcdata;
+ state->status = status;
+ wdata->task.tk_status = status;
+ if (status >= 0) {
+ wdata->res.count = status;
+ wdata->verf.committed = state->committed;
+ dprintk("%s: Return status %d committed %d\n",
+ __func__, wdata->task.tk_status,
+ wdata->verf.committed);
+ } else
+ dprintk("%s: Return status %d\n",
+ __func__, wdata->task.tk_status);
+ objlayout_iodone(state);
+ /* must not use state after this point */
+
+ if (sync)
+ pnfs_ld_write_done(wdata);
+ else {
+ INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
+ schedule_work(&wdata->task.u.tk_work);
+ }
+}
+
+/*
+ * Perform sync or async writes.
+ */
+enum pnfs_try_status
+objlayout_write_pagelist(struct nfs_write_data *wdata,
+ int how)
+{
+ struct objlayout_io_state *state;
+ ssize_t status;
+
+ dprintk("%s: Begin inode %p offset %llu count %u\n",
+ __func__, wdata->inode, wdata->args.offset, wdata->args.count);
+
+ state = objlayout_alloc_io_state(NFS_I(wdata->inode)->layout,
+ wdata->args.pages,
+ wdata->args.pgbase,
+ wdata->args.offset,
+ wdata->args.count,
+ wdata->lseg, wdata,
+ GFP_NOFS);
+ if (unlikely(!state)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ state->sync = how & FLUSH_SYNC;
+
+ status = objio_write_pagelist(state, how & FLUSH_STABLE);
+ out:
+ dprintk("%s: Return status %Zd\n", __func__, status);
+ wdata->pnfs_error = status;
+ return PNFS_ATTEMPTED;
+}
+
+void
+objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args)
+{
+ struct objlayout *objlay = OBJLAYOUT(pnfslay);
+ struct pnfs_osd_layoutupdate lou;
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+
+ spin_lock(&objlay->lock);
+ lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID);
+ lou.dsu_delta = objlay->delta_space_used;
+ objlay->delta_space_used = 0;
+ objlay->delta_space_valid = OBJ_DSU_INIT;
+ lou.olu_ioerr_flag = !list_empty(&objlay->err_list);
+ spin_unlock(&objlay->lock);
+
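+ /* Reserve one 32-bit word for the opaque body length, encode the body,
+ * then backfill the length below: (xdr->p - start - 1) is the number of
+ * 32-bit words encoded after the length word, so multiplying by 4 gives
+ * the byte count the peer expects.
+ */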
+ start = xdr_reserve_space(xdr, 4);
+
+ BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou));
+
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+
+ dprintk("%s: Return delta_space_used %lld err %d\n", __func__,
+ lou.dsu_delta, lou.olu_ioerr_flag);
+}
+
+static int
+err_prio(u32 oer_errno)
+{
+ switch (oer_errno) {
+ case 0:
+ return 0;
+
+ case PNFS_OSD_ERR_RESOURCE:
+ return OSD_ERR_PRI_RESOURCE;
+ case PNFS_OSD_ERR_BAD_CRED:
+ return OSD_ERR_PRI_BAD_CRED;
+ case PNFS_OSD_ERR_NO_ACCESS:
+ return OSD_ERR_PRI_NO_ACCESS;
+ case PNFS_OSD_ERR_UNREACHABLE:
+ return OSD_ERR_PRI_UNREACHABLE;
+ case PNFS_OSD_ERR_NOT_FOUND:
+ return OSD_ERR_PRI_NOT_FOUND;
+ case PNFS_OSD_ERR_NO_SPACE:
+ return OSD_ERR_PRI_NO_SPACE;
+ default:
+ WARN_ON(1);
+ /* fallthrough */
+ case PNFS_OSD_ERR_EIO:
+ return OSD_ERR_PRI_EIO;
+ }
+}
+
+static void
+merge_ioerr(struct pnfs_osd_ioerr *dest_err,
+ const struct pnfs_osd_ioerr *src_err)
+{
+ u64 dest_end, src_end;
+
+ if (!dest_err->oer_errno) {
+ *dest_err = *src_err;
+ /* accumulated device must be blank */
+ memset(&dest_err->oer_component.oid_device_id, 0,
+ sizeof(dest_err->oer_component.oid_device_id));
+
+ return;
+ }
+
+ if (dest_err->oer_component.oid_partition_id !=
+ src_err->oer_component.oid_partition_id)
+ dest_err->oer_component.oid_partition_id = 0;
+
+ if (dest_err->oer_component.oid_object_id !=
+ src_err->oer_component.oid_object_id)
+ dest_err->oer_component.oid_object_id = 0;
+
+ if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
+ dest_err->oer_comp_offset = src_err->oer_comp_offset;
+
+ dest_end = end_offset(dest_err->oer_comp_offset,
+ dest_err->oer_comp_length);
+ src_end = end_offset(src_err->oer_comp_offset,
+ src_err->oer_comp_length);
+ if (dest_end < src_end)
+ dest_end = src_end;
+
+ dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;
+
+ if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
+ (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
+ dest_err->oer_errno = src_err->oer_errno;
+ } else if (src_err->oer_iswrite) {
+ dest_err->oer_iswrite = true;
+ dest_err->oer_errno = src_err->oer_errno;
+ }
+}
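+
+/*
+ * Illustrative merge (assumed values): accumulating a read error of
+ * PNFS_OSD_ERR_RESOURCE over [0, 4096) with a read error of PNFS_OSD_ERR_EIO
+ * over [8192, 12288) on a different partition/object yields one descriptor
+ * with a blank device id, zeroed partition and object ids, offset 0, length
+ * 12288 and oer_errno = PNFS_OSD_ERR_EIO, since EIO has the higher priority.
+ * A write error always takes precedence over a read error.
+ */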
+
+static void
+encode_accumulated_error(struct objlayout *objlay, __be32 *p)
+{
+ struct objlayout_io_state *state, *tmp;
+ struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};
+
+ list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
+ unsigned i;
+
+ for (i = 0; i < state->num_comps; i++) {
+ struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];
+
+ if (!ioerr->oer_errno)
+ continue;
+
+ printk(KERN_ERR "%s: err[%d]: errno=%d is_write=%d "
+ "dev(%llx:%llx) par=0x%llx obj=0x%llx "
+ "offset=0x%llx length=0x%llx\n",
+ __func__, i, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+
+ merge_ioerr(&accumulated_err, ioerr);
+ }
+ list_del(&state->err_list);
+ objlayout_free_io_state(state);
+ }
+
+ pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
+}
+
+void
+objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct objlayout *objlay = OBJLAYOUT(pnfslay);
+ struct objlayout_io_state *state, *tmp;
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+ start = xdr_reserve_space(xdr, 4);
+ BUG_ON(!start);
+
+ spin_lock(&objlay->lock);
+
+ list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
+ __be32 *last_xdr = NULL, *p;
+ unsigned i;
+ int res = 0;
+
+ for (i = 0; i < state->num_comps; i++) {
+ struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];
+
+ if (!ioerr->oer_errno)
+ continue;
+
+ dprintk("%s: err[%d]: errno=%d is_write=%d "
+ "dev(%llx:%llx) par=0x%llx obj=0x%llx "
+ "offset=0x%llx length=0x%llx\n",
+ __func__, i, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+
+ p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
+ if (unlikely(!p)) {
+ res = -E2BIG;
+ break; /* accumulated_error */
+ }
+
+ last_xdr = p;
+ pnfs_osd_xdr_encode_ioerr(p, &state->ioerrs[i]);
+ }
+
+ /* TODO: use xdr_write_pages */
+ if (unlikely(res)) {
+ /* not even one error descriptor fit; this should never happen */
+ BUG_ON(!last_xdr);
+
+ /* we've encountered a situation with lots and lots of
+ * errors and no space to encode them all. Use the last
+ * available slot to report the union of all the
+ * remaining errors.
+ */
+ encode_accumulated_error(objlay, last_xdr);
+ goto loop_done;
+ }
+ list_del(&state->err_list);
+ objlayout_free_io_state(state);
+ }
+loop_done:
+ spin_unlock(&objlay->lock);
+
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+ dprintk("%s: Return\n", __func__);
+}
+
+
+/*
+ * Get Device Info API for io engines
+ */
+struct objlayout_deviceinfo {
+ struct page *page;
+ struct pnfs_osd_deviceaddr da; /* This must be last */
+};
+
+/* Initialize and call nfs_getdeviceinfo, then decode and return a
+ * "struct pnfs_osd_deviceaddr *". objlayout_put_deviceinfo() must
+ * eventually be called to release it.
+ */
+int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
+ gfp_t gfp_flags)
+{
+ struct objlayout_deviceinfo *odi;
+ struct pnfs_device pd;
+ struct super_block *sb;
+ struct page *page, **pages;
+ u32 *p;
+ int err;
+
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return -ENOMEM;
+
+ pages = &page;
+ pd.pages = pages;
+
+ memcpy(&pd.dev_id, d_id, sizeof(*d_id));
+ pd.layout_type = LAYOUT_OSD2_OBJECTS;
+ pd.pages = &page;
+ pd.pgbase = 0;
+ pd.pglen = PAGE_SIZE;
+ pd.mincount = 0;
+
+ sb = pnfslay->plh_inode->i_sb;
+ err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
+ dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
+ if (err)
+ goto err_out;
+
+ p = page_address(page);
+ odi = kzalloc(sizeof(*odi), gfp_flags);
+ if (!odi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
+ odi->page = page;
+ *deviceaddr = &odi->da;
+ return 0;
+
+err_out:
+ __free_page(page);
+ return err;
+}
+
+void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
+{
+ struct objlayout_deviceinfo *odi = container_of(deviceaddr,
+ struct objlayout_deviceinfo,
+ da);
+
+ __free_page(odi->page);
+ kfree(odi);
+}
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h
new file mode 100644
index 0000000..a8244c8
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.h
@@ -0,0 +1,187 @@
+/*
+ * Data types and function declarations for interfacing with the
+ * pNFS standard object layout driver.
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OBJLAYOUT_H
+#define _OBJLAYOUT_H
+
+#include <linux/nfs_fs.h>
+#include <linux/pnfs_osd_xdr.h>
+#include "../pnfs.h"
+
+/*
+ * per-inode layout
+ */
+struct objlayout {
+ struct pnfs_layout_hdr pnfs_layout;
+
+ /* for layout_commit */
+ enum osd_delta_space_valid_enum {
+ OBJ_DSU_INIT = 0,
+ OBJ_DSU_VALID,
+ OBJ_DSU_INVALID,
+ } delta_space_valid;
+ s64 delta_space_used; /* consumed by write ops */
+
+ /* for layout_return */
+ spinlock_t lock;
+ struct list_head err_list;
+};
+
+static inline struct objlayout *
+OBJLAYOUT(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct objlayout, pnfs_layout);
+}
+
+/*
+ * per-I/O operation state
+ * embedded in the objects provider's io_state data structure
+ */
+struct objlayout_io_state {
+ struct pnfs_layout_segment *lseg;
+
+ struct page **pages;
+ unsigned pgbase;
+ unsigned nr_pages;
+ unsigned long count;
+ loff_t offset;
+ bool sync;
+
+ void *rpcdata;
+ int status; /* res */
+ int eof; /* res */
+ int committed; /* res */
+
+ /* Error reporting (layout_return) */
+ struct list_head err_list;
+ unsigned num_comps;
+ /* Pointer to array of error descriptors of size num_comps.
+ * It should contain as many entries as devices in the osd_layout
+ * that participate in the I/O. It is up to the io_engine to allocate
+ * the needed space and set num_comps.
+ */
+ struct pnfs_osd_ioerr *ioerrs;
+};
+
+/*
+ * Raid engine I/O API
+ */
+extern int objio_alloc_lseg(struct pnfs_layout_segment **outp,
+ struct pnfs_layout_hdr *pnfslay,
+ struct pnfs_layout_range *range,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags);
+extern void objio_free_lseg(struct pnfs_layout_segment *lseg);
+
+extern int objio_alloc_io_state(
+ struct pnfs_layout_segment *lseg,
+ struct objlayout_io_state **outp,
+ gfp_t gfp_flags);
+extern void objio_free_io_state(struct objlayout_io_state *state);
+
+extern ssize_t objio_read_pagelist(struct objlayout_io_state *ol_state);
+extern ssize_t objio_write_pagelist(struct objlayout_io_state *ol_state,
+ bool stable);
+
+/*
+ * callback API
+ */
+extern void objlayout_io_set_result(struct objlayout_io_state *state,
+ unsigned index, struct pnfs_osd_objid *pooid,
+ int osd_error, u64 offset, u64 length, bool is_write);
+
+static inline void
+objlayout_add_delta_space_used(struct objlayout_io_state *state, s64 space_used)
+{
+ struct objlayout *objlay = OBJLAYOUT(state->lseg->pls_layout);
+
+ /* If one of the I/Os errored out and the delta_space_used was
+ * invalid, we render the complete report invalid. The protocol mandates
+ * that the DSU be either accurate or not reported at all.
+ */
+ spin_lock(&objlay->lock);
+ if (objlay->delta_space_valid != OBJ_DSU_INVALID) {
+ objlay->delta_space_valid = OBJ_DSU_VALID;
+ objlay->delta_space_used += space_used;
+ }
+ spin_unlock(&objlay->lock);
+}
+
+extern void objlayout_read_done(struct objlayout_io_state *state,
+ ssize_t status, bool sync);
+extern void objlayout_write_done(struct objlayout_io_state *state,
+ ssize_t status, bool sync);
+
+extern int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
+ gfp_t gfp_flags);
+extern void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr);
+
+/*
+ * exported generic objects function vectors
+ */
+
+extern struct pnfs_layout_hdr *objlayout_alloc_layout_hdr(struct inode *, gfp_t gfp_flags);
+extern void objlayout_free_layout_hdr(struct pnfs_layout_hdr *);
+
+extern struct pnfs_layout_segment *objlayout_alloc_lseg(
+ struct pnfs_layout_hdr *,
+ struct nfs4_layoutget_res *,
+ gfp_t gfp_flags);
+extern void objlayout_free_lseg(struct pnfs_layout_segment *);
+
+extern enum pnfs_try_status objlayout_read_pagelist(
+ struct nfs_read_data *);
+
+extern enum pnfs_try_status objlayout_write_pagelist(
+ struct nfs_write_data *,
+ int how);
+
+extern void objlayout_encode_layoutcommit(
+ struct pnfs_layout_hdr *,
+ struct xdr_stream *,
+ const struct nfs4_layoutcommit_args *);
+
+extern void objlayout_encode_layoutreturn(
+ struct pnfs_layout_hdr *,
+ struct xdr_stream *,
+ const struct nfs4_layoutreturn_args *);
+
+#endif /* _OBJLAYOUT_H */
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
new file mode 100644
index 0000000..16fc758
--- /dev/null
+++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
@@ -0,0 +1,412 @@
+/*
+ * Object-Based pNFS Layout XDR layer
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/pnfs_osd_xdr.h>
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/*
+ * The following implementation is based on RFC5664
+ */
+
+/*
+ * struct pnfs_osd_objid {
+ * struct nfs4_deviceid oid_device_id;
+ * u64 oid_partition_id;
+ * u64 oid_object_id;
+ * }; // xdr size 32 bytes
+ */
+static __be32 *
+_osd_xdr_decode_objid(__be32 *p, struct pnfs_osd_objid *objid)
+{
+ p = xdr_decode_opaque_fixed(p, objid->oid_device_id.data,
+ sizeof(objid->oid_device_id.data));
+
+ p = xdr_decode_hyper(p, &objid->oid_partition_id);
+ p = xdr_decode_hyper(p, &objid->oid_object_id);
+ return p;
+}
+/*
+ * struct pnfs_osd_opaque_cred {
+ * u32 cred_len;
+ * void *cred;
+ * }; // xdr size [variable]
+ * The return pointers are from the xdr buffer
+ */
+static int
+_osd_xdr_decode_opaque_cred(struct pnfs_osd_opaque_cred *opaque_cred,
+ struct xdr_stream *xdr)
+{
+ __be32 *p = xdr_inline_decode(xdr, 1);
+
+ if (!p)
+ return -EINVAL;
+
+ opaque_cred->cred_len = be32_to_cpu(*p++);
+
+ p = xdr_inline_decode(xdr, opaque_cred->cred_len);
+ if (!p)
+ return -EINVAL;
+
+ opaque_cred->cred = p;
+ return 0;
+}
+
+/*
+ * struct pnfs_osd_object_cred {
+ * struct pnfs_osd_objid oc_object_id;
+ * u32 oc_osd_version;
+ * u32 oc_cap_key_sec;
+ * struct pnfs_osd_opaque_cred oc_cap_key
+ * struct pnfs_osd_opaque_cred oc_cap;
+ * }; // xdr size 32 + 4 + 4 + [variable] + [variable]
+ */
+static int
+_osd_xdr_decode_object_cred(struct pnfs_osd_object_cred *comp,
+ struct xdr_stream *xdr)
+{
+ __be32 *p = xdr_inline_decode(xdr, 32 + 4 + 4);
+ int ret;
+
+ if (!p)
+ return -EIO;
+
+ p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
+ comp->oc_osd_version = be32_to_cpup(p++);
+ comp->oc_cap_key_sec = be32_to_cpup(p);
+
+ ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap_key, xdr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap, xdr);
+ return ret;
+}
+
+/*
+ * struct pnfs_osd_data_map {
+ * u32 odm_num_comps;
+ * u64 odm_stripe_unit;
+ * u32 odm_group_width;
+ * u32 odm_group_depth;
+ * u32 odm_mirror_cnt;
+ * u32 odm_raid_algorithm;
+ * }; // xdr size 4 + 8 + 4 + 4 + 4 + 4
+ */
+static inline int
+_osd_data_map_xdr_sz(void)
+{
+ return 4 + 8 + 4 + 4 + 4 + 4;
+}
+
+static __be32 *
+_osd_xdr_decode_data_map(__be32 *p, struct pnfs_osd_data_map *data_map)
+{
+ data_map->odm_num_comps = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &data_map->odm_stripe_unit);
+ data_map->odm_group_width = be32_to_cpup(p++);
+ data_map->odm_group_depth = be32_to_cpup(p++);
+ data_map->odm_mirror_cnt = be32_to_cpup(p++);
+ data_map->odm_raid_algorithm = be32_to_cpup(p++);
+ dprintk("%s: odm_num_comps=%u odm_stripe_unit=%llu odm_group_width=%u "
+ "odm_group_depth=%u odm_mirror_cnt=%u odm_raid_algorithm=%u\n",
+ __func__,
+ data_map->odm_num_comps,
+ (unsigned long long)data_map->odm_stripe_unit,
+ data_map->odm_group_width,
+ data_map->odm_group_depth,
+ data_map->odm_mirror_cnt,
+ data_map->odm_raid_algorithm);
+ return p;
+}
+
+int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr)
+{
+ __be32 *p;
+
+ memset(iter, 0, sizeof(*iter));
+
+ p = xdr_inline_decode(xdr, _osd_data_map_xdr_sz() + 4 + 4);
+ if (unlikely(!p))
+ return -EINVAL;
+
+ p = _osd_xdr_decode_data_map(p, &layout->olo_map);
+ layout->olo_comps_index = be32_to_cpup(p++);
+ layout->olo_num_comps = be32_to_cpup(p++);
+ iter->total_comps = layout->olo_num_comps;
+ return 0;
+}
+
+bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
+ int *err)
+{
+ BUG_ON(iter->decoded_comps > iter->total_comps);
+ if (iter->decoded_comps == iter->total_comps)
+ return false;
+
+ *err = _osd_xdr_decode_object_cred(comp, xdr);
+ if (unlikely(*err)) {
+ dprintk("%s: _osd_xdr_decode_object_cred=>%d decoded_comps=%d "
+ "total_comps=%d\n", __func__, *err,
+ iter->decoded_comps, iter->total_comps);
+ return false; /* stop the loop */
+ }
+ dprintk("%s: dev(%llx:%llx) par=0x%llx obj=0x%llx "
+ "key_len=%u cap_len=%u\n",
+ __func__,
+ _DEVID_LO(&comp->oc_object_id.oid_device_id),
+ _DEVID_HI(&comp->oc_object_id.oid_device_id),
+ comp->oc_object_id.oid_partition_id,
+ comp->oc_object_id.oid_object_id,
+ comp->oc_cap_key.cred_len, comp->oc_cap.cred_len);
+
+ iter->decoded_comps++;
+ return true;
+}
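+
+/*
+ * Usage sketch, mirroring the caller in objio_alloc_lseg(): decode the map
+ * first, then iterate the components until the iterator is exhausted or an
+ * error is reported:
+ *
+ * err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
+ * ...
+ * while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err))
+ * copy_single_comp(cur_comp++, &src_comp, caps_p++);
+ * if (unlikely(err))
+ * goto err;
+ *
+ * The iterator returns false both at the natural end of the component list
+ * and on a decode error, so *err must be checked after the loop.
+ */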
+
+/*
+ * Get Device Information Decoding
+ *
+ * Note: since Device Information is currently done synchronously, all
+ * variable-length string fields are left inside the rpc buffer and are only
+ * pointed to by the pnfs_osd_deviceaddr members. So the read buffer
+ * should not be freed while the returned information is in use.
+ */
+/*
+ *struct nfs4_string {
+ * unsigned int len;
+ * char *data;
+ *}; // size [variable]
+ * NOTE: Returned string points to inside the XDR buffer
+ */
+static __be32 *
+__read_u8_opaque(__be32 *p, struct nfs4_string *str)
+{
+ str->len = be32_to_cpup(p++);
+ str->data = (char *)p;
+
+ p += XDR_QUADLEN(str->len);
+ return p;
+}
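+
+/*
+ * Note that XDR pads opaque data to a 4-byte boundary: XDR_QUADLEN(len)
+ * rounds the length up to whole 32-bit words, so a 5-byte string, for
+ * example, advances the pointer by two words (8 bytes) past the length word.
+ */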
+
+/*
+ * struct pnfs_osd_targetid {
+ * u32 oti_type;
+ * struct nfs4_string oti_scsi_device_id;
+ * };// size 4 + [variable]
+ */
+static __be32 *
+__read_targetid(__be32 *p, struct pnfs_osd_targetid* targetid)
+{
+ u32 oti_type;
+
+ oti_type = be32_to_cpup(p++);
+ targetid->oti_type = oti_type;
+
+ switch (oti_type) {
+ case OBJ_TARGET_SCSI_NAME:
+ case OBJ_TARGET_SCSI_DEVICE_ID:
+ p = __read_u8_opaque(p, &targetid->oti_scsi_device_id);
+ }
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_net_addr {
+ * struct nfs4_string r_netid;
+ * struct nfs4_string r_addr;
+ * };
+ */
+static __be32 *
+__read_net_addr(__be32 *p, struct pnfs_osd_net_addr* netaddr)
+{
+ p = __read_u8_opaque(p, &netaddr->r_netid);
+ p = __read_u8_opaque(p, &netaddr->r_addr);
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_targetaddr {
+ * u32 ota_available;
+ * struct pnfs_osd_net_addr ota_netaddr;
+ * };
+ */
+static __be32 *
+__read_targetaddr(__be32 *p, struct pnfs_osd_targetaddr *targetaddr)
+{
+ u32 ota_available;
+
+ ota_available = be32_to_cpup(p++);
+ targetaddr->ota_available = ota_available;
+
+ if (ota_available)
+ p = __read_net_addr(p, &targetaddr->ota_netaddr);
+
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_deviceaddr {
+ * struct pnfs_osd_targetid oda_targetid;
+ * struct pnfs_osd_targetaddr oda_targetaddr;
+ * u8 oda_lun[8];
+ * struct nfs4_string oda_systemid;
+ * struct pnfs_osd_object_cred oda_root_obj_cred;
+ * struct nfs4_string oda_osdname;
+ * };
+ */
+
+/* We need this version for the pnfs_osd_xdr_decode_deviceaddr which does
+ * not have an xdr_stream
+ */
+static __be32 *
+__read_opaque_cred(__be32 *p,
+ struct pnfs_osd_opaque_cred *opaque_cred)
+{
+ opaque_cred->cred_len = be32_to_cpu(*p++);
+ opaque_cred->cred = p;
+ return p + XDR_QUADLEN(opaque_cred->cred_len);
+}
+
+static __be32 *
+__read_object_cred(__be32 *p, struct pnfs_osd_object_cred *comp)
+{
+ p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
+ comp->oc_osd_version = be32_to_cpup(p++);
+ comp->oc_cap_key_sec = be32_to_cpup(p++);
+
+ p = __read_opaque_cred(p, &comp->oc_cap_key);
+ p = __read_opaque_cred(p, &comp->oc_cap);
+ return p;
+}
+
+void pnfs_osd_xdr_decode_deviceaddr(
+ struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p)
+{
+ p = __read_targetid(p, &deviceaddr->oda_targetid);
+
+ p = __read_targetaddr(p, &deviceaddr->oda_targetaddr);
+
+ p = xdr_decode_opaque_fixed(p, deviceaddr->oda_lun,
+ sizeof(deviceaddr->oda_lun));
+
+ p = __read_u8_opaque(p, &deviceaddr->oda_systemid);
+
+ p = __read_object_cred(p, &deviceaddr->oda_root_obj_cred);
+
+ p = __read_u8_opaque(p, &deviceaddr->oda_osdname);
+
+ /* libosd likes this string NULL-terminated for debug prints; it is the
+ * last field, so terminating it in place is safe */
+ deviceaddr->oda_osdname.data[deviceaddr->oda_osdname.len] = 0;
+}
+
+/*
+ * struct pnfs_osd_layoutupdate {
+ * u32 dsu_valid;
+ * s64 dsu_delta;
+ * u32 olu_ioerr_flag;
+ * }; xdr size 4 + 8 + 4
+ */
+int
+pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
+ struct pnfs_osd_layoutupdate *lou)
+{
+ __be32 *p = xdr_reserve_space(xdr, 4 + 8 + 4);
+
+ if (!p)
+ return -E2BIG;
+
+ *p++ = cpu_to_be32(lou->dsu_valid);
+ if (lou->dsu_valid)
+ p = xdr_encode_hyper(p, lou->dsu_delta);
+ *p++ = cpu_to_be32(lou->olu_ioerr_flag);
+ return 0;
+}
+
+/*
+ * struct pnfs_osd_objid {
+ * struct nfs4_deviceid oid_device_id;
+ * u64 oid_partition_id;
+ * u64 oid_object_id;
+ * }; // xdr size 32 bytes
+ */
+static inline __be32 *
+pnfs_osd_xdr_encode_objid(__be32 *p, struct pnfs_osd_objid *object_id)
+{
+ p = xdr_encode_opaque_fixed(p, &object_id->oid_device_id.data,
+ sizeof(object_id->oid_device_id.data));
+ p = xdr_encode_hyper(p, object_id->oid_partition_id);
+ p = xdr_encode_hyper(p, object_id->oid_object_id);
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_ioerr {
+ * struct pnfs_osd_objid oer_component;
+ * u64 oer_comp_offset;
+ * u64 oer_comp_length;
+ * u32 oer_iswrite;
+ * u32 oer_errno;
+ * }; // xdr size 32 + 24 bytes
+ */
+void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr)
+{
+ p = pnfs_osd_xdr_encode_objid(p, &ioerr->oer_component);
+ p = xdr_encode_hyper(p, ioerr->oer_comp_offset);
+ p = xdr_encode_hyper(p, ioerr->oer_comp_length);
+ *p++ = cpu_to_be32(ioerr->oer_iswrite);
+ *p = cpu_to_be32(ioerr->oer_errno);
+}
+
+__be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 32 + 24);
+ if (unlikely(!p))
+ dprintk("%s: out of xdr space\n", __func__);
+
+ return p;
+}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index c80add6..7913961 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -204,6 +204,21 @@ nfs_wait_on_request(struct nfs_page *req)
TASK_UNINTERRUPTIBLE);
}
+static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+{
+ /*
+ * FIXME: ideally we should be able to coalesce all requests
+ * that are not block boundary aligned, but currently this
+ * is problematic for the case of bsize < PAGE_CACHE_SIZE,
+ * since nfs_flush_multi and nfs_pagein_multi assume you
+ * can have only one struct nfs_page.
+ */
+ if (desc->pg_bsize < PAGE_SIZE)
+ return false;
+
+ return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
+}
+
/**
* nfs_pageio_init - initialise a page io descriptor
* @desc: pointer to descriptor
@@ -229,6 +244,8 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
+ desc->pg_test = nfs_generic_pg_test;
+ pnfs_pageio_init(desc, inode);
}
/**
@@ -242,29 +259,23 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
*
* Return 'true' if this is the case, else return 'false'.
*/
-static int nfs_can_coalesce_requests(struct nfs_page *prev,
- struct nfs_page *req,
- struct nfs_pageio_descriptor *pgio)
+static bool nfs_can_coalesce_requests(struct nfs_page *prev,
+ struct nfs_page *req,
+ struct nfs_pageio_descriptor *pgio)
{
if (req->wb_context->cred != prev->wb_context->cred)
- return 0;
+ return false;
if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
- return 0;
+ return false;
if (req->wb_context->state != prev->wb_context->state)
- return 0;
+ return false;
if (req->wb_index != (prev->wb_index + 1))
- return 0;
+ return false;
if (req->wb_pgbase != 0)
- return 0;
+ return false;
if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
- return 0;
- /*
- * Non-whole file layouts need to check that req is inside of
- * pgio->pg_lseg.
- */
- if (pgio->pg_test && !pgio->pg_test(pgio, prev, req))
- return 0;
- return 1;
+ return false;
+ return pgio->pg_test(pgio, prev, req);
}
/**
@@ -278,31 +289,18 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev,
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
- size_t newlen = req->wb_bytes;
-
if (desc->pg_count != 0) {
struct nfs_page *prev;
- /*
- * FIXME: ideally we should be able to coalesce all requests
- * that are not block boundary aligned, but currently this
- * is problematic for the case of bsize < PAGE_CACHE_SIZE,
- * since nfs_flush_multi and nfs_pagein_multi assume you
- * can have only one struct nfs_page.
- */
- if (desc->pg_bsize < PAGE_SIZE)
- return 0;
- newlen += desc->pg_count;
- if (newlen > desc->pg_bsize)
- return 0;
prev = nfs_list_entry(desc->pg_list.prev);
if (!nfs_can_coalesce_requests(prev, req, desc))
return 0;
- } else
+ } else {
desc->pg_base = req->wb_pgbase;
+ }
nfs_list_remove_request(req);
nfs_list_add_request(req, &desc->pg_list);
- desc->pg_count = newlen;
+ desc->pg_count += req->wb_bytes;
return 1;
}
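
The new nfs_generic_pg_test() above centralizes the rule that nfs_pageio_do_add_request() previously open-coded: refuse to coalesce when the block size is smaller than a page, otherwise accept the request only if the accumulated byte count stays within pg_bsize. A minimal userspace sketch of that decision (the sizes in main() are illustrative, not taken from the patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct pgio_desc {
	size_t pg_bsize;	/* server block size for this I/O */
	size_t pg_count;	/* bytes already coalesced */
};

static bool generic_pg_test(const struct pgio_desc *desc, size_t req_bytes)
{
	if (desc->pg_bsize < PAGE_SIZE)
		return false;	/* multi-page splitting can't handle it */
	return desc->pg_count + req_bytes <= desc->pg_bsize;
}

int main(void)
{
	struct pgio_desc d = { .pg_bsize = 32768, .pg_count = 28672 };

	printf("add 4096: %d\n", generic_pg_test(&d, 4096));	/* 1: fits exactly */
	printf("add 8192: %d\n", generic_pg_test(&d, 8192));	/* 0: exceeds pg_bsize */
	return 0;
}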
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index f57f528..8c1309d 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,13 +177,28 @@ get_layout_hdr(struct pnfs_layout_hdr *lo)
atomic_inc(&lo->plh_refcount);
}
+static struct pnfs_layout_hdr *
+pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
+ return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
+ kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
+}
+
+static void
+pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
+ if (ld->free_layout_hdr)
+ ld->free_layout_hdr(lo);
+ else
+ kfree(lo);
+}
+
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
dprintk("%s: freeing layout cache %p\n", __func__, lo);
BUG_ON(!list_empty(&lo->plh_layouts));
NFS_I(lo->plh_inode)->layout = NULL;
- kfree(lo);
+ pnfs_free_layout_hdr(lo);
}
static void
@@ -228,7 +243,7 @@ put_lseg_common(struct pnfs_layout_segment *lseg)
{
struct inode *inode = lseg->pls_layout->plh_inode;
- BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
list_del_init(&lseg->pls_list);
if (list_empty(&lseg->pls_layout->plh_segs)) {
set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
@@ -261,11 +276,72 @@ put_lseg(struct pnfs_layout_segment *lseg)
}
EXPORT_SYMBOL_GPL(put_lseg);
+static inline u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+/* last octet in a range */
+static inline u64
+last_byte_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ BUG_ON(!len);
+ end = start + len;
+ return end > start ? end - 1 : NFS4_MAX_UINT64;
+}
+
+/*
+ * is l2 fully contained in l1?
+ * start1 end1
+ * [----------------------------------)
+ * start2 end2
+ * [----------------)
+ */
+static inline int
+lo_seg_contained(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
+{
+ u64 start1 = l1->offset;
+ u64 end1 = end_offset(start1, l1->length);
+ u64 start2 = l2->offset;
+ u64 end2 = end_offset(start2, l2->length);
+
+ return (start1 <= start2) && (end1 >= end2);
+}
+
+/*
+ * do l1 and l2 intersect?
+ * start1 end1
+ * [----------------------------------)
+ * start2 end2
+ * [----------------)
+ */
+static inline int
+lo_seg_intersecting(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
+{
+ u64 start1 = l1->offset;
+ u64 end1 = end_offset(start1, l1->length);
+ u64 start2 = l2->offset;
+ u64 end2 = end_offset(start2, l2->length);
+
+ return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
+ (end2 == NFS4_MAX_UINT64 || end2 > start1);
+}
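
end_offset() deliberately saturates to NFS4_MAX_UINT64 when start + len wraps, so a segment whose length is NFS4_MAX_UINT64 behaves as "from offset to end of file" in the containment and intersection tests. A standalone sketch of the same arithmetic (assuming NFS4_MAX_UINT64 is all-ones, as in the NFS headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~(uint64_t)0)

struct layout_range { uint64_t offset, length; };

static uint64_t end_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;	/* saturate on wrap */
}

static int seg_intersecting(const struct layout_range *l1, const struct layout_range *l2)
{
	uint64_t end1 = end_offset(l1->offset, l1->length);
	uint64_t end2 = end_offset(l2->offset, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > l2->offset) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > l1->offset);
}

int main(void)
{
	struct layout_range whole = { 4096, NFS4_MAX_UINT64 };	/* to EOF */
	struct layout_range early = { 0, 4096 };		/* [0, 4096) */
	struct layout_range later = { 8192, 4096 };		/* [8192, 12288) */

	assert(!seg_intersecting(&whole, &early));	/* ends exactly where whole starts */
	assert(seg_intersecting(&whole, &later));	/* saturated end still covers it */
	printf("range checks passed\n");
	return 0;
}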
+
static bool
-should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
+should_free_lseg(struct pnfs_layout_range *lseg_range,
+ struct pnfs_layout_range *recall_range)
{
- return (recall_iomode == IOMODE_ANY ||
- lseg_iomode == recall_iomode);
+ return (recall_range->iomode == IOMODE_ANY ||
+ lseg_range->iomode == recall_range->iomode) &&
+ lo_seg_intersecting(lseg_range, recall_range);
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
@@ -296,7 +372,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- u32 iomode)
+ struct pnfs_layout_range *recall_range)
{
struct pnfs_layout_segment *lseg, *next;
int invalid = 0, removed = 0;
@@ -309,7 +385,8 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return 0;
}
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
- if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+ if (!recall_range ||
+ should_free_lseg(&lseg->pls_range, recall_range)) {
dprintk("%s: freeing lseg %p iomode %d "
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
@@ -358,7 +435,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
lo = nfsi->layout;
if (lo) {
lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
+ mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
}
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
@@ -467,7 +544,7 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_open_context *ctx,
- u32 iomode,
+ struct pnfs_layout_range *range,
gfp_t gfp_flags)
{
struct inode *ino = lo->plh_inode;
@@ -499,11 +576,11 @@ send_layoutget(struct pnfs_layout_hdr *lo,
goto out_err_free;
}
- lgp->args.minlength = NFS4_MAX_UINT64;
+ lgp->args.minlength = PAGE_CACHE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
- lgp->args.range.iomode = iomode;
- lgp->args.range.offset = 0;
- lgp->args.range.length = NFS4_MAX_UINT64;
+ lgp->args.range = *range;
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
@@ -518,7 +595,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
nfs4_proc_layoutget(lgp);
if (!lseg) {
/* remember that LAYOUTGET failed and suspend trying */
- set_bit(lo_fail_bit(iomode), &lo->plh_flags);
+ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
}
/* free xdr pages */
@@ -542,6 +619,51 @@ out_err_free:
return NULL;
}
+/* Initiates a LAYOUTRETURN(FILE) */
+int
+_pnfs_return_layout(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo = NULL;
+ struct nfs_inode *nfsi = NFS_I(ino);
+ LIST_HEAD(tmp_list);
+ struct nfs4_layoutreturn *lrp;
+ nfs4_stateid stateid;
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ spin_lock(&ino->i_lock);
+ lo = nfsi->layout;
+ if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) {
+ spin_unlock(&ino->i_lock);
+ dprintk("%s: no layout segments to return\n", __func__);
+ goto out;
+ }
+ stateid = nfsi->layout->plh_stateid;
+ /* Reference matched in nfs4_layoutreturn_release */
+ get_layout_hdr(lo);
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+
+ WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
+
+ lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
+ if (unlikely(lrp == NULL)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ lrp->args.stateid = stateid;
+ lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
+ lrp->args.inode = ino;
+ lrp->clp = NFS_SERVER(ino)->nfs_client;
+
+ status = nfs4_proc_layoutreturn(lrp);
+out:
+ dprintk("<-- %s status: %d\n", __func__, status);
+ return status;
+}
+
bool pnfs_roc(struct inode *ino)
{
struct pnfs_layout_hdr *lo;
@@ -625,10 +747,23 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
* are seen first.
*/
static s64
-cmp_layout(u32 iomode1, u32 iomode2)
+cmp_layout(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
{
+ s64 d;
+
+ /* high offset > low offset */
+ d = l1->offset - l2->offset;
+ if (d)
+ return d;
+
+ /* short length > long length */
+ d = l2->length - l1->length;
+ if (d)
+ return d;
+
/* read > read/write */
- return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
+ return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
static void
@@ -636,13 +771,12 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
struct pnfs_layout_segment *lseg)
{
struct pnfs_layout_segment *lp;
- int found = 0;
dprintk("%s:Begin\n", __func__);
assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry(lp, &lo->plh_segs, pls_list) {
- if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
+ if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
continue;
list_add_tail(&lseg->pls_list, &lp->pls_list);
dprintk("%s: inserted lseg %p "
@@ -652,16 +786,14 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
lseg->pls_range.offset, lseg->pls_range.length,
lp, lp->pls_range.iomode, lp->pls_range.offset,
lp->pls_range.length);
- found = 1;
- break;
- }
- if (!found) {
- list_add_tail(&lseg->pls_list, &lo->plh_segs);
- dprintk("%s: inserted lseg %p "
- "iomode %d offset %llu length %llu at tail\n",
- __func__, lseg, lseg->pls_range.iomode,
- lseg->pls_range.offset, lseg->pls_range.length);
+ goto out;
}
+ list_add_tail(&lseg->pls_list, &lo->plh_segs);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu at tail\n",
+ __func__, lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset, lseg->pls_range.length);
+out:
get_layout_hdr(lo);
dprintk("%s:Return\n", __func__);
@@ -672,7 +804,7 @@ alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
struct pnfs_layout_hdr *lo;
- lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
+ lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
if (!lo)
return NULL;
atomic_set(&lo->plh_refcount, 1);
@@ -705,7 +837,7 @@ pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
if (likely(nfsi->layout == NULL)) /* Won the race? */
nfsi->layout = new;
else
- kfree(new);
+ pnfs_free_layout_hdr(new);
return nfsi->layout;
}
@@ -721,16 +853,28 @@ pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
* READ RW true
*/
static int
-is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
+is_matching_lseg(struct pnfs_layout_range *ls_range,
+ struct pnfs_layout_range *range)
{
- return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
+ struct pnfs_layout_range range1;
+
+ if ((range->iomode == IOMODE_RW &&
+ ls_range->iomode != IOMODE_RW) ||
+ !lo_seg_intersecting(ls_range, range))
+ return 0;
+
+ /* range1 covers only the first byte in the range */
+ range1 = *range;
+ range1.length = 1;
+ return lo_seg_contained(ls_range, &range1);
}
/*
* lookup range in layout
*/
static struct pnfs_layout_segment *
-pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
+pnfs_find_lseg(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range)
{
struct pnfs_layout_segment *lseg, *ret = NULL;
@@ -739,11 +883,11 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
- is_matching_lseg(lseg, iomode)) {
+ is_matching_lseg(&lseg->pls_range, range)) {
ret = get_lseg(lseg);
break;
}
- if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
+ if (cmp_layout(range, &lseg->pls_range) > 0)
break;
}
@@ -759,9 +903,17 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
+ loff_t pos,
+ u64 count,
enum pnfs_iomode iomode,
gfp_t gfp_flags)
{
+ struct pnfs_layout_range arg = {
+ .iomode = iomode,
+ .offset = pos,
+ .length = count,
+ };
+ unsigned pg_offset;
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
struct pnfs_layout_hdr *lo;
@@ -789,7 +941,7 @@ pnfs_update_layout(struct inode *ino,
goto out_unlock;
/* Check to see if the layout for the given range already exists */
- lseg = pnfs_find_lseg(lo, iomode);
+ lseg = pnfs_find_lseg(lo, &arg);
if (lseg)
goto out_unlock;
@@ -811,7 +963,14 @@ pnfs_update_layout(struct inode *ino,
spin_unlock(&clp->cl_lock);
}
- lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
+ pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+ if (pg_offset) {
+ arg.offset -= pg_offset;
+ arg.length += pg_offset;
+ }
+ arg.length = PAGE_CACHE_ALIGN(arg.length);
+
+ lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
if (!lseg && first) {
spin_lock(&clp->cl_lock);
list_del_init(&lo->plh_layouts);
@@ -838,17 +997,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
int status = 0;
- /* Verify we got what we asked for.
- * Note that because the xdr parsing only accepts a single
- * element array, this can fail even if the server is behaving
- * correctly.
- */
- if (lgp->args.range.iomode > res->range.iomode ||
- res->range.offset != 0 ||
- res->range.length != NFS4_MAX_UINT64) {
- status = -EINVAL;
- goto out;
- }
/* Inject layout blob into I/O device driver */
lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
if (!lseg || IS_ERR(lseg)) {
@@ -895,51 +1043,64 @@ out_forget_reply:
goto out;
}
-static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
- struct nfs_page *prev,
- struct nfs_page *req)
+bool
+pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
{
+ enum pnfs_iomode access_type;
+ gfp_t gfp_flags;
+
+ /* We assume that pg_ioflags == 0 iff we're reading a page */
+ if (pgio->pg_ioflags == 0) {
+ access_type = IOMODE_READ;
+ gfp_flags = GFP_KERNEL;
+ } else {
+ access_type = IOMODE_RW;
+ gfp_flags = GFP_NOFS;
+ }
+
if (pgio->pg_count == prev->wb_bytes) {
/* This is first coelesce call for a series of nfs_pages */
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
prev->wb_context,
- IOMODE_READ,
- GFP_KERNEL);
+ req_offset(req),
+ pgio->pg_count,
+ access_type,
+ gfp_flags);
+ return true;
}
- return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
-}
-void
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
-{
- struct pnfs_layoutdriver_type *ld;
+ if (pgio->pg_lseg &&
+ req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
+ pgio->pg_lseg->pls_range.length))
+ return false;
- ld = NFS_SERVER(inode)->pnfs_curr_ld;
- pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
+ return true;
}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
-static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
- struct nfs_page *prev,
- struct nfs_page *req)
+/*
+ * Called by non rpc-based layout drivers
+ */
+int
+pnfs_ld_write_done(struct nfs_write_data *data)
{
- if (pgio->pg_count == prev->wb_bytes) {
- /* This is first coelesce call for a series of nfs_pages */
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- prev->wb_context,
- IOMODE_RW,
- GFP_NOFS);
- }
- return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
-}
+ int status;
-void
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
-{
- struct pnfs_layoutdriver_type *ld;
+ if (!data->pnfs_error) {
+ pnfs_set_layoutcommit(data);
+ data->mds_ops->rpc_call_done(&data->task, data);
+ data->mds_ops->rpc_release(data);
+ return 0;
+ }
- ld = NFS_SERVER(inode)->pnfs_curr_ld;
- pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
+ dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
+ data->pnfs_error);
+ status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
+ data->mds_ops, NFS_FILE_SYNC);
+ return status ? : -EAGAIN;
}
+EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
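
The return statement in pnfs_ld_write_done() uses the GNU "x ? : y" form, which evaluates to x when x is non-zero: a failed MDS resend therefore propagates its own error code, while a successfully queued resend is reported to the caller as -EAGAIN. A tiny userspace sketch of that mapping (GNU C extension):

#include <errno.h>
#include <stdio.h>

static int retry_result(int status)
{
	return status ? : -EAGAIN;	/* GNU extension: status ? status : -EAGAIN */
}

int main(void)
{
	printf("resend queued ok -> %d\n", retry_result(0));	/* -EAGAIN */
	printf("resend failed    -> %d\n", retry_result(-EIO));	/* -EIO */
	return 0;
}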
enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
@@ -966,6 +1127,29 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
}
/*
+ * Called by non rpc-based layout drivers
+ */
+int
+pnfs_ld_read_done(struct nfs_read_data *data)
+{
+ int status;
+
+ if (!data->pnfs_error) {
+ __nfs4_read_done_cb(data);
+ data->mds_ops->rpc_call_done(&data->task, data);
+ data->mds_ops->rpc_release(data);
+ return 0;
+ }
+
+ dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
+ data->pnfs_error);
+ status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
+ data->mds_ops);
+ return status ? : -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
+
+/*
* Call the appropriate parallel I/O subsystem read function.
*/
enum pnfs_try_status
@@ -1009,7 +1193,7 @@ void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
struct nfs_inode *nfsi = NFS_I(wdata->inode);
- loff_t end_pos = wdata->args.offset + wdata->res.count;
+ loff_t end_pos = wdata->mds_offset + wdata->res.count;
bool mark_as_dirty = false;
spin_lock(&nfsi->vfs_inode.i_lock);
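
pnfs_update_layout() above now widens the caller's byte range to page boundaries before issuing LAYOUTGET: the offset is rounded down by pg_offset and the length grown and page-aligned, so the requested layout always covers whole pages. A userspace sketch of that arithmetic (assuming a 4096-byte PAGE_CACHE_SIZE for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE	4096ULL
#define PAGE_CACHE_MASK	(~(PAGE_CACHE_SIZE - 1))
#define PAGE_CACHE_ALIGN(len) (((len) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

int main(void)
{
	uint64_t offset = 5000, length = 1000;	/* byte range [5000, 6000) */
	uint64_t pg_offset = offset & ~PAGE_CACHE_MASK;

	if (pg_offset) {
		offset -= pg_offset;	/* round the start down to 4096 */
		length += pg_offset;	/* keep the original end covered */
	}
	length = PAGE_CACHE_ALIGN(length);

	/* prints: offset=4096 length=4096, i.e. the range [4096, 8192) */
	printf("offset=%llu length=%llu\n",
	       (unsigned long long)offset, (unsigned long long)length);
	return 0;
}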
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 0c015ba..48d0a8e 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,6 +30,7 @@
#ifndef FS_NFS_PNFS_H
#define FS_NFS_PNFS_H
+#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
enum {
@@ -64,17 +65,29 @@ enum {
NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
};
+enum layoutdriver_policy_flags {
+ /* Should the pNFS client commit and return the layout upon a setattr */
+ PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
+};
+
+struct nfs4_deviceid_node;
+
/* Per-layout driver specific registration structure */
struct pnfs_layoutdriver_type {
struct list_head pnfs_tblid;
const u32 id;
const char *name;
struct module *owner;
+ unsigned flags;
+
+ struct pnfs_layout_hdr * (*alloc_layout_hdr) (struct inode *inode, gfp_t gfp_flags);
+ void (*free_layout_hdr) (struct pnfs_layout_hdr *);
+
struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
void (*free_lseg) (struct pnfs_layout_segment *lseg);
/* test for nfs page cache coalescing */
- int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
+ bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
/* Returns true if layoutdriver wants to divert this request to
* driver's commit routine.
@@ -89,6 +102,16 @@ struct pnfs_layoutdriver_type {
*/
enum pnfs_try_status (*read_pagelist) (struct nfs_read_data *nfs_data);
enum pnfs_try_status (*write_pagelist) (struct nfs_write_data *nfs_data, int how);
+
+ void (*free_deviceid_node) (struct nfs4_deviceid_node *);
+
+ void (*encode_layoutreturn) (struct pnfs_layout_hdr *layoutid,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args);
+
+ void (*encode_layoutcommit) (struct pnfs_layout_hdr *layoutid,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args);
};
struct pnfs_layout_hdr {
@@ -120,21 +143,22 @@ extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev);
extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
/* pnfs.c */
void get_layout_hdr(struct pnfs_layout_hdr *lo);
void put_lseg(struct pnfs_layout_segment *lseg);
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
- enum pnfs_iomode access_type, gfp_t gfp_flags);
+ loff_t pos, u64 count, enum pnfs_iomode access_type,
+ gfp_t gfp_flags);
void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
void unset_pnfs_layoutdriver(struct nfs_server *);
enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
const struct rpc_call_ops *, int);
enum pnfs_try_status pnfs_try_to_read_data(struct nfs_read_data *,
const struct rpc_call_ops *);
-void pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-void pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *);
+bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
int pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
@@ -148,13 +172,37 @@ int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
struct nfs4_state *open_state);
int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- u32 iomode);
+ struct pnfs_layout_range *recall_range);
bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
+int _pnfs_return_layout(struct inode *);
+int pnfs_ld_write_done(struct nfs_write_data *);
+int pnfs_ld_read_done(struct nfs_read_data *);
+
+/* pnfs_dev.c */
+struct nfs4_deviceid_node {
+ struct hlist_node node;
+ const struct pnfs_layoutdriver_type *ld;
+ const struct nfs_client *nfs_client;
+ struct nfs4_deviceid deviceid;
+ atomic_t ref;
+};
+
+void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
+struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
+struct nfs4_deviceid_node *nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
+void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
+void nfs4_init_deviceid_node(struct nfs4_deviceid_node *,
+ const struct pnfs_layoutdriver_type *,
+ const struct nfs_client *,
+ const struct nfs4_deviceid *);
+struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *);
+bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *);
+void nfs4_deviceid_purge_client(const struct nfs_client *);
static inline int lo_fail_bit(u32 iomode)
{
@@ -223,6 +271,36 @@ static inline void pnfs_clear_request_commit(struct nfs_page *req)
put_lseg(req->wb_commit_lseg);
}
+/* Should the pNFS client commit and return the layout upon a setattr */
+static inline bool
+pnfs_ld_layoutret_on_setattr(struct inode *inode)
+{
+ if (!pnfs_enabled_sb(NFS_SERVER(inode)))
+ return false;
+ return NFS_SERVER(inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_SETATTR;
+}
+
+static inline int pnfs_return_layout(struct inode *ino)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_server *nfss = NFS_SERVER(ino);
+
+ if (pnfs_enabled_sb(nfss) && nfsi->layout)
+ return _pnfs_return_layout(ino);
+
+ return 0;
+}
+
+static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+ if (ld)
+ pgio->pg_test = ld->pg_test;
+}
+
#else /* CONFIG_NFS_V4_1 */
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
@@ -245,7 +323,8 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
static inline struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
- enum pnfs_iomode access_type, gfp_t gfp_flags)
+ loff_t pos, u64 count, enum pnfs_iomode access_type,
+ gfp_t gfp_flags)
{
return NULL;
}
@@ -264,6 +343,17 @@ pnfs_try_to_write_data(struct nfs_write_data *data,
return PNFS_NOT_ATTEMPTED;
}
+static inline int pnfs_return_layout(struct inode *ino)
+{
+ return 0;
+}
+
+static inline bool
+pnfs_ld_layoutret_on_setattr(struct inode *inode)
+{
+ return false;
+}
+
static inline bool
pnfs_roc(struct inode *ino)
{
@@ -294,16 +384,9 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
-static inline void
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *ino)
-{
- pgio->pg_test = NULL;
-}
-
-static inline void
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *ino)
+static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode)
{
- pgio->pg_test = NULL;
}
static inline void
@@ -331,6 +414,10 @@ static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
return 0;
}
+
+static inline void nfs4_deviceid_purge_client(struct nfs_client *ncl)
+{
+}
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
new file mode 100644
index 0000000..c65e133
--- /dev/null
+++ b/fs/nfs/pnfs_dev.c
@@ -0,0 +1,270 @@
+/*
+ * Device operations for the pnfs client.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ * Garth Goodson <Garth.Goodson@netapp.com>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+/*
+ * Device ID RCU cache. A device ID is unique per server and layout type.
+ */
+#define NFS4_DEVICE_ID_HASH_BITS 5
+#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
+#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
+
+static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
+static DEFINE_SPINLOCK(nfs4_deviceid_lock);
+
+void
+nfs4_print_deviceid(const struct nfs4_deviceid *id)
+{
+ u32 *p = (u32 *)id;
+
+ dprintk("%s: device id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+}
+EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
+
+static inline u32
+nfs4_deviceid_hash(const struct nfs4_deviceid *id)
+{
+ unsigned char *cptr = (unsigned char *)id->data;
+ unsigned int nbytes = NFS4_DEVICEID4_SIZE;
+ u32 x = 0;
+
+ while (nbytes--) {
+ x *= 37;
+ x += *cptr++;
+ }
+ return x & NFS4_DEVICE_ID_HASH_MASK;
+}
+
+static struct nfs4_deviceid_node *
+_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id,
+ long hash)
+{
+ struct nfs4_deviceid_node *d;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ if (d->ld == ld && d->nfs_client == clp &&
+ !memcmp(&d->deviceid, id, sizeof(*id))) {
+ if (atomic_read(&d->ref))
+ return d;
+ else
+ continue;
+ }
+ return NULL;
+}
+
+/*
+ * Look up a deviceid in the cache and take a reference on it if found
+ *
+ * @clp nfs_client associated with deviceid
+ * @id deviceid to look up
+ */
+struct nfs4_deviceid_node *
+_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id,
+ long hash)
+{
+ struct nfs4_deviceid_node *d;
+
+ rcu_read_lock();
+ d = _lookup_deviceid(ld, clp, id, hash);
+ if (d && !atomic_inc_not_zero(&d->ref))
+ d = NULL;
+ rcu_read_unlock();
+ return d;
+}
+
+struct nfs4_deviceid_node *
+nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id)
+{
+ return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
+}
+EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
+
+/*
+ * Unhash and put deviceid
+ *
+ * @clp nfs_client associated with deviceid
+ * @id the deviceid to unhash
+ *
+ * @ret the unhashed node if it was found and its reference count dropped to zero, NULL otherwise.
+ */
+struct nfs4_deviceid_node *
+nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id)
+{
+ struct nfs4_deviceid_node *d;
+
+ spin_lock(&nfs4_deviceid_lock);
+ rcu_read_lock();
+ d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
+ rcu_read_unlock();
+ if (!d) {
+ spin_unlock(&nfs4_deviceid_lock);
+ return NULL;
+ }
+ hlist_del_init_rcu(&d->node);
+ spin_unlock(&nfs4_deviceid_lock);
+ synchronize_rcu();
+
+ /* balance the initial ref set in nfs4_init_deviceid_node */
+ if (atomic_dec_and_test(&d->ref))
+ return d;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nfs4_unhash_put_deviceid);
+
+/*
+ * Delete a deviceid from cache
+ *
+ * @clp struct nfs_client qualifying the deviceid
+ * @id deviceid to delete
+ */
+void
+nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id)
+{
+ struct nfs4_deviceid_node *d;
+
+ d = nfs4_unhash_put_deviceid(ld, clp, id);
+ if (!d)
+ return;
+ d->ld->free_deviceid_node(d);
+}
+EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
+
+void
+nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
+ const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *nfs_client,
+ const struct nfs4_deviceid *id)
+{
+ INIT_HLIST_NODE(&d->node);
+ d->ld = ld;
+ d->nfs_client = nfs_client;
+ d->deviceid = *id;
+ atomic_set(&d->ref, 1);
+}
+EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
+
+/*
+ * Insert a deviceid node into the cache unless a matching entry is already hashed
+ *
+ * @new new deviceid node
+ * Note that the caller must set up the following members:
+ * new->ld
+ * new->nfs_client
+ * new->deviceid
+ *
+ * @ret the new node if no matching entry was found, otherwise the existing entry.
+ */
+struct nfs4_deviceid_node *
+nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
+{
+ struct nfs4_deviceid_node *d;
+ long hash;
+
+ spin_lock(&nfs4_deviceid_lock);
+ hash = nfs4_deviceid_hash(&new->deviceid);
+ d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
+ if (d) {
+ spin_unlock(&nfs4_deviceid_lock);
+ return d;
+ }
+
+ hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
+ spin_unlock(&nfs4_deviceid_lock);
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
+
+/*
+ * Dereference a deviceid node and delete it when its reference count drops
+ * to zero.
+ *
+ * @d deviceid node to put
+ *
+ * @ret true iff the node was deleted
+ */
+bool
+nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ if (!atomic_dec_and_lock(&d->ref, &nfs4_deviceid_lock))
+ return false;
+ hlist_del_init_rcu(&d->node);
+ spin_unlock(&nfs4_deviceid_lock);
+ synchronize_rcu();
+ d->ld->free_deviceid_node(d);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
+
+static void
+_deviceid_purge_client(const struct nfs_client *clp, long hash)
+{
+ struct nfs4_deviceid_node *d;
+ struct hlist_node *n, *next;
+ HLIST_HEAD(tmp);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ if (d->nfs_client == clp && atomic_read(&d->ref)) {
+ hlist_del_init_rcu(&d->node);
+ hlist_add_head(&d->node, &tmp);
+ }
+ rcu_read_unlock();
+
+ if (hlist_empty(&tmp))
+ return;
+
+ synchronize_rcu();
+ hlist_for_each_entry_safe(d, n, next, &tmp, node)
+ if (atomic_dec_and_test(&d->ref))
+ d->ld->free_deviceid_node(d);
+}
+
+void
+nfs4_deviceid_purge_client(const struct nfs_client *clp)
+{
+ long h;
+
+ spin_lock(&nfs4_deviceid_lock);
+ for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
+ _deviceid_purge_client(clp, h);
+ spin_unlock(&nfs4_deviceid_lock);
+}
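
The deviceid cache added above hashes the 16 opaque deviceid bytes with a simple multiply-by-37 rolling hash and masks the result down to one of 32 buckets. A standalone sketch of nfs4_deviceid_hash() (userspace, with local constants mirroring the #defines in the new file):

#include <stdint.h>
#include <stdio.h>

#define NFS4_DEVICEID4_SIZE	16
#define HASH_BITS		5
#define HASH_MASK		((1u << HASH_BITS) - 1)

static uint32_t deviceid_hash(const unsigned char *id)
{
	uint32_t x = 0;
	int i;

	for (i = 0; i < NFS4_DEVICEID4_SIZE; i++)
		x = x * 37 + id[i];
	return x & HASH_MASK;	/* bucket index, 0..31 */
}

int main(void)
{
	unsigned char a[NFS4_DEVICEID4_SIZE] = { 0 };
	unsigned char b[NFS4_DEVICEID4_SIZE] = { [15] = 1 };	/* differs only in the last byte */

	printf("bucket(a)=%u bucket(b)=%u\n", deviceid_hash(a), deviceid_hash(b));
	return 0;
}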
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 2bcf0dc..20a7f95 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -288,7 +288,9 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
atomic_set(&req->wb_complete, requests);
BUG_ON(desc->pg_lseg != NULL);
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
+ req_offset(req), desc->pg_count,
+ IOMODE_READ, GFP_KERNEL);
ClearPageError(page);
offset = 0;
nbytes = desc->pg_count;
@@ -351,7 +353,9 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
}
req = nfs_list_entry(data->pages.next);
if ((!lseg) && list_is_singular(&data->pages))
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
+ req_offset(req), desc->pg_count,
+ IOMODE_READ, GFP_KERNEL);
ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
0, lseg);
@@ -660,7 +664,6 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- pnfs_pageio_init_read(&pgio, inode);
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index e288f06..ce40e5c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -63,6 +63,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -732,6 +733,28 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
return 0;
}
+#ifdef CONFIG_NFS_V4_1
+void show_sessions(struct seq_file *m, struct nfs_server *server)
+{
+ if (nfs4_has_session(server->nfs_client))
+ seq_printf(m, ",sessions");
+}
+#else
+void show_sessions(struct seq_file *m, struct nfs_server *server) {}
+#endif
+
+#ifdef CONFIG_NFS_V4_1
+void show_pnfs(struct seq_file *m, struct nfs_server *server)
+{
+ seq_printf(m, ",pnfs=");
+ if (server->pnfs_curr_ld)
+ seq_printf(m, "%s", server->pnfs_curr_ld->name);
+ else
+ seq_printf(m, "not configured");
+}
+#else /* CONFIG_NFS_V4_1 */
+void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
+#endif /* CONFIG_NFS_V4_1 */
static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
{
@@ -792,6 +815,8 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "bm0=0x%x", nfss->attr_bitmask[0]);
seq_printf(m, ",bm1=0x%x", nfss->attr_bitmask[1]);
seq_printf(m, ",acl=0x%x", nfss->acl_bitmask);
+ show_sessions(m, nfss);
+ show_pnfs(m, nfss);
}
#endif
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 49c715b..e268e3b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -939,7 +939,9 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
atomic_set(&req->wb_complete, requests);
BUG_ON(desc->pg_lseg);
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
+ req_offset(req), desc->pg_count,
+ IOMODE_RW, GFP_NOFS);
ClearPageError(page);
offset = 0;
nbytes = desc->pg_count;
@@ -1013,7 +1015,9 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
}
req = nfs_list_entry(data->pages.next);
if ((!lseg) && list_is_singular(&data->pages))
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
+ req_offset(req), desc->pg_count,
+ IOMODE_RW, GFP_NOFS);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
@@ -1032,8 +1036,6 @@ static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
{
size_t wsize = NFS_SERVER(inode)->wsize;
- pnfs_pageio_init_write(pgio, inode);
-
if (wsize < PAGE_CACHE_SIZE)
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
else
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index ad000ae..b9566e4 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1354,12 +1354,6 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
- if (rv)
- goto out;
- rv = check_nfsd_access(exp, rqstp);
- if (rv)
- fh_put(fhp);
-out:
exp_put(exp);
return rv;
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 2247fc9..9095f3c 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -245,7 +245,7 @@ nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
}
/* Now create the file and set attributes */
- nfserr = nfsd_create_v3(rqstp, dirfhp, argp->name, argp->len,
+ nfserr = do_nfsd_create(rqstp, dirfhp, argp->name, argp->len,
attr, newfhp,
argp->createmode, argp->verf, NULL, NULL);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index ad48fac..08c6e36 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -842,7 +842,7 @@ out:
return rv;
}
-__be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
+static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
{
struct svc_fh fh;
int err;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 5fcb139..3a6dbd7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -196,9 +196,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
/*
* Note: create modes (UNCHECKED,GUARDED...) are the same
- * in NFSv4 as in v3.
+ * in NFSv4 as in v3 except EXCLUSIVE4_1.
*/
- status = nfsd_create_v3(rqstp, current_fh, open->op_fname.data,
+ status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
open->op_fname.len, &open->op_iattr,
&resfh, open->op_createmode,
(u32 *)open->op_verf.data,
@@ -403,7 +403,7 @@ nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
putfh->pf_fhlen);
- return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
+ return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
}
static __be32
@@ -762,6 +762,9 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 err;
fh_init(&resfh, NFS4_FHSIZE);
+ err = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_EXEC);
+ if (err)
+ return err;
err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
secinfo->si_name, secinfo->si_namelen,
&exp, &dentry);
@@ -986,6 +989,9 @@ enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */
+ /* For rfc 5661 section 2.6.3.1.1: */
+ OP_HANDLES_WRONGSEC = 1 << 3,
+ OP_IS_PUTFH_LIKE = 1 << 4,
};
struct nfsd4_operation {
@@ -1031,6 +1037,44 @@ static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
return nfs_ok;
}
+static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
+{
+ return &nfsd4_ops[op->opnum];
+}
+
+static bool need_wrongsec_check(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+ struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
+ struct nfsd4_op *next = &argp->ops[resp->opcnt];
+ struct nfsd4_operation *thisd;
+ struct nfsd4_operation *nextd;
+
+ thisd = OPDESC(this);
+ /*
+ * Most ops check wrongsec on their own; only the putfh-like ops
+ * have special rules.
+ */
+ if (!(thisd->op_flags & OP_IS_PUTFH_LIKE))
+ return false;
+ /*
+ * rfc 5661 2.6.3.1.1.6: don't bother erroring out a
+ * put-filehandle operation if we're not going to use the
+ * result:
+ */
+ if (argp->opcnt == resp->opcnt)
+ return false;
+
+ nextd = OPDESC(next);
+ /*
+ * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
+ * errors themselves as necessary; others should check for them
+ * now:
+ */
+ return !(nextd->op_flags & OP_HANDLES_WRONGSEC);
+}
+
/*
* COMPOUND call.
*/
@@ -1108,7 +1152,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
goto encode_op;
}
- opdesc = &nfsd4_ops[op->opnum];
+ opdesc = OPDESC(op);
if (!cstate->current_fh.fh_dentry) {
if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
@@ -1126,6 +1170,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
else
BUG_ON(op->status == nfs_ok);
+ if (!op->status && need_wrongsec_check(rqstp))
+ op->status = check_nfsd_access(cstate->current_fh.fh_export, rqstp);
+
encode_op:
/* Only from SEQUENCE */
if (resp->cstate.status == nfserr_replay_cache) {
@@ -1217,10 +1264,12 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_LOOKUP",
},
[OP_LOOKUPP] = {
.op_func = (nfsd4op_func)nfsd4_lookupp,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_LOOKUPP",
},
[OP_NVERIFY] = {
@@ -1229,6 +1278,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_OPEN] = {
.op_func = (nfsd4op_func)nfsd4_open,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_OPEN",
},
[OP_OPEN_CONFIRM] = {
@@ -1241,17 +1291,20 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_IS_PUTFH_LIKE,
.op_name = "OP_PUTFH",
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_IS_PUTFH_LIKE,
.op_name = "OP_PUTPUBFH",
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_IS_PUTFH_LIKE,
.op_name = "OP_PUTROOTFH",
},
[OP_READ] = {
@@ -1281,15 +1334,18 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_RESTOREFH] = {
.op_func = (nfsd4op_func)nfsd4_restorefh,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_IS_PUTFH_LIKE,
.op_name = "OP_RESTOREFH",
},
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SAVEFH",
},
[OP_SECINFO] = {
.op_func = (nfsd4op_func)nfsd4_secinfo,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO",
},
[OP_SETATTR] = {
@@ -1353,6 +1409,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
+ .op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
},
};
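
The two new op flags work together in need_wrongsec_check(): a WRONGSEC check is injected only after a putfh-like op, only when a following op exists, and only when that following op does not handle WRONGSEC itself. A minimal sketch of the flag logic (userspace, using the same bit values as the enum above; the op pairings in main() are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define OP_HANDLES_WRONGSEC	(1 << 3)
#define OP_IS_PUTFH_LIKE	(1 << 4)

static bool need_wrongsec_check(unsigned this_flags, unsigned next_flags, bool have_next)
{
	if (!(this_flags & OP_IS_PUTFH_LIKE))
		return false;	/* ordinary ops check WRONGSEC themselves */
	if (!have_next)
		return false;	/* result unused, don't bother (RFC 5661 2.6.3.1.1.6) */
	return !(next_flags & OP_HANDLES_WRONGSEC);
}

int main(void)
{
	/* PUTFH followed by READ: READ doesn't handle WRONGSEC, so check now. */
	printf("PUTFH+READ   -> %d\n", need_wrongsec_check(OP_IS_PUTFH_LIKE, 0, true));
	/* PUTFH followed by LOOKUP: LOOKUP handles WRONGSEC itself, so skip. */
	printf("PUTFH+LOOKUP -> %d\n",
	       need_wrongsec_check(OP_IS_PUTFH_LIKE, OP_HANDLES_WRONGSEC, true));
	return 0;
}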
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4cf04e1..e98f3c2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1519,6 +1519,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
bool confirm_me = false;
int status = 0;
+ if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
+ return nfserr_inval;
+
nfs4_lock_state();
unconf = find_unconfirmed_client(&cr_ses->clientid);
conf = find_confirmed_client(&cr_ses->clientid);
@@ -1637,8 +1640,9 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
return nfserr_badsession;
status = nfsd4_map_bcts_dir(&bcts->dir);
- nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
- return nfs_ok;
+ if (!status)
+ nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
+ return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
@@ -1725,6 +1729,13 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
return;
}
+static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
+{
+ struct nfsd4_compoundargs *args = rqstp->rq_argp;
+
+ return args->opcnt > session->se_fchannel.maxops;
+}
+
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -1753,6 +1764,10 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (!session)
goto out;
+ status = nfserr_too_many_ops;
+ if (nfsd4_session_too_many_ops(rqstp, session))
+ goto out;
+
status = nfserr_badslot;
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out;
@@ -1808,6 +1823,8 @@ out:
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
+ int status = 0;
+
if (rc->rca_one_fs) {
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
@@ -1817,9 +1834,14 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
*/
return nfs_ok;
}
+
nfs4_lock_state();
- if (is_client_expired(cstate->session->se_client)) {
- nfs4_unlock_state();
+ status = nfserr_complete_already;
+ if (cstate->session->se_client->cl_firststate)
+ goto out;
+
+ status = nfserr_stale_clientid;
+ if (is_client_expired(cstate->session->se_client))
/*
* The following error isn't really legal.
* But we only get here if the client just explicitly
@@ -1827,11 +1849,13 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
* error it gets back on an operation for the dead
* client.
*/
- return nfserr_stale_clientid;
- }
+ goto out;
+
+ status = nfs_ok;
nfsd4_create_clid_dir(cstate->session->se_client);
+out:
nfs4_unlock_state();
- return nfs_ok;
+ return status;
}
__be32
@@ -2462,7 +2486,7 @@ find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
return NULL;
}
-int share_access_to_flags(u32 share_access)
+static int share_access_to_flags(u32 share_access)
{
share_access &= ~NFS4_SHARE_WANT_MASK;
@@ -2882,7 +2906,7 @@ out:
return status;
}
-struct lock_manager nfsd4_manager = {
+static struct lock_manager nfsd4_manager = {
};
static void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c6766af..9901811 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -424,15 +424,12 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
DECODE_HEAD;
- u32 dummy;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
READ32(bcts->dir);
- /* XXX: Perhaps Tom Tucker could help us figure out how we
- * should be using ctsa_use_conn_in_rdma_mode: */
- READ32(dummy);
-
+ /* XXX: skipping ctsa_use_conn_in_rdma_mode. Perhaps Tom Tucker
+ * could help us figure out how we should be using it. */
DECODE_TAIL;
}
@@ -588,8 +585,6 @@ nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
READ_BUF(lockt->lt_owner.len);
READMEM(lockt->lt_owner.data, lockt->lt_owner.len);
- if (argp->minorversion && !zero_clientid(&lockt->lt_clientid))
- return nfserr_inval;
DECODE_TAIL;
}
@@ -3120,7 +3115,7 @@ nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr,
return nfserr;
}
-__be32
+static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_sequence *seq)
{
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 55c8e63..90c6aa6 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -344,7 +344,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
* which clients virtually always use auth_sys for,
* even while using RPCSEC_GSS for NFS.
*/
- if (access & NFSD_MAY_LOCK)
+ if (access & NFSD_MAY_LOCK || access & NFSD_MAY_BYPASS_GSS)
goto skip_pseudoflavor_check;
/*
* Clients may expect to be able to use auth_sys during mount,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 129f3c9..d571827 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -181,16 +181,10 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct svc_export *exp;
struct dentry *dparent;
struct dentry *dentry;
- __be32 err;
int host_err;
dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
- /* Obtain dentry and export. */
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
- if (err)
- return err;
-
dparent = fhp->fh_dentry;
exp = fhp->fh_export;
exp_get(exp);
@@ -254,6 +248,9 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
struct dentry *dentry;
__be32 err;
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
+ if (err)
+ return err;
err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
if (err)
return err;
@@ -877,13 +874,11 @@ static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
- struct inode *inode;
mm_segment_t oldfs;
__be32 err;
int host_err;
err = nfserr_perm;
- inode = file->f_path.dentry->d_inode;
if (file->f_op->splice_read && rqstp->rq_splice_ok) {
struct splice_desc sd = {
@@ -1340,11 +1335,18 @@ out_nfserr:
}
#ifdef CONFIG_NFSD_V3
+
+static inline int nfsd_create_is_exclusive(int createmode)
+{
+ return createmode == NFS3_CREATE_EXCLUSIVE
+ || createmode == NFS4_CREATE_EXCLUSIVE4_1;
+}
+
/*
- * NFSv3 version of nfsd_create
+ * NFSv3 and NFSv4 version of nfsd_create
*/
__be32
-nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
+do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
struct svc_fh *resfhp, int createmode, u32 *verifier,
int *truncp, int *created)
@@ -1396,7 +1398,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (err)
goto out;
- if (createmode == NFS3_CREATE_EXCLUSIVE) {
+ if (nfsd_create_is_exclusive(createmode)) {
/* solaris7 gets confused (bugid 4218508) if these have
* the high bit set, so just clear the high bits. If this is
* ever changed to use different attrs for storing the
@@ -1437,6 +1439,11 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
&& dchild->d_inode->i_atime.tv_sec == v_atime
&& dchild->d_inode->i_size == 0 )
break;
+ case NFS4_CREATE_EXCLUSIVE4_1:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+ && dchild->d_inode->i_size == 0 )
+ goto set_attr;
/* fallthru */
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
@@ -1455,7 +1462,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_check_ignore_resizing(iap);
- if (createmode == NFS3_CREATE_EXCLUSIVE) {
+ if (nfsd_create_is_exclusive(createmode)) {
/* Cram the verifier into atime/mtime */
iap->ia_valid = ATTR_MTIME|ATTR_ATIME
| ATTR_MTIME_SET|ATTR_ATIME_SET;
@@ -2034,7 +2041,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
struct inode *inode = dentry->d_inode;
int err;
- if (acc == NFSD_MAY_NOP)
+ if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
return 0;
#if 0
dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 9a370a5..e0bbac0 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -17,10 +17,14 @@
#define NFSD_MAY_SATTR 8
#define NFSD_MAY_TRUNC 16
#define NFSD_MAY_LOCK 32
+#define NFSD_MAY_MASK 63
+
+/* extra hints to permission and open routines: */
#define NFSD_MAY_OWNER_OVERRIDE 64
#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
#define NFSD_MAY_NOT_BREAK_LEASE 512
+#define NFSD_MAY_BYPASS_GSS 1024
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
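
NFSD_MAY_MASK (63) covers only the real permission bits; the hint bits above it, including the new NFSD_MAY_BYPASS_GSS, have to be masked off before comparing against NFSD_MAY_NOP, which is what the nfsd_permission() change in the vfs.c hunk above does. A quick sketch (userspace; the flag combination in main() is illustrative):

#include <stdio.h>

#define NFSD_MAY_NOP		0
#define NFSD_MAY_MASK		63
#define NFSD_MAY_BYPASS_GSS	1024

int main(void)
{
	int acc = NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS;	/* permission NOP plus a hint bit */

	/* Without masking, the hint bit defeats the NOP fast path. */
	printf("acc == NOP:          %d\n", acc == NFSD_MAY_NOP);
	printf("(acc & MASK) == NOP: %d\n", (acc & NFSD_MAY_MASK) == NFSD_MAY_NOP);
	return 0;
}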
@@ -54,7 +58,7 @@ __be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
int type, dev_t rdev, struct svc_fh *res);
#ifdef CONFIG_NFSD_V3
__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
-__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
+__be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
u32 *verifier, int *truncp, int *created);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 587f184..b954878 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -917,7 +917,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
* construction. This function can be called both as a single operation
* and as a part of indivisible file operations.
*/
-void nilfs_dirty_inode(struct inode *inode)
+void nilfs_dirty_inode(struct inode *inode, int flags)
{
struct nilfs_transaction_info ti;
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1102a5f..546849b 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -334,8 +334,6 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
struct nilfs_transaction_info ti;
int err;
- dentry_unhash(dentry);
-
err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
if (err)
return err;
@@ -371,9 +369,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct nilfs_transaction_info ti;
int err;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
if (unlikely(err))
return err;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index a9c6a53..f02b9ad 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -269,7 +269,7 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
extern int nilfs_inode_dirty(struct inode *);
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty);
extern int nilfs_mark_inode_dirty(struct inode *);
-extern void nilfs_dirty_inode(struct inode *);
+extern void nilfs_dirty_inode(struct inode *, int flags);
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index c368360c..3b8d397 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -241,11 +241,9 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry)
int ret;
- if (S_ISDIR(inode->i_mode)) {
- dentry_unhash(dentry);
- if (!omfs_dir_is_empty(inode))
- return -ENOTEMPTY;
- }
+ if (S_ISDIR(inode->i_mode) &&
+ !omfs_dir_is_empty(inode))
+ return -ENOTEMPTY;
ret = omfs_delete_entry(dentry);
if (ret)
@@ -382,9 +380,6 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int err;
if (new_inode) {
- if (S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* overwriting existing file/dir */
err = omfs_remove(new_dir, new_dentry);
if (err)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4ede550..14def99 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -83,6 +83,9 @@
#include <linux/pid_namespace.h>
#include <linux/fs_struct.h>
#include <linux/slab.h>
+#ifdef CONFIG_HARDWALL
+#include <asm/hardwall.h>
+#endif
#include "internal.h"
/* NOTE:
@@ -2842,6 +2845,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUGO, proc_tgid_io_accounting),
#endif
+#ifdef CONFIG_HARDWALL
+ INF("hardwall", S_IRUGO, proc_pid_hardwall),
+#endif
};
static int proc_tgid_base_readdir(struct file * filp,
@@ -3181,6 +3187,9 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUGO, proc_tid_io_accounting),
#endif
+#ifdef CONFIG_HARDWALL
+ INF("hardwall", S_IRUGO, proc_pid_hardwall),
+#endif
};
static int proc_tid_base_readdir(struct file * filp,
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 76c8164..1186626 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -831,8 +831,6 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
INITIALIZE_PATH(path);
struct reiserfs_dir_entry de;
- dentry_unhash(dentry);
-
/* we will be doing 2 balancings and update 2 stat data, we change quotas
* of the owner of the directory and of the owner of the parent directory.
* The quota structure is possibly deleted only on last iput => outside
@@ -1227,9 +1225,6 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned long savelink = 1;
struct timespec ctime;
- if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
- dentry_unhash(new_dentry);
-
/* three balancings: (1) old name removal, (2) new name insertion
and (3) maybe "save" link insertion
stat data updates: (1) old directory,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b216ff6..aa91089 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -568,7 +568,7 @@ static void destroy_inodecache(void)
}
/* we don't mark inodes dirty, we just log them */
-static void reiserfs_dirty_inode(struct inode *inode)
+static void reiserfs_dirty_inode(struct inode *inode, int flags)
{
struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 50f1abc..e8a62f4 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -98,7 +98,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
I_MUTEX_CHILD, dir->i_sb);
- dentry_unhash(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
dentry->d_inode->i_flags |= S_DEAD;
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 730c562..5e1101f 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -147,7 +147,7 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
* table[0] points to the first inode lookup table metadata block,
* this should be less than lookup_table_start
*/
- if (!IS_ERR(table) && table[0] >= lookup_table_start) {
+ if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
kfree(table);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 1516a649..0ed6edb 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -90,7 +90,7 @@ __le64 *squashfs_read_fragment_index_table(struct super_block *sb,
* table[0] points to the first fragment table metadata block, this
* should be less than fragment_table_start
*/
- if (!IS_ERR(table) && table[0] >= fragment_table_start) {
+ if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) {
kfree(table);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index a70858e..d38ea3d 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -93,7 +93,7 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
* table[0] points to the first id lookup table metadata block, this
* should be less than id_table_start
*/
- if (!IS_ERR(table) && table[0] >= id_table_start) {
+ if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
kfree(table);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 6f26abe..7438850 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -245,7 +245,7 @@ allocate_id_index_table:
msblk->id_table = NULL;
goto failed_mount;
}
- next_table = msblk->id_table[0];
+ next_table = le64_to_cpu(msblk->id_table[0]);
/* Handle inode lookup table */
lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
@@ -261,7 +261,7 @@ allocate_id_index_table:
msblk->inode_lookup_table = NULL;
goto failed_mount;
}
- next_table = msblk->inode_lookup_table[0];
+ next_table = le64_to_cpu(msblk->inode_lookup_table[0]);
sb->s_export_op = &squashfs_export_ops;
@@ -286,7 +286,7 @@ handle_fragments:
msblk->fragment_index = NULL;
goto failed_mount;
}
- next_table = msblk->fragment_index[0];
+ next_table = le64_to_cpu(msblk->fragment_index[0]);
check_directory_table:
/* Sanity check directory_table */
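The three squashfs hunks above fix the same class of bug: the on-disk index tables hold little-endian (__le64) values, so comparing a raw entry against a CPU-order offset only happens to work on little-endian hosts. A minimal sketch of the corrected pattern follows; the function and parameter names are illustrative, not squashfs code.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Sketch: validate the first metadata-block pointer of an on-disk table.
 * le64_to_cpu() makes the comparison correct on big-endian hosts too. */
static int check_first_block(const __le64 *table, u64 table_start)
{
        if (le64_to_cpu(table[0]) >= table_start)
                return -EINVAL; /* first metadata block lies beyond the table */
        return 0;
}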
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index e2cc675..e474fbc 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -196,8 +196,6 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
struct inode *inode = dentry->d_inode;
int err = -ENOTEMPTY;
- dentry_unhash(dentry);
-
if (sysv_empty_dir(inode)) {
err = sysv_unlink(dir, dentry);
if (!err) {
@@ -224,9 +222,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
struct sysv_dir_entry * old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = sysv_find_entry(old_dentry, &old_page);
if (!old_de)
goto out;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c2b8094..ef5abd3 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -656,8 +656,6 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
struct ubifs_inode *dir_ui = ubifs_inode(dir);
struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
- dentry_unhash(dentry);
-
/*
* Budget request settings: deletion direntry, deletion inode and
* changing the parent inode. If budgeting fails, go ahead anyway
@@ -978,9 +976,6 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct timespec time;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
/*
* Budget request settings: deletion direntry, new direntry, removing
* the old inode, and changing old and new parent directory inodes.
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 46961c0..ca953a9 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,8 +277,9 @@ static int kick_a_thread(void)
return 0;
}
-int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
+ int nr = sc->nr_to_scan;
int freed, contention = 0;
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
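The hunk above follows the VFS shrinker API change in which the nr_to_scan/gfp_mask pair is passed through struct shrink_control. A hedged sketch of a callback written against the new signature (the example_* helpers are hypothetical, not UBIFS code):

#include <linux/mm.h>   /* struct shrinker, struct shrink_control */

static int example_count_objects(void);        /* hypothetical cache-size helper */
static int example_free_objects(int nr);       /* hypothetical reclaim helper */

/* Hypothetical cache shrinker using the shrink_control interface. */
static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr = sc->nr_to_scan;

        if (!nr)                        /* query pass: just report cache size */
                return example_count_objects();
        if (!(sc->gfp_mask & __GFP_FS))
                return -1;              /* cannot recurse into the filesystem */
        example_free_objects(nr);
        return example_count_objects();
}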
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6db0bdaa..1ab0d22 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -382,7 +382,7 @@ done:
end_writeback(inode);
}
-static void ubifs_dirty_inode(struct inode *inode)
+static void ubifs_dirty_inode(struct inode *inode, int flags)
{
struct ubifs_inode *ui = ubifs_inode(inode);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 93d1412..a70d7b4 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1614,7 +1614,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int ubifs_tnc_end_commit(struct ubifs_info *c);
/* shrinker.c */
-int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc);
/* commit.c */
int ubifs_bg_thread(void *info);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 4d76594..f1dce84 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -783,8 +783,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
struct fileIdentDesc *fi, cfi;
struct kernel_lb_addr tloc;
- dentry_unhash(dentry);
-
retval = -ENOENT;
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
@@ -1083,9 +1081,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
if (ofi) {
if (ofibh.sbh != ofibh.ebh)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 953ebdf..29309e2 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -258,8 +258,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
struct inode * inode = dentry->d_inode;
int err= -ENOTEMPTY;
- dentry_unhash(dentry);
-
lock_ufs(dir->i_sb);
if (ufs_empty_dir (inode)) {
err = ufs_unlink(dir, dentry);
@@ -284,9 +282,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ufs_dir_entry *old_de;
int err = -ENOENT;
- if (new_inode && S_ISDIR(new_inode->i_mode))
- dentry_unhash(new_dentry);
-
old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
diff --git a/fs/xattr.c b/fs/xattr.c
index f1ef949..f060663 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -46,18 +46,22 @@ xattr_permission(struct inode *inode, const char *name, int mask)
return 0;
/*
- * The trusted.* namespace can only be accessed by a privileged user.
+ * The trusted.* namespace can only be accessed by privileged users.
*/
- if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
- return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
+ return 0;
+ }
- /* In user.* namespace, only regular files and directories can have
+ /*
+ * In the user.* namespace, only regular files and directories can have
* extended attributes. For sticky directories, only the owner and
- * privileged user can write attributes.
+ * privileged users can write attributes.
*/
if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -EPERM;
+ return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
(mask & MAY_WRITE) && !inode_owner_or_capable(inode))
return -EPERM;
@@ -87,7 +91,11 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
{
struct inode *inode = dentry->d_inode;
int error = -EOPNOTSUPP;
+ int issec = !strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN);
+ if (issec)
+ inode->i_flags &= ~S_NOSEC;
if (inode->i_op->setxattr) {
error = inode->i_op->setxattr(dentry, name, value, size, flags);
if (!error) {
@@ -95,8 +103,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
security_inode_post_setxattr(dentry, name, value,
size, flags);
}
- } else if (!strncmp(name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN)) {
+ } else if (issec) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
error = security_inode_setsecurity(inode, suffix, value,
size, flags);
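The xattr_permission() change above distinguishes reads from writes, so an unprivileged getxattr() on trusted.* (or on user.* for special files) now reports "no such attribute" instead of "permission denied", while writes still fail with -EPERM. Roughly, the new error selection boils down to the following sketch (standalone illustration, not the kernel function itself):

#include <linux/fs.h>           /* MAY_WRITE */
#include <linux/capability.h>
#include <linux/errno.h>

/* Sketch: error returned to an unprivileged caller touching trusted.* */
static int trusted_xattr_denied(int mask)
{
        if (capable(CAP_SYS_ADMIN))
                return 0;
        return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
}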
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 98b9c91..1e3a7ce 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -925,7 +925,8 @@ xfs_fs_inode_init_once(
*/
STATIC void
xfs_fs_dirty_inode(
- struct inode *inode)
+ struct inode *inode,
+ int flags)
{
barrier();
XFS_I(inode)->i_update_core = 1;
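The nilfs2, reiserfs, ext3, ubifs and xfs prototype changes in this patch all track the same interface change: ->dirty_inode() now receives the I_DIRTY_* flags that __mark_inode_dirty() was called with. A hypothetical implementation (not taken from any of these filesystems) might only start a journal update for metadata dirtying:

#include <linux/fs.h>

static void example_log_inode(struct inode *inode);     /* hypothetical journal helper */

/* Hypothetical ->dirty_inode(): ignore pure data-sync events. */
static void example_dirty_inode(struct inode *inode, int flags)
{
        if (!(flags & I_DIRTY_SYNC))
                return;                 /* only data-sync state changed */
        example_log_inode(inode);
}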
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index a3252a5..a756bc8 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -98,6 +98,9 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
/*
* Spinlock primitives
*/
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle);
+
void acpi_os_delete_lock(acpi_spinlock handle);
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
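acpi_os_create_lock() joins the existing acquire/release/delete primitives declared here. A hedged lifecycle sketch, where everything except the acpi_os_* calls is illustrative:

#include <acpi/acpi.h>

static acpi_spinlock example_lock;

static acpi_status example_init(void)
{
        return acpi_os_create_lock(&example_lock);
}

static void example_critical_section(void)
{
        acpi_cpu_flags flags = acpi_os_acquire_lock(example_lock);
        /* ... touch data protected by example_lock ... */
        acpi_os_release_lock(example_lock, flags);
}

static void example_exit(void)
{
        acpi_os_delete_lock(example_lock);
}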
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index f6ad63d..2ed0a84 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20110316
+#define ACPI_CA_VERSION 0x20110413
#include "actypes.h"
#include "actbl.h"
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 64f838b..b67231b 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -501,8 +501,9 @@ typedef u64 acpi_integer;
#define ACPI_STATE_D1 (u8) 1
#define ACPI_STATE_D2 (u8) 2
#define ACPI_STATE_D3 (u8) 3
-#define ACPI_D_STATES_MAX ACPI_STATE_D3
-#define ACPI_D_STATE_COUNT 4
+#define ACPI_STATE_D3_COLD (u8) 4
+#define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD
+#define ACPI_D_STATE_COUNT 5
#define ACPI_STATE_C0 (u8) 0
#define ACPI_STATE_C1 (u8) 1
@@ -712,8 +713,24 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
-#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8
-#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127
+
+#define ACPI_NUM_PREDEFINED_REGIONS 8
+
+/*
+ * Special Address Spaces
+ *
+ * Note: A Data Table region is a special type of operation region
+ * that has its own AML opcode. However, internally, the AML
+ * interpreter simply creates an operation region with an address
+ * space type of ACPI_ADR_SPACE_DATA_TABLE.
+ */
+#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */
+#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F
+
+/* Values for _REG connection code */
+
+#define ACPI_REG_DISCONNECT 0
+#define ACPI_REG_CONNECT 1
/*
* bit_register IDs
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 55192ac..ba4928c 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -310,14 +310,7 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
/* in processor_core.c */
void acpi_processor_set_pdc(acpi_handle handle);
-#ifdef CONFIG_SMP
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
-#else
-static inline int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
-{
- return -1;
-}
-#endif
/* in processor_throttling.c */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ff5c660..fcdcb5d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -35,9 +35,9 @@
* platform data and other tables.
*/
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
- return ((unsigned)number) < ARCH_NR_GPIOS;
+ return number >= 0 && number < ARCH_NR_GPIOS;
}
struct device;
@@ -193,8 +193,8 @@ struct gpio {
};
extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(struct gpio *array, size_t num);
-extern void gpio_free_array(struct gpio *array, size_t num);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
#ifdef CONFIG_GPIO_SYSFS
@@ -212,7 +212,7 @@ extern void gpio_unexport(unsigned gpio);
#else /* !CONFIG_GPIOLIB */
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
return number >= 0;
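gpio_request_array()/gpio_free_array() now take a const table, so board code can keep its GPIO descriptions in rodata, and gpio_is_valid() returns bool. A small usage sketch with made-up pin numbers and labels:

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical board table; const is now accepted by the array helpers. */
static const struct gpio example_gpios[] = {
        { 42, GPIOF_OUT_INIT_LOW, "example-led"    },
        { 43, GPIOF_IN,           "example-button" },
};

static int example_claim_gpios(void)
{
        if (!gpio_is_valid(example_gpios[0].gpio))
                return -EINVAL;
        return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
}

static void example_release_gpios(void)
{
        gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
}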
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 33d52470..ae90e0f 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -681,9 +681,11 @@ __SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
#undef __NR_syscalls
-#define __NR_syscalls 268
+#define __NR_syscalls 269
/*
* All syscalls below here should go away really,
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a2e910e..1deb2a7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -150,8 +150,7 @@ extern int ec_read(u8 addr, u8 *val);
extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
const u8 *wdata, unsigned wdata_len,
- u8 *rdata, unsigned rdata_len,
- int force_poll);
+ u8 *rdata, unsigned rdata_len);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 96c038e..ee456c7 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,4 +34,17 @@ static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
}
#endif
+#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
+static inline void atomic_or(int i, atomic_t *v)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(v);
+ new = old | i;
+ } while (atomic_cmpxchg(v, old, new) != old);
+}
+#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+
#endif /* _LINUX_ATOMIC_H */
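The fallback added above gives every architecture an atomic_or() built on atomic_cmpxchg(), retrying until no other CPU raced the update. A hedged usage sketch with made-up flag bits:

#include <linux/atomic.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_DIRTY      (1 << 0)
#define EXAMPLE_FLAG_URGENT     (1 << 1)

static atomic_t example_flags = ATOMIC_INIT(0);

static void example_mark_dirty(void)
{
        /* Set the bit atomically; the cmpxchg loop retries on contention. */
        atomic_or(EXAMPLE_FLAG_DIRTY, &example_flags);
}

static bool example_is_dirty(void)
{
        return atomic_read(&example_flags) & EXAMPLE_FLAG_DIRTY;
}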
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f20eb8f..e9eaec5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,7 +146,7 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ do_set_cpus_allowed(p, cpu_possible_mask);
return cpumask_any(cpu_active_mask);
}
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 32a4423..4427e04 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -191,6 +191,12 @@ struct dm_target {
/* Used to provide an error string from the ctr */
char *error;
+
+ /*
+ * Set if this target needs to receive discards regardless of
+ * whether or not its underlying devices have support.
+ */
+ unsigned discards_supported:1;
};
/* Each target can link one of these into the table */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index 5c9186b..f4b0aa3 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -69,8 +69,7 @@ struct dm_io_request {
*
* Create/destroy may block.
*/
-struct dm_io_client *dm_io_client_create(unsigned num_pages);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+struct dm_io_client *dm_io_client_create(void);
void dm_io_client_destroy(struct dm_io_client *client);
/*
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 5db2163..298d587 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -25,8 +25,7 @@
* To use kcopyd you must first create a dm_kcopyd_client object.
*/
struct dm_kcopyd_client;
-int dm_kcopyd_client_create(unsigned num_pages,
- struct dm_kcopyd_client **result);
+struct dm_kcopyd_client *dm_kcopyd_client_create(void);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
/*
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 5619f85..bbd8661 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -9,8 +9,12 @@
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
+#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)
#define CONTEXT_TT_MULTI_LEVEL 0
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 6998d93..4bfe0a2 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 33fa120..e376270 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern unsigned long efi_get_time(void);
extern int efi_set_rtc_mmss(unsigned long nowtime);
+extern void efi_reserve_boot_services(void);
extern struct efi_memory_map memmap;
/**
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 85c1d30..5e06acf 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -909,7 +909,7 @@ extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_evict_inode (struct inode *);
extern int ext3_sync_inode (handle_t *, struct inode *);
extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *);
+extern void ext3_dirty_inode(struct inode *, int);
extern int ext3_change_inode_journal_flag(struct inode *, int);
extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
extern int ext3_can_truncate(struct inode *inode);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2416093..c55d6b7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -237,6 +237,7 @@ struct inodes_stat_t {
#define S_PRIVATE 512 /* Inode is fs-internal */
#define S_IMA 1024 /* Inode has an associated IMA struct */
#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
+#define S_NOSEC 4096 /* no suid or xattr security attributes */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -273,6 +274,7 @@ struct inodes_stat_t {
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
+#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
@@ -1618,7 +1620,7 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*dirty_inode) (struct inode *);
+ void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
@@ -2582,5 +2584,16 @@ int __init get_filesystem_list(char *buf);
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
(flag & __FMODE_NONOTIFY)))
+static inline int is_sxid(mode_t mode)
+{
+ return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+}
+
+static inline void inode_has_no_xattr(struct inode *inode)
+{
+ if (!is_sxid(inode->i_mode))
+ inode->i_flags |= S_NOSEC;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
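S_NOSEC caches the fact that an inode has neither set[ug]id bits nor security-relevant xattrs, so hot write paths can skip suid/security stripping; __vfs_setxattr_noperm() above clears the flag again when a security.* attribute appears. A hedged sketch of how a write path might consult it (the helper name and surrounding flow are illustrative):

#include <linux/fs.h>

/* Hypothetical fast path: only do the expensive suid/security stripping
 * when the inode has not already been marked S_NOSEC. */
static int example_prepare_write(struct file *file)
{
        struct inode *inode = file->f_path.dentry->d_inode;

        if (IS_NOSEC(inode))
                return 0;               /* nothing to strip, result is cached */

        /* ... call file_remove_suid()/security hooks here ... */

        inode_has_no_xattr(inode);      /* re-arm the cache if safe */
        return 0;
}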
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b5a550a..59d3ef1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -16,6 +16,11 @@ struct trace_print_flags {
const char *name;
};
+struct trace_print_flags_u64 {
+ unsigned long long mask;
+ const char *name;
+};
+
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);
@@ -23,6 +28,13 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array);
+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
+ *symbol_array);
+#endif
+
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32720ba..32d47e7 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -25,9 +25,9 @@ struct gpio_chip;
* warning when something is wrongly called.
*/
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
- return 0;
+ return false;
}
static inline int gpio_request(unsigned gpio, const char *label)
@@ -41,7 +41,7 @@ static inline int gpio_request_one(unsigned gpio,
return -ENOSYS;
}
-static inline int gpio_request_array(struct gpio *array, size_t num)
+static inline int gpio_request_array(const struct gpio *array, size_t num)
{
return -ENOSYS;
}
@@ -54,7 +54,7 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void gpio_free_array(struct gpio *array, size_t num)
+static inline void gpio_free_array(const struct gpio *array, size_t num)
{
might_sleep();
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2a78aae..027935c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -264,6 +264,8 @@ struct mm_struct {
struct linux_binfmt *binfmt;
+ cpumask_var_t cpu_vm_mask_var;
+
/* Architecture-specific MM context */
mm_context_t context;
@@ -311,10 +313,18 @@ struct mm_struct {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
-
- cpumask_var_t cpu_vm_mask_var;
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ struct cpumask cpumask_allocation;
+#endif
};
+static inline void mm_init_cpumask(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ mm->cpu_vm_mask_var = &mm->cpumask_allocation;
+#endif
+}
+
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
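With CONFIG_CPUMASK_OFFSTACK the mm's cpumask now lives in the embedded cpumask_allocation field instead of a separate allocation, so mm_init_cpumask() must run before the first mm_cpumask() use. A minimal sketch of the expected call order; the surrounding setup code is hypothetical:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

/* Hypothetical mm setup: wire cpu_vm_mask_var, then clear the mask. */
static void example_setup_mm(struct mm_struct *mm)
{
        mm_init_cpumask(mm);            /* points cpu_vm_mask_var at storage */
        cpumask_clear(mm_cpumask(mm));  /* now safe to touch the mask */
}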
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 29312bd..c928dac 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1051,12 +1051,14 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
+#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
+#endif
static inline int pfn_present(unsigned long pfn)
{
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9d5306b..2541fb8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -322,9 +322,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
/* Kernel-side ioctl definitions */
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device (struct mtd_info *mtd);
+struct mtd_partition;
+extern int mtd_device_register(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nr_parts);
+extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
@@ -348,15 +351,9 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
unsigned long count, loff_t from, size_t *retlen);
-#ifdef CONFIG_MTD_PARTITIONS
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
+
void mtd_erase_callback(struct erase_info *instr);
-#else
-static inline void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->callback)
- instr->callback(instr);
-}
-#endif
/*
* Debugging macro and defines
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index d441927..c2b9ac4 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -237,9 +237,9 @@ typedef enum {
* If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch
* the OOB area.
*/
-#define NAND_USE_FLASH_BBT_NO_OOB 0x00100000
+#define NAND_USE_FLASH_BBT_NO_OOB 0x00800000
/* Create an empty BBT with no vendor information if the BBT is available */
-#define NAND_CREATE_EMPTY_BBT 0x00200000
+#define NAND_CREATE_EMPTY_BBT 0x01000000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 4a0a8ba..3a6f037 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -16,7 +16,7 @@
* Partition definition structure:
*
* An array of struct partition is passed along with a MTD object to
- * add_mtd_partitions() to create them.
+ * mtd_device_register() to create them.
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
@@ -49,9 +49,6 @@ struct mtd_partition {
struct mtd_info;
-int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
-int del_mtd_partitions(struct mtd_info *);
-
/*
* Functions dealing with the various ways of partitioning the space
*/
@@ -73,14 +70,17 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
struct device;
struct device_node;
+#ifdef CONFIG_MTD_OF_PARTS
int __devinit of_mtd_parse_partitions(struct device *dev,
struct device_node *node,
struct mtd_partition **pparts);
-
-#ifdef CONFIG_MTD_PARTITIONS
-static inline int mtd_has_partitions(void) { return 1; }
#else
-static inline int mtd_has_partitions(void) { return 0; }
+static inline int of_mtd_parse_partitions(struct device *dev,
+ struct device_node *node,
+ struct mtd_partition **pparts)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_MTD_CMDLINE_PARTS
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 49b9590..e5f21d2 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -19,6 +19,7 @@
#include <linux/mtd/partitions.h>
struct map_info;
+struct platform_device;
struct physmap_flash_data {
unsigned int width;
@@ -37,8 +38,6 @@ struct physmap_flash_data {
void physmap_configure(unsigned long addr, unsigned long size,
int bankwidth, void (*set_vpp)(struct map_info *, int) );
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* Machines that wish to do flash partition may want to call this function in
* their setup routine.
@@ -50,6 +49,4 @@ void physmap_configure(unsigned long addr, unsigned long size,
*/
void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
-#endif /* defined(CONFIG_MTD_PARTITIONS) */
-
#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 178fafe..504b289 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -562,6 +562,7 @@ enum {
NFSPROC4_CLNT_LAYOUTGET,
NFSPROC4_CLNT_GETDEVICEINFO,
NFSPROC4_CLNT_LAYOUTCOMMIT,
+ NFSPROC4_CLNT_LAYOUTRETURN,
};
/* nfs41 types */
@@ -570,9 +571,11 @@ struct nfs4_sessionid {
};
/* Create Session Flags */
-#define SESSION4_PERSIST 0x001
-#define SESSION4_BACK_CHAN 0x002
-#define SESSION4_RDMA 0x004
+#define SESSION4_PERSIST 0x001
+#define SESSION4_BACK_CHAN 0x002
+#define SESSION4_RDMA 0x004
+
+#define SESSION4_FLAG_MASK_A 0x007
enum state_protect_how4 {
SP4_NONE = 0,
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 91af2e4..3a34e80 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -68,7 +68,7 @@ struct nfs_pageio_descriptor {
int pg_ioflags;
int pg_error;
struct pnfs_layout_segment *pg_lseg;
- int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
+ bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7e371f7..5e8444a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -269,6 +269,27 @@ struct nfs4_layoutcommit_data {
struct nfs4_layoutcommit_res res;
};
+struct nfs4_layoutreturn_args {
+ __u32 layout_type;
+ struct inode *inode;
+ nfs4_stateid stateid;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_layoutreturn_res {
+ struct nfs4_sequence_res seq_res;
+ u32 lrs_present;
+ nfs4_stateid stateid;
+};
+
+struct nfs4_layoutreturn {
+ struct nfs4_layoutreturn_args args;
+ struct nfs4_layoutreturn_res res;
+ struct rpc_cred *cred;
+ struct nfs_client *clp;
+ int rpc_status;
+};
+
/*
* Arguments to the open call.
*/
@@ -1087,6 +1108,7 @@ struct nfs_read_data {
const struct rpc_call_ops *mds_ops;
int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
__u64 mds_offset;
+ int pnfs_error;
struct page *page_array[NFS_PAGEVEC_SIZE];
};
@@ -1112,6 +1134,7 @@ struct nfs_write_data {
unsigned long timestamp; /* For lease renewal */
#endif
__u64 mds_offset; /* Filelayout dense stripe */
+ int pnfs_error;
struct page *page_array[NFS_PAGEVEC_SIZE];
};
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 79a6700..6081493 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -308,7 +308,7 @@ static inline void SetPageUptodate(struct page *page)
{
#ifdef CONFIG_S390
if (!test_and_set_bit(PG_uptodate, &page->flags))
- page_set_storage_key(page_to_pfn(page), PAGE_DEFAULT_KEY, 0);
+ page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0);
#else
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 24787b7..a311008 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2483,6 +2483,7 @@
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index 77cbddb..a7d87f9 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -16,6 +16,10 @@
#define PM_QOS_NUM_CLASSES 4
#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+
struct pm_qos_request_list {
struct plist_node list;
int pm_qos_class;
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
new file mode 100644
index 0000000..76efbdd
--- /dev/null
+++ b/include/linux/pnfs_osd_xdr.h
@@ -0,0 +1,345 @@
+/*
+ * pNFS-osd on-the-wire data structures
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __PNFS_OSD_XDR_H__
+#define __PNFS_OSD_XDR_H__
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <scsi/osd_protocol.h>
+
+#define PNFS_OSD_OSDNAME_MAXSIZE 256
+
+/*
+ * draft-ietf-nfsv4-minorversion-22
+ * draft-ietf-nfsv4-pnfs-obj-12
+ */
+
+/* Layout Structure */
+
+enum pnfs_osd_raid_algorithm4 {
+ PNFS_OSD_RAID_0 = 1,
+ PNFS_OSD_RAID_4 = 2,
+ PNFS_OSD_RAID_5 = 3,
+ PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
+};
+
+/* struct pnfs_osd_data_map4 {
+ * uint32_t odm_num_comps;
+ * length4 odm_stripe_unit;
+ * uint32_t odm_group_width;
+ * uint32_t odm_group_depth;
+ * uint32_t odm_mirror_cnt;
+ * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
+ * };
+ */
+struct pnfs_osd_data_map {
+ u32 odm_num_comps;
+ u64 odm_stripe_unit;
+ u32 odm_group_width;
+ u32 odm_group_depth;
+ u32 odm_mirror_cnt;
+ u32 odm_raid_algorithm;
+};
+
+/* struct pnfs_osd_objid4 {
+ * deviceid4 oid_device_id;
+ * uint64_t oid_partition_id;
+ * uint64_t oid_object_id;
+ * };
+ */
+struct pnfs_osd_objid {
+ struct nfs4_deviceid oid_device_id;
+ u64 oid_partition_id;
+ u64 oid_object_id;
+};
+
+/* For printouts, use e.g.:
+ *	printk("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
+ * The values are printed in big-endian order.
+ */
+#define _DEVID_LO(oid_device_id) \
+ (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
+
+#define _DEVID_HI(oid_device_id) \
+ (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
+
+static inline int
+pnfs_osd_objid_xdr_sz(void)
+{
+ return (NFS4_DEVICEID4_SIZE / 4) + 2 + 2;
+}
+
+enum pnfs_osd_version {
+ PNFS_OSD_MISSING = 0,
+ PNFS_OSD_VERSION_1 = 1,
+ PNFS_OSD_VERSION_2 = 2
+};
+
+struct pnfs_osd_opaque_cred {
+ u32 cred_len;
+ void *cred;
+};
+
+enum pnfs_osd_cap_key_sec {
+ PNFS_OSD_CAP_KEY_SEC_NONE = 0,
+ PNFS_OSD_CAP_KEY_SEC_SSV = 1,
+};
+
+/* struct pnfs_osd_object_cred4 {
+ * pnfs_osd_objid4 oc_object_id;
+ * pnfs_osd_version4 oc_osd_version;
+ * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
+ * opaque oc_capability_key<>;
+ * opaque oc_capability<>;
+ * };
+ */
+struct pnfs_osd_object_cred {
+ struct pnfs_osd_objid oc_object_id;
+ u32 oc_osd_version;
+ u32 oc_cap_key_sec;
+ struct pnfs_osd_opaque_cred oc_cap_key;
+ struct pnfs_osd_opaque_cred oc_cap;
+};
+
+/* struct pnfs_osd_layout4 {
+ * pnfs_osd_data_map4 olo_map;
+ * uint32_t olo_comps_index;
+ * pnfs_osd_object_cred4 olo_components<>;
+ * };
+ */
+struct pnfs_osd_layout {
+ struct pnfs_osd_data_map olo_map;
+ u32 olo_comps_index;
+ u32 olo_num_comps;
+ struct pnfs_osd_object_cred *olo_comps;
+};
+
+/* Device Address */
+enum pnfs_osd_targetid_type {
+ OBJ_TARGET_ANON = 1,
+ OBJ_TARGET_SCSI_NAME = 2,
+ OBJ_TARGET_SCSI_DEVICE_ID = 3,
+};
+
+/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
+ * case OBJ_TARGET_SCSI_NAME:
+ * string oti_scsi_name<>;
+ *
+ * case OBJ_TARGET_SCSI_DEVICE_ID:
+ * opaque oti_scsi_device_id<>;
+ *
+ * default:
+ * void;
+ * };
+ *
+ * union pnfs_osd_targetaddr4 switch (bool ota_available) {
+ * case TRUE:
+ * netaddr4 ota_netaddr;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct pnfs_osd_deviceaddr4 {
+ * pnfs_osd_targetid4 oda_targetid;
+ * pnfs_osd_targetaddr4 oda_targetaddr;
+ * uint64_t oda_lun;
+ * opaque oda_systemid<>;
+ * pnfs_osd_object_cred4 oda_root_obj_cred;
+ * opaque oda_osdname<>;
+ * };
+ */
+struct pnfs_osd_targetid {
+ u32 oti_type;
+ struct nfs4_string oti_scsi_device_id;
+};
+
+enum { PNFS_OSD_TARGETID_MAX = 1 + PNFS_OSD_OSDNAME_MAXSIZE / 4 };
+
+/* struct netaddr4 {
+ * // see struct rpcb in RFC1833
+ * string r_netid<>; // network id
+ * string r_addr<>; // universal address
+ * };
+ */
+struct pnfs_osd_net_addr {
+ struct nfs4_string r_netid;
+ struct nfs4_string r_addr;
+};
+
+struct pnfs_osd_targetaddr {
+ u32 ota_available;
+ struct pnfs_osd_net_addr ota_netaddr;
+};
+
+enum {
+ NETWORK_ID_MAX = 16 / 4,
+ UNIVERSAL_ADDRESS_MAX = 64 / 4,
+ PNFS_OSD_TARGETADDR_MAX = 3 + NETWORK_ID_MAX + UNIVERSAL_ADDRESS_MAX,
+};
+
+struct pnfs_osd_deviceaddr {
+ struct pnfs_osd_targetid oda_targetid;
+ struct pnfs_osd_targetaddr oda_targetaddr;
+ u8 oda_lun[8];
+ struct nfs4_string oda_systemid;
+ struct pnfs_osd_object_cred oda_root_obj_cred;
+ struct nfs4_string oda_osdname;
+};
+
+enum {
+ ODA_OSDNAME_MAX = PNFS_OSD_OSDNAME_MAXSIZE / 4,
+ PNFS_OSD_DEVICEADDR_MAX =
+ PNFS_OSD_TARGETID_MAX + PNFS_OSD_TARGETADDR_MAX +
+ 2 /*oda_lun*/ +
+ 1 + OSD_SYSTEMID_LEN +
+ 1 + ODA_OSDNAME_MAX,
+};
+
+/* LAYOUTCOMMIT: layoutupdate */
+
+/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
+ * case TRUE:
+ * int64_t dsu_delta;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct pnfs_osd_layoutupdate4 {
+ * pnfs_osd_deltaspaceused4 olu_delta_space_used;
+ * bool olu_ioerr_flag;
+ * };
+ */
+struct pnfs_osd_layoutupdate {
+ u32 dsu_valid;
+ s64 dsu_delta;
+ u32 olu_ioerr_flag;
+};
+
+/* LAYOUTRETURN: I/O Error Report */
+
+enum pnfs_osd_errno {
+ PNFS_OSD_ERR_EIO = 1,
+ PNFS_OSD_ERR_NOT_FOUND = 2,
+ PNFS_OSD_ERR_NO_SPACE = 3,
+ PNFS_OSD_ERR_BAD_CRED = 4,
+ PNFS_OSD_ERR_NO_ACCESS = 5,
+ PNFS_OSD_ERR_UNREACHABLE = 6,
+ PNFS_OSD_ERR_RESOURCE = 7
+};
+
+/* struct pnfs_osd_ioerr4 {
+ * pnfs_osd_objid4 oer_component;
+ * length4 oer_comp_offset;
+ * length4 oer_comp_length;
+ * bool oer_iswrite;
+ * pnfs_osd_errno4 oer_errno;
+ * };
+ */
+struct pnfs_osd_ioerr {
+ struct pnfs_osd_objid oer_component;
+ u64 oer_comp_offset;
+ u64 oer_comp_length;
+ u32 oer_iswrite;
+ u32 oer_errno;
+};
+
+/* OSD XDR API */
+/* Layout helpers */
+/* Layout decoding is done in two parts:
+ * 1. First call pnfs_osd_xdr_decode_layout_map() to read in only the header part
+ * of the layout. @iter members need not be initialized.
+ * Returned:
+ * @layout members are set. (@layout->olo_comps set to NULL).
+ *
+ * Zero on success, or a negative error if the passed xdr is broken.
+ *
+ * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
+ * false, to decode the next component.
+ * Returned:
+ * true if there is more to decode, or false when done or on error.
+ *
+ * Example:
+ * struct pnfs_osd_xdr_decode_layout_iter iter;
+ * struct pnfs_osd_layout layout;
+ * struct pnfs_osd_object_cred comp;
+ * int status;
+ *
+ * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
+ * if (unlikely(status))
+ * goto err;
+ * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
+ * // All of @comp's strings point inside the xdr_buffer
+ * // or scratch buffer. Copy them out to user memory, e.g.:
+ * copy_single_comp(dest_comp++, &comp);
+ * }
+ * if (unlikely(status))
+ * goto err;
+ */
+
+struct pnfs_osd_xdr_decode_layout_iter {
+ unsigned total_comps;
+ unsigned decoded_comps;
+};
+
+extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
+
+extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
+ int *err);
+
+/* Device Info helpers */
+
+/* Note: All strings inside @deviceaddr point to space inside @p.
+ * @p should stay valid while @deviceaddr is in use.
+ */
+extern void pnfs_osd_xdr_decode_deviceaddr(
+ struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
+
+/* layoutupdate (layout_commit) xdr helpers */
+extern int
+pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
+ struct pnfs_osd_layoutupdate *lou);
+
+/* osd_ioerror encoding/decoding (layout_return) */
+/* Client */
+extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
+extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
+
+#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc88712..2a8621c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1546,7 +1546,7 @@ struct task_struct {
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
- /* bitmask of trace recursion */
+ /* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
@@ -1841,9 +1841,16 @@ static inline void rcu_copy_process(struct task_struct *p)
#endif
#ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask);
+
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
#else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask)
{
@@ -2187,7 +2194,6 @@ static inline void mmdrop(struct mm_struct * mm)
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 77e6248..c68a147 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -145,6 +145,7 @@ typedef __be32 rpc_fraghdr;
#define RPCBIND_NETID_TCP "tcp"
#define RPCBIND_NETID_UDP6 "udp6"
#define RPCBIND_NETID_TCP6 "tcp6"
+#define RPCBIND_NETID_LOCAL "local"
/*
* Note that RFC 1833 does not put any size restrictions on the
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 04dba23..85c50b4 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,6 +28,7 @@ struct svc_sock {
/* private TCP part */
u32 sk_reclen; /* length of record */
u32 sk_tcplen; /* current read length */
+ struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
/*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index fc84b7a..a20970e 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -216,6 +216,8 @@ extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
+extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, unsigned int len);
extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a0f998c..81cce3b 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -141,7 +141,8 @@ enum xprt_transports {
XPRT_TRANSPORT_UDP = IPPROTO_UDP,
XPRT_TRANSPORT_TCP = IPPROTO_TCP,
XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
- XPRT_TRANSPORT_RDMA = 256
+ XPRT_TRANSPORT_RDMA = 256,
+ XPRT_TRANSPORT_LOCAL = 257,
};
struct rpc_xprt {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d512d98..5ca0951 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_read(void *dst, void *src, size_t size);
-extern long __probe_kernel_read(void *dst, void *src, size_t size);
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
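The prototypes above constify the source pointer, so callers can probe read-only data without casting. A hedged sketch; the probed symbol is made up:

#include <linux/uaccess.h>
#include <linux/kernel.h>

static const char example_banner[] = "example";

/* Safely copy a few bytes out of (possibly unmapped) kernel memory;
 * returns -EFAULT instead of faulting if the address is bad. */
static long example_peek(char *buf, size_t len)
{
        return probe_kernel_read(buf, example_banner,
                                 min(len, sizeof(example_banner)));
}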
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index aff5b4f..7108857 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,13 @@ struct virtqueue {
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
+ * virtqueue_enable_cb_delayed: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
* virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
@@ -86,6 +93,8 @@ void virtqueue_disable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb(struct virtqueue *vq);
+bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
void *virtqueue_detach_unused_buf(struct virtqueue *vq);
/**
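virtqueue_enable_cb_delayed() re-enables callbacks but hints to the host to hold interrupts until most available buffers have been consumed, which suits transmit-completion paths. A hedged driver sketch; the buffer handling around it is simplified and the example_* helper is hypothetical:

#include <linux/virtio.h>

static void example_free_tx_buf(void *buf);     /* hypothetical helper */

/* After reclaiming completed buffers, re-enable callbacks lazily; if many
 * buffers completed in the meantime, disable again and keep processing. */
static void example_tx_cleanup(struct virtqueue *vq)
{
        unsigned int len;
        void *buf;

        while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                example_free_tx_buf(buf);

        if (!virtqueue_enable_cb_delayed(vq)) {
                /* Race: more work already pending; reprocess before sleeping. */
                virtqueue_disable_cb(vq);
                /* ... reschedule this cleanup ... */
        }
}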
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h
index e68b439..277c4ad 100644
--- a/include/linux/virtio_9p.h
+++ b/include/linux/virtio_9p.h
@@ -1,7 +1,30 @@
#ifndef _LINUX_VIRTIO_9P_H
#define _LINUX_VIRTIO_9P_H
/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index a50ecd1..652dc8b 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -1,7 +1,30 @@
#ifndef _LINUX_VIRTIO_BALLOON_H
#define _LINUX_VIRTIO_BALLOON_H
/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 167720d..e0edb40 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -1,7 +1,30 @@
#ifndef _LINUX_VIRTIO_BLK_H
#define _LINUX_VIRTIO_BLK_H
/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 800617b..39c88c5 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -1,7 +1,30 @@
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H
/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers. */
+ * anyone can use the definitions to implement compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
/* Virtio devices use a standardized configuration space to define their
* features and pass configuration information, but each implementation can
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index e4d3335..bdf4b00 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -5,7 +5,31 @@
#include <linux/virtio_config.h>
/*
* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers.
+ * anyone can use the definitions to implement compatible drivers/servers:
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*
* Copyright (C) Red Hat, Inc., 2009, 2010, 2011
* Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 06660c0..85bb0bb 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -5,7 +5,29 @@
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
- */
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
#define VIRTIO_ID_NET 1 /* virtio net */
#define VIRTIO_ID_BLOCK 2 /* virtio block */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 085e422..136040b 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -1,7 +1,30 @@
#ifndef _LINUX_VIRTIO_NET_H
#define _LINUX_VIRTIO_NET_H
/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index 9a3d7c4..ea66f3f 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -11,6 +11,29 @@
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*/
#ifndef _LINUX_VIRTIO_PCI_H
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e4d144b..4a32cb6 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -7,6 +7,29 @@
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
* Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>
@@ -29,6 +52,12 @@
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
@@ -83,6 +112,7 @@ struct vring {
* __u16 avail_flags;
* __u16 avail_idx;
* __u16 available[num];
+ * __u16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
@@ -91,8 +121,14 @@ struct vring {
* __u16 used_flags;
* __u16 used_idx;
* struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
* };
*/
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
+
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align)
{
@@ -107,7 +143,21 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
{
return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
+ align - 1) & ~(align - 1))
- + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num;
+ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
+}
+
+/* The following is used with the event index feature (VIRTIO_RING_F_EVENT_IDX). */
+/* Assuming a given event_idx value from the other side, if
+ * we have just incremented the index from old to new_idx,
+ * should we trigger an event? */
+static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
+{
+ /* Note: Xen has similar logic for notification hold-off
+ * in include/xen/interface/io/ring.h with req_event and req_prod
+ * corresponding to event_idx + 1 and new_idx respectively.
+ * Note also that req_event and req_prod in Xen start at 1,
+ * whereas event indexes in virtio start at 0. */
+ return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
#ifdef __KERNEL__
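
The vring_need_event() check added above is a wrap-safe range test: it returns true exactly when the peer's event_idx lies in the wrap-around window [old, new_idx), i.e. among the entries the index has just stepped over, so a notification is raised only if the ring moved past the point the other side asked about. A stand-alone sketch with hypothetical index values (not part of the patch) exercising the same 16-bit arithmetic:

/* Stand-alone demo of the vring_need_event() wrap-safe window test.
 * Index values below are hypothetical, purely for illustration. */
#include <stdio.h>
#include <stdint.h>

static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	/* True iff event_idx is in [old, new_idx) modulo 2^16. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	printf("%d\n", vring_need_event(5, 6, 4));		/* 1: 5 is in [4, 6)  */
	printf("%d\n", vring_need_event(9, 6, 4));		/* 0: 9 not reached   */
	printf("%d\n", vring_need_event(0xffff, 1, 0xfffe));	/* 1: window wraps    */
	return 0;
}
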
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 8f6bb9c..ee86606 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -604,6 +604,7 @@ struct sas_domain_function_template {
int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
+ int (*lldd_ata_soft_reset)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index d6e7994..81dd12e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -9,6 +9,7 @@
#define MSG_SIMPLE_TAG 0x20
#define MSG_HEAD_TAG 0x21
#define MSG_ORDERED_TAG 0x22
+#define MSG_ACA_TAG 0x24 /* unsupported */
#define SCSI_NO_TAG (-1) /* identify no tag in use */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1d3b5b2..561ac99 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -98,6 +98,7 @@ enum transport_state_table {
TRANSPORT_REMOVE = 14,
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
+ TRANSPORT_FREE_CMD_INTR = 17,
};
/* Used for struct se_cmd->se_cmd_flags */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index dc78f77..747e140 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -77,7 +77,6 @@ struct target_core_fabric_ops {
u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
u16 (*get_fabric_sense_len)(void);
int (*is_state_remove)(struct se_cmd *);
- u64 (*pack_lun)(unsigned int);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 59aa464..24a1c6c 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f445cff..4114129 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -28,7 +28,7 @@ struct extent_buffer;
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
#define __show_root_type(obj) \
- __print_symbolic(obj, \
+ __print_symbolic_u64(obj, \
{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
@@ -125,7 +125,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
);
#define __show_map_type(type) \
- __print_symbolic(type, \
+ __print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
{ EXTENT_MAP_INLINE, "INLINE" }, \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 3e68366..533c49f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -205,6 +205,19 @@
ftrace_print_symbols_seq(p, value, symbols); \
})
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ ftrace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
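
__print_symbolic_u64() exists because, on 32-bit kernels, plain __print_symbolic() keeps its value/name pairs as unsigned long, which truncates 64-bit object IDs such as the btrfs root and extent-map types converted earlier in this diff; on 64-bit builds it simply falls back to __print_symbolic(). A small user-space analogue of the lookup done by ftrace_print_symbols_seq_u64() (the table values below are illustrative, not the kernel's):

/* Map a 64-bit value to a symbolic name, falling back to hex when
 * nothing matches -- analogous to ftrace_print_symbols_seq_u64(). */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct sym64 { uint64_t mask; const char *name; };	/* cf. trace_print_flags_u64 */

static const char *symbolic_u64(uint64_t val, const struct sym64 *syms,
				char *buf, size_t len)
{
	for (; syms->name; syms++)
		if (val == syms->mask)
			return syms->name;
	snprintf(buf, len, "0x%" PRIx64, val);		/* no match: raw hex */
	return buf;
}

int main(void)
{
	/* 64-bit sentinel values that a 32-bit unsigned long would truncate. */
	static const struct sym64 map_type[] = {
		{ UINT64_MAX - 3, "LAST_BYTE" },
		{ UINT64_MAX - 2, "HOLE" },
		{ UINT64_MAX - 1, "INLINE" },
		{ 0, NULL },
	};
	char buf[32];

	puts(symbolic_u64(UINT64_MAX - 2, map_type, buf, sizeof(buf)));	/* HOLE */
	puts(symbolic_u64(42, map_type, buf, sizeof(buf)));			/* 0x2a */
	return 0;
}
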
diff --git a/init/main.c b/init/main.c
index d2f1e08..cafba67 100644
--- a/init/main.c
+++ b/init/main.c
@@ -487,6 +487,7 @@ asmlinkage void __init start_kernel(void)
printk(KERN_NOTICE "%s", linux_banner);
setup_arch(&command_line);
mm_init_owner(&init_mm, &init_task);
+ mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -510,7 +511,6 @@ asmlinkage void __init start_kernel(void)
sort_main_extable();
trap_init();
mm_init();
- BUG_ON(mm_init_cpumask(&init_mm, 0));
/*
* Set up the scheduler prior starting any interrupts (such as the
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb04..9c9b754 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
rcu_read_lock();
cs = task_cs(tsk);
if (cs)
- cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+ do_set_cpus_allowed(tsk, cs->cpus_allowed);
rcu_read_unlock();
/*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
* Like above we can temporary set any mask and rely on
* set_cpus_allowed_ptr() as synchronization point.
*/
- cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+ do_set_cpus_allowed(tsk, cpu_possible_mask);
cpu = cpumask_any(cpu_active_mask);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8a15944..9efe710 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5028,6 +5028,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
else
perf_event_output(event, nmi, data, regs);
+ if (event->fasync && event->pending_kill) {
+ if (nmi) {
+ event->pending_wakeup = 1;
+ irq_work_queue(&event->pending);
+ } else
+ perf_event_wakeup(event);
+ }
+
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index ca406d9..0276c30 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -484,20 +484,6 @@ static void mm_init_aio(struct mm_struct *mm)
#endif
}
-int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
- return -ENOMEM;
-
- if (oldmm)
- cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
- else
- memset(mm_cpumask(mm), 0, cpumask_size());
-#endif
- return 0;
-}
-
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
@@ -538,17 +524,8 @@ struct mm_struct * mm_alloc(void)
return NULL;
memset(mm, 0, sizeof(*mm));
- mm = mm_init(mm, current);
- if (!mm)
- return NULL;
-
- if (mm_init_cpumask(mm, NULL)) {
- mm_free_pgd(mm);
- free_mm(mm);
- return NULL;
- }
-
- return mm;
+ mm_init_cpumask(mm);
+ return mm_init(mm, current);
}
/*
@@ -559,7 +536,6 @@ struct mm_struct * mm_alloc(void)
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
- free_cpumask_var(mm->cpu_vm_mask_var);
mm_free_pgd(mm);
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
@@ -753,6 +729,7 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
+ mm_init_cpumask(mm);
/* Initializing for Swap token stuff */
mm->token_priority = 0;
@@ -765,9 +742,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
if (!mm_init(mm, tsk))
goto fail_nomem;
- if (mm_init_cpumask(mm, oldmm))
- goto fail_nocpumask;
-
if (init_new_context(tsk, mm))
goto fail_nocontext;
@@ -794,9 +768,6 @@ fail_nomem:
return NULL;
fail_nocontext:
- free_cpumask_var(mm->cpu_vm_mask_var);
-
-fail_nocpumask:
/*
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
@@ -1591,6 +1562,13 @@ void __init proc_caches_init(void)
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+ /*
+ * FIXME! The "sizeof(struct mm_struct)" currently includes the
+ * whole struct cpumask for the OFFSTACK case. We could change
+ * this to *only* allocate as much of it as required by the
+ * maximum number of CPUs we can ever have. The cpumask_allocation
+ * is at the end of the structure, exactly for that reason.
+ */
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 74d1c09..fa27e75 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
}
static void __jump_label_update(struct jump_label_key *key,
- struct jump_entry *entry, int enable)
+ struct jump_entry *entry,
+ struct jump_entry *stop, int enable)
{
- for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+ for (; (entry < stop) &&
+ (entry->key == (jump_label_t)(unsigned long)key);
+ entry++) {
/*
* entry->code set to 0 invalidates module init text sections
* kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
struct jump_label_mod *mod = key->next;
while (mod) {
- __jump_label_update(key, mod->entries, enable);
+ struct module *m = mod->mod;
+
+ __jump_label_update(key, mod->entries,
+ m->jump_entries + m->num_jump_entries,
+ enable);
mod = mod->next;
}
}
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;
if (jump_label_enabled(key))
- __jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+ __jump_label_update(key, iter, iter_stop,
+ JUMP_LABEL_ENABLE);
}
return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, enable);
+ __jump_label_update(key, entry, __stop___jump_table, enable);
#ifdef CONFIG_MODULES
__jump_label_mod_update(key, enable);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d27..4ba7ccc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
return;
}
- p->cpus_allowed = cpumask_of_cpu(cpu);
- p->rt.nr_cpus_allowed = 1;
+ /* It's safe because the task is inactive. */
+ do_set_cpus_allowed(p, cpumask_of(cpu));
p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index fd8d1e0..6824ca7 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -54,11 +54,17 @@ enum pm_qos_type {
PM_QOS_MIN /* return the smallest value */
};
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types Linux supports for 32-bit quantities.
+ */
struct pm_qos_object {
struct plist_head requests;
struct blocking_notifier_head *notifiers;
struct miscdevice pm_qos_power_miscdev;
char *name;
+ s32 target_value; /* Do not change to 64 bit */
s32 default_value;
enum pm_qos_type type;
};
@@ -71,7 +77,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
.notifiers = &cpu_dma_lat_notifier,
.name = "cpu_dma_latency",
- .default_value = 2000 * USEC_PER_SEC,
+ .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
};
@@ -80,7 +87,8 @@ static struct pm_qos_object network_lat_pm_qos = {
.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
.notifiers = &network_lat_notifier,
.name = "network_latency",
- .default_value = 2000 * USEC_PER_SEC,
+ .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN
};
@@ -90,7 +98,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
.notifiers = &network_throughput_notifier,
.name = "network_throughput",
- .default_value = 0,
+ .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
};
@@ -136,6 +145,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
}
}
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+ return o->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+ o->target_value = value;
+}
+
static void update_target(struct pm_qos_object *o, struct plist_node *node,
int del, int value)
{
@@ -160,6 +179,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
plist_add(node, &o->requests);
}
curr_value = pm_qos_get_value(o);
+ pm_qos_set_value(o, curr_value);
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (prev_value != curr_value)
@@ -194,18 +214,11 @@ static int find_pm_qos_object_by_minor(int minor)
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
- unsigned long flags;
- int value;
-
- spin_lock_irqsave(&pm_qos_lock, flags);
- value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
- spin_unlock_irqrestore(&pm_qos_lock, flags);
-
- return value;
+ return pm_qos_read_value(pm_qos_array[pm_qos_class]);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
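
The pm_qos rework above caches the aggregated constraint in o->target_value so that pm_qos_request() becomes a single load: writers still recompute and publish the value under pm_qos_lock, while readers rely on aligned 32-bit loads being atomic on every CPU type Linux supports. A user-space sketch of the same write-under-lock / read-without-lock pattern, using C11 relaxed atomics in place of that implicit guarantee (names and values are hypothetical):

/* "Writers lock, readers just load" -- sketch of the pm_qos pattern.
 * The _Atomic qualifier stands in for the kernel's assumption that an
 * aligned 32-bit access is indivisible; everything here is hypothetical. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t qos_lock = PTHREAD_MUTEX_INITIALIZER;
static int32_t requests[4] = { 100, 25, 60, 2000 };	/* stand-in for the plist */
static _Atomic int32_t target_value = 25;		/* published aggregate (MIN) */

static void update_request(int slot, int32_t value)	/* writer: slow path */
{
	int32_t min;
	int i;

	pthread_mutex_lock(&qos_lock);
	requests[slot] = value;
	min = requests[0];
	for (i = 1; i < 4; i++)				/* recompute the aggregate */
		if (requests[i] < min)
			min = requests[i];
	atomic_store_explicit(&target_value, min, memory_order_relaxed);
	pthread_mutex_unlock(&qos_lock);
}

static int32_t qos_request(void)			/* reader: no lock taken */
{
	return atomic_load_explicit(&target_value, memory_order_relaxed);
}

int main(void)
{
	update_request(3, 10);
	printf("target = %d\n", (int)qos_request());	/* prints: target = 10 */
	return 0;
}
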
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f07d2f0..89419ff 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -36,7 +36,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;
@@ -163,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
- .dynticks = 1,
+ .dynticks = ATOMIC_INIT(1),
};
#endif /* #ifdef CONFIG_NO_HZ */
@@ -322,13 +321,25 @@ void rcu_enter_nohz(void)
unsigned long flags;
struct rcu_dynticks *rdtp;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- rdtp->dynticks++;
- rdtp->dynticks_nesting--;
- WARN_ON_ONCE(rdtp->dynticks & 0x1);
+ if (--rdtp->dynticks_nesting) {
+ local_irq_restore(flags);
+ return;
+ }
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
local_irq_restore(flags);
+
+ /* If the interrupt queued a callback, get out of dyntick mode. */
+ if (in_irq() &&
+ (__get_cpu_var(rcu_sched_data).nxtlist ||
+ __get_cpu_var(rcu_bh_data).nxtlist ||
+ rcu_preempt_needs_cpu(smp_processor_id())))
+ set_need_resched();
}
/*
@@ -344,11 +355,16 @@ void rcu_exit_nohz(void)
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- rdtp->dynticks++;
- rdtp->dynticks_nesting++;
- WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+ if (rdtp->dynticks_nesting++) {
+ local_irq_restore(flags);
+ return;
+ }
+ smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+ atomic_inc(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
local_irq_restore(flags);
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
/**
@@ -362,11 +378,15 @@ void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks & 0x1)
+ if (rdtp->dynticks_nmi_nesting == 0 &&
+ (atomic_read(&rdtp->dynticks) & 0x1))
return;
- rdtp->dynticks_nmi++;
- WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ rdtp->dynticks_nmi_nesting++;
+ smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+ atomic_inc(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}
/**
@@ -380,11 +400,14 @@ void rcu_nmi_exit(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks & 0x1)
+ if (rdtp->dynticks_nmi_nesting == 0 ||
+ --rdtp->dynticks_nmi_nesting != 0)
return;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- rdtp->dynticks_nmi++;
- WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force delay to next write. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
/**
@@ -395,13 +418,7 @@ void rcu_nmi_exit(void)
*/
void rcu_irq_enter(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
- if (rdtp->dynticks_nesting++)
- return;
- rdtp->dynticks++;
- WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ rcu_exit_nohz();
}
/**
@@ -413,18 +430,7 @@ void rcu_irq_enter(void)
*/
void rcu_irq_exit(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
- if (--rdtp->dynticks_nesting)
- return;
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- rdtp->dynticks++;
- WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
- /* If the interrupt queued a callback, get out of dyntick mode. */
- if (__this_cpu_read(rcu_sched_data.nxtlist) ||
- __this_cpu_read(rcu_bh_data.nxtlist))
- set_need_resched();
+ rcu_enter_nohz();
}
#ifdef CONFIG_SMP
@@ -436,19 +442,8 @@ void rcu_irq_exit(void)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
- int ret;
- int snap;
- int snap_nmi;
-
- snap = rdp->dynticks->dynticks;
- snap_nmi = rdp->dynticks->dynticks_nmi;
- smp_mb(); /* Order sampling of snap with end of grace period. */
- rdp->dynticks_snap = snap;
- rdp->dynticks_nmi_snap = snap_nmi;
- ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
- if (ret)
- rdp->dynticks_fqs++;
- return ret;
+ rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ return 0;
}
/*
@@ -459,16 +454,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
- long curr;
- long curr_nmi;
- long snap;
- long snap_nmi;
+ unsigned long curr;
+ unsigned long snap;
- curr = rdp->dynticks->dynticks;
- snap = rdp->dynticks_snap;
- curr_nmi = rdp->dynticks->dynticks_nmi;
- snap_nmi = rdp->dynticks_nmi_snap;
- smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+ curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+ snap = (unsigned long)rdp->dynticks_snap;
/*
* If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +468,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if ((curr != snap || (curr & 0x1) == 0) &&
- (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+ if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
rdp->dynticks_fqs++;
return 1;
}
@@ -908,6 +897,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
unsigned long gp_duration;
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+ /*
+ * Ensure that all grace-period and pre-grace-period activity
+ * is seen before the assignment to rsp->completed.
+ */
+ smp_mb(); /* See above block comment. */
gp_duration = jiffies - rsp->gp_start;
if (gp_duration > rsp->gp_max)
rsp->gp_max = gp_duration;
@@ -1455,25 +1450,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static void rcu_process_callbacks(void)
{
- /*
- * Memory references from any prior RCU read-side critical sections
- * executed by the interrupted code must be seen before any RCU
- * grace-period manipulations below.
- */
- smp_mb(); /* See above block comment. */
-
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
rcu_preempt_process_callbacks();
- /*
- * Memory references from any later RCU read-side critical sections
- * executed by the interrupted code must be seen after any RCU
- * grace-period manipulations above.
- */
- smp_mb(); /* See above block comment. */
-
/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
rcu_needs_cpu_flush();
}
@@ -1494,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
local_irq_restore(flags);
return;
}
- wake_up(&__get_cpu_var(rcu_cpu_wq));
+ wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
}
@@ -1544,13 +1525,10 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
*/
static void rcu_cpu_kthread_timer(unsigned long arg)
{
- unsigned long flags;
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
struct rcu_node *rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
- rnp->wakemask |= rdp->grpmask;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ atomic_or(rdp->grpmask, &rnp->wakemask);
invoke_rcu_node_kthread(rnp);
}
@@ -1617,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
unsigned long flags;
int spincnt = 0;
unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
- wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);
for (;;) {
*statusp = RCU_KTHREAD_WAITING;
- wait_event_interruptible(*wqp,
- *workp != 0 || kthread_should_stop());
+ rcu_wait(*workp != 0 || kthread_should_stop());
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
@@ -1675,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
per_cpu(rcu_cpu_kthread_task, cpu) = t;
- wake_up_process(t);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
return 0;
@@ -1698,11 +1673,10 @@ static int rcu_node_kthread(void *arg)
for (;;) {
rnp->node_kthread_status = RCU_KTHREAD_WAITING;
- wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+ rcu_wait(atomic_read(&rnp->wakemask) != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
- mask = rnp->wakemask;
- rnp->wakemask = 0;
+ mask = atomic_xchg(&rnp->wakemask, 0);
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
@@ -1783,13 +1757,14 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->node_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up_process(t);
sp.sched_priority = 99;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
+static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
+
/*
* Spawn all kthreads -- called as soon as the scheduler is running.
*/
@@ -1797,24 +1772,31 @@ static int __init rcu_spawn_kthreads(void)
{
int cpu;
struct rcu_node *rnp;
+ struct task_struct *t;
rcu_kthreads_spawnable = 1;
for_each_possible_cpu(cpu) {
- init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
per_cpu(rcu_cpu_has_work, cpu) = 0;
- if (cpu_online(cpu))
+ if (cpu_online(cpu)) {
(void)rcu_spawn_one_cpu_kthread(cpu);
+ t = per_cpu(rcu_cpu_kthread_task, cpu);
+ if (t)
+ wake_up_process(t);
+ }
}
rnp = rcu_get_root(rcu_state);
- init_waitqueue_head(&rnp->node_wq);
- rcu_init_boost_waitqueue(rnp);
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- if (NUM_RCU_NODES > 1)
+ if (rnp->node_kthread_task)
+ wake_up_process(rnp->node_kthread_task);
+ if (NUM_RCU_NODES > 1) {
rcu_for_each_leaf_node(rcu_state, rnp) {
- init_waitqueue_head(&rnp->node_wq);
- rcu_init_boost_waitqueue(rnp);
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+ t = rnp->node_kthread_task;
+ if (t)
+ wake_up_process(t);
+ rcu_wake_one_boost_kthread(rnp);
}
+ }
return 0;
}
early_initcall(rcu_spawn_kthreads);
@@ -2218,14 +2200,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
-static void __cpuinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_prepare_cpu(int cpu)
{
rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
rcu_preempt_init_percpu_data(cpu);
}
-static void __cpuinit rcu_online_kthreads(int cpu)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
@@ -2239,6 +2221,31 @@ static void __cpuinit rcu_online_kthreads(int cpu)
}
/*
+ * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
+ * but the RCU threads are woken on demand, and if demand is low it
+ * could be a while before they run, triggering the hung task watchdog.
+ *
+ * In order to avoid this, poke all tasks once the CPU is fully
+ * up and running.
+ */
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+ struct rcu_node *rnp = rdp->mynode;
+ struct task_struct *t;
+
+ t = per_cpu(rcu_cpu_kthread_task, cpu);
+ if (t)
+ wake_up_process(t);
+
+ t = rnp->node_kthread_task;
+ if (t)
+ wake_up_process(t);
+
+ rcu_wake_one_boost_kthread(rnp);
+}
+
+/*
* Handle CPU online/offline notification events.
*/
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
@@ -2251,10 +2258,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- rcu_online_cpu(cpu);
- rcu_online_kthreads(cpu);
+ rcu_prepare_cpu(cpu);
+ rcu_prepare_kthreads(cpu);
break;
case CPU_ONLINE:
+ rcu_online_kthreads(cpu);
case CPU_DOWN_FAILED:
rcu_node_kthread_setaffinity(rnp, -1);
rcu_cpu_kthread_setrt(cpu, 1);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 2576648..7b9a08b 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,11 +84,9 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- int dynticks_nesting; /* Track nesting level, sort of. */
- int dynticks; /* Even value for dynticks-idle, else odd. */
- int dynticks_nmi; /* Even value for either dynticks-idle or */
- /* not in nmi handler, else odd. So this */
- /* remains even for nmi from irq handler. */
+ int dynticks_nesting; /* Track irq/process nesting level. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
};
/* RCU's kthread states for tracing. */
@@ -121,7 +119,9 @@ struct rcu_node {
/* elements that need to drain to allow the */
/* current expedited grace period to */
/* complete (only for TREE_PREEMPT_RCU). */
- unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */
+ atomic_t wakemask; /* CPUs whose kthread needs to be awakened. */
+ /* Since this has meaning only for leaf */
+ /* rcu_node structures, 32 bits suffices. */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
@@ -159,9 +159,6 @@ struct rcu_node {
struct task_struct *boost_kthread_task;
/* kthread that takes care of priority */
/* boosting for this rcu_node structure. */
- wait_queue_head_t boost_wq;
- /* Wait queue on which to park the boost */
- /* kthread. */
unsigned int boost_kthread_status;
/* State of boost_kthread_task for tracing. */
unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
/* kthread that takes care of this rcu_node */
/* structure, for example, awakening the */
/* per-CPU kthreads as needed. */
- wait_queue_head_t node_wq;
- /* Wait queue on which to park the per-node */
- /* kthread. */
unsigned int node_kthread_status;
/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;
@@ -284,7 +278,6 @@ struct rcu_data {
/* 3) dynticks interface. */
struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
- int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
@@ -337,6 +330,16 @@ struct rcu_data {
/* scheduling clock irq */
/* before ratting on them. */
+#define rcu_wait(cond) \
+do { \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (cond) \
+ break; \
+ schedule(); \
+ } \
+ __set_current_state(TASK_RUNNING); \
+} while (0)
/*
* RCU global state, including node hierarchy. This hierarchy is
@@ -446,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3f6559a..c8bff30 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
- wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
- rnp->exp_tasks);
+ rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1275,14 +1274,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
}
/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
- init_waitqueue_head(&rnp->boost_wq);
-}
-
-/*
* Create an RCU-boost kthread for the specified node if one does not
* already exist. We only create this kthread for preemptible RCU.
* Returns zero if all is well, a negated errno otherwise.
@@ -1306,12 +1297,17 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->boost_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up_process(t);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
return 0;
}
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+ if (rnp->boost_kthread_task)
+ wake_up_process(rnp->boost_kthread_task);
+}
+
#else /* #ifdef CONFIG_RCU_BOOST */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1328,10 +1324,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index)
@@ -1339,6 +1331,10 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+}
+
#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP
@@ -1520,7 +1516,6 @@ int rcu_needs_cpu(int cpu)
{
int c = 0;
int snap;
- int snap_nmi;
int thatcpu;
/* Check for being in the holdoff period. */
@@ -1531,10 +1526,10 @@ int rcu_needs_cpu(int cpu)
for_each_online_cpu(thatcpu) {
if (thatcpu == cpu)
continue;
- snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
- snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+ snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+ thatcpu).dynticks);
smp_mb(); /* Order sampling of snap with end of grace period. */
- if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+ if ((snap & 0x1) != 0) {
per_cpu(rcu_dyntick_drain, cpu) = 0;
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
return rcu_needs_cpu_quick_check(cpu);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index aa0fd72..9678cc3 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -69,10 +69,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->passed_quiesc, rdp->passed_quiesc_completed,
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
- seq_printf(m, " dt=%d/%d dn=%d df=%lu",
- rdp->dynticks->dynticks,
+ seq_printf(m, " dt=%d/%d/%d df=%lu",
+ atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi,
+ rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -141,9 +141,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, ",%d,%d,%d,%lu",
- rdp->dynticks->dynticks,
+ atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi,
+ rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -167,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
{
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
#ifdef CONFIG_NO_HZ
- seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e43e9d..cbb3a0e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
if (!next)
smp_send_reschedule(cpu);
}
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+ struct rq *rq;
+ int ret = 0;
+
+ rq = __task_rq_lock(p);
+ if (p->on_cpu) {
+ ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+ ttwu_do_wakeup(rq, p, wake_flags);
+ ret = 1;
+ }
+ __task_rq_unlock(rq);
+
+ return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
{
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
while (p->on_cpu) {
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
- * If called from interrupt context we could have landed in the
- * middle of schedule(), in this case we should take care not
- * to spin on ->on_cpu if p is current, since that would
- * deadlock.
+ * In case the architecture enables interrupts in
+ * context_switch(), we cannot busy wait, since that
+ * would lead to deadlocks when an interrupt hits and
+ * tries to wake up @prev. So bail and do a complete
+ * remote wakeup.
*/
- if (p == current) {
- ttwu_queue(p, cpu);
+ if (ttwu_activate_remote(p, wake_flags))
goto stat;
- }
-#endif
+#else
cpu_relax();
+#endif
}
/*
* Pairs with the smp_wmb() in finish_lock_switch().
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
- cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ do_set_cpus_allowed(idle, cpumask_of(cpu));
/*
* We're having a chicken and egg problem, even though we are
* holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
}
#ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ else {
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+}
+
/*
* This is how migration works:
*
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
goto out;
}
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
- else {
- cpumask_copy(&p->cpus_allowed, new_mask);
- p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
- }
+ do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask))
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e32a9b7..433491c2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1076,8 +1076,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->on_rq = 0;
update_cfs_load(cfs_rq, 0);
account_entity_dequeue(cfs_rq, se);
- update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1086,6 +1084,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+
+ update_min_vruntime(cfs_rq);
+ update_cfs_shares(cfs_rq);
}
/*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 64b2a37..88725c9 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1263,6 +1263,7 @@ static int find_lowest_rq(struct task_struct *task)
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
+ rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
@@ -1272,15 +1273,20 @@ static int find_lowest_rq(struct task_struct *task)
* remote processor.
*/
if (this_cpu != -1 &&
- cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+ cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+ rcu_read_unlock();
return this_cpu;
+ }
best_cpu = cpumask_first_and(lowest_mask,
sched_domain_span(sd));
- if (best_cpu < nr_cpu_ids)
+ if (best_cpu < nr_cpu_ids) {
+ rcu_read_unlock();
return best_cpu;
+ }
}
}
+ rcu_read_unlock();
/*
* And finally, if there were no matches within the domains
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 48ddf43..331e01b 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -37,7 +37,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
#ifdef CONFIG_SMP
/* domain-specific stats */
- preempt_disable();
+ rcu_read_lock();
for_each_domain(cpu, sd) {
enum cpu_idle_type itype;
@@ -64,7 +64,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->ttwu_wake_remote, sd->ttwu_move_affine,
sd->ttwu_move_balance);
}
- preempt_enable();
+ rcu_read_unlock();
#endif
}
kfree(mask_str);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d017c2c..1ee417f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
static void ftrace_global_list_func(unsigned long ip,
unsigned long parent_ip)
{
- struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+ struct ftrace_ops *op;
+
+ if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+ return;
+ trace_recursion_set(TRACE_GLOBAL_BIT);
+ op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) {
op->func(ip, parent_ip);
op = rcu_dereference_raw(op->next); /*see above*/
};
+ trace_recursion_clear(TRACE_GLOBAL_BIT);
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
ftrace_run_update_code(command);
}
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
{
bool hash_enable = true;
if (unlikely(ftrace_disabled))
- return;
+ return -ENODEV;
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_hash_rec_enable(ops, 1);
ftrace_startup_enable(command);
+
+ return 0;
}
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
ret = __register_ftrace_function(&trace_probe_ops);
if (!ret)
- ftrace_startup(&trace_probe_ops, 0);
+ ret = ftrace_startup(&trace_probe_ops, 0);
ftrace_probe_registered = 1;
}
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) do { } while (0)
+# define ftrace_startup(ops, command) \
+ ({ \
+ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+ 0; \
+ })
# define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op;
+ if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+ return;
+
+ trace_recursion_set(TRACE_INTERNAL_BIT);
/*
* Some of the ops may be dynamically allocated,
* they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
op = rcu_dereference_raw(op->next);
};
preempt_enable_notrace();
+ trace_recursion_clear(TRACE_INTERNAL_BIT);
}
static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
ret = __register_ftrace_function(ops);
if (!ret)
- ftrace_startup(ops, 0);
+ ret = ftrace_startup(ops, 0);
out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;
- ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
out:
mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0ef7b4b..b0c7aa4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
"HC[%lu]:SC[%lu]:NMI[%lu]\n",
- current->trace_recursion,
+ trace_recursion_buffer(),
hardirq_count() >> HARDIRQ_SHIFT,
softirq_count() >> SOFTIRQ_SHIFT,
in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
static inline int trace_recursive_lock(void)
{
- current->trace_recursion++;
+ trace_recursion_inc();
- if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+ if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
return 0;
trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
static inline void trace_recursive_unlock(void)
{
- WARN_ON_ONCE(!current->trace_recursion);
+ WARN_ON_ONCE(!trace_recursion_buffer());
- current->trace_recursion--;
+ trace_recursion_dec();
}
#else
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b69c4b..229f859 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* The ring buffer uses the 10 least-significant bits as a recursion counter */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT (1<<11)
+#define TRACE_GLOBAL_BIT (1<<12)
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
+
#endif /* _LINUX_KERNEL_TRACE_H */
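
The trace_recursion helpers above pack a per-task guard word: the low 10 bits count ring-buffer recursion depth, while TRACE_INTERNAL_BIT and TRACE_GLOBAL_BIT let ftrace_ops_list_func() and ftrace_global_list_func() bail out if the tracer ends up tracing itself. A stand-alone sketch of the same self-recursion guard, with a thread-local word standing in for current->trace_recursion (illustrative only):

/* Re-entrancy guard in the style of trace_recursion_set()/_clear().
 * __thread stands in for the per-task field; the bit value is arbitrary. */
#include <stdio.h>

#define GUARD_BIT	(1 << 11)

static __thread unsigned long recursion;

static void traced_work(int depth);

static void list_func(int depth)
{
	if (recursion & GUARD_BIT)		/* already inside: bail out */
		return;
	recursion |= GUARD_BIT;
	printf("handling event at depth %d\n", depth);
	traced_work(depth + 1);			/* may call back into list_func() */
	recursion &= ~GUARD_BIT;
}

static void traced_work(int depth)
{
	if (depth < 3)
		list_func(depth);		/* cut off by the guard bit */
}

int main(void)
{
	list_func(0);				/* prints exactly one line */
	return 0;
}
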
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2fe1103..686ec39 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =
static __init void event_trace_self_test_with_function(void)
{
- register_ftrace_function(&trace_ops);
+ int ret;
+ ret = register_ftrace_function(&trace_ops);
+ if (WARN_ON(ret < 0)) {
+ pr_info("Failed to enable function tracer for event tests\n");
+ return;
+ }
pr_info("Running tests again, along with the function tracer\n");
event_trace_self_tests();
unregister_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index cf535cc..e37de492 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+ const struct trace_print_flags_u64 *symbol_array)
+{
+ int i;
+ const char *ret = p->buffer + p->len;
+
+ for (i = 0; symbol_array[i].name; i++) {
+
+ if (val != symbol_array[i].mask)
+ continue;
+
+ trace_seq_puts(p, symbol_array[i].name);
+ break;
+ }
+
+ if (!p->len)
+ trace_seq_printf(p, "0x%llx", val);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
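
ftrace_print_symbols_seq_u64() above walks a {mask, name} table, emits the matching name, and falls back to a hex rendering of the value. A rough userspace sketch of the same lookup, with snprintf into a caller buffer standing in for the trace_seq machinery (the table contents are made up):

#include <stdio.h>
#include <stdint.h>

struct print_flags_u64 {
	uint64_t mask;
	const char *name;
};

/* Return the symbolic name for val, or a hex fallback, written into buf. */
static const char *print_symbol_u64(char *buf, size_t len, uint64_t val,
				    const struct print_flags_u64 *table)
{
	for (int i = 0; table[i].name; i++) {
		if (val == table[i].mask) {
			snprintf(buf, len, "%s", table[i].name);
			return buf;
		}
	}
	snprintf(buf, len, "0x%llx", (unsigned long long)val);
	return buf;
}

int main(void)
{
	static const struct print_flags_u64 table[] = {
		{ 1ULL << 32, "BIG_FLAG" },
		{ 2,          "SMALL_FLAG" },
		{ 0, NULL }
	};
	char buf[32];

	printf("%s\n", print_symbol_u64(buf, sizeof(buf), 1ULL << 32, table));
	printf("%s\n", print_symbol_u64(buf, sizeof(buf), 7, table));
	return 0;
}
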
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7daa4b0..3d0c56a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -415,15 +415,13 @@ static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/* prepare/enable/disable routines */
-static int watchdog_prepare_cpu(int cpu)
+static void watchdog_prepare_cpu(int cpu)
{
struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
-
- return 0;
}
static int watchdog_enable(int cpu)
@@ -542,17 +540,16 @@ static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
- int err = 0;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- err = watchdog_prepare_cpu(hotcpu);
+ watchdog_prepare_cpu(hotcpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
if (watchdog_enabled)
- err = watchdog_enable(hotcpu);
+ watchdog_enable(hotcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 619313e..507a22f 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -144,7 +144,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
- irq_enter(); \
+ __irq_enter(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
diff --git a/mm/filemap.c b/mm/filemap.c
index bcdc393..d7b1057 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1982,16 +1982,26 @@ static int __remove_suid(struct dentry *dentry, int kill)
int file_remove_suid(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
- int killsuid = should_remove_suid(dentry);
- int killpriv = security_inode_need_killpriv(dentry);
+ struct inode *inode = dentry->d_inode;
+ int killsuid;
+ int killpriv;
int error = 0;
+ /* Fast path for nothing security related */
+ if (IS_NOSEC(inode))
+ return 0;
+
+ killsuid = should_remove_suid(dentry);
+ killpriv = security_inode_need_killpriv(dentry);
+
if (killpriv < 0)
return killpriv;
if (killpriv)
error = security_inode_killpriv(dentry);
if (!error && killsuid)
error = __remove_suid(dentry, killsuid);
+ if (!error)
+ inode->i_flags |= S_NOSEC;
return error;
}
@@ -2327,7 +2337,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
repeat:
page = find_lock_page(mapping, index);
if (page)
- return page;
+ goto found;
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
if (!page)
@@ -2340,6 +2350,8 @@ repeat:
goto repeat;
return NULL;
}
+found:
+ wait_on_page_writeback(page);
return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
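
The file_remove_suid() change above adds a cached fast path: once the per-write security checks have run and found nothing to strip, S_NOSEC is set on the inode so later writes can skip them entirely. A small sketch of that caching pattern under hypothetical names (FLAG_NOSEC, slow_security_check), not the VFS code itself:

#include <stdio.h>

#define FLAG_NOSEC 0x1          /* stands in for S_NOSEC */

struct obj {
	unsigned int flags;
};

static int expensive_checks;    /* counts how often the slow path runs */

static int slow_security_check(struct obj *o)
{
	expensive_checks++;
	return 0;               /* pretend nothing had to be stripped */
}

static int remove_suid_like(struct obj *o)
{
	if (o->flags & FLAG_NOSEC)      /* fast path: nothing to do */
		return 0;

	int err = slow_security_check(o);
	if (!err)
		o->flags |= FLAG_NOSEC; /* cache the negative result */
	return err;
}

int main(void)
{
	struct obj o = { .flags = 0 };

	for (int i = 0; i < 1000; i++)
		remove_suid_like(&o);

	printf("expensive checks run: %d of 1000 writes\n", expensive_checks);
	return 0;
}
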
diff --git a/mm/maccess.c b/mm/maccess.c
index e2b6f56..4cee182 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -15,10 +15,10 @@
* happens, handle that and return -EFAULT.
*/
-long __weak probe_kernel_read(void *dst, void *src, size_t size)
+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_read")));
-long __probe_kernel_read(void *dst, void *src, size_t size)
+long __probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_write")));
-long __probe_kernel_write(void *dst, void *src, size_t size)
+long __probe_kernel_write(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a4e1db3..4e8985a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2247,10 +2247,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (should_fail_alloc_page(gfp_mask, order))
return NULL;
-#ifndef CONFIG_ZONE_DMA
- if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
- return NULL;
-#endif
/*
* Check the zones suitable for the gfp_mask contain at least one
diff --git a/mm/rmap.c b/mm/rmap.c
index 3a39b51..0eb463e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -352,6 +352,11 @@ void __init anon_vma_init(void)
* The page might have been remapped to a different anon_vma or the anon_vma
* returned may already be freed (and even reused).
*
+ * In case it was remapped to a different anon_vma, the new anon_vma will be a
+ * child of the old anon_vma, and the anon_vma lifetime rules will therefore
+ * ensure that any anon_vma obtained from the page will still be valid for as
+ * long as we observe page_mapped() [ hence all those page_mapped() tests ].
+ *
* All users of this function must be very careful when walking the anon_vma
* chain and verify that the page in question is indeed mapped in it
* [ something equivalent to page_mapped_in_vma() ].
@@ -405,6 +410,7 @@ out:
struct anon_vma *page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma = NULL;
+ struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
@@ -415,13 +421,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- if (mutex_trylock(&anon_vma->root->mutex)) {
+ root_anon_vma = ACCESS_ONCE(anon_vma->root);
+ if (mutex_trylock(&root_anon_vma->mutex)) {
/*
- * If we observe a !0 refcount, then holding the lock ensures
- * the anon_vma will not go away, see __put_anon_vma().
+ * If the page is still mapped, then this anon_vma is still
+ * its anon_vma, and holding the mutex ensures that it will
+ * not go away, see anon_vma_free().
*/
- if (!atomic_read(&anon_vma->refcount)) {
- anon_vma_unlock(anon_vma);
+ if (!page_mapped(page)) {
+ mutex_unlock(&root_anon_vma->mutex);
anon_vma = NULL;
}
goto out;
@@ -1014,7 +1022,7 @@ void do_page_add_anon_rmap(struct page *page,
return;
VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+ /* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
else
@@ -1709,7 +1717,7 @@ void hugepage_add_anon_rmap(struct page *page,
BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+ /* address might be in next vma when migration races vma_adjust */
first = atomic_inc_and_test(&page->_mapcount);
if (first)
__hugepage_set_anon_rmap(page, vma, address, 0);
diff --git a/mm/shmem.c b/mm/shmem.c
index 1acfb26..d221a1c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1114,8 +1114,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
delete_from_page_cache(page);
shmem_swp_set(info, entry, swap.val);
shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
swap_shmem_alloc(swap);
+ spin_unlock(&info->lock);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8d83f9d..b84d739 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -13,10 +13,6 @@
* and need to be refreshed, or when a packet was damaged in transit.
* This may be have to be moved to the VFS layer.
*
- * NB: BSD uses a more intelligent approach to guessing when a request
- * or reply has been lost by keeping the RTO estimate for each procedure.
- * We currently make do with a constant timeout value.
- *
* Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
* Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
*/
@@ -32,7 +28,9 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
+#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -298,22 +296,27 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
* up a string representation of the passed-in address.
*/
if (args->servername == NULL) {
+ struct sockaddr_un *sun =
+ (struct sockaddr_un *)args->address;
+ struct sockaddr_in *sin =
+ (struct sockaddr_in *)args->address;
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)args->address;
+
servername[0] = '\0';
switch (args->address->sa_family) {
- case AF_INET: {
- struct sockaddr_in *sin =
- (struct sockaddr_in *)args->address;
+ case AF_LOCAL:
+ snprintf(servername, sizeof(servername), "%s",
+ sun->sun_path);
+ break;
+ case AF_INET:
snprintf(servername, sizeof(servername), "%pI4",
&sin->sin_addr.s_addr);
break;
- }
- case AF_INET6: {
- struct sockaddr_in6 *sin =
- (struct sockaddr_in6 *)args->address;
+ case AF_INET6:
snprintf(servername, sizeof(servername), "%pI6",
- &sin->sin6_addr);
+ &sin6->sin6_addr);
break;
- }
default:
/* caller wants default server name, but
* address family isn't recognized. */
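
The rpc_create() hunk above derives a default servername from the address when none is given, now covering AF_LOCAL alongside AF_INET and AF_INET6. A userspace approximation, using inet_ntop() in place of the kernel's %pI4/%pI6 format extensions (default_servername is an invented helper name):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static void default_servername(const struct sockaddr *sap, char *buf, size_t len)
{
	buf[0] = '\0';
	switch (sap->sa_family) {
	case AF_LOCAL:          /* named socket: use the path itself */
		snprintf(buf, len, "%s",
			 ((const struct sockaddr_un *)sap)->sun_path);
		break;
	case AF_INET:
		inet_ntop(AF_INET, &((const struct sockaddr_in *)sap)->sin_addr,
			  buf, (socklen_t)len);
		break;
	case AF_INET6:
		inet_ntop(AF_INET6, &((const struct sockaddr_in6 *)sap)->sin6_addr,
			  buf, (socklen_t)len);
		break;
	default:
		break;          /* caller keeps its own default name */
	}
}

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_LOCAL };
	char name[108];

	strcpy(sun.sun_path, "/var/run/rpcbind.sock");
	default_servername((struct sockaddr *)&sun, name, sizeof(name));
	printf("servername: %s\n", name);
	return 0;
}
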
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c652e4c..9a80a92 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/un.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>
@@ -32,6 +33,8 @@
# define RPCDBG_FACILITY RPCDBG_BIND
#endif
+#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
+
#define RPCBIND_PROGRAM (100000u)
#define RPCBIND_PORT (111u)
@@ -158,20 +161,69 @@ static void rpcb_map_release(void *data)
kfree(map);
}
-static const struct sockaddr_in rpcb_inaddr_loopback = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
- .sin_port = htons(RPCBIND_PORT),
-};
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local_unix(void)
+{
+ static const struct sockaddr_un rpcb_localaddr_rpcbind = {
+ .sun_family = AF_LOCAL,
+ .sun_path = RPCBIND_SOCK_PATHNAME,
+ };
+ struct rpc_create_args args = {
+ .net = &init_net,
+ .protocol = XPRT_TRANSPORT_LOCAL,
+ .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
+ .addrsize = sizeof(rpcb_localaddr_rpcbind),
+ .servername = "localhost",
+ .program = &rpcb_program,
+ .version = RPCBVERS_2,
+ .authflavor = RPC_AUTH_NULL,
+ };
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
+
+ /*
+ * Because we requested an RPC PING at transport creation time,
+ * this works only if the user space portmapper is rpcbind, and
+ * it's listening on AF_LOCAL on the named socket.
+ */
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create AF_LOCAL rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+ result = -PTR_ERR(clnt);
+ goto out;
+ }
+
+ clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
+ if (IS_ERR(clnt4)) {
+ dprintk("RPC: failed to bind second program to "
+ "rpcbind v4 client (errno %ld).\n",
+ PTR_ERR(clnt4));
+ clnt4 = NULL;
+ }
+
+ /* Protected by rpcb_create_local_mutex */
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
-static DEFINE_MUTEX(rpcb_create_local_mutex);
+out:
+ return result;
+}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
-static int rpcb_create_local(void)
+static int rpcb_create_local_net(void)
{
+ static const struct sockaddr_in rpcb_inaddr_loopback = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+ .sin_port = htons(RPCBIND_PORT),
+ };
struct rpc_create_args args = {
.net = &init_net,
.protocol = XPRT_TRANSPORT_TCP,
@@ -186,13 +238,6 @@ static int rpcb_create_local(void)
struct rpc_clnt *clnt, *clnt4;
int result = 0;
- if (rpcb_local_clnt)
- return result;
-
- mutex_lock(&rpcb_create_local_mutex);
- if (rpcb_local_clnt)
- goto out;
-
clnt = rpc_create(&args);
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create local rpcbind "
@@ -214,10 +259,34 @@ static int rpcb_create_local(void)
clnt4 = NULL;
}
+ /* Protected by rpcb_create_local_mutex */
rpcb_local_clnt = clnt;
rpcb_local_clnt4 = clnt4;
out:
+ return result;
+}
+
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local(void)
+{
+ static DEFINE_MUTEX(rpcb_create_local_mutex);
+ int result = 0;
+
+ if (rpcb_local_clnt)
+ return result;
+
+ mutex_lock(&rpcb_create_local_mutex);
+ if (rpcb_local_clnt)
+ goto out;
+
+ if (rpcb_create_local_unix() != 0)
+ result = rpcb_create_local_net();
+
+out:
mutex_unlock(&rpcb_create_local_mutex);
return result;
}
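
rpcb_create_local() above now tries the AF_LOCAL rpcbind socket first and only falls back to the loopback network client when that fails, with the whole sequence serialized by a local mutex and a create-once check. A stripped-down sketch of the same control flow, with plain ints standing in for the RPC clients and a failing create_unix() to force the fallback:

#include <pthread.h>
#include <stdio.h>

static int local_clnt;                          /* 0 == not created yet */
static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER;

static int create_unix(void) { return -1; }     /* pretend rpcbind has no socket */
static int create_net(void)  { local_clnt = 1; return 0; }

static int create_local(void)
{
	int result = 0;

	if (local_clnt)                         /* already created, cheap exit */
		return 0;

	pthread_mutex_lock(&create_mutex);
	if (local_clnt)                         /* lost the race, someone did it */
		goto out;

	if (create_unix() != 0)                 /* AF_LOCAL first ... */
		result = create_net();          /* ... then the network fallback */
out:
	pthread_mutex_unlock(&create_mutex);
	return result;
}

int main(void)
{
	printf("create_local: %d, client present: %d\n",
	       create_local(), local_clnt);
	return 0;
}
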
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 08e05a8..2b90292 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -942,6 +942,8 @@ static void svc_unregister(const struct svc_serv *serv)
if (progp->pg_vers[i]->vs_hidden)
continue;
+ dprintk("svc: attempting to unregister %sv%u\n",
+ progp->pg_name, i);
__svc_unregister(progp->pg_prog, i, progp->pg_name);
}
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b7d435c..af04f77 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -387,6 +387,33 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
return len;
}
+static int svc_partial_recvfrom(struct svc_rqst *rqstp,
+ struct kvec *iov, int nr,
+ int buflen, unsigned int base)
+{
+ size_t save_iovlen;
+ void __user *save_iovbase;
+ unsigned int i;
+ int ret;
+
+ if (base == 0)
+ return svc_recvfrom(rqstp, iov, nr, buflen);
+
+ for (i = 0; i < nr; i++) {
+ if (iov[i].iov_len > base)
+ break;
+ base -= iov[i].iov_len;
+ }
+ save_iovlen = iov[i].iov_len;
+ save_iovbase = iov[i].iov_base;
+ iov[i].iov_len -= base;
+ iov[i].iov_base += base;
+ ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen);
+ iov[i].iov_len = save_iovlen;
+ iov[i].iov_base = save_iovbase;
+ return ret;
+}
+
/*
* Set socket snd and rcv buffer lengths
*/
@@ -409,7 +436,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
lock_sock(sock->sk);
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
- sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
#endif
@@ -884,6 +910,56 @@ failed:
return NULL;
}
+static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ return 0;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ if (rqstp->rq_pages[i] != NULL)
+ put_page(rqstp->rq_pages[i]);
+ BUG_ON(svsk->sk_pages[i] == NULL);
+ rqstp->rq_pages[i] = svsk->sk_pages[i];
+ svsk->sk_pages[i] = NULL;
+ }
+ rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
+ return len;
+}
+
+static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ return;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ svsk->sk_pages[i] = rqstp->rq_pages[i];
+ rqstp->rq_pages[i] = NULL;
+ }
+}
+
+static void svc_tcp_clear_pages(struct svc_sock *svsk)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ goto out;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ BUG_ON(svsk->sk_pages[i] == NULL);
+ put_page(svsk->sk_pages[i]);
+ svsk->sk_pages[i] = NULL;
+ }
+out:
+ svsk->sk_tcplen = 0;
+}
+
/*
* Receive data.
* If we haven't gotten the record length yet, get the next four bytes.
@@ -893,31 +969,15 @@ failed:
static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ unsigned int want;
int len;
- if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
- /* sndbuf needs to have room for one request
- * per thread, otherwise we can stall even when the
- * network isn't a bottleneck.
- *
- * We count all threads rather than threads in a
- * particular pool, which provides an upper bound
- * on the number of threads which will access the socket.
- *
- * rcvbuf just needs to be able to hold a few requests.
- * Normally they will be removed from the queue
- * as soon a a complete request arrives.
- */
- svc_sock_setbufsize(svsk->sk_sock,
- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
- 3 * serv->sv_max_mesg);
-
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
- int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
struct kvec iov;
+ want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
@@ -927,7 +987,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < want) {
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
- goto err_again; /* record header not complete */
+ return -EAGAIN;
}
svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -954,83 +1014,75 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
}
}
- /* Check whether enough data is available */
- len = svc_recv_available(svsk);
- if (len < 0)
- goto error;
+ if (svsk->sk_reclen < 8)
+ goto err_delete; /* client is nuts. */
- if (len < svsk->sk_reclen) {
- dprintk("svc: incomplete TCP record (%d of %d)\n",
- len, svsk->sk_reclen);
- goto err_again; /* record not complete */
- }
len = svsk->sk_reclen;
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
return len;
- error:
- if (len == -EAGAIN)
- dprintk("RPC: TCP recv_record got EAGAIN\n");
+error:
+ dprintk("RPC: TCP recv_record got %d\n", len);
return len;
- err_delete:
+err_delete:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- err_again:
return -EAGAIN;
}
-static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
- struct rpc_rqst **reqpp, struct kvec *vec)
+static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
+ struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
struct rpc_rqst *req = NULL;
- u32 *p;
- u32 xid;
- u32 calldir;
- int len;
-
- len = svc_recvfrom(rqstp, vec, 1, 8);
- if (len < 0)
- goto error;
+ struct kvec *src, *dst;
+ __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ __be32 xid;
+ __be32 calldir;
- p = (u32 *)rqstp->rq_arg.head[0].iov_base;
xid = *p++;
calldir = *p;
- if (calldir == 0) {
- /* REQUEST is the most common case */
- vec[0] = rqstp->rq_arg.head[0];
- } else {
- /* REPLY */
- struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
-
- if (bc_xprt)
- req = xprt_lookup_rqst(bc_xprt, xid);
-
- if (!req) {
- printk(KERN_NOTICE
- "%s: Got unrecognized reply: "
- "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
- __func__, ntohl(calldir),
- bc_xprt, xid);
- vec[0] = rqstp->rq_arg.head[0];
- goto out;
- }
+ if (bc_xprt)
+ req = xprt_lookup_rqst(bc_xprt, xid);
- memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
- sizeof(struct xdr_buf));
- /* copy the xid and call direction */
- memcpy(req->rq_private_buf.head[0].iov_base,
- rqstp->rq_arg.head[0].iov_base, 8);
- vec[0] = req->rq_private_buf.head[0];
+ if (!req) {
+ printk(KERN_NOTICE
+ "%s: Got unrecognized reply: "
+ "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+ __func__, ntohl(calldir),
+ bc_xprt, xid);
+ return -EAGAIN;
}
- out:
- vec[0].iov_base += 8;
- vec[0].iov_len -= 8;
- len = svsk->sk_reclen - 8;
- error:
- *reqpp = req;
- return len;
+
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
+ /*
+ * XXX!: cheating for now! Only copying HEAD.
+ * But we know this is good enough for now (in fact, for any
+ * callback reply in the foreseeable future).
+ */
+ dst = &req->rq_private_buf.head[0];
+ src = &rqstp->rq_arg.head[0];
+ if (dst->iov_len < src->iov_len)
+ return -EAGAIN; /* whatever; just giving up. */
+ memcpy(dst->iov_base, src->iov_base, src->iov_len);
+ xprt_complete_rqst(req->rq_task, svsk->sk_reclen);
+ rqstp->rq_arg.len = 0;
+ return 0;
}
+static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
+{
+ int i = 0;
+ int t = 0;
+
+ while (t < len) {
+ vec[i].iov_base = page_address(pages[i]);
+ vec[i].iov_len = PAGE_SIZE;
+ i++;
+ t += PAGE_SIZE;
+ }
+ return i;
+}
+
+
/*
* Receive data from a TCP socket.
*/
@@ -1041,8 +1093,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
int len;
struct kvec *vec;
- int pnum, vlen;
- struct rpc_rqst *req = NULL;
+ unsigned int want, base;
+ __be32 *p;
+ __be32 calldir;
+ int pnum;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
@@ -1053,87 +1107,73 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (len < 0)
goto error;
+ base = svc_tcp_restore_pages(svsk, rqstp);
+ want = svsk->sk_reclen - base;
+
vec = rqstp->rq_vec;
- vec[0] = rqstp->rq_arg.head[0];
- vlen = PAGE_SIZE;
- /*
- * We have enough data for the whole tcp record. Let's try and read the
- * first 8 bytes to get the xid and the call direction. We can use this
- * to figure out if this is a call or a reply to a callback. If
- * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
- * In that case, don't bother with the calldir and just read the data.
- * It will be rejected in svc_process.
- */
- if (len >= 8) {
- len = svc_process_calldir(svsk, rqstp, &req, vec);
- if (len < 0)
- goto err_again;
- vlen -= 8;
- }
+ pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
+ svsk->sk_reclen);
- pnum = 1;
- while (vlen < len) {
- vec[pnum].iov_base = (req) ?
- page_address(req->rq_private_buf.pages[pnum - 1]) :
- page_address(rqstp->rq_pages[pnum]);
- vec[pnum].iov_len = PAGE_SIZE;
- pnum++;
- vlen += PAGE_SIZE;
- }
rqstp->rq_respages = &rqstp->rq_pages[pnum];
/* Now receive data */
- len = svc_recvfrom(rqstp, vec, pnum, len);
- if (len < 0)
- goto err_again;
-
- /*
- * Account for the 8 bytes we read earlier
- */
- len += 8;
-
- if (req) {
- xprt_complete_rqst(req->rq_task, len);
- len = 0;
- goto out;
+ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
+ if (len >= 0)
+ svsk->sk_tcplen += len;
+ if (len != want) {
+ if (len < 0 && len != -EAGAIN)
+ goto err_other;
+ svc_tcp_save_pages(svsk, rqstp);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_noclose;
}
- dprintk("svc: TCP complete record (%d bytes)\n", len);
- rqstp->rq_arg.len = len;
+
+ rqstp->rq_arg.len = svsk->sk_reclen;
rqstp->rq_arg.page_base = 0;
- if (len <= rqstp->rq_arg.head[0].iov_len) {
- rqstp->rq_arg.head[0].iov_len = len;
+ if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
+ rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
rqstp->rq_arg.page_len = 0;
- } else {
- rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
- }
+ } else
+ rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
-out:
+ p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ calldir = p[1];
+ if (calldir)
+ len = receive_cb_reply(svsk, rqstp);
+
/* Reset TCP read info */
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
+ /* If we have more data, signal svc_xprt_enqueue() to try again */
+ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
+ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+
+ if (len < 0)
+ goto error;
svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
if (serv->sv_stats)
serv->sv_stats->nettcpcnt++;
- return len;
+ dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
+ return rqstp->rq_arg.len;
-err_again:
- if (len == -EAGAIN) {
- dprintk("RPC: TCP recvfrom got EAGAIN\n");
- return len;
- }
error:
- if (len != -EAGAIN) {
- printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
- svsk->sk_xprt.xpt_server->sv_name, -len);
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- }
+ if (len != -EAGAIN)
+ goto err_other;
+ dprintk("RPC: TCP recvfrom got EAGAIN\n");
return -EAGAIN;
+err_other:
+ printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
+ svsk->sk_xprt.xpt_server->sv_name, -len);
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+err_noclose:
+ return -EAGAIN; /* record not complete */
}
/*
@@ -1304,18 +1344,10 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
+ memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
- /* initialise setting must have enough space to
- * receive and respond to one request.
- * svc_tcp_recvfrom will re-adjust if necessary
- */
- svc_sock_setbufsize(svsk->sk_sock,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
-
- set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
if (sk->sk_state != TCP_ESTABLISHED)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1379,8 +1411,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
/* Initialize the socket */
if (sock->type == SOCK_DGRAM)
svc_udp_init(svsk, serv);
- else
+ else {
+ /* initial setting must have enough space to
+ * receive and respond to one request.
+ */
+ svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
+ 4 * serv->sv_max_mesg);
svc_tcp_init(svsk, serv);
+ }
dprintk("svc: svc_setup_socket created %p (inet %p)\n",
svsk, svsk->sk_sk);
@@ -1562,8 +1600,10 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
svc_sock_detach(xprt);
- if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
+ if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ svc_tcp_clear_pages(svsk);
kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
+ }
}
/*
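
svc_partial_recvfrom() above resumes a partially received TCP record: it skips the kvec entries that were already filled on an earlier pass, temporarily trims the first partially filled one, and restores it afterwards. A userspace sketch of that iovec arithmetic; fake_recvfrom() simply reports how many bytes it was asked for, and all names here are illustrative:

#include <stdio.h>
#include <sys/uio.h>

static ssize_t fake_recvfrom(struct iovec *iov, int nr)
{
	size_t total = 0;
	for (int i = 0; i < nr; i++)
		total += iov[i].iov_len;
	return (ssize_t)total;          /* pretend everything arrived */
}

static ssize_t partial_recvfrom(struct iovec *iov, int nr, size_t base)
{
	if (base == 0)
		return fake_recvfrom(iov, nr);

	int i;
	for (i = 0; i < nr; i++) {      /* skip fully-consumed entries */
		if (iov[i].iov_len > base)
			break;
		base -= iov[i].iov_len;
	}

	size_t save_len = iov[i].iov_len;
	void *save_base = iov[i].iov_base;

	iov[i].iov_len -= base;         /* trim the partially-consumed entry */
	iov[i].iov_base = (char *)iov[i].iov_base + base;
	ssize_t ret = fake_recvfrom(&iov[i], nr - i);

	iov[i].iov_len = save_len;      /* restore for the next pass */
	iov[i].iov_base = save_base;
	return ret;
}

int main(void)
{
	static char pages[3][4096];
	struct iovec iov[3];

	for (int i = 0; i < 3; i++) {
		iov[i].iov_base = pages[i];
		iov[i].iov_len = sizeof(pages[i]);
	}

	/* 5000 bytes already read: page 0 is done, page 1 is part-way */
	printf("still wanted: %zd bytes\n", partial_recvfrom(iov, 3, 5000));
	return 0;
}
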
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 679cd67..f008c14 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -638,6 +638,25 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
+/**
+ * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to XDR buffer from which to decode data
+ * @pages: list of pages to decode into
+ * @len: length in bytes of buffer in pages
+ */
+void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, unsigned int len)
+{
+ memset(buf, 0, sizeof(*buf));
+ buf->pages = pages;
+ buf->page_len = len;
+ buf->buflen = len;
+ buf->len = len;
+ xdr_init_decode(xdr, buf, NULL);
+}
+EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
+
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf005d3..72abb73 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -19,6 +19,7 @@
*/
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
@@ -28,6 +29,7 @@
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
+#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
@@ -45,6 +47,9 @@
#include <net/tcp.h>
#include "sunrpc.h"
+
+static void xs_close(struct rpc_xprt *xprt);
+
/*
* xprtsock tunables
*/
@@ -261,6 +266,11 @@ static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
return (struct sockaddr *) &xprt->addr;
}
+static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
+{
+ return (struct sockaddr_un *) &xprt->addr;
+}
+
static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
return (struct sockaddr_in *) &xprt->addr;
@@ -276,23 +286,34 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
struct sockaddr *sap = xs_addr(xprt);
struct sockaddr_in6 *sin6;
struct sockaddr_in *sin;
+ struct sockaddr_un *sun;
char buf[128];
- (void)rpc_ntop(sap, buf, sizeof(buf));
- xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
-
switch (sap->sa_family) {
+ case AF_LOCAL:
+ sun = xs_addr_un(xprt);
+ strlcpy(buf, sun->sun_path, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
+ break;
case AF_INET:
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
sin = xs_addr_in(xprt);
snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
break;
case AF_INET6:
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
sin6 = xs_addr_in6(xprt);
snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
break;
default:
BUG();
}
+
xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}
@@ -495,6 +516,70 @@ static int xs_nospace(struct rpc_task *task)
return ret;
}
+/*
+ * Construct a stream transport record marker in @buf.
+ */
+static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
+{
+ u32 reclen = buf->len - sizeof(rpc_fraghdr);
+ rpc_fraghdr *base = buf->head[0].iov_base;
+ *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
+}
+
+/**
+ * xs_local_send_request - write an RPC request to an AF_LOCAL socket
+ * @task: RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ * 0: The request has been sent
+ * EAGAIN: The socket was blocked, please call again later to
+ * complete the request
+ * ENOTCONN: Caller needs to invoke connect logic then call again
+ * other: Some other error occurred, the request was not sent
+ */
+static int xs_local_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct xdr_buf *xdr = &req->rq_snd_buf;
+ int status;
+
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
+
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base, req->rq_svec->iov_len);
+
+ status = xs_sendpages(transport->sock, NULL, 0,
+ xdr, req->rq_bytes_sent);
+ dprintk("RPC: %s(%u) = %d\n",
+ __func__, xdr->len - req->rq_bytes_sent, status);
+ if (likely(status >= 0)) {
+ req->rq_bytes_sent += status;
+ req->rq_xmit_bytes_sent += status;
+ if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+ req->rq_bytes_sent = 0;
+ return 0;
+ }
+ status = -EAGAIN;
+ }
+
+ switch (status) {
+ case -EAGAIN:
+ status = xs_nospace(task);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ case -EPIPE:
+ xs_close(xprt);
+ status = -ENOTCONN;
+ }
+
+ return status;
+}
+
/**
* xs_udp_send_request - write an RPC request to a UDP socket
* @task: address of RPC task that manages the state of an RPC request
@@ -574,13 +659,6 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
kernel_sock_shutdown(sock, SHUT_WR);
}
-static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
-{
- u32 reclen = buf->len - sizeof(rpc_fraghdr);
- rpc_fraghdr *base = buf->head[0].iov_base;
- *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
-}
-
/**
* xs_tcp_send_request - write an RPC request to a TCP socket
* @task: address of RPC task that manages the state of an RPC request
@@ -603,7 +681,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
struct xdr_buf *xdr = &req->rq_snd_buf;
int status;
- xs_encode_tcp_record_marker(&req->rq_snd_buf);
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
req->rq_svec->iov_base,
@@ -785,6 +863,88 @@ static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
return (struct rpc_xprt *) sk->sk_user_data;
}
+static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
+{
+ struct xdr_skb_reader desc = {
+ .skb = skb,
+ .offset = sizeof(rpc_fraghdr),
+ .count = skb->len - sizeof(rpc_fraghdr),
+ };
+
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
+ return -1;
+ if (desc.count)
+ return -1;
+ return 0;
+}
+
+/**
+ * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
+ * @sk: socket with data to read
+ * @len: how much data to read
+ *
+ * Currently this assumes we can read the whole reply in a single gulp.
+ */
+static void xs_local_data_ready(struct sock *sk, int len)
+{
+ struct rpc_task *task;
+ struct rpc_xprt *xprt;
+ struct rpc_rqst *rovr;
+ struct sk_buff *skb;
+ int err, repsize, copied;
+ u32 _xid;
+ __be32 *xp;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ dprintk("RPC: %s...\n", __func__);
+ xprt = xprt_from_sock(sk);
+ if (xprt == NULL)
+ goto out;
+
+ skb = skb_recv_datagram(sk, 0, 1, &err);
+ if (skb == NULL)
+ goto out;
+
+ if (xprt->shutdown)
+ goto dropit;
+
+ repsize = skb->len - sizeof(rpc_fraghdr);
+ if (repsize < 4) {
+ dprintk("RPC: impossible RPC reply size %d\n", repsize);
+ goto dropit;
+ }
+
+ /* Copy the XID from the skb... */
+ xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
+ if (xp == NULL)
+ goto dropit;
+
+ /* Look up and lock the request corresponding to the given XID */
+ spin_lock(&xprt->transport_lock);
+ rovr = xprt_lookup_rqst(xprt, *xp);
+ if (!rovr)
+ goto out_unlock;
+ task = rovr->rq_task;
+
+ copied = rovr->rq_private_buf.buflen;
+ if (copied > repsize)
+ copied = repsize;
+
+ if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
+ dprintk("RPC: sk_buff copy failed\n");
+ goto out_unlock;
+ }
+
+ xprt_complete_rqst(task, copied);
+
+ out_unlock:
+ spin_unlock(&xprt->transport_lock);
+ dropit:
+ skb_free_datagram(sk, skb);
+ out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
/**
* xs_udp_data_ready - "data ready" callback for UDP sockets
* @sk: socket with data to read
@@ -1344,7 +1504,6 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
xprt_force_disconnect(xprt);
- case TCP_SYN_SENT:
xprt->connect_cookie++;
case TCP_CLOSING:
/*
@@ -1571,11 +1730,31 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
return err;
}
+/*
+ * We don't support autobind on AF_LOCAL sockets
+ */
+static void xs_local_rpcbind(struct rpc_task *task)
+{
+ xprt_set_bound(task->tk_xprt);
+}
+
+static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
+{
+}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];
+static inline void xs_reclassify_socketu(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ BUG_ON(sock_owned_by_user(sk));
+ sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
+ &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
+}
+
static inline void xs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -1597,6 +1776,9 @@ static inline void xs_reclassify_socket6(struct socket *sock)
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
switch (family) {
+ case AF_LOCAL:
+ xs_reclassify_socketu(sock);
+ break;
case AF_INET:
xs_reclassify_socket4(sock);
break;
@@ -1606,6 +1788,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
}
}
#else
+static inline void xs_reclassify_socketu(struct socket *sock)
+{
+}
+
static inline void xs_reclassify_socket4(struct socket *sock)
{
}
@@ -1644,6 +1830,94 @@ out:
return ERR_PTR(err);
}
+static int xs_local_finish_connecting(struct rpc_xprt *xprt,
+ struct socket *sock)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+
+ if (!transport->inet) {
+ struct sock *sk = sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ xs_save_old_callbacks(transport, sk);
+
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_local_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+ sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ xprt_clear_connected(xprt);
+
+ /* Reset to new socket */
+ transport->sock = sock;
+ transport->inet = sk;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ /* Tell the socket layer to start connecting... */
+ xprt->stat.connect_count++;
+ xprt->stat.connect_start = jiffies;
+ return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
+}
+
+/**
+ * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
+ * @work: work item embedded in the sock_xprt whose transport is being connected
+ *
+ * Invoked by a work queue tasklet.
+ */
+static void xs_local_setup_socket(struct work_struct *work)
+{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
+ struct rpc_xprt *xprt = &transport->xprt;
+ struct socket *sock;
+ int status = -EIO;
+
+ if (xprt->shutdown)
+ goto out;
+
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+ if (status < 0) {
+ dprintk("RPC: can't create AF_LOCAL "
+ "transport socket (%d).\n", -status);
+ goto out;
+ }
+ xs_reclassify_socketu(sock);
+
+ dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+ status = xs_local_finish_connecting(xprt, sock);
+ switch (status) {
+ case 0:
+ dprintk("RPC: xprt %p connected to %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ xprt_set_connected(xprt);
+ break;
+ case -ENOENT:
+ dprintk("RPC: xprt %p: socket %s does not exist\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ break;
+ default:
+ printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
+ __func__, -status,
+ xprt->address_strings[RPC_DISPLAY_ADDR]);
+ }
+
+out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
+}
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1758,6 +2032,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ int ret = -ENOTCONN;
if (!transport->inet) {
struct sock *sk = sock->sk;
@@ -1789,12 +2064,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
}
if (!xprt_bound(xprt))
- return -ENOTCONN;
+ goto out;
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
- return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ switch (ret) {
+ case 0:
+ case -EINPROGRESS:
+ /* SYN_SENT! */
+ xprt->connect_cookie++;
+ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ }
+out:
+ return ret;
}
/**
@@ -1917,6 +2202,32 @@ static void xs_connect(struct rpc_task *task)
}
/**
+ * xs_local_print_stats - display AF_LOCAL socket-specific stats
+ * @xprt: rpc_xprt struct containing statistics
+ * @seq: output file
+ *
+ */
+static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
+{
+ long idle_time = 0;
+
+ if (xprt_connected(xprt))
+ idle_time = (long)(jiffies - xprt->last_used) / HZ;
+
+ seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
+ "%llu %llu\n",
+ xprt->stat.bind_count,
+ xprt->stat.connect_count,
+ xprt->stat.connect_time,
+ idle_time,
+ xprt->stat.sends,
+ xprt->stat.recvs,
+ xprt->stat.bad_xids,
+ xprt->stat.req_u,
+ xprt->stat.bklog_u);
+}
+
+/**
* xs_udp_print_stats - display UDP socket-specifc stats
* @xprt: rpc_xprt struct containing statistics
* @seq: output file
@@ -2014,10 +2325,7 @@ static int bc_sendto(struct rpc_rqst *req)
unsigned long headoff;
unsigned long tailoff;
- /*
- * Set up the rpc header and record marker stuff
- */
- xs_encode_tcp_record_marker(xbufp);
+ xs_encode_stream_record_marker(xbufp);
tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
@@ -2089,6 +2397,21 @@ static void bc_destroy(struct rpc_xprt *xprt)
{
}
+static struct rpc_xprt_ops xs_local_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
+ .rpcbind = xs_local_rpcbind,
+ .set_port = xs_local_set_port,
+ .connect = xs_connect,
+ .buf_alloc = rpc_malloc,
+ .buf_free = rpc_free,
+ .send_request = xs_local_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_def,
+ .close = xs_close,
+ .destroy = xs_destroy,
+ .print_stats = xs_local_print_stats,
+};
+
static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
@@ -2150,6 +2473,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
};
switch (family) {
+ case AF_LOCAL:
+ break;
case AF_INET:
memcpy(sap, &sin, sizeof(sin));
break;
@@ -2197,6 +2522,70 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
return xprt;
}
+static const struct rpc_timeout xs_local_default_timeout = {
+ .to_initval = 10 * HZ,
+ .to_maxval = 10 * HZ,
+ .to_retries = 2,
+};
+
+/**
+ * xs_setup_local - Set up transport to use an AF_LOCAL socket
+ * @args: rpc transport creation arguments
+ *
+ * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
+ */
+static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
+{
+ struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
+ struct sock_xprt *transport;
+ struct rpc_xprt *xprt;
+ struct rpc_xprt *ret;
+
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ if (IS_ERR(xprt))
+ return xprt;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+
+ xprt->prot = 0;
+ xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+ xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_local_ops;
+ xprt->timeout = &xs_local_default_timeout;
+
+ switch (sun->sun_family) {
+ case AF_LOCAL:
+ if (sun->sun_path[0] != '/') {
+ dprintk("RPC: bad AF_LOCAL address: %s\n",
+ sun->sun_path);
+ ret = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+ xprt_set_bound(xprt);
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_local_setup_socket);
+ xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
+ break;
+ default:
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
+ }
+
+ dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+ if (try_module_get(THIS_MODULE))
+ return xprt;
+ ret = ERR_PTR(-EINVAL);
+out_err:
+ xprt_free(xprt);
+ return ret;
+}
+
static const struct rpc_timeout xs_udp_default_timeout = {
.to_initval = 5 * HZ,
.to_maxval = 30 * HZ,
@@ -2438,6 +2827,14 @@ out_err:
return ret;
}
+static struct xprt_class xs_local_transport = {
+ .list = LIST_HEAD_INIT(xs_local_transport.list),
+ .name = "named UNIX socket",
+ .owner = THIS_MODULE,
+ .ident = XPRT_TRANSPORT_LOCAL,
+ .setup = xs_setup_local,
+};
+
static struct xprt_class xs_udp_transport = {
.list = LIST_HEAD_INIT(xs_udp_transport.list),
.name = "udp",
@@ -2473,6 +2870,7 @@ int init_socket_xprt(void)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
+ xprt_register_transport(&xs_local_transport);
xprt_register_transport(&xs_udp_transport);
xprt_register_transport(&xs_tcp_transport);
xprt_register_transport(&xs_bc_tcp_transport);
@@ -2493,6 +2891,7 @@ void cleanup_socket_xprt(void)
}
#endif
+ xprt_unregister_transport(&xs_local_transport);
xprt_unregister_transport(&xs_udp_transport);
xprt_unregister_transport(&xs_tcp_transport);
xprt_unregister_transport(&xs_bc_tcp_transport);
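
xs_encode_stream_record_marker() above (the renamed xs_encode_tcp_record_marker, now shared with the AF_LOCAL transport) writes a 4-byte big-endian record marker: the top bit flags the last fragment and the low 31 bits carry the fragment length. A tiny standalone sketch of encoding and decoding such a marker; constants and helper names are local stand-ins, not the kernel's:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_FRAGMENT 0x80000000u

static uint32_t encode_marker(uint32_t payload_len)
{
	/* top bit: last fragment, low 31 bits: fragment length */
	return htonl(LAST_FRAGMENT | payload_len);
}

static void decode_marker(uint32_t marker, int *last, uint32_t *len)
{
	uint32_t host = ntohl(marker);

	*last = !!(host & LAST_FRAGMENT);
	*len = host & ~LAST_FRAGMENT;
}

int main(void)
{
	uint32_t marker = encode_marker(1432);
	int last;
	uint32_t len;

	decode_marker(marker, &last, &len);
	printf("last=%d len=%u\n", last, len);   /* last=1 len=1432 */
	return 0;
}
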
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 4be6036..f40a6af6 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -43,6 +43,7 @@
#undef ELF_R_INFO
#undef Elf_r_info
#undef ELF_ST_BIND
+#undef ELF_ST_TYPE
#undef fn_ELF_R_SYM
#undef fn_ELF_R_INFO
#undef uint_t
@@ -76,6 +77,7 @@
# define ELF_R_INFO ELF64_R_INFO
# define Elf_r_info Elf64_r_info
# define ELF_ST_BIND ELF64_ST_BIND
+# define ELF_ST_TYPE ELF64_ST_TYPE
# define fn_ELF_R_SYM fn_ELF64_R_SYM
# define fn_ELF_R_INFO fn_ELF64_R_INFO
# define uint_t uint64_t
@@ -108,6 +110,7 @@
# define ELF_R_INFO ELF32_R_INFO
# define Elf_r_info Elf32_r_info
# define ELF_ST_BIND ELF32_ST_BIND
+# define ELF_ST_TYPE ELF32_ST_TYPE
# define fn_ELF_R_SYM fn_ELF32_R_SYM
# define fn_ELF_R_INFO fn_ELF32_R_INFO
# define uint_t uint32_t
@@ -427,6 +430,11 @@ static unsigned find_secsym_ndx(unsigned const txtndx,
if (txtndx == w2(symp->st_shndx)
/* avoid STB_WEAK */
&& (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+ /* function symbols on ARM have quirks, avoid them */
+ if (w2(ehdr->e_machine) == EM_ARM
+ && ELF_ST_TYPE(symp->st_info) == STT_FUNC)
+ continue;
+
*recvalp = _w(symp->st_value);
return symp - sym0;
}
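
The recordmcount change above refuses to use ARM STT_FUNC symbols as section anchors, on top of the existing STB_LOCAL/STB_GLOBAL restriction ("function symbols on ARM have quirks", per the added comment). A sketch of that filter using the glibc <elf.h> macros against a hand-made symbol table rather than a parsed object file:

#include <elf.h>
#include <stdio.h>

/* Accept a symbol as a section anchor only if its binding and type are safe. */
static int usable_anchor(const Elf32_Sym *sym, unsigned e_machine)
{
	unsigned bind = ELF32_ST_BIND(sym->st_info);
	unsigned type = ELF32_ST_TYPE(sym->st_info);

	if (bind != STB_LOCAL && bind != STB_GLOBAL)
		return 0;                       /* avoid STB_WEAK and friends */
	if (e_machine == EM_ARM && type == STT_FUNC)
		return 0;                       /* ARM function symbols have quirks */
	return 1;
}

int main(void)
{
	Elf32_Sym sec  = { .st_info = ELF32_ST_INFO(STB_LOCAL,  STT_SECTION) };
	Elf32_Sym func = { .st_info = ELF32_ST_INFO(STB_GLOBAL, STT_FUNC) };

	printf("section symbol on ARM: %d\n", usable_anchor(&sec,  EM_ARM));
	printf("function symbol on ARM: %d\n", usable_anchor(&func, EM_ARM));
	return 0;
}
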
diff --git a/scripts/tags.sh b/scripts/tags.sh
index bd6185d..75c5d24f1 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -132,7 +132,7 @@ exuberant()
--regex-asm='/^ENTRY\(([^)]*)\).*/\1/' \
--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/' \
- --regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/'
+ --regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'
all_kconfigs | xargs $1 -a \
--langdef=kconfig --language-force=kconfig \
@@ -152,7 +152,9 @@ emacs()
{
all_sources | xargs $1 -a \
--regex='/^ENTRY(\([^)]*\)).*/\1/' \
- --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'
+ --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
+ --regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
+ --regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/'
all_kconfigs | xargs $1 -a \
--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ae3a698..ec1bcec 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -593,7 +593,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
- return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
+ return aa_audit(AUDIT_APPARMOR_DENIED,
+ __aa_current_profile(), GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3487056..486f6de 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1085,7 +1085,7 @@ static void azx_init_pci(struct azx *chip)
* codecs.
* The PCI register TCSEL is defined in the Intel manuals.
*/
- if (chip->driver_caps & AZX_DCAPS_NO_TCSEL) {
+ if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
snd_printdd(SFX "Clearing TCSEL\n");
update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
}
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 459566b..d155cbb 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
config SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
- depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_EXYNOS4
+ depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_EXYNOS4
select S3C64XX_DMA if ARCH_S3C64XX
select S3C2410_DMA if ARCH_S3C2410
help
@@ -55,7 +55,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+ depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDKV210 || MACH_SMDKC110)
select SND_SOC_WM8580
select SND_SAMSUNG_I2S
help
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index 8aacf23..3d26f66 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -249,7 +249,7 @@ static int __init smdk_audio_init(void)
int ret;
char *str;
- if (machine_is_smdkc100() || machine_is_smdk6442()
+ if (machine_is_smdkc100()
|| machine_is_smdkv210() || machine_is_smdkc110()) {
smdk.num_links = 3;
/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1455413..032ba63 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -215,11 +215,13 @@ LIB_FILE=$(OUTPUT)libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/const.h
LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/const.h
LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e18eb7e..7b139e1 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -8,8 +8,6 @@
#include "builtin.h"
#include "util/util.h"
-
-#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0974f95..8e2c857 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -823,6 +823,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
symbol__init();
+ if (symbol_conf.kptr_restrict)
+ pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
+
if (no_buildid_cache || no_buildid)
disable_buildid_cache();
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 498c6f7..287a173 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,6 +116,9 @@ static int process_sample_event(union perf_event *event,
if (al.filtered || (hide_unresolved && al.sym == NULL))
return 0;
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
pr_debug("problem incrementing symbol period, skipping event\n");
return -1;
@@ -249,6 +252,8 @@ static int __cmd_report(void)
u64 nr_samples;
struct perf_session *session;
struct perf_evsel *pos;
+ struct map *kernel_map;
+ struct kmap *kernel_kmap;
const char *help = "For a higher level overview, try: perf report --sort comm,dso";
signal(SIGINT, sig_handler);
@@ -268,6 +273,24 @@ static int __cmd_report(void)
if (ret)
goto out_delete;
+ kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION];
+ kernel_kmap = map__kmap(kernel_map);
+ if (kernel_map == NULL ||
+ (kernel_map->dso->hit &&
+ (kernel_kmap->ref_reloc_sym == NULL ||
+ kernel_kmap->ref_reloc_sym->addr == 0))) {
+ const struct dso *kdso = kernel_map->dso;
+
+ ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
+"Samples in kernel modules can't be resolved as well.\n\n",
+ RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
+"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+"can't be resolved." :
+"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+ }
+
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout);
goto out_delete;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 974f6d3..22747de 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,7 +10,6 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
-#include "util/parse-options.h"
#include "util/util.h"
#include "util/evlist.h"
#include "util/evsel.h"
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index b671862..2da9162 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -474,7 +474,7 @@ static int test__basic_mmap(void)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
- int sample_size = perf_sample_size(attr.sample_type);
+ int sample_size = __perf_evsel__sample_size(attr.sample_type);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2d7934e9..f2f3f49 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -62,8 +62,6 @@
#include <linux/unistd.h>
#include <linux/types.h>
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-
static struct perf_top top = {
.count_filter = 5,
.delay_secs = 2,
@@ -82,6 +80,8 @@ static bool use_tui, use_stdio;
static int default_interval = 0;
+static bool kptr_restrict_warned;
+static bool vmlinux_warned;
static bool inherit = false;
static int realtime_prio = 0;
static bool group = false;
@@ -740,7 +740,22 @@ static void perf_event__process_sample(const union perf_event *event,
al.filtered)
return;
+ if (!kptr_restrict_warned &&
+ symbol_conf.kptr_restrict &&
+ al.cpumode == PERF_RECORD_MISC_KERNEL) {
+ ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict.\n\n"
+"Kernel%s samples will not be resolved.\n",
+ !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+ " modules" : "");
+ if (use_browser <= 0)
+ sleep(5);
+ kptr_restrict_warned = true;
+ }
+
if (al.sym == NULL) {
+ const char *msg = "Kernel samples will not be resolved.\n";
/*
* As we do lazy loading of symtabs we only will know if the
* specified vmlinux file is invalid when we actually have a
@@ -752,12 +767,20 @@ static void perf_event__process_sample(const union perf_event *event,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
+ if (!kptr_restrict_warned && !vmlinux_warned &&
+ al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
- ui__warning("The %s file can't be used\n",
- symbol_conf.vmlinux_name);
- exit_browser(0);
- exit(1);
+ if (symbol_conf.vmlinux_name) {
+ ui__warning("The %s file can't be used.\n%s",
+ symbol_conf.vmlinux_name, msg);
+ } else {
+ ui__warning("A vmlinux file was not found.\n%s",
+ msg);
+ }
+
+ if (use_browser <= 0)
+ sleep(5);
+ vmlinux_warned = true;
}
return;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 6635fcd..3c1b8a6 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -35,22 +35,6 @@ const char *perf_event__name(unsigned int id)
return perf_event__names[id];
}
-int perf_sample_size(u64 sample_type)
-{
- u64 mask = sample_type & PERF_SAMPLE_MASK;
- int size = 0;
- int i;
-
- for (i = 0; i < 64; i++) {
- if (mask & (1ULL << i))
- size++;
- }
-
- size *= sizeof(u64);
-
- return size;
-}
-
static struct perf_sample synth_sample = {
.pid = -1,
.tid = -1,
@@ -553,9 +537,18 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
goto out_problem;
perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
- perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
- symbol_name,
- event->mmap.pgoff);
+
+ /*
+ * Avoid using a zero address (kptr_restrict) for the ref reloc
+ * symbol. Effectively having zero here means that at record
+ * time /proc/sys/kernel/kptr_restrict was non zero.
+ */
+ if (event->mmap.pgoff != 0) {
+ perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
+ }
+
if (machine__is_default_guest(machine)) {
/*
* preload dso of guest kernel and modules
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c083328..1d7f664 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -82,8 +82,6 @@ struct perf_sample {
struct ip_callchain *callchain;
};
-int perf_sample_size(u64 sample_type);
-
#define BUILD_ID_SIZE 20
struct build_id_event {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 50aa348..b021ea9 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,7 +12,6 @@
#include "evlist.h"
#include "evsel.h"
#include "util.h"
-#include "debug.h"
#include <sys/mman.h>
@@ -257,19 +256,15 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
-static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+static int __perf_evlist__mmap(struct perf_evlist *evlist,
int idx, int prot, int mask, int fd)
{
evlist->mmap[idx].prev = 0;
evlist->mmap[idx].mask = mask;
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
- if (evlist->mmap[idx].base == MAP_FAILED) {
- if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
- ui__warning("Inherit is not allowed on per-task "
- "events using mmap.\n");
+ if (evlist->mmap[idx].base == MAP_FAILED)
return -1;
- }
perf_evlist__add_pollfd(evlist, fd);
return 0;
@@ -289,7 +284,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
if (output == -1) {
output = fd;
- if (__perf_evlist__mmap(evlist, evsel, cpu,
+ if (__perf_evlist__mmap(evlist, cpu,
prot, mask, output) < 0)
goto out_unmap;
} else {
@@ -329,7 +324,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
if (output == -1) {
output = fd;
- if (__perf_evlist__mmap(evlist, evsel, thread,
+ if (__perf_evlist__mmap(evlist, thread,
prot, mask, output) < 0)
goto out_unmap;
} else {
@@ -460,33 +455,46 @@ int perf_evlist__set_filters(struct perf_evlist *evlist)
return 0;
}
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
- struct perf_evsel *pos;
- u64 type = 0;
-
- list_for_each_entry(pos, &evlist->entries, node) {
- if (!type)
- type = pos->attr.sample_type;
- else if (type != pos->attr.sample_type)
- die("non matching sample_type");
+ struct perf_evsel *pos, *first;
+
+ pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry_continue(pos, &evlist->entries, node) {
+ if (first->attr.sample_type != pos->attr.sample_type)
+ return false;
}
- return type;
+ return true;
}
-bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return first->attr.sample_type;
+}
+
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
- bool value = false, first = true;
- struct perf_evsel *pos;
-
- list_for_each_entry(pos, &evlist->entries, node) {
- if (first) {
- value = pos->attr.sample_id_all;
- first = false;
- } else if (value != pos->attr.sample_id_all)
- die("non matching sample_id_all");
+ struct perf_evsel *pos, *first;
+
+ pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry_continue(pos, &evlist->entries, node) {
+ if (first->attr.sample_id_all != pos->attr.sample_id_all)
+ return false;
}
- return value;
+ return true;
+}
+
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return first->attr.sample_id_all;
}
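
[Editor's note] The refactor above replaces die() with validators that compare every evsel against the first entry of the list. The same "first element is the reference" pattern, shown over a plain array instead of the kernel-style linked list; struct and field names below are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for struct perf_event_attr */
struct attr {
	unsigned long long sample_type;
	bool sample_id_all;
};

static bool valid_sample_type(const struct attr *attrs, int n)
{
	int i;

	for (i = 1; i < n; i++)		/* compare everything to attrs[0] */
		if (attrs[i].sample_type != attrs[0].sample_type)
			return false;
	return true;
}

int main(void)
{
	struct attr attrs[] = { { 0x107, true }, { 0x107, true }, { 0x7, true } };

	printf("valid: %d\n", valid_sample_type(attrs, 3));	/* 0: third entry differs */
	return 0;
}
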
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0a1ef1f..b2b8623 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -66,7 +66,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
void perf_evlist__delete_maps(struct perf_evlist *evlist);
int perf_evlist__set_filters(struct perf_evlist *evlist);
-u64 perf_evlist__sample_type(struct perf_evlist *evlist);
-bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ee0fe0d..0239eb8 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,22 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+int __perf_evsel__sample_size(u64 sample_type)
+{
+ u64 mask = sample_type & PERF_SAMPLE_MASK;
+ int size = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (mask & (1ULL << i))
+ size++;
+ }
+
+ size *= sizeof(u64);
+
+ return size;
+}
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
@@ -35,7 +51,17 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
+ int cpu, thread;
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ FD(evsel, cpu, thread) = -1;
+ }
+ }
+ }
+
return evsel->fd != NULL ? 0 : -ENOMEM;
}
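
[Editor's note] __perf_evsel__sample_size() above counts the bits set in the sample_type mask and multiplies by sizeof(u64), since each enabled PERF_SAMPLE_* field occupies one u64 in the sample record. A worked example with three sample bits set; the masking with PERF_SAMPLE_MASK done by the real code is omitted here, and the constants are written out by hand:

#include <stdio.h>
#include <stdint.h>

static int sample_size(uint64_t sample_type)
{
	int size = 0, i;

	for (i = 0; i < 64; i++)
		if (sample_type & (1ULL << i))
			size++;

	return size * sizeof(uint64_t);
}

int main(void)
{
	/* PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME */
	uint64_t type = (1ULL << 0) | (1ULL << 1) | (1ULL << 2);

	printf("%d bytes\n", sample_size(type));	/* 3 fields * 8 = 24 */
	return 0;
}
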
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index f79bb2c..7e9366e 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -149,4 +149,11 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
+int __perf_evsel__sample_size(u64 sample_type);
+
+static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
+{
+ return __perf_evsel__sample_size(evsel->attr.sample_type);
+}
+
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0717beb..afb0849 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -193,9 +193,13 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
*linkname = malloc(size), *targetname;
int len, err = -1;
- if (is_kallsyms)
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ return 0;
+ }
realname = (char *)name;
- else
+ } else
realname = realpath(name, NULL);
if (realname == NULL || filename == NULL || linkname == NULL)
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 0000000..1b476c9
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/const.h"
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 69436b3..a9ac050 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -674,7 +674,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
struct perf_evlist *evlist = &pevlist->evlist;
union perf_event *event;
int sample_id_all = 1, cpu;
- static char *kwlist[] = {"sample_id_all", NULL, NULL};
+ static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL};
int err;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
@@ -692,16 +692,14 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
first = list_entry(evlist->entries.next, struct perf_evsel, node);
err = perf_event__parse_sample(event, first->attr.sample_type,
- perf_sample_size(first->attr.sample_type),
+ perf_evsel__sample_size(first),
sample_id_all, &pevent->sample);
- if (err) {
- pr_err("Can't parse sample, err = %d\n", err);
- goto end;
- }
-
+ if (err)
+ return PyErr_Format(PyExc_OSError,
+ "perf: can't parse sample, err=%d", err);
return pyevent;
}
-end:
+
Py_INCREF(Py_None);
return Py_None;
}
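
[Editor's note] Returning the result of PyErr_Format() directly, as the hunk above does, works because PyErr_Format() always returns NULL after setting the exception, which is exactly what a failing C extension function should hand back to the interpreter. A minimal sketch of that idiom in a hypothetical extension function (module boilerplate omitted; compiles against the CPython headers):

#include <Python.h>

/* hypothetical extension function using the same error idiom */
static PyObject *demo_parse(PyObject *self, PyObject *args)
{
	int err = -1;	/* pretend parsing failed */

	if (err)
		return PyErr_Format(PyExc_OSError,
				    "demo: can't parse sample, err=%d", err);

	Py_INCREF(Py_None);
	return Py_None;
}
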
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 64500fc..f5a8fbd 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -58,6 +58,16 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
+ if (!perf_evlist__valid_sample_type(self->evlist)) {
+ pr_err("non matching sample_type");
+ goto out_close;
+ }
+
+ if (!perf_evlist__valid_sample_id_all(self->evlist)) {
+ pr_err("non matching sample_id_all");
+ goto out_close;
+ }
+
self->size = input_stat.st_size;
return 0;
@@ -97,7 +107,7 @@ out:
void perf_session__update_sample_type(struct perf_session *self)
{
self->sample_type = perf_evlist__sample_type(self->evlist);
- self->sample_size = perf_sample_size(self->sample_type);
+ self->sample_size = __perf_evsel__sample_size(self->sample_type);
self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
perf_session__id_header_size(self);
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 516876d..eec1963 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -676,9 +676,30 @@ discard_symbol: rb_erase(&pos->rb_node, root);
return count + moved;
}
+static bool symbol__restricted_filename(const char *filename,
+ const char *restricted_filename)
+{
+ bool restricted = false;
+
+ if (symbol_conf.kptr_restrict) {
+ char *r = realpath(filename, NULL);
+
+ if (r != NULL) {
+ restricted = strcmp(r, restricted_filename) == 0;
+ free(r);
+ return restricted;
+ }
+ }
+
+ return restricted;
+}
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return -1;
+
if (dso__load_all_kallsyms(dso, filename, map) < 0)
return -1;
@@ -1790,6 +1811,9 @@ static int machine__create_modules(struct machine *machine)
modules = path;
}
+ if (symbol__restricted_filename(path, "/proc/modules"))
+ return -1;
+
file = fopen(modules, "r");
if (file == NULL)
return -1;
@@ -2239,6 +2263,9 @@ static u64 machine__get_kernel_start_addr(struct machine *machine)
}
}
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return 0;
+
if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
return 0;
@@ -2410,6 +2437,25 @@ static int setup_list(struct strlist **list, const char *list_str,
return 0;
}
+static bool symbol__read_kptr_restrict(void)
+{
+ bool value = false;
+
+ if (geteuid() != 0) {
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ if (fp != NULL) {
+ char line[8];
+
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = atoi(line) != 0;
+
+ fclose(fp);
+ }
+ }
+
+ return value;
+}
+
int symbol__init(void)
{
const char *symfs;
@@ -2456,6 +2502,8 @@ int symbol__init(void)
if (symfs != symbol_conf.symfs)
free((void *)symfs);
+ symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
+
symbol_conf.initialized = true;
return 0;
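
[Editor's note] symbol__read_kptr_restrict() above only consults /proc/sys/kernel/kptr_restrict for non-root users, and symbol__restricted_filename() canonicalises the path with realpath() before comparing it against /proc/kallsyms. A small standalone sketch combining the two checks, with error handling trimmed for brevity:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static bool read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp) {
		char line[8];

		if (fgets(line, sizeof(line), fp))
			value = atoi(line) != 0;
		fclose(fp);
	}

	return value;
}

static bool restricted_filename(const char *filename, const char *restricted)
{
	char *r = realpath(filename, NULL);	/* resolve symlinks/relative paths */
	bool ret = false;

	if (r) {
		ret = strcmp(r, restricted) == 0;
		free(r);
	}

	return ret;
}

int main(void)
{
	if (geteuid() != 0 && read_kptr_restrict() &&
	    restricted_filename("/proc/kallsyms", "/proc/kallsyms"))
		puts("kallsyms is restricted for this user");
	return 0;
}
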
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 242de01..325ee36 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -75,7 +75,8 @@ struct symbol_conf {
use_callchain,
exclude_other,
show_cpu_utilization,
- initialized;
+ initialized,
+ kptr_restrict;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index df0c6d2..74d3331 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -198,6 +198,14 @@ const struct option longopts[] = {
.val = 'h',
},
{
+ .name = "event-idx",
+ .val = 'E',
+ },
+ {
+ .name = "no-event-idx",
+ .val = 'e',
+ },
+ {
.name = "indirect",
.val = 'I',
},
@@ -211,13 +219,17 @@ const struct option longopts[] = {
static void help()
{
- fprintf(stderr, "Usage: virtio_test [--help] [--no-indirect]\n");
+ fprintf(stderr, "Usage: virtio_test [--help]"
+ " [--no-indirect]"
+ " [--no-event-idx]"
+ "\n");
}
int main(int argc, char **argv)
{
struct vdev_info dev;
- unsigned long long features = 1ULL << VIRTIO_RING_F_INDIRECT_DESC;
+ unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX);
int o;
for (;;) {
@@ -228,6 +240,9 @@ int main(int argc, char **argv)
case '?':
help();
exit(2);
+ case 'e':
+ features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);
+ break;
case 'h':
help();
goto done;
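
[Editor's note] The virtio_test change above defaults both feature bits on and lets --no-event-idx clear VIRTIO_RING_F_EVENT_IDX again. The bit manipulation itself is the usual set/clear on a 64-bit mask; a sketch using the standard virtio ring feature bit numbers (28 for INDIRECT_DESC, 29 for EVENT_IDX):

#include <stdio.h>

#define VIRTIO_RING_F_INDIRECT_DESC	28
#define VIRTIO_RING_F_EVENT_IDX		29

int main(void)
{
	unsigned long long features =
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX);

	/* --no-event-idx: clear the EVENT_IDX bit again */
	features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);

	printf("features: %#llx\n", features);	/* 0x10000000 */
	return 0;
}
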