-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-adp887056
-rw-r--r--Documentation/accounting/cgroupstats.txt4
-rw-r--r--Documentation/cgroups/blkio-controller.txt31
-rw-r--r--Documentation/cgroups/cgroups.txt60
-rw-r--r--Documentation/cgroups/cpuacct.txt21
-rw-r--r--Documentation/cgroups/cpusets.txt28
-rw-r--r--Documentation/cgroups/devices.txt6
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt20
-rw-r--r--Documentation/cgroups/memory.txt58
-rw-r--r--Documentation/feature-removal-schedule.txt17
-rw-r--r--Documentation/filesystems/proc.txt1
-rw-r--r--Documentation/kmemleak.txt4
-rw-r--r--Documentation/md.txt2
-rw-r--r--Documentation/power/devices.txt67
-rw-r--r--Documentation/power/runtime_pm.txt5
-rw-r--r--Documentation/printk-formats.txt119
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt7
-rw-r--r--Documentation/scheduler/sched-rt-group.txt7
-rw-r--r--Documentation/usb/error-codes.txt9
-rw-r--r--Documentation/vm/hwpoison.txt6
-rw-r--r--MAINTAINERS48
-rw-r--r--Makefile19
-rw-r--r--arch/alpha/include/asm/mmzone.h1
-rw-r--r--arch/alpha/kernel/osf_sys.c11
-rw-r--r--arch/arm/boot/compressed/head.S20
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/netx_defconfig2
-rw-r--r--arch/arm/configs/viper_defconfig2
-rw-r--r--arch/arm/configs/xcep_defconfig2
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/entry-macro-multi.S2
-rw-r--r--arch/arm/kernel/devtree.c3
-rw-r--r--arch/arm/kernel/entry-armv.S6
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/module.c13
-rw-r--r--arch/arm/kernel/smp.c6
-rw-r--r--arch/arm/kernel/traps.c4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c2
-rw-r--r--arch/arm/mach-davinci/devices.c2
-rw-r--r--arch/arm/mach-davinci/gpio.c7
-rw-r--r--arch/arm/mach-ep93xx/core.c6
-rw-r--r--arch/arm/mach-exynos4/Kconfig6
-rw-r--r--arch/arm/mach-exynos4/Makefile2
-rw-r--r--arch/arm/mach-exynos4/cpu.c2
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-usb-phy.h2
-rw-r--r--arch/arm/mach-exynos4/init.c1
-rw-r--r--arch/arm/mach-exynos4/setup-usb-phy.c (renamed from arch/arm/mach-exynos4/usb-phy.c)0
-rw-r--r--arch/arm/mach-exynos4/time.c2
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c1
-rw-r--r--arch/arm/mach-footbridge/include/mach/debug-macro.S5
-rw-r--r--arch/arm/mach-h720x/Kconfig2
-rw-r--r--arch/arm/mach-msm/timer.c14
-rw-r--r--arch/arm/mach-mxs/ocotp.c2
-rw-r--r--arch/arm/mach-omap1/Makefile4
-rw-r--r--arch/arm/mach-omap1/pm_bus.c8
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c3
-rw-r--r--arch/arm/mach-omap2/pm-debug.c4
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c1
-rw-r--r--arch/arm/mach-s3c2410/Makefile1
-rw-r--r--arch/arm/mach-s3c2410/irq.c34
-rw-r--r--arch/arm/mach-s5pv210/cpufreq.c8
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c4
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c208
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c6
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c14
-rw-r--r--arch/arm/mach-u300/clock.h2
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h22
-rw-r--r--arch/arm/mach-u300/timer.c3
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c16
-rw-r--r--arch/arm/mach-ux500/board-mop500.c54
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c3
-rw-r--r--arch/arm/mach-vexpress/v2m.c15
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c12
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-v7.S26
-rw-r--r--arch/arm/plat-iop/cp6.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-dma.c6
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h1
-rw-r--r--arch/arm/plat-omap/omap_device.c19
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-s3c24xx/irq.c6
-rw-r--r--arch/arm/plat-s5p/dev-onenand.c12
-rw-r--r--arch/arm/plat-s5p/include/plat/map-s5p.h2
-rw-r--r--arch/arm/plat-samsung/dev-onenand.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h2
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig3
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/include/asm/processor.h1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c3
-rw-r--r--arch/avr32/mach-at32ap/include/mach/cpu.h12
-rw-r--r--arch/avr32/mach-at32ap/intc.c4
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig2
-rw-r--r--arch/m32r/include/asm/mmzone.h8
-rw-r--r--arch/m68k/Kconfig.nommu52
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c3
-rw-r--r--arch/m68k/kernel/vmlinux.lds_no.S20
-rw-r--r--arch/m68k/lib/memcpy.c9
-rw-r--r--arch/m68k/lib/memset.c9
-rw-r--r--arch/m68k/lib/muldi3.c21
-rw-r--r--arch/mips/configs/mtx1_defconfig2
-rw-r--r--arch/mn10300/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/asm/mmzone.h7
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/dtc-src/.gitignore3
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts9
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/mmzone.h7
-rw-r--r--arch/powerpc/include/asm/rio.h2
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/prom.c27
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c29
-rw-r--r--arch/powerpc/kernel/signal_32.c57
-rw-r--r--arch/powerpc/kernel/signal_64.c17
-rw-r--r--arch/powerpc/kernel/traps.c24
-rw-r--r--arch/powerpc/mm/fault.c10
-rw-r--r--arch/powerpc/mm/init_32.c15
-rw-r--r--arch/powerpc/mm/init_64.c14
-rw-r--r--arch/powerpc/mm/mem.c19
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c6
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c35
-rw-r--r--arch/powerpc/sysdev/mpic.c11
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/s390/oprofile/init.c8
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c48
-rw-r--r--arch/sh/boot/compressed/Makefile22
-rw-r--r--arch/sh/configs/titan_defconfig2
-rw-r--r--arch/sh/include/asm/cmpxchg-grb.h21
-rw-r--r--arch/sh/include/asm/mmzone.h4
-rw-r--r--arch/sh/include/asm/processor_64.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h8
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c40
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/mm/cache-debugfs.c25
-rw-r--r--arch/sparc/Kconfig18
-rw-r--r--arch/sparc/include/asm/floppy_32.h8
-rw-r--r--arch/sparc/include/asm/floppy_64.h4
-rw-r--r--arch/sparc/include/asm/leon.h3
-rw-r--r--arch/sparc/include/asm/leon_pci.h21
-rw-r--r--arch/sparc/include/asm/mmzone.h2
-rw-r--r--arch/sparc/include/asm/pci_32.h24
-rw-r--r--arch/sparc/include/asm/pcic.h2
-rw-r--r--arch/sparc/include/asm/system_32.h2
-rw-r--r--arch/sparc/include/asm/system_64.h2
-rw-r--r--arch/sparc/kernel/Makefile4
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/auxio_32.c2
-rw-r--r--arch/sparc/kernel/chmc.c2
-rw-r--r--arch/sparc/kernel/entry.S8
-rw-r--r--arch/sparc/kernel/leon_kernel.c31
-rw-r--r--arch/sparc/kernel/leon_pci.c253
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c897
-rw-r--r--arch/sparc/kernel/module.c2
-rw-r--r--arch/sparc/kernel/pci_common.c4
-rw-r--r--arch/sparc/kernel/pci_schizo.c6
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c2
-rw-r--r--arch/sparc/kernel/psycho_common.c2
-rw-r--r--arch/sparc/kernel/sbus.c4
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/smp_32.c6
-rw-r--r--arch/sparc/kernel/sun4d_irq.c126
-rw-r--r--arch/sparc/kernel/sys_sparc32.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c6
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c6
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c4
-rw-r--r--arch/sparc/kernel/viohs.c2
-rw-r--r--arch/sparc/kernel/visemul.c14
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/mm/sun4c.c8
-rw-r--r--arch/sparc/mm/tsb.c6
-rw-r--r--arch/sparc/prom/console_32.c2
-rw-r--r--arch/sparc/prom/init_32.c2
-rw-r--r--arch/sparc/prom/mp.c2
-rw-r--r--arch/tile/include/asm/mmzone.h11
-rw-r--r--arch/um/include/asm/percpu.h6
-rw-r--r--arch/unicore32/Kconfig4
-rw-r--r--arch/unicore32/Makefile38
-rw-r--r--arch/unicore32/boot/compressed/Makefile2
-rw-r--r--arch/unicore32/configs/unicore32_defconfig (renamed from arch/unicore32/configs/debug_defconfig)8
-rw-r--r--arch/unicore32/include/asm/Kbuild59
-rw-r--r--arch/unicore32/kernel/Makefile1
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/include/asm/memblock.h2
-rw-r--r--arch/x86/include/asm/mmzone_32.h11
-rw-r--r--arch/x86/include/asm/mmzone_64.h3
-rw-r--r--arch/x86/include/asm/pvclock.h9
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c8
-rw-r--r--arch/x86/kernel/devicetree.c11
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/smpboot.c13
-rw-r--r--arch/x86/kvm/emulate.c12
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/mm/memblock.c4
-rw-r--r--arch/x86/oprofile/op_model_amd.c13
-rw-r--r--arch/x86/pci/acpi.c2
-rw-r--r--arch/x86/platform/efi/efi.c29
-rw-r--r--arch/x86/xen/enlighten.c9
-rw-r--r--arch/x86/xen/mmu.c12
-rw-r--r--arch/x86/xen/setup.c10
-rw-r--r--arch/x86/xen/smp.c7
-rw-r--r--block/blk-throttle.c4
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--block/genhd.c79
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_marvell.c3
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c4
-rw-r--r--drivers/base/power/main.c28
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c12
-rw-r--r--drivers/char/hpet.c25
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/cpufreq/cpufreq_stats.c9
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/crypto/caam/caamalg.c6
-rw-r--r--drivers/dma/shdma.c13
-rw-r--r--drivers/firmware/google/Kconfig1
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/gpio-nomadik.c40
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpu/drm/drm_bufs.c17
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c19
-rw-r--r--drivers/gpu/drm/drm_gem.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c9
-rw-r--r--drivers/gpu/drm/drm_pci.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c308
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c32
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h8
-rw-r--r--drivers/gpu/drm/radeon/r600.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c123
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c3
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-magicmouse.c10
-rw-r--r--drivers/hid/hid-multitouch.c74
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c8
-rw-r--r--drivers/hwmon/asus_atk0110.c5
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/s3c-hwmon.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c8
-rw-r--r--drivers/i2c/muxes/pca954x.c7
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c46
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c1
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/isdn/gigaset/interface.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c1
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/leds-lp5521.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/bitmap.c104
-rw-r--r--drivers/md/bitmap.h10
-rw-r--r--drivers/md/md.c42
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/cb710/sgbuf2.c2
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/lkdtm.c8
-rw-r--r--drivers/misc/pti.c11
-rw-r--r--drivers/misc/sgi-xp/xpnet.c6
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c8
-rw-r--r--drivers/mmc/card/block.c5
-rw-r--r--drivers/mmc/card/queue.c15
-rw-r--r--drivers/mmc/card/queue.h3
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/sdio.c39
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/host/mmci.c12
-rw-r--r--drivers/mmc/host/of_mmc_spi.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c5
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c4
-rw-r--r--drivers/mmc/host/vub300.c11
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c6
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/arm/am79c961a.c126
-rw-r--r--drivers/net/arm/ep93xx_eth.c82
-rw-r--r--drivers/net/bfin_mac.c20
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/gianfar.c29
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/gianfar_ethtool.c64
-rw-r--r--drivers/net/hp100.c4
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/igb/igb_main.c3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/dp83640.c24
-rw-r--r--drivers/net/ppp_async.c4
-rw-r--r--drivers/net/pxa168_eth.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/r8169.c10
-rw-r--r--drivers/net/smc91x.c6
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/kalmia.c384
-rw-r--r--drivers/net/wan/farsync.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c12
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c24
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c21
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c30
-rw-r--r--drivers/oprofile/buffer_sync.c21
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/ptp/ptp_chardev.c11
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-dev.c3
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-puv3.c (renamed from arch/unicore32/kernel/rtc.c)14
-rw-r--r--drivers/rtc/rtc-vt8500.c45
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/spi/amba-pl022.c1
-rw-r--r--drivers/spi/spi_bfin5xx.c7
-rw-r--r--drivers/ssb/driver_pcicore.c10
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/altera-stapl/altera-jtag.c2
-rw-r--r--drivers/staging/altera-stapl/altera.c2
-rw-r--r--drivers/staging/altera-stapl/altera.h (renamed from include/staging/altera.h)0
-rw-r--r--drivers/staging/ath6kl/Kconfig1
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c3
-rw-r--r--drivers/staging/brcm80211/Kconfig2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c2
-rw-r--r--drivers/staging/comedi/Kconfig22
-rw-r--r--drivers/staging/gma500/psb_drv.c15
-rw-r--r--drivers/staging/gma500/psb_fb.c10
-rw-r--r--drivers/staging/gma500/psb_intel_bios.c13
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/accel/adis16201.h2
-rw-r--r--drivers/staging/iio/accel/adis16203.h2
-rw-r--r--drivers/staging/iio/accel/adis16204.h2
-rw-r--r--drivers/staging/iio/accel/adis16209.h4
-rw-r--r--drivers/staging/iio/dac/max517.c2
-rw-r--r--drivers/staging/iio/gyro/adis16260.h2
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c10
-rw-r--r--drivers/staging/iio/industrialio-trigger.c1
-rw-r--r--drivers/staging/mei/init.c6
-rw-r--r--drivers/staging/mei/wd.c13
-rw-r--r--drivers/staging/olpc_dcon/Kconfig1
-rw-r--r--drivers/staging/rts_pstor/sd.c2
-rw-r--r--drivers/staging/usbip/stub_dev.c21
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c13
-rw-r--r--drivers/target/target_core_configfs.c24
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_pr.c6
-rw-r--r--drivers/target/target_core_tmr.c8
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h2
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c64
-rw-r--r--drivers/target/tcm_fc/tfc_io.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c4
-rw-r--r--drivers/tty/n_gsm.c26
-rw-r--r--drivers/tty/n_tty.c1
-rw-r--r--drivers/tty/serial/8250.c1
-rw-r--r--drivers/tty/serial/8250_pci.c61
-rw-r--r--drivers/tty/serial/amba-pl011.c123
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c18
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c5
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/s5pv210.c4
-rw-r--r--drivers/tty/tty_ldisc.c4
-rw-r--r--drivers/usb/core/driver.c17
-rw-r--r--drivers/usb/core/hub.c16
-rw-r--r--drivers/usb/core/message.c15
-rw-r--r--drivers/usb/host/ehci-ath79.c10
-rw-r--r--drivers/usb/host/ehci-hcd.c4
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c1
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.h3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c1
-rw-r--r--drivers/video/aty/atyfb_base.c10
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1012
-rw-r--r--drivers/video/efifb.c2
-rw-r--r--drivers/video/s3c-fb.c22
-rw-r--r--drivers/video/sh_mobile_hdmi.c18
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/xen/events.c2
-rw-r--r--drivers/xen/swiotlb-xen.c12
-rw-r--r--fs/afs/dir.c8
-rw-r--r--fs/afs/fsclient.c3
-rw-r--r--fs/afs/inode.c10
-rw-r--r--fs/afs/super.c74
-rw-r--r--fs/afs/write.c21
-rw-r--r--fs/bad_inode.c3
-rw-r--r--fs/block_dev.c14
-rw-r--r--fs/btrfs/ctree.c10
-rw-r--r--fs/btrfs/ctree.h16
-rw-r--r--fs/btrfs/delayed-inode.c136
-rw-r--r--fs/btrfs/delayed-inode.h6
-rw-r--r--fs/btrfs/disk-io.c17
-rw-r--r--fs/btrfs/extent-tree.c63
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/free-space-cache.c172
-rw-r--r--fs/btrfs/inode.c18
-rw-r--r--fs/btrfs/ioctl.c25
-rw-r--r--fs/btrfs/relocation.c30
-rw-r--r--fs/btrfs/scrub.c69
-rw-r--r--fs/btrfs/sysfs.c146
-rw-r--r--fs/btrfs/transaction.c121
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c8
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c10
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/ceph/file.c35
-rw-r--r--fs/ceph/inode.c18
-rw-r--r--fs/ceph/ioctl.c6
-rw-r--r--fs/ceph/locks.c29
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/xattr.c6
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifsfs.c192
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/connect.c111
-rw-r--r--fs/cifs/fscache.c51
-rw-r--r--fs/cifs/smbencrypt.c6
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/dcookies.c3
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext4/ext4_extents.h9
-rw-r--r--fs/ext4/extents.c42
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/mballoc.c8
-rw-r--r--fs/ext4/move_extent.c10
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/inode.c7
-rw-r--r--fs/isofs/inode.c3
-rw-r--r--fs/jbd2/checkpoint.c28
-rw-r--r--fs/jbd2/commit.c33
-rw-r--r--fs/jbd2/journal.c91
-rw-r--r--fs/jbd2/transaction.c69
-rw-r--r--fs/jfs/file.c6
-rw-r--r--fs/jfs/jfs_imap.c12
-rw-r--r--fs/jfs/jfs_incore.h3
-rw-r--r--fs/jfs/resize.c2
-rw-r--r--fs/lockd/clntproc.c8
-rw-r--r--fs/logfs/dir.c8
-rw-r--r--fs/namei.c34
-rw-r--r--fs/nfs/inode.c6
-rw-r--r--fs/nfs/internal.h11
-rw-r--r--fs/nfs/nfs4filelayout.c21
-rw-r--r--fs/nfs/nfs4proc.c45
-rw-r--r--fs/nfs/nfs4xdr.c26
-rw-r--r--fs/nfs/objlayout/objio_osd.c4
-rw-r--r--fs/nfs/objlayout/objlayout.c2
-rw-r--r--fs/nfs/pagelist.c3
-rw-r--r--fs/nfs/pnfs.c44
-rw-r--r--fs/nfs/pnfs.h1
-rw-r--r--fs/nfs/pnfs_dev.c17
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/nfsctl.c19
-rw-r--r--fs/nfsd/vfs.c19
-rw-r--r--fs/nilfs2/btree.c39
-rw-r--r--fs/nilfs2/inode.c7
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/omfs/file.c1
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/proc/namespaces.c9
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/proc/root.c11
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/romfs/mmap-nommu.c8
-rw-r--r--fs/sysfs/mount.c37
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/timerfd.c5
-rw-r--r--fs/ubifs/super.c136
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c50
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c75
-rw-r--r--fs/xfs/xfs_attr.c7
-rw-r--r--fs/xfs/xfs_iget.c13
-rw-r--r--fs/xfs/xfs_inode.h10
-rw-r--r--fs/xfs/xfs_log.c11
-rw-r--r--fs/xfs/xfs_vnodeops.c7
-rw-r--r--include/asm-generic/gpio.h10
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/linux/amba/serial.h3
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blktrace_api.h3
-rw-r--r--include/linux/clocksource.h1
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/connector.h2
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/device_cgroup.h10
-rw-r--r--include/linux/ethtool.h6
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/gpio.h11
-rw-r--r--include/linux/hrtimer.h1
-rw-r--r--include/linux/i2c/adp8870.h153
-rw-r--r--include/linux/if_packet.h2
-rw-r--r--include/linux/if_vlan.h25
-rw-r--r--include/linux/input/sh_keysc.h2
-rw-r--r--include/linux/interrupt.h1
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kmod.h8
-rw-r--r--include/linux/kmsg_dump.h1
-rw-r--r--include/linux/kobject_ns.h10
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mmzone.h7
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/nfs_page.h3
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu.h3
-rw-r--r--include/linux/pm.h3
-rw-r--r--include/linux/seqlock.h1
-rw-r--r--include/linux/shmem_fs.h21
-rw-r--r--include/linux/skbuff.h5
-rw-r--r--include/linux/smp.h5
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h4
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/swap.h18
-rw-r--r--include/linux/swiotlb.h1
-rw-r--r--include/linux/sysfs.h7
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/uts.h2
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/sock.h1
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/trace/events/ext4.h179
-rw-r--r--include/trace/events/irq.h3
-rw-r--r--include/trace/events/vmscan.h83
-rw-r--r--init/Kconfig10
-rw-r--r--init/calibrate.c17
-rw-r--r--init/main.c1
-rw-r--r--kernel/exit.c31
-rw-r--r--kernel/gcov/Kconfig3
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/power/user.c4
-rw-r--r--kernel/rcutree.c398
-rw-r--r--kernel/rcutree.h12
-rw-r--r--kernel/rcutree_plugin.h419
-rw-r--r--kernel/rcutree_trace.c32
-rw-r--r--kernel/sched_rt.c6
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/smp.c5
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/taskstats.c15
-rw-r--r--kernel/time/alarmtimer.c158
-rw-r--r--kernel/time/clocksource.c24
-rw-r--r--kernel/trace/ftrace.c9
-rw-r--r--kernel/trace/trace_kprobe.c8
-rw-r--r--kernel/trace/trace_printk.c5
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/kobject.c26
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/vsprintf.c4
-rw-r--r--mm/compaction.c76
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/ksm.c6
-rw-r--r--mm/memcontrol.c82
-rw-r--r--mm/memory-failure.c25
-rw-r--r--mm/memory.c32
-rw-r--r--mm/memory_hotplug.c10
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/page_cgroup.c71
-rw-r--r--mm/rmap.c111
-rw-r--r--mm/shmem.c74
-rw-r--r--mm/slab.c9
-rw-r--r--mm/slub.c12
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/thrash.c105
-rw-r--r--mm/truncate.c29
-rw-r--r--mm/vmscan.c47
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_core.c60
-rw-r--r--net/bluetooth/hci_event.c18
-rw-r--r--net/bluetooth/l2cap_sock.c1
-rw-r--r--net/bluetooth/rfcomm/sock.c1
-rw-r--r--net/bluetooth/sco.c13
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/caif/cfmuxl.c2
-rw-r--r--net/ceph/osd_client.c15
-rw-r--r--net/core/dev.c23
-rw-r--r--net/core/net-sysfs.c23
-rw-r--r--net/core/net_namespace.c28
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/ieee802154/nl-phy.c3
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/inet_diag.c14
-rw-r--r--net/ipv4/ip_output.c6
-rw-r--r--net/ipv4/netfilter/ip_queue.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c6
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c4
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/route.c82
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/netfilter/ip6_queue.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/irda/iriap.c5
-rw-r--r--net/l2tp/l2tp_debugfs.c2
-rw-r--r--net/mac80211/ibss.c6
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c21
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/mac80211/tx.c7
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c10
-rw-r--r--net/netfilter/nf_conntrack_irc.c3
-rw-r--r--net/netfilter/nf_conntrack_pptp.c3
-rw-r--r--net/netfilter/nf_conntrack_sane.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c2
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/sched.c1
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/xfrm/xfrm_replay.c4
-rw-r--r--scripts/Makefile.asm-generic1
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/depmod.sh48
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/device_cgroup.c8
-rw-r--r--security/keys/request_key.c6
-rw-r--r--security/selinux/selinuxfs.c37
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/tomoyo/mount.c2
-rw-r--r--sound/core/misc.c40
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/pci/asihpi/asihpi.c1
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c8
-rw-r--r--sound/pci/hda/hda_beep.h9
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c31
-rw-r--r--sound/pci/hda/patch_via.c46
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/rme9652/hdspm.c8
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c4
-rw-r--r--sound/soc/codecs/ad1836.c14
-rw-r--r--sound/soc/codecs/ad1836.h6
-rw-r--r--sound/soc/codecs/wm8804.c9
-rw-r--r--sound/soc/codecs/wm8915.c3
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8991.c1
-rw-r--r--sound/soc/fsl/fsl_dma.c9
-rw-r--r--sound/soc/imx/Kconfig7
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c2
-rw-r--r--sound/soc/imx/imx-ssi.c2
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c4
-rw-r--r--sound/soc/samsung/i2s.c4
-rw-r--r--sound/soc/soc-cache.c6
-rw-r--r--sound/soc/soc-dapm.c17
-rw-r--r--sound/usb/6fire/firmware.c1
-rw-r--r--sound/usb/6fire/pcm.c4
-rw-r--r--tools/perf/Makefile2
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN7
-rw-r--r--tools/perf/util/trace-event-parse.c1
815 files changed, 10199 insertions, 4647 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
new file mode 100644
index 0000000..aa11dbd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -0,0 +1,56 @@
+What: /sys/class/backlight/<backlight>/<ambient light zone>_max
+What: /sys/class/backlight/<backlight>/l1_daylight_max
+What: /sys/class/backlight/<backlight>/l2_bright_max
+What: /sys/class/backlight/<backlight>/l3_office_max
+What: /sys/class/backlight/<backlight>/l4_indoor_max
+What: /sys/class/backlight/<backlight>/l5_dark_max
+Date: May 2011
+KernelVersion: 2.6.40
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Control the maximum brightness for <ambient light zone>
+ on this <backlight>. Values are between 0 and 127. This file
+ will also show the brightness level stored for this
+ <ambient light zone>.
+
+What: /sys/class/backlight/<backlight>/<ambient light zone>_dim
+What: /sys/class/backlight/<backlight>/l2_bright_dim
+What: /sys/class/backlight/<backlight>/l3_office_dim
+What: /sys/class/backlight/<backlight>/l4_indoor_dim
+What: /sys/class/backlight/<backlight>/l5_dark_dim
+Date: May 2011
+KernelVersion: 2.6.40
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Control the dim brightness for <ambient light zone>
+ on this <backlight>. Values are between 0 and 127, typically
+ set to 0. Full off when the backlight is disabled.
+ This file will also show the dim brightness level stored for
+ this <ambient light zone>.
+
+What: /sys/class/backlight/<backlight>/ambient_light_level
+Date: May 2011
+KernelVersion: 2.6.40
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Get conversion value of the light sensor.
+ This value is updated every 80 ms (when the light sensor
+ is enabled). Returns integer between 0 (dark) and
+ 8000 (max ambient brightness)
+
+What: /sys/class/backlight/<backlight>/ambient_light_zone
+Date: May 2011
+KernelVersion: 2.6.40
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Get/Set current ambient light zone. Reading returns
+ integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark).
+ Writing a value between 1..5 forces the backlight controller
+ to enter the corresponding ambient light zone.
+ Writing 0 returns to normal/automatic ambient light level
+ operation. The ambient light sensing feature on these devices
+ is an extension to the API documented in
+ Documentation/ABI/stable/sysfs-class-backlight.
+ It can be enabled by writing the value stored in
+ /sys/class/backlight/<backlight>/max_brightness to
+ /sys/class/backlight/<backlight>/brightness. \ No newline at end of file
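A minimal usage sketch for the files documented above, assuming a hypothetical backlight registered as "adp8870" (the actual name under /sys/class/backlight depends on the platform data): read the light sensor and the current zone, cap the daylight-zone brightness, briefly force the office zone, then return to automatic operation.

  # cat /sys/class/backlight/adp8870/ambient_light_level
  # cat /sys/class/backlight/adp8870/ambient_light_zone
  # echo 100 > /sys/class/backlight/adp8870/l1_daylight_max
  # echo 3 > /sys/class/backlight/adp8870/ambient_light_zone
  # echo 0 > /sys/class/backlight/adp8870/ambient_light_zone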
diff --git a/Documentation/accounting/cgroupstats.txt b/Documentation/accounting/cgroupstats.txt
index eda40fd..d16a984 100644
--- a/Documentation/accounting/cgroupstats.txt
+++ b/Documentation/accounting/cgroupstats.txt
@@ -21,7 +21,7 @@ information will not be available.
To extract cgroup statistics a utility very similar to getdelays.c
has been developed, the sample output of the utility is shown below
-~/balbir/cgroupstats # ./getdelays -C "/cgroup/a"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a"
sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
-~/balbir/cgroupstats # ./getdelays -C "/cgroup"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup"
sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 465351d..cd45c8e 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -28,16 +28,19 @@ cgroups. Here is what you can do.
- Enable group scheduling in CFQ
CONFIG_CFQ_GROUP_IOSCHED=y
-- Compile and boot into kernel and mount IO controller (blkio).
+- Compile and boot into kernel and mount IO controller (blkio); see
+ cgroups.txt, Why are cgroups needed?.
- mount -t cgroup -o blkio none /cgroup
+ mount -t tmpfs cgroup_root /sys/fs/cgroup
+ mkdir /sys/fs/cgroup/blkio
+ mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Create two cgroups
- mkdir -p /cgroup/test1/ /cgroup/test2
+ mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
- Set weights of group test1 and test2
- echo 1000 > /cgroup/test1/blkio.weight
- echo 500 > /cgroup/test2/blkio.weight
+ echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
+ echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
- Create two same size files (say 512MB each) on same disk (file1, file2) and
launch two dd threads in different cgroup to read those files.
@@ -46,12 +49,12 @@ cgroups. Here is what you can do.
echo 3 > /proc/sys/vm/drop_caches
dd if=/mnt/sdb/zerofile1 of=/dev/null &
- echo $! > /cgroup/test1/tasks
- cat /cgroup/test1/tasks
+ echo $! > /sys/fs/cgroup/blkio/test1/tasks
+ cat /sys/fs/cgroup/blkio/test1/tasks
dd if=/mnt/sdb/zerofile2 of=/dev/null &
- echo $! > /cgroup/test2/tasks
- cat /cgroup/test2/tasks
+ echo $! > /sys/fs/cgroup/blkio/test2/tasks
+ cat /sys/fs/cgroup/blkio/test2/tasks
- At macro level, first dd should finish first. To get more precise data, keep
on looking at (with the help of script), at blkio.disk_time and
@@ -68,13 +71,13 @@ Throttling/Upper Limit policy
- Enable throttling in block layer
CONFIG_BLK_DEV_THROTTLING=y
-- Mount blkio controller
- mount -t cgroup -o blkio none /cgroup/blkio
+- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
+ mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Specify a bandwidth rate on particular device for root group. The format
for policy is "<major>:<minor> <bytes_per_second>".
- echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device
+ echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
Above will put a limit of 1MB/second on reads happening for root group
on device having major/minor number 8:16.
@@ -108,7 +111,7 @@ Hierarchical Cgroups
CFQ and throttling will practically treat all groups at same level.
pivot
- / | \ \
+ / / \ \
root test1 test2 test3
Down the line we can implement hierarchical accounting/control support
@@ -149,7 +152,7 @@ Proportional weight policy files
Following is the format.
- #echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+ # echo dev_maj:dev_minor weight > blkio.weight_device
Configure weight=300 on /dev/sdb (8:16) in this cgroup
# echo 8:16 300 > blkio.weight_device
# cat blkio.weight_device
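To see the 1000/500 weight split from the earlier example actually take effect while the two dd jobs run, the per-group statistics mentioned above can simply be compared; a rough sketch (test1 should accumulate roughly twice the disk time of test2):

  # cat /sys/fs/cgroup/blkio/test1/blkio.disk_time
  # cat /sys/fs/cgroup/blkio/test2/blkio.disk_time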
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0ed99f0..cd67e90 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -138,11 +138,11 @@ With the ability to classify tasks differently for different resources
the admin can easily set up a script which receives exec notifications
and depending on who is launching the browser he can
- # echo browser_pid > /mnt/<restype>/<userclass>/tasks
+ # echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
With only a single hierarchy, he now would potentially have to create
a separate cgroup for every browser launched and associate it with
-approp network and other resource class. This may lead to
+appropriate network and other resource class. This may lead to
proliferation of such cgroups.
Also lets say that the administrator would like to give enhanced network
@@ -153,9 +153,9 @@ apps enhanced CPU power,
With ability to write pids directly to resource classes, it's just a
matter of :
- # echo pid > /mnt/network/<new_class>/tasks
+ # echo pid > /sys/fs/cgroup/network/<new_class>/tasks
(after some time)
- # echo pid > /mnt/network/<orig_class>/tasks
+ # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
Without this ability, he would have to split the cgroup into
multiple separate ones and then associate the new cgroups with the
@@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
To start a new job that is to be contained within a cgroup, using
the "cpuset" cgroup subsystem, the steps are something like:
- 1) mkdir /dev/cgroup
- 2) mount -t cgroup -ocpuset cpuset /dev/cgroup
- 3) Create the new cgroup by doing mkdir's and write's (or echo's) in
- the /dev/cgroup virtual file system.
- 4) Start a task that will be the "founding father" of the new job.
- 5) Attach that task to the new cgroup by writing its pid to the
- /dev/cgroup tasks file for that cgroup.
- 6) fork, exec or clone the job tasks from this founding father task.
+ 1) mount -t tmpfs cgroup_root /sys/fs/cgroup
+ 2) mkdir /sys/fs/cgroup/cpuset
+ 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+ 4) Create the new cgroup by doing mkdir's and write's (or echo's) in
+ the /sys/fs/cgroup virtual file system.
+ 5) Start a task that will be the "founding father" of the new job.
+ 6) Attach that task to the new cgroup by writing its pid to the
+ /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+ 7) fork, exec or clone the job tasks from this founding father task.
For example, the following sequence of commands will setup a cgroup
named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
and then start a subshell 'sh' in that cgroup:
- mount -t cgroup cpuset -ocpuset /dev/cgroup
- cd /dev/cgroup
+ mount -t tmpfs cgroup_root /sys/fs/cgroup
+ mkdir /sys/fs/cgroup/cpuset
+ mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
+ cd /sys/fs/cgroup/cpuset
mkdir Charlie
cd Charlie
/bin/echo 2-3 > cpuset.cpus
@@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
virtual filesystem.
To mount a cgroup hierarchy with all available subsystems, type:
-# mount -t cgroup xxx /dev/cgroup
+# mount -t cgroup xxx /sys/fs/cgroup
The "xxx" is not interpreted by the cgroup code, but will appear in
/proc/mounts so may be any useful identifying string that you like.
@@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first. For instance,
if cpusets are enabled the user will have to populate the cpus and mems files
for each new cgroup created before that group can be used.
+As explained in section `1.2 Why are cgroups needed?' you should create
+different hierarchies of cgroups for each single resource or group of
+resources you want to control. Therefore, you should mount a tmpfs on
+/sys/fs/cgroup and create directories for each cgroup resource or resource
+group.
+
+# mount -t tmpfs cgroup_root /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/rg1
+
To mount a cgroup hierarchy with just the cpuset and memory
subsystems, type:
-# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
+# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
To change the set of subsystems bound to a mounted hierarchy, just
remount with different options:
-# mount -o remount,cpuset,blkio hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
Now memory is removed from the hierarchy and blkio is added.
Note this will add blkio to the hierarchy but won't remove memory or
cpuset, because the new options are appended to the old ones:
-# mount -o remount,blkio /dev/cgroup
+# mount -o remount,blkio /sys/fs/cgroup/rg1
To Specify a hierarchy's release_agent:
# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
- xxx /dev/cgroup
+ xxx /sys/fs/cgroup/rg1
Note that specifying 'release_agent' more than once will return failure.
@@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
the ability to arbitrarily bind/unbind subsystems from an existing
cgroup hierarchy is intended to be implemented in the future.
-Then under /dev/cgroup you can find a tree that corresponds to the
-tree of the cgroups in the system. For instance, /dev/cgroup
+Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
+tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
is the cgroup that holds the whole system.
If you want to change the value of release_agent:
-# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
+# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
It can also be changed via remount.
-If you want to create a new cgroup under /dev/cgroup:
-# cd /dev/cgroup
+If you want to create a new cgroup under /sys/fs/cgroup/rg1:
+# cd /sys/fs/cgroup/rg1
# mkdir my_cgroup
Now you want to do something with this cgroup.
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 8b93094..9ad85df 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -10,26 +10,25 @@ directly present in its group.
Accounting groups can be created by first mounting the cgroup filesystem.
-# mkdir /cgroups
-# mount -t cgroup -ocpuacct none /cgroups
-
-With the above step, the initial or the parent accounting group
-becomes visible at /cgroups. At bootup, this group includes all the
-tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
-/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
-this group which is essentially the CPU time obtained by all the tasks
+# mount -t cgroup -ocpuacct none /sys/fs/cgroup
+
+With the above step, the initial or the parent accounting group becomes
+visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
+by this group which is essentially the CPU time obtained by all the tasks
in the system.
-New accounting groups can be created under the parent group /cgroups.
+New accounting groups can be created under the parent group /sys/fs/cgroup.
-# cd /cgroups
+# cd /sys/fs/cgroup
# mkdir g1
# echo $$ > g1/tasks
The above steps create a new group g1 and move the current shell
process (bash) into it. CPU time consumed by this bash and its children
can be obtained from g1/cpuacct.usage and the same is accumulated in
-/cgroups/cpuacct.usage also.
+/sys/fs/cgroup/cpuacct.usage also.
cpuacct.stat file lists a few statistics which further divide the
CPU time obtained by the cgroup into user and system times. Currently
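As a small follow-up to the g1 example above: cpuacct.usage is reported in nanoseconds, so a quick sketch for turning it into seconds and for looking at the user/system split (paths assume the mount shown above):

  # cat /sys/fs/cgroup/g1/cpuacct.usage
  # awk '{ printf "%.3f seconds\n", $1 / 1000000000 }' /sys/fs/cgroup/g1/cpuacct.usage
  # cat /sys/fs/cgroup/g1/cpuacct.stat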
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 98a3082..5b0d78e 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -661,21 +661,21 @@ than stress the kernel.
To start a new job that is to be contained within a cpuset, the steps are:
- 1) mkdir /dev/cpuset
- 2) mount -t cgroup -ocpuset cpuset /dev/cpuset
+ 1) mkdir /sys/fs/cgroup/cpuset
+ 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
3) Create the new cpuset by doing mkdir's and write's (or echo's) in
- the /dev/cpuset virtual file system.
+ the /sys/fs/cgroup/cpuset virtual file system.
4) Start a task that will be the "founding father" of the new job.
5) Attach that task to the new cpuset by writing its pid to the
- /dev/cpuset tasks file for that cpuset.
+ /sys/fs/cgroup/cpuset tasks file for that cpuset.
6) fork, exec or clone the job tasks from this founding father task.
For example, the following sequence of commands will setup a cpuset
named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
and then start a subshell 'sh' in that cpuset:
- mount -t cgroup -ocpuset cpuset /dev/cpuset
- cd /dev/cpuset
+ mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+ cd /sys/fs/cgroup/cpuset
mkdir Charlie
cd Charlie
/bin/echo 2-3 > cpuset.cpus
@@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
virtual filesystem.
To mount it, type:
-# mount -t cgroup -o cpuset cpuset /dev/cpuset
+# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
-Then under /dev/cpuset you can find a tree that corresponds to the
-tree of the cpusets in the system. For instance, /dev/cpuset
+Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
+tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
is the cpuset that holds the whole system.
-If you want to create a new cpuset under /dev/cpuset:
-# cd /dev/cpuset
+If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
+# cd /sys/fs/cgroup/cpuset
# mkdir my_cpuset
Now you want to do something with this cpuset.
@@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
The command
-mount -t cpuset X /dev/cpuset
+mount -t cpuset X /sys/fs/cgroup/cpuset
is equivalent to
-mount -t cgroup -ocpuset,noprefix X /dev/cpuset
-echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
+mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
+echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
2.2 Adding/removing cpus
------------------------
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 57ca4c8..16624a7f8 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -22,16 +22,16 @@ removed from the child(ren).
An entry is added using devices.allow, and removed using
devices.deny. For instance
- echo 'c 1:3 mr' > /cgroups/1/devices.allow
+ echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
allows cgroup 1 to read and mknod the device usually known as
/dev/null. Doing
- echo a > /cgroups/1/devices.deny
+ echo a > /sys/fs/cgroup/1/devices.deny
will remove the default 'a *:* rwm' entry. Doing
- echo a > /cgroups/1/devices.allow
+ echo a > /sys/fs/cgroup/1/devices.allow
will add the 'a *:* rwm' entry to the whitelist.
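The effect of the allow/deny writes above can be checked through the group's whitelist file; a short sketch (devices.list is assumed here to be the read-only whitelist file exposed by the devices cgroup, and the group path matches the example above):

  # cat /sys/fs/cgroup/1/devices.list
  # echo a > /sys/fs/cgroup/1/devices.deny
  # echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
  # cat /sys/fs/cgroup/1/devices.list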
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index 41f37fe..c21d777 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -59,28 +59,28 @@ is non-freezable.
* Examples of usage :
- # mkdir /containers
- # mount -t cgroup -ofreezer freezer /containers
- # mkdir /containers/0
- # echo $some_pid > /containers/0/tasks
+ # mkdir /sys/fs/cgroup/freezer
+ # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
+ # mkdir /sys/fs/cgroup/freezer/0
+ # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
to get status of the freezer subsystem :
- # cat /containers/0/freezer.state
+ # cat /sys/fs/cgroup/freezer/0/freezer.state
THAWED
to freeze all tasks in the container :
- # echo FROZEN > /containers/0/freezer.state
- # cat /containers/0/freezer.state
+ # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
+ # cat /sys/fs/cgroup/freezer/0/freezer.state
FREEZING
- # cat /containers/0/freezer.state
+ # cat /sys/fs/cgroup/freezer/0/freezer.state
FROZEN
to unfreeze all tasks in the container :
- # echo THAWED > /containers/0/freezer.state
- # cat /containers/0/freezer.state
+ # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
+ # cat /sys/fs/cgroup/freezer/0/freezer.state
THAWED
This is the basic mechanism which should do the right thing for user space task
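Because a freeze request can be observed as FREEZING for a short while (as in the example above), a script that needs the tasks to really be stopped should poll until the cgroup reports FROZEN before acting on it; a minimal sketch using only the files shown above:

  # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
  # while [ "$(cat /sys/fs/cgroup/freezer/0/freezer.state)" != "FROZEN" ]; do sleep 0.1; done
  # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state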
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 7c16347..06eb6d9 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,8 +1,8 @@
Memory Resource Controller
-NOTE: The Memory Resource Controller has been generically been referred
- to as the memory controller in this document. Do not confuse memory
- controller used here with the memory controller that is used in hardware.
+NOTE: The Memory Resource Controller has generically been referred to as the
+ memory controller in this document. Do not confuse memory controller
+ used here with the memory controller that is used in hardware.
(For editors)
In this document:
@@ -70,6 +70,7 @@ Brief summary of control files.
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate # set/show controls of moving charges
memory.oom_control # set/show oom controls.
+ memory.numa_stat # show the memory usage per numa node
1. History
@@ -181,7 +182,7 @@ behind this approach is that a cgroup that aggressively uses a shared
page will eventually get charged for it (once it is uncharged from
the cgroup that brought it in -- this will happen on memory pressure).
-Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used..
+Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
When you do swapoff and make swapped-out pages of shmem(tmpfs) to
be backed into memory in force, charges for pages are accounted against the
caller of swapoff rather than the users of shmem.
@@ -213,7 +214,7 @@ affecting global LRU, memory+swap limit is better than just limiting swap from
OS point of view.
* What happens when a cgroup hits memory.memsw.limit_in_bytes
-When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out
+When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
in this cgroup. Then, swap-out will not be done by cgroup routine and file
caches are dropped. But as mentioned above, global LRU can do swapout memory
from it for sanity of the system's memory management state. You can't forbid
@@ -263,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
c. Enable CONFIG_CGROUP_MEM_RES_CTLR
d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
-1. Prepare the cgroups
-# mkdir -p /cgroups
-# mount -t cgroup none /cgroups -o memory
+1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
+# mount -t tmpfs none /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/memory
+# mount -t cgroup none /sys/fs/cgroup/memory -o memory
2. Make the new group and move bash into it
-# mkdir /cgroups/0
-# echo $$ > /cgroups/0/tasks
+# mkdir /sys/fs/cgroup/memory/0
+# echo $$ > /sys/fs/cgroup/memory/0/tasks
Since now we're in the 0 cgroup, we can alter the memory limit:
-# echo 4M > /cgroups/0/memory.limit_in_bytes
+# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
@@ -280,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
NOTE: We cannot set limits on the root cgroup any more.
-# cat /cgroups/0/memory.limit_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
4194304
We can check the usage:
-# cat /cgroups/0/memory.usage_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
1216512
A successful write to this file does not guarantee a successful set of
@@ -464,6 +466,24 @@ value for efficient access. (Of course, when necessary, it's synchronized.)
If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP)
value in memory.stat(see 5.2).
+5.6 numa_stat
+
+This is similar to numa_maps but operates on a per-memcg basis. This is
+useful for providing visibility into the numa locality information within
+a memcg since the pages are allowed to be allocated from any physical
+node. One of the use cases is evaluating application performance by
+combining this information with the application's cpu allocation.
+
+We export "total", "file", "anon" and "unevictable" pages per-node for
+each memcg. The output format of memory.numa_stat is:
+
+total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
+file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
+anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
+unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
+
+And we have total = file + anon + unevictable.
+
6. Hierarchy support
The memory controller supports a deep hierarchy and hierarchical accounting.
@@ -471,13 +491,13 @@ The hierarchy is created by creating the appropriate cgroups in the
cgroup filesystem. Consider for example, the following cgroup filesystem
hierarchy
- root
+ root
/ | \
- / | \
- a b c
- | \
- | \
- d e
+ / | \
+ a b c
+ | \
+ | \
+ d e
In the diagram above, with hierarchical accounting enabled, all memory
usage of e, is accounted to its ancestors up until the root (i.e, c and root),
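Tying section 5.6 back to the setup steps earlier in this document: once the memory controller is mounted at /sys/fs/cgroup/memory and group 0 exists, the per-node counters can be pulled apart with standard tools; a rough sketch (the awk line just prints the N<node> fields of the "total" row):

  # cat /sys/fs/cgroup/memory/0/memory.numa_stat
  # awk '/^total=/ { for (i = 2; i <= NF; i++) print $i }' /sys/fs/cgroup/memory/0/memory.numa_stat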
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 1a9446b..72e2384 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -481,23 +481,6 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
----------------------------
-What: namespace cgroup (ns_cgroup)
-When: 2.6.38
-Why: The ns_cgroup leads to some problems:
- * cgroup creation is out-of-control
- * cgroup name can conflict when pids are looping
- * it is not possible to have a single process handling
- a lot of namespaces without falling in a exponential creation time
- * we may want to create a namespace without creating a cgroup
-
- The ns_cgroup is replaced by a compatibility flag 'clone_children',
- where a newly created cgroup will copy the parent cgroup values.
- The userspace has to manually create a cgroup and add a task to
- the 'tasks' file.
-Who: Daniel Lezcano <daniel.lezcano@free.fr>
-
-----------------------------
-
What: iwlwifi disable_hw_scan module parameters
When: 2.6.40
Why: Hardware scan is the preferred method for iwlwifi devices for
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f481780..db3b1ab 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
TASKLET: 0 0 0 290
SCHED: 27035 26983 26971 26746
HRTIMER: 0 0 0 0
+ RCU: 1678 1769 2178 2250
1.3 IDE devices in /proc/ide
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 090e6ee..51063e6 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -11,7 +11,9 @@ with the difference that the orphan objects are not freed but only
reported via /sys/kernel/debug/kmemleak. A similar method is used by the
Valgrind tool (memcheck --leak-check) to detect the memory leaks in
user-space applications.
-Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile.
+
+Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported
+architectures.
Usage
-----
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 2366b1c..f0eee83 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -555,7 +555,7 @@ also have
sync_min
sync_max
The two values, given as numbers of sectors, indicate a range
- withing the array where 'check'/'repair' will operate. Must be
+ within the array where 'check'/'repair' will operate. Must be
a multiple of chunk_size. When it reaches "sync_max" it will
pause, rather than complete.
You can use 'select' or 'poll' on "sync_completed" to wait for
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 8888083..64565aa 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
device. This field is a pointer to an object of type struct dev_power_domain,
defined in include/linux/pm.h, providing a set of power management callbacks
analogous to the subsystem-level and device driver callbacks that are executed
-for the given device during all power transitions, in addition to the respective
-subsystem-level callbacks. Specifically, the power domain "suspend" callbacks
-(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
-executed after the analogous subsystem-level callbacks, while the power domain
-"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
-etc.) are executed before the analogous subsystem-level callbacks. Error codes
-returned by the "suspend" and "resume" power domain callbacks are ignored.
-
-Power domain ->runtime_idle() callback is executed before the subsystem-level
-->runtime_idle() callback and the result returned by it is not ignored. Namely,
-if it returns error code, the subsystem-level ->runtime_idle() callback will not
-be called and the helper function rpm_idle() executing it will return error
-code. This mechanism is intended to help platforms where saving device state
-is a time consuming operation and should only be carried out if all devices
-in the power domain are idle, before turning off the shared power resource(s).
-Namely, the power domain ->runtime_idle() callback may return error code until
-the pm_runtime_idle() helper (or its asychronous version) has been called for
-all devices in the power domain (it is recommended that the returned error code
-be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
-callback from being run prematurely.
-
-The support for device power domains is only relevant to platforms needing to
-use the same subsystem-level (e.g. platform bus type) and device driver power
-management callbacks in many different power domain configurations and wanting
-to avoid incorporating the support for power domains into the subsystem-level
-callbacks. The other platforms need not implement it or take it into account
-in any way.
-
-
-System Devices
---------------
-System devices (sysdevs) follow a slightly different API, which can be found in
-
- include/linux/sysdev.h
- drivers/base/sys.c
-
-System devices will be suspended with interrupts disabled, and after all other
-devices have been suspended. On resume, they will be resumed before any other
-devices, and also with interrupts disabled. These things occur in special
-"sysdev_driver" phases, which affect only system devices.
-
-Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
-the non-boot CPUs are all offline and IRQs are disabled on the remaining online
-CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
-sleep state (or a system image is created). During resume (or after the image
-has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
-are enabled on the only online CPU, the non-boot CPUs are enabled, and the
-resume_noirq (or thaw_noirq or restore_noirq) phase begins.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+for the given device during all power transitions, instead of the respective
+subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
+not NULL, the ->suspend() callback from the object pointed to by it will be
+executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
+analogously for all of the remaining callbacks. In other words, power management
+domain callbacks, if defined for the given device, always take precedence over
+the callbacks provided by the device's subsystem (e.g. bus type).
+
+The support for device power management domains is only relevant to platforms
+needing to use the same device driver power management callbacks in many
+different power domain configurations and wanting to avoid incorporating the
+support for power domains into subsystem-level callbacks, for example by
+modifying the platform bus type. Other platforms need not implement it or take
+it into account in any way.
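To make the replacement semantics above concrete, a rough sketch (not from the patch) of a power domain object, using the struct dev_power_domain type and the pwr_domain field of struct device that this document refers to; the my_* identifiers and the callback bodies are hypothetical.

    #include <linux/device.h>
    #include <linux/pm.h>

    /* hypothetical callbacks; once dev->pwr_domain points at my_power_domain
     * below, these run instead of the subsystem-level (e.g. bus type) ones
     */
    static int my_domain_runtime_suspend(struct device *dev)
    {
        /* power down the resource shared by the whole domain */
        return 0;
    }

    static int my_domain_runtime_resume(struct device *dev)
    {
        /* power the shared resource back up */
        return 0;
    }

    static struct dev_power_domain my_power_domain = {
        .ops = {
            .runtime_suspend = my_domain_runtime_suspend,
            .runtime_resume  = my_domain_runtime_resume,
        },
    };

    /* typically done by platform code while registering the device */
    static void my_platform_add_device(struct device *dev)
    {
        dev->pwr_domain = &my_power_domain;
    }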
Device Low Power (suspend) States
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 654097b..22accb3 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -566,11 +566,6 @@ to do this is:
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
-The PM core always increments the run-time usage counter before calling the
-->prepare() callback and decrements it after calling the ->complete() callback.
-Hence disabling run-time PM temporarily like this will not cause any run-time
-suspend callbacks to be lost.
-
7. Generic subsystem callbacks
Subsystems may wish to conserve code space by using the set of generic power
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 1b5a5dd..5df176e 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -9,7 +9,121 @@ If variable is of Type, use printk format specifier:
size_t %zu or %zx
ssize_t %zd or %zx
-Raw pointer value SHOULD be printed with %p.
+Raw pointer value SHOULD be printed with %p. The kernel supports
+the following extended format specifiers for pointer types:
+
+Symbols/Function Pointers:
+
+ %pF versatile_init+0x0/0x110
+ %pf versatile_init
+ %pS versatile_init+0x0/0x110
+ %ps versatile_init
+ %pB prev_fn_of_versatile_init+0x88/0x88
+
+ For printing symbols and function pointers. The 'S' and 's' specifiers
+ result in the symbol name with ('S') or without ('s') offsets. When
+ used on a kernel without KALLSYMS, the symbol address is printed
+ instead.
+
+ The 'B' specifier results in the symbol name with offsets and should be
+ used when printing stack backtraces. The specifier takes into
+ consideration the effect of compiler optimisations which may occur
+ when tail calls are used and marked with the noreturn GCC attribute.
+
+ On ia64, ppc64 and parisc64 architectures function pointers are
+ actually function descriptors which must first be resolved. The 'F' and
+ 'f' specifiers perform this resolution and then provide the same
+ functionality as the 'S' and 's' specifiers.
+
+Kernel Pointers:
+
+ %pK 0x01234567 or 0x0123456789abcdef
+
+ For printing kernel pointers which should be hidden from unprivileged
+ users. The behaviour of %pK depends on the kptr_restrict sysctl - see
+ Documentation/sysctl/kernel.txt for more details.
+
+Struct Resources:
+
+ %pr [mem 0x60000000-0x6fffffff flags 0x2200] or
+ [mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
+ %pR [mem 0x60000000-0x6fffffff pref] or
+ [mem 0x0000000060000000-0x000000006fffffff pref]
+
+ For printing struct resources. The 'R' and 'r' specifiers result in a
+ printed resource with ('R') or without ('r') a decoded flags member.
+
+MAC/FDDI addresses:
+
+ %pM 00:01:02:03:04:05
+ %pMF 00-01-02-03-04-05
+ %pm 000102030405
+
+ For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
+ specifiers result in a printed address with ('M') or without ('m') byte
+ separators. The default byte separator is the colon (':').
+
+ Where FDDI addresses are concerned the 'F' specifier can be used after
+ the 'M' specifier to use dash ('-') separators instead of the default
+ separator.
+
+IPv4 addresses:
+
+ %pI4 1.2.3.4
+ %pi4 001.002.003.004
+ %p[Ii][hnbl]
+
+ For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
+ specifiers result in a printed address with ('i4') or without ('I4')
+ leading zeros.
+
+ The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
+ host, network, big or little endian order addresses respectively. Where
+ no specifier is provided the default network/big endian order is used.
+
+IPv6 addresses:
+
+ %pI6 0001:0002:0003:0004:0005:0006:0007:0008
+ %pi6 00010002000300040005000600070008
+ %pI6c 1:2:3:4:5:6:7:8
+
+ For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
+ specifiers result in a printed address with ('I6') or without ('i6')
+ colon-separators. Leading zeros are always used.
+
+ The additional 'c' specifier can be used with the 'I' specifier to
+ print a compressed IPv6 address as described by
+ http://tools.ietf.org/html/rfc5952
+
+UUID/GUID addresses:
+
+ %pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
+ %pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
+ %pUl 03020100-0504-0706-0809-0a0b0c0e0e0f
+ %pUL 03020100-0504-0706-0809-0A0B0C0E0E0F
+
+ For printing 16-byte UUID/GUID addresses. The additional 'l', 'L',
+ 'b' and 'B' specifiers are used to specify a little endian order in
+ lower ('l') or upper case ('L') hex characters - and big endian order
+ in lower ('b') or upper case ('B') hex characters.
+
+ Where no additional specifiers are used the default little endian
+ order with lower case hex characters will be printed.
+
+struct va_format:
+
+ %pV
+
+ For printing struct va_format structures. These contain a format string
+ and va_list as follows:
+
+ struct va_format {
+ const char *fmt;
+ va_list *va;
+ };
+
+ Do not use this feature without some mechanism to verify the
+ correctness of the format string and va_list arguments.
u64 SHOULD be printed with %llu/%llx, (unsigned long long):
@@ -32,4 +146,5 @@ Reminder: sizeof() result is of type size_t.
Thank you for your cooperation and attention.
-By Randy Dunlap <rdunlap@xenotime.net>
+By Randy Dunlap <rdunlap@xenotime.net> and
+Andrew Murray <amurray@mpc-data.co.uk>
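As a quick illustration of some of the specifiers documented above, a minimal module sketch (not part of the patch; the demo values and module boilerplate are illustrative only):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init fmt_demo_init(void)
    {
        u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
        u8 ip[4]  = { 1, 2, 3, 4 };               /* network byte order */

        pr_info("symbol: %pS\n", fmt_demo_init);  /* name + offset */
        pr_info("mac:    %pM\n", mac);            /* 00:01:02:03:04:05 */
        pr_info("ipv4:   %pI4\n", ip);            /* 1.2.3.4 */
        return 0;
    }

    static void __exit fmt_demo_exit(void)
    {
    }

    module_init(fmt_demo_init);
    module_exit(fmt_demo_exit);
    MODULE_LICENSE("GPL");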
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 9996199..91ecff0 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
group created using the pseudo filesystem. See example steps below to create
task groups and modify their CPU share using the "cgroups" pseudo filesystem.
- # mkdir /dev/cpuctl
- # mount -t cgroup -ocpu none /dev/cpuctl
- # cd /dev/cpuctl
+ # mount -t tmpfs cgroup_root /sys/fs/cgroup
+ # mkdir /sys/fs/cgroup/cpu
+ # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
+ # cd /sys/fs/cgroup/cpu
# mkdir multimedia # create "multimedia" group of tasks
# mkdir browser # create "browser" group of tasks
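A natural follow-up to the steps above is adjusting a group's weight through the cpu.shares file mentioned earlier; a small userspace sketch (not part of the patch), where the value 2048 is illustrative (the default share is 1024):

    #include <stdio.h>

    int main(void)
    {
        /* path assumes the /sys/fs/cgroup/cpu mount point used above */
        FILE *f = fopen("/sys/fs/cgroup/cpu/multimedia/cpu.shares", "w");

        if (!f) {
            perror("cpu.shares");
            return 1;
        }
        fprintf(f, "2048\n");   /* double the default weight of 1024 */
        fclose(f);
        return 0;
    }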
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 605b0d4..71b54d5 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -129,9 +129,8 @@ priority!
Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
CPU bandwidth to task groups.
-This uses the /cgroup virtual file system and
-"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
-control group.
+This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
+to control the CPU time reserved for each control group.
For more information on working with control groups, you should read
Documentation/cgroups/cgroups.txt as well.
@@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
===============
There is work in progress to make the scheduling period for each group
-("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
+("<cgroup>/cpu.rt_period_us") configurable as well.
The constraint on the period is that a subgroup must have a smaller or
equal period to its parent. But realistically it's not very useful _yet_
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index d83703e..b3f606b 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -76,6 +76,13 @@ A transfer's actual_length may be positive even when an error has been
reported. That's because transfers often involve several packets, so that
one or more packets could finish before an error stops further endpoint I/O.
+For isochronous URBs, the urb status value is non-zero only if the URB is
+unlinked, the device is removed, the host controller is disabled, or the total
+transferred length is less than the requested length and the URB_SHORT_NOT_OK
+flag is set. Completion handlers for isochronous URBs should only see
+urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+Individual frame descriptor status fields may report more status codes.
+
0 Transfer completed successfully
@@ -132,7 +139,7 @@ one or more packets could finish before an error stops further endpoint I/O.
device removal events immediately.
-EXDEV ISO transfer only partially completed
- look at individual frame status for details
+ (only set in iso_frame_desc[n].status, not urb->status)
-EINVAL ISO madness, if this happens: Log off and go home
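A hedged sketch (not from the patch) of an isochronous completion handler that follows the rules above: whole-URB conditions arrive in urb->status, while per-packet errors such as -EXDEV only show up in iso_frame_desc[n].status. The my_iso_complete name is illustrative.

    #include <linux/gfp.h>
    #include <linux/usb.h>

    static void my_iso_complete(struct urb *urb)
    {
        int i;

        if (urb->status) {  /* -ENOENT, -ECONNRESET, -ESHUTDOWN or -EREMOTEIO */
            dev_warn(&urb->dev->dev, "iso urb stopped: %d\n", urb->status);
            return;
        }

        for (i = 0; i < urb->number_of_packets; i++) {
            if (urb->iso_frame_desc[i].status)
                continue;   /* this packet failed; its data is not valid */
            /* process iso_frame_desc[i].actual_length bytes at
             * transfer_buffer + iso_frame_desc[i].offset
             */
        }

        if (usb_submit_urb(urb, GFP_ATOMIC))    /* streams usually resubmit */
            dev_warn(&urb->dev->dev, "iso resubmit failed\n");
    }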
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 12f9ba2..5500684 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
of the memcg.
Example:
- mkdir /cgroup/hwpoison
+ mkdir /sys/fs/cgroup/mem/hwpoison
usemem -m 100 -s 1000 &
- echo `jobs -p` > /cgroup/hwpoison/tasks
+ echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
- memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ')
+ memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
page-types -p `pidof init` --hwpoison # shall do nothing
diff --git a/MAINTAINERS b/MAINTAINERS
index 2cc0e1f..2b39691 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1739,7 +1739,7 @@ S: Supported
F: drivers/net/enic/
CIRRUS LOGIC EP93XX ETHERNET DRIVER
-M: Lennert Buytenhek <kernel@wantstofly.org>
+M: Hartley Sweeten <hsweeten@visionengravers.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/arm/ep93xx_eth.c
@@ -1889,7 +1889,6 @@ L: cpufreq@vger.kernel.org
W: http://www.codemonkey.org.uk/projects/cpufreq/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
S: Maintained
-F: arch/x86/kernel/cpu/cpufreq/
F: drivers/cpufreq/
F: include/linux/cpufreq.h
@@ -2292,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
EBTABLES
M: Bart De Schuymer <bart.de.schuymer@pandora.be>
-L: ebtables-user@lists.sourceforge.net
-L: ebtables-devel@lists.sourceforge.net
+L: netfilter-devel@vger.kernel.org
W: http://ebtables.sourceforge.net/
S: Maintained
F: include/linux/netfilter_bridge/ebt_*.h
@@ -3820,6 +3818,12 @@ S: Maintained
F: drivers/leds/
F: include/linux/leds.h
+LEGACY EEPROM DRIVER
+M: Jean Delvare <khali@linux-fr.org>
+S: Maintained
+F: Documentation/misc-devices/eeprom
+F: drivers/misc/eeprom/eeprom.c
+
LEGO USB Tower driver
M: Juergen Stuber <starblue@users.sourceforge.net>
L: legousb-devel@lists.sourceforge.net
@@ -4146,7 +4150,7 @@ F: include/linux/mm.h
F: mm/
MEMORY RESOURCE CONTROLLER
-M: Balbir Singh <balbir@linux.vnet.ibm.com>
+M: Balbir Singh <bsingharora@gmail.com>
M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: linux-mm@kvack.org
@@ -4891,7 +4895,7 @@ F: mm/percpu*.c
F: arch/*/include/asm/percpu.h
PER-TASK DELAY ACCOUNTING
-M: Balbir Singh <balbir@linux.vnet.ibm.com>
+M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
@@ -4946,6 +4950,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.gi
F: drivers/input/serio/i8042-unicore32io.h
F: drivers/i2c/busses/i2c-puv3.c
F: drivers/video/fb-puv3.c
+F: drivers/rtc/rtc-puv3.c
PMC SIERRA MaxRAID DRIVER
M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
@@ -6098,7 +6103,7 @@ F: include/target/
F: Documentation/target/
TASKSTATS STATISTICS INTERFACE
-M: Balbir Singh <balbir@linux.vnet.ibm.com>
+M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
F: Documentation/accounting/taskstats*
F: include/linux/taskstats*
@@ -6430,8 +6435,9 @@ S: Maintained
F: drivers/usb/misc/rio500*
USB EHCI DRIVER
+M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/usb/ehci.txt
F: drivers/usb/host/ehci*
@@ -6458,9 +6464,15 @@ M: Jiri Kosina <jkosina@suse.cz>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
-F: Documentation/usb/hiddev.txt
+F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
+USB/IP DRIVERS
+M: Matt Mooney <mfm@muteddisk.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/staging/usbip/
+
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
L: linux-usb@vger.kernel.org
@@ -6490,8 +6502,9 @@ S: Maintained
F: sound/usb/midi.*
USB OHCI DRIVER
+M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/usb/ohci.txt
F: drivers/usb/host/ohci*
@@ -6717,6 +6730,14 @@ S: Maintained
F: Documentation/filesystems/vfat.txt
F: fs/fat/
+VIDEOBUF2 FRAMEWORK
+M: Pawel Osciak <pawel@osciak.com>
+M: Marek Szyprowski <m.szyprowski@samsung.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/video/videobuf2-*
+F: include/media/videobuf2-*
+
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
L: virtualization@lists.linux-foundation.org
@@ -6994,6 +7015,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
+X86 MCE INFRASTRUCTURE
+M: Tony Luck <tony.luck@intel.com>
+M: Borislav Petkov <bp@amd64.org>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: arch/x86/kernel/cpu/mcheck/*
+
XEN HYPERVISOR INTERFACE
M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/Makefile b/Makefile
index 0f1db8d..dc67046 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
NAME = Sneaky Weasel
# *DOCUMENTATION*
@@ -378,7 +378,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
-KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
+KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
@@ -1005,7 +1005,7 @@ endef
define filechk_version.h
(echo \#define LINUX_VERSION_CODE $(shell \
- expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
endef
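The version.h recipe above encodes the release as VERSION * 65536 + PATCHLEVEL * 256 + SUBLEVEL, treating empty fields as 0; a minimal sketch (module boilerplate is illustrative) of how out-of-tree code typically consumes the result through linux/version.h:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/version.h>

    static int __init vercheck_init(void)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
        pr_info("built against 3.x headers (code 0x%06x)\n", LINUX_VERSION_CODE);
    #else
        pr_info("built against a pre-3.0 kernel\n");
    #endif
        return 0;
    }

    module_init(vercheck_init);
    MODULE_LICENSE("GPL");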
@@ -1110,11 +1110,6 @@ modules_install: _modinst_ _modinst_post
PHONY += _modinst_
_modinst_:
- @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
- echo "Warning: you may need to install module-init-tools"; \
- echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
- sleep 1; \
- fi
@rm -rf $(MODLIB)/kernel
@rm -f $(MODLIB)/source
@mkdir -p $(MODLIB)/kernel
@@ -1531,12 +1526,8 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
# Run depmod only if we have System.map and depmod is executable
quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
- cmd_depmod = \
- if [ -r System.map -a -x $(DEPMOD) ]; then \
- $(DEPMOD) -ae -F System.map \
- $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) ) \
- $(KERNELRELEASE); \
- fi
+ cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
+ $(KERNELRELEASE)
# Create temporary dir for module support files
# clean it up only when building all modules
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 8af56ce..445dc42 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
* Given a kernel address, find the home node of the underlying memory.
*/
#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
/*
* Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 376f221..326f0a2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
return -EFAULT;
len = namelen;
- if (namelen > 32)
+ if (len > 32)
len = 32;
down_read(&uts_sem);
@@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
down_read(&uts_sem);
res = sysinfo_table[offset];
len = strlen(res)+1;
- if (len > count)
+ if ((unsigned long)len > (unsigned long)count)
len = count;
if (copy_to_user(buf, res, len))
err = -EFAULT;
@@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
return 1;
case GSI_GET_HWRPB:
- if (nbytes < sizeof(*hwrpb))
+ if (nbytes > sizeof(*hwrpb))
return -EINVAL;
if (copy_to_user(buffer, hwrpb, nbytes) != 0)
return -EFAULT;
@@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
{
struct rusage r;
long ret, err;
+ unsigned int status = 0;
mm_segment_t old_fs;
if (!ur)
@@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
old_fs = get_fs();
set_fs (KERNEL_DS);
- ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
+ ret = sys_wait4(pid, (unsigned int __user *) &status, options,
+ (struct rusage __user *) &r);
set_fs (old_fs);
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
err = 0;
+ err |= put_user(status, ustatus);
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index f9da419..940b201 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -597,6 +597,8 @@ __common_mmu_cache_on:
sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif
+#define PROC_ENTRY_SIZE (4*5)
+
/*
* Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an
@@ -624,7 +626,7 @@ call_cache_fn: adr r12, proc_types
ARM( addeq pc, r12, r3 ) @ call cache function
THUMB( addeq r12, r3 )
THUMB( moveq pc, r12 ) @ call cache function
- add r12, r12, #4*5
+ add r12, r12, #PROC_ENTRY_SIZE
b 1b
/*
@@ -691,9 +693,9 @@ proc_types:
.word 0x41069260 @ ARM926EJ-S (v5TEJ)
.word 0xff0ffff0
- b __arm926ejs_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv5tej_mmu_cache_flush
+ W(b) __arm926ejs_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
@@ -794,6 +796,16 @@ proc_types:
.size proc_types, . - proc_types
+ /*
+ * If you get a "non-constant expression in ".if" statement"
+ * error from the assembler on this line, check that you have
+ * not accidentally written a "b" instruction where you should
+ * have written W(b).
+ */
+ .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+ .error "The size of one or more proc_types entries is wrong."
+ .endif
+
/*
* Turn off the Cache and MMU. ARMv3 does not support
* reading the control register, but ARMv4 does.
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 889922a..67b5abb6 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/netx_defconfig b/arch/arm/configs/netx_defconfig
index 316af54..9c0ad79 100644
--- a/arch/arm/configs/netx_defconfig
+++ b/arch/arm/configs/netx_defconfig
@@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 8b0c717..1d01ddd 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_SA1100=m
CONFIG_EXT2_FS=m
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 5b55041..721832f 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SA1100=m
CONFIG_DMADEVICES=y
# CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 960f655..59577ad 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_LEDS_TRIGGER_BACKLIGHT=m
CONFIG_LEDS_TRIGGER_GPIO=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_ISL1208=m
CONFIG_RTC_DRV_PXA=m
CONFIG_EXT2_FS=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc2d2d7..65c3f24 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -13,6 +13,9 @@
* Do not include any C declarations in this file - it is included by
* assembler source.
*/
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
@@ -290,3 +293,4 @@
.macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
.endm
+#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index ec0bbf7..2da8547 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -1,3 +1,5 @@
+#include <asm/assembler.h>
+
/*
* Interrupt handling. Preserves r7, r8, r9
*/
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index a701e42..0cdd7b4 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
unsigned long dt_root;
const char *model;
+ if (!dt_phys)
+ return NULL;
+
devtree = phys_to_virt(dt_phys);
/* check device tree validity */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e8d8856..90c62cd 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -435,6 +435,10 @@ __irq_usr:
usr_entry
kuser_cmpxchg_check
+#ifdef CONFIG_IRQSOFF_TRACER
+ bl trace_hardirqs_off
+#endif
+
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -453,7 +457,7 @@ __irq_usr:
#endif
mov why, #0
- b ret_to_user
+ b ret_to_user_from_irq
UNWIND(.fnend )
ENDPROC(__irq_usr)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1e7b04a..b2a27b6 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -64,6 +64,7 @@ work_resched:
ENTRY(ret_to_user)
ret_slow_syscall:
disable_irq @ disable interrupts
+ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne work_pending
@@ -75,6 +76,7 @@ no_work_pending:
arch_ret_to_user r1, lr
restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index fee7c36..016d6a0 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset -= 0x02000000;
offset += sym->st_value - loc;
- /* only Thumb addresses allowed (no interworking) */
- if (!(offset & 1) ||
+ /*
+ * For function symbols, only Thumb addresses are
+ * allowed (no interworking).
+ *
+ * For non-function symbols, the destination
+ * has no specific ARM/Thumb disposition, so
+ * the branch is resolved under the assumption
+ * that interworking is not required.
+ */
+ if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+ !(offset & 1)) ||
offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 344e52b..e7f92a4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
smp_store_cpu_info(cpu);
/*
- * OK, now it's safe to let the boot CPU continue
+ * OK, now it's safe to let the boot CPU continue. Wait for
+ * the CPU migration code to notice that the CPU is online
+ * before we continue.
*/
set_cpu_online(cpu, true);
+ while (!cpu_active(cpu))
+ cpu_relax();
/*
* OK, it's off to the idle thread for us
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d52eec2..6807cb1 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
fs = get_fs();
set_fs(KERNEL_DS);
- for (i = -4; i < 1; i++) {
+ for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
@@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- if (!pte_present(*pte) || !pte_dirty(*pte)) {
+ if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 4e66881..fc4e98e 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = {
.resource = da850_mcasp_resources,
};
-struct platform_device davinci_pcm_device = {
+static struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 8f4f736..806a2f0 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -298,7 +298,7 @@ static void davinci_init_wdt(void)
/*-------------------------------------------------------------------------*/
-struct platform_device davinci_pcm_device = {
+static struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index a0b8388..e722139 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -252,9 +252,11 @@ static struct irq_chip gpio_irqchip = {
static void
gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(irq);
+ struct davinci_gpio_regs __iomem *g;
u32 mask = 0xffff;
+ g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
+
/* we only care about one bank */
if (irq & 1)
mask <<= 16;
@@ -422,8 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
/* set up all irqs in this bank */
irq_set_chained_handler(bank_irq, gpio_irq_handler);
- irq_set_chip_data(bank_irq, (__force void *)g);
- irq_set_handler_data(bank_irq, (void *)irq);
+ irq_set_handler_data(bank_irq, (__force void *)g);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
irq_set_chip(irq, &gpio_irqchip);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 8207954..1d4b65f 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -402,11 +402,15 @@ static struct resource ep93xx_eth_resource[] = {
}
};
+static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device ep93xx_eth_device = {
.name = "ep93xx-eth",
.id = -1,
.dev = {
- .platform_data = &ep93xx_eth_data,
+ .platform_data = &ep93xx_eth_data,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &ep93xx_eth_dma_mask,
},
.num_resources = ARRAY_SIZE(ep93xx_eth_resource),
.resource = ep93xx_eth_resource,
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index b92c1e5..1435fc3 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -91,6 +91,11 @@ config EXYNOS4_SETUP_FIMC
help
Common setup code for the camera interfaces.
+config EXYNOS4_SETUP_USB_PHY
+ bool
+ help
+ Common setup code for USB PHY controller
+
# machine support
menu "EXYNOS4 Machines"
@@ -176,6 +181,7 @@ config MACH_NURI
select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C5
select EXYNOS4_SETUP_SDHCI
+ select EXYNOS4_SETUP_USB_PHY
select SAMSUNG_DEV_PWM
help
Machine support for Samsung Mobile NURI Board.
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index a9bb94f..60fe5ecf 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -56,4 +56,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
-obj-$(CONFIG_USB_SUPPORT) += usb-phy.o
+obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 08813a6..9babe44 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -98,7 +98,7 @@ static struct map_desc exynos4_iodesc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S5P_VA_USB_HSPHY,
+ .virtual = (unsigned long)S3C_VA_USB_HSPHY,
.pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
.length = SZ_4K,
.type = MT_DEVICE,
diff --git a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
index 703118d..c337cf3 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
@@ -11,7 +11,7 @@
#ifndef __PLAT_S5P_REGS_USB_PHY_H
#define __PLAT_S5P_REGS_USB_PHY_H
-#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S5P_VA_USB_HSPHY)
+#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY)
#define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00)
#define PHY1_HSIC_NORMAL_MASK (0xf << 9)
diff --git a/arch/arm/mach-exynos4/init.c b/arch/arm/mach-exynos4/init.c
index cf91f50..a8a83e3 100644
--- a/arch/arm/mach-exynos4/init.c
+++ b/arch/arm/mach-exynos4/init.c
@@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
tcfg->clocks = exynos4_serial_clocks;
tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
}
+ tcfg->flags |= NO_NEED_CHECK_CLKSRC;
}
s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
diff --git a/arch/arm/mach-exynos4/usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b..0883c1b 100644
--- a/arch/arm/mach-exynos4/usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
diff --git a/arch/arm/mach-exynos4/time.c b/arch/arm/mach-exynos4/time.c
index 86b9fa0..ebb8f38 100644
--- a/arch/arm/mach-exynos4/time.c
+++ b/arch/arm/mach-exynos4/time.c
@@ -206,6 +206,7 @@ static cycle_t exynos4_pwm4_read(struct clocksource *cs)
return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40));
}
+#ifdef CONFIG_PM
static void exynos4_pwm4_resume(struct clocksource *cs)
{
unsigned long pclk;
@@ -218,6 +219,7 @@ static void exynos4_pwm4_resume(struct clocksource *cs)
exynos4_pwm_init(4, ~0);
exynos4_pwm_start(4, 1);
}
+#endif
struct clocksource pwm_clocksource = {
.name = "pwm_timer4",
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 5f1f986..121ad1d 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void)
clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
+ ce->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(ce);
}
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index 30b971d..1be2eeb 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -26,6 +26,7 @@
#include <asm/hardware/debug-8250.S>
#else
+#include <mach/hardware.h>
/* For EBSA285 debugging */
.equ dc21285_high, ARMCSR_BASE & 0xff000000
.equ dc21285_low, ARMCSR_BASE & 0x00ffffff
@@ -36,8 +37,8 @@
.else
mov \rp, #0
.endif
- orr \rv, \rp, #0x42000000
- orr \rp, \rp, #dc21285_high
+ orr \rv, \rp, #dc21285_high
+ orr \rp, \rp, #0x42000000
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig
index 9b6982ef..abf356c 100644
--- a/arch/arm/mach-h720x/Kconfig
+++ b/arch/arm/mach-h720x/Kconfig
@@ -6,12 +6,14 @@ config ARCH_H7201
bool "gms30c7201"
depends on ARCH_H720X
select CPU_H7201
+ select ZONE_DMA
help
Say Y here if you are using the Hynix GMS30C7201 Reference Board
config ARCH_H7202
bool "hms30c7202"
select CPU_H7202
+ select ZONE_DMA
depends on ARCH_H720X
help
Say Y here if you are using the Hynix HMS30C7202 Reference Board
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e9..63621f1 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
#include <linux/io.h>
#include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+
#include <mach/msm_iomap.h>
#include <mach/cpu.h>
@@ -55,10 +57,12 @@ enum timer_location {
#if defined(CONFIG_ARCH_QSD8X50)
#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
#define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
- defined(CONFIG_ARCH_MSM8960)
+#elif defined(CONFIG_ARCH_MSM7X30)
#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
#define MSM_DGT_SHIFT (0)
+#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
+#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
+#define MSM_DGT_SHIFT (0)
#else
#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
#define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
{
struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
- return readl(clk->global_counter);
+ /*
+ * Shift timer count down by a constant due to unreliable lower bits
+ * on some targets.
+ */
+ return readl(clk->global_counter) >> clk->shift;
}
static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 65157a3..54add60 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/mutex.h>
+#include <asm/processor.h> /* for cpu_relax() */
+
#include <mach/mxs.h>
#define OCOTP_WORD_OFFSET 0x20
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index af98117..5b114d1 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,14 +4,14 @@
# Common support
obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o reset.o
+obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
# Power Management
-obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
+obj-$(CONFIG_PM) += pm.o sleep.o
# DSP
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index fe31d93..334fb88 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
USE_PLATFORM_PM_SLEEP_OPS
},
};
+#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#else
+#define OMAP1_PWR_DOMAIN NULL
+#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
- .pwr_domain = &default_power_domain,
+ .pwr_domain = OMAP1_PWR_DOMAIN,
.con_ids = { "ick", "fck", NULL, },
};
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
return 0;
}
core_initcall(omap1_pm_runtime_init);
-#endif /* CONFIG_PM_RUNTIME */
+
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 2a0bb48..23f71d4 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
static struct omap_nand_platform_data pandora_nand_data = {
.cs = 0,
- .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */
+ .devsize = NAND_BUSWIDTH_16,
+ .xfer_type = NAND_OMAP_PREFETCH_DMA,
.parts = omap3pandora_nand_partitions,
.nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions),
};
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a5a83b3..e01da45 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
static int pm_dbg_init_done;
-static int __init pm_dbg_init(void);
+static int pm_dbg_init(void);
enum {
DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
-static int __init pm_dbg_init(void)
+static int pm_dbg_init(void)
{
int i;
struct dentry *d;
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 7fe7406..094279a 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 0d468e9..8169535 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,7 +10,6 @@ obj-n :=
obj- :=
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
-obj-$(CONFIG_CPU_S3C2410) += irq.o
obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c
deleted file mode 100644
index 2854129..0000000
--- a/arch/arm/mach-s3c2410/irq.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* linux/arch/arm/mach-s3c2410/irq.c
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/syscore_ops.h>
-
-#include <plat/cpu.h>
-#include <plat/pm.h>
-
-struct syscore_ops s3c24xx_irq_syscore_ops = {
- .suspend = s3c24xx_irq_suspend,
- .resume = s3c24xx_irq_resume,
-};
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index 22046e2..153af8b 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -101,12 +101,14 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
unsigned long tmp, tmp1;
void __iomem *reg = NULL;
- if (ch == DMC0)
+ if (ch == DMC0) {
reg = (S5P_VA_DMC0 + 0x30);
- else if (ch == DMC1)
+ } else if (ch == DMC1) {
reg = (S5P_VA_DMC1 + 0x30);
- else
+ } else {
printk(KERN_ERR "Cannot find DMC port\n");
+ return;
+ }
/* Find current DRAM frequency */
tmp = s5pv210_dram_conf[ch].freq;
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index c95258c..1e2aba2 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -382,10 +382,8 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
}
static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
- .tmio_caps = MMC_CAP_NONREMOVABLE,
+ .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
.tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.set_pwr = ag5evm_sdhi1_set_pwr,
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 776f205..7e1d375 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -126,7 +126,7 @@
* ------+--------------------+--------------------+-------
* IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low
* IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High
- * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Tuch Panel | Low
+ * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low
* IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low
* IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low
* IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High
@@ -165,10 +165,10 @@
* USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
* But don't select both drivers in same time.
* These use the same IRQ number for request_irq(), and aren't supporting
- * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE.
+ * IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE.
*
* Actually these are old/new version of USB driver.
- * This mean its register will be broken if it supports SHARD IRQ,
+ * This means its register will be broken if it supports shared IRQ,
*/
/*
@@ -562,7 +562,121 @@ out:
clk_put(hdmi_ick);
}
-/* USB1 (Host) */
+/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
+ *
+ * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
+ * but on this particular board IRQ7 is already used by
+ * the touch screen. This leaves us with software polling.
+ */
+#define USBHS0_POLL_INTERVAL (HZ * 5)
+
+struct usbhs_private {
+ unsigned int usbphyaddr;
+ unsigned int usbcrcaddr;
+ struct renesas_usbhs_platform_info info;
+ struct delayed_work work;
+ struct platform_device *pdev;
+};
+
+#define usbhs_get_priv(pdev) \
+ container_of(renesas_usbhs_get_info(pdev), \
+ struct usbhs_private, info)
+
+#define usbhs_is_connected(priv) \
+ (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
+
+static int usbhs_get_vbus(struct platform_device *pdev)
+{
+ return usbhs_is_connected(usbhs_get_priv(pdev));
+}
+
+static void usbhs_phy_reset(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ /* init phy */
+ __raw_writew(0x8a0a, priv->usbcrcaddr);
+}
+
+static int usbhs0_get_id(struct platform_device *pdev)
+{
+ return USBHS_GADGET;
+}
+
+static void usbhs0_work_function(struct work_struct *work)
+{
+ struct usbhs_private *priv = container_of(work, struct usbhs_private,
+ work.work);
+
+ renesas_usbhs_call_notify_hotplug(priv->pdev);
+ schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
+}
+
+static int usbhs0_hardware_init(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ priv->pdev = pdev;
+ INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
+ schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
+ return 0;
+}
+
+static void usbhs0_hardware_exit(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ cancel_delayed_work_sync(&priv->work);
+}
+
+static struct usbhs_private usbhs0_private = {
+ .usbcrcaddr = 0xe605810c, /* USBCR2 */
+ .info = {
+ .platform_callback = {
+ .hardware_init = usbhs0_hardware_init,
+ .hardware_exit = usbhs0_hardware_exit,
+ .phy_reset = usbhs_phy_reset,
+ .get_id = usbhs0_get_id,
+ .get_vbus = usbhs_get_vbus,
+ },
+ .driver_param = {
+ .buswait_bwait = 4,
+ },
+ },
+};
+
+static struct resource usbhs0_resources[] = {
+ [0] = {
+ .name = "USBHS0",
+ .start = 0xe6890000,
+ .end = 0xe68900e6 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x1ca0) /* USB0_USB0I0 */,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbhs0_device = {
+ .name = "renesas_usbhs",
+ .id = 0,
+ .dev = {
+ .platform_data = &usbhs0_private.info,
+ },
+ .num_resources = ARRAY_SIZE(usbhs0_resources),
+ .resource = usbhs0_resources,
+};
+
+/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
+ *
+ * Use J30 to select between Host and Function. This setting
+ * can however not be detected by software. Hotplug of USBHS1
+ * is provided via IRQ8.
+ */
+#define IRQ8 evt2irq(0x0300)
+
+/* USBHS1 USB Host support via r8a66597_hcd */
static void usb1_host_port_power(int port, int power)
{
if (!power) /* only power-on is supported for now */
@@ -579,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = {
static struct resource usb1_host_resources[] = {
[0] = {
- .name = "USBHS",
- .start = 0xE68B0000,
- .end = 0xE68B00E6 - 1,
+ .name = "USBHS1",
+ .start = 0xe68b0000,
+ .end = 0xe68b00e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -602,37 +716,14 @@ static struct platform_device usb1_host_device = {
.resource = usb1_host_resources,
};
-/* USB1 (Function) */
+/* USBHS1 USB Function support via renesas_usbhs */
+
#define USB_PHY_MODE (1 << 4)
#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
#define USB_PHY_ON (1 << 1)
#define USB_PHY_OFF (1 << 0)
#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
-struct usbhs_private {
- unsigned int irq;
- unsigned int usbphyaddr;
- unsigned int usbcrcaddr;
- struct renesas_usbhs_platform_info info;
-};
-
-#define usbhs_get_priv(pdev) \
- container_of(renesas_usbhs_get_info(pdev), \
- struct usbhs_private, info)
-
-#define usbhs_is_connected(priv) \
- (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
-
-static int usbhs1_get_id(struct platform_device *pdev)
-{
- return USBHS_GADGET;
-}
-
-static int usbhs1_get_vbus(struct platform_device *pdev)
-{
- return usbhs_is_connected(usbhs_get_priv(pdev));
-}
-
static irqreturn_t usbhs1_interrupt(int irq, void *data)
{
struct platform_device *pdev = data;
@@ -654,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
struct usbhs_private *priv = usbhs_get_priv(pdev);
int ret;
- irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
-
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
- ret = request_irq(priv->irq, usbhs1_interrupt, 0,
+ ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
dev_name(&pdev->dev), pdev);
if (ret) {
dev_err(&pdev->dev, "request_irq err\n");
@@ -679,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
- free_irq(priv->irq, pdev);
+ free_irq(IRQ8, pdev);
}
-static void usbhs1_phy_reset(struct platform_device *pdev)
+static int usbhs1_get_id(struct platform_device *pdev)
{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- /* init phy */
- __raw_writew(0x8a0a, priv->usbcrcaddr);
+ return USBHS_GADGET;
}
static u32 usbhs1_pipe_cfg[] = {
@@ -710,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = {
};
static struct usbhs_private usbhs1_private = {
- .irq = evt2irq(0x0300), /* IRQ8 */
- .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */
- .usbcrcaddr = 0xE6058130, /* USBCR4 */
+ .usbphyaddr = 0xe60581e2, /* USBPHY1INTAP */
+ .usbcrcaddr = 0xe6058130, /* USBCR4 */
.info = {
.platform_callback = {
.hardware_init = usbhs1_hardware_init,
.hardware_exit = usbhs1_hardware_exit,
- .phy_reset = usbhs1_phy_reset,
.get_id = usbhs1_get_id,
- .get_vbus = usbhs1_get_vbus,
+ .phy_reset = usbhs_phy_reset,
+ .get_vbus = usbhs_get_vbus,
},
.driver_param = {
.buswait_bwait = 4,
@@ -731,9 +816,9 @@ static struct usbhs_private usbhs1_private = {
static struct resource usbhs1_resources[] = {
[0] = {
- .name = "USBHS",
- .start = 0xE68B0000,
- .end = 0xE68B00E6 - 1,
+ .name = "USBHS1",
+ .start = 0xe68b0000,
+ .end = 0xe68b00e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -752,7 +837,6 @@ static struct platform_device usbhs1_device = {
.resource = usbhs1_resources,
};
-
/* LED */
static struct gpio_led mackerel_leds[] = {
{
@@ -1203,6 +1287,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
+ &usbhs0_device,
&usb1_host_device,
&usbhs1_device,
&leds_device,
@@ -1301,6 +1386,7 @@ static void __init mackerel_map_io(void)
#define GPIO_PORT9CR 0xE6051009
#define GPIO_PORT10CR 0xE605100A
+#define GPIO_PORT167CR 0xE60520A7
#define GPIO_PORT168CR 0xE60520A8
#define SRCR4 0xe61580bc
#define USCCR1 0xE6058144
@@ -1354,17 +1440,17 @@ static void __init mackerel_init(void)
gpio_request(GPIO_PORT151, NULL); /* LCDDON */
gpio_direction_output(GPIO_PORT151, 1);
- /* USB enable */
- gpio_request(GPIO_FN_VBUS0_1, NULL);
- gpio_request(GPIO_FN_IDIN_1_18, NULL);
- gpio_request(GPIO_FN_PWEN_1_115, NULL);
- gpio_request(GPIO_FN_OVCN_1_114, NULL);
- gpio_request(GPIO_FN_EXTLP_1, NULL);
- gpio_request(GPIO_FN_OVCN2_1, NULL);
- gpio_pull_down(GPIO_PORT168CR);
-
- /* setup USB phy */
- __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */
+ /* USBHS0 */
+ gpio_request(GPIO_FN_VBUS0_0, NULL);
+ gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
+
+ /* USBHS1 */
+ gpio_request(GPIO_FN_VBUS0_1, NULL);
+ gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
+ gpio_request(GPIO_FN_IDIN_1_113, NULL);
+
+ /* USB phy tweak to make the r8a66597_hcd host driver work */
+ __raw_writew(0x8a0a, 0xe6058130); /* USBCR4 */
/* enable FSI2 port A (ak4643) */
gpio_request(GPIO_FN_FSIAIBT, NULL);
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 5d0e150..a911a60 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
+{
+ return 0; /* always allow wakeup */
+}
+
void __init sh73a0_init_irq(void)
{
void __iomem *gic_dist_base = __io(0xf0001000);
@@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void)
void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
gic_init(0, 29, gic_dist_base, gic_cpu_base);
+ gic_arch_extn.irq_set_wake = sh73a0_set_wake;
register_intc_controller(&intcs_desc);
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 2c10190..e546017 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xc00), evt2irq(0xc00),
evt2irq(0xc00), evt2irq(0xc00) },
};
@@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xc20), evt2irq(0xc20),
evt2irq(0xc20), evt2irq(0xc20) },
};
@@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xc40), evt2irq(0xc40),
evt2irq(0xc40), evt2irq(0xc40) },
};
@@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xc60), evt2irq(0xc60),
evt2irq(0xc60), evt2irq(0xc60) },
};
@@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xd20), evt2irq(0xd20),
evt2irq(0xd20), evt2irq(0xd20) },
};
@@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFA,
.irqs = { evt2irq(0xd40), evt2irq(0xd40),
evt2irq(0xd40), evt2irq(0xd40) },
};
@@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
- .type = PORT_SCIF,
+ .type = PORT_SCIFB,
.irqs = { evt2irq(0xd60), evt2irq(0xd60),
evt2irq(0xd60), evt2irq(0xd60) },
};
diff --git a/arch/arm/mach-u300/clock.h b/arch/arm/mach-u300/clock.h
index c34f3ea..4f50ca8 100644
--- a/arch/arm/mach-u300/clock.h
+++ b/arch/arm/mach-u300/clock.h
@@ -31,7 +31,7 @@ struct clk {
bool reset;
__u16 clk_val;
__s8 usecount;
- __u32 res_reg;
+ void __iomem * res_reg;
__u16 res_mask;
bool hw_ctrld;
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 8b85df4..035fdc9 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -18,6 +18,12 @@
* the defines are used for setting up the I/O memory mapping.
*/
+#ifdef __ASSEMBLER__
+#define IOMEM(a) (a)
+#else
+#define IOMEM(a) (void __iomem *) a
+#endif
+
/* NAND Flash CS0 */
#define U300_NAND_CS0_PHYS_BASE 0x80000000
@@ -48,13 +54,6 @@
#endif
/*
- * All the following peripherals are specified at their PHYSICAL address,
- * so if you need to access them (in the kernel), you MUST use the macros
- * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST()
- * etc.
- */
-
-/*
* AHB peripherals
*/
@@ -63,11 +62,11 @@
/* Vectored Interrupt Controller 0, servicing 32 interrupts */
#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000)
-#define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000)
+#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
/* Vectored Interrupt Controller 1, servicing 32 interrupts */
#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000)
-#define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000)
+#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
/* Memory Stick Pro (MSPRO) controller */
#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000)
@@ -115,7 +114,7 @@
/* SYSCON */
#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000)
-#define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000)
+#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
/* Watchdog */
#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000)
@@ -125,7 +124,7 @@
/* APP side special timer */
#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000)
-#define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000)
+#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
/* Keypad */
#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000)
@@ -181,5 +180,4 @@
* Virtual accessor macros for static devices
*/
-
#endif
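
The IOMEM() macro introduced above gives the *_VBASE definitions the __iomem annotation, so they can be passed straight to the MMIO accessors (the timer change below uses U300_TIMER_APP_VBASE this way). A minimal usage sketch, not part of the patch; the offset and function name are hypothetical:

	/* sketch only: assumes <linux/io.h> and <mach/u300-regs.h> are included */
	#define EXAMPLE_REG_OFFSET 0x10			/* hypothetical offset */

	static u32 example_read_syscon(void)
	{
		/* U300_SYSCON_VBASE expands to IOMEM(...), i.e. a void __iomem *,
		 * so readl()/writel() need no per-call cast. */
		return readl(U300_SYSCON_VBASE + EXAMPLE_REG_OFFSET);
	}
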
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 891cf44..18d7fa0 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -411,8 +411,7 @@ static void __init u300_timer_init(void)
/* Use general purpose timer 2 as clock source */
if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
- printk(KERN_ERR "timer: failed to initialize clock "
- "source %s\n", clocksource_u300_1mhz.name);
+ pr_err("timer: failed to initialize U300 clock source\n");
clockevents_calc_mult_shift(&clockevent_u300_1mhz,
rate, APPTIMER_MIN_RANGE);
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index fd4cf1c..70cdbd6 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = {
GPIO168_KP_O0,
/* UART */
- GPIO0_U0_CTSn | PIN_INPUT_PULLUP,
- GPIO1_U0_RTSn | PIN_OUTPUT_HIGH,
- GPIO2_U0_RXD | PIN_INPUT_PULLUP,
- GPIO3_U0_TXD | PIN_OUTPUT_HIGH,
+ /* The uart-0 pins' GPIO configuration should be
+ * kept intact to prevent a glitch on the tx line
+ * when the tty device is opened. These pins are
+ * later reconfigured for UART use via mop500_pins_uart0.
+ *
+ * This will be replaced with the UART configuration
+ * once the issue is solved.
+ */
+ GPIO0_GPIO | PIN_INPUT_PULLUP,
+ GPIO1_GPIO | PIN_OUTPUT_HIGH,
+ GPIO2_GPIO | PIN_INPUT_PULLUP,
+ GPIO3_GPIO | PIN_OUTPUT_HIGH,
GPIO29_U2_RXD | PIN_INPUT_PULLUP,
GPIO30_U2_TXD | PIN_OUTPUT_HIGH,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index bb26f40..2a08c07 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -27,18 +27,21 @@
#include <linux/leds-lp5521.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <plat/i2c.h>
#include <plat/ste_dma40.h>
+#include <plat/pincfg.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
#include <mach/irqs.h>
+#include "pins-db8500.h"
#include "ste-dma40-db8500.h"
#include "devices-db8500.h"
#include "board-mop500.h"
@@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
};
#endif
+
+static pin_cfg_t mop500_pins_uart0[] = {
+ GPIO0_U0_CTSn | PIN_INPUT_PULLUP,
+ GPIO1_U0_RTSn | PIN_OUTPUT_HIGH,
+ GPIO2_U0_RXD | PIN_INPUT_PULLUP,
+ GPIO3_U0_TXD | PIN_OUTPUT_HIGH,
+};
+
+#define PRCC_K_SOFTRST_SET 0x18
+#define PRCC_K_SOFTRST_CLEAR 0x1C
+static void ux500_uart0_reset(void)
+{
+ void __iomem *prcc_rst_set, *prcc_rst_clr;
+
+ prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+ PRCC_K_SOFTRST_SET);
+ prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+ PRCC_K_SOFTRST_CLEAR);
+
+ /* Activate soft reset PRCC_K_SOFTRST_CLEAR */
+ writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr);
+ udelay(1);
+
+ /* Release soft reset PRCC_K_SOFTRST_SET */
+ writel((readl(prcc_rst_set) | 0x1), prcc_rst_set);
+ udelay(1);
+}
+
+static void ux500_uart0_init(void)
+{
+ int ret;
+
+ ret = nmk_config_pins(mop500_pins_uart0,
+ ARRAY_SIZE(mop500_pins_uart0));
+ if (ret < 0)
+ pr_err("pl011: uart pins_enable failed\n");
+}
+
+static void ux500_uart0_exit(void)
+{
+ int ret;
+
+ ret = nmk_config_pins_sleep(mop500_pins_uart0,
+ ARRAY_SIZE(mop500_pins_uart0));
+ if (ret < 0)
+ pr_err("pl011: uart pins_disable failed\n");
+}
+
static struct amba_pl011_data uart0_plat = {
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &uart0_dma_cfg_rx,
.dma_tx_param = &uart0_dma_cfg_tx,
#endif
+ .init = ux500_uart0_init,
+ .exit = ux500_uart0_exit,
+ .reset = ux500_uart0_reset,
};
static struct amba_pl011_data uart1_plat = {
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index c3c4176..4598b06 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -159,6 +159,9 @@ static void __init db8500_add_gpios(void)
/* No custom data yet */
};
+ if (cpu_is_u8500v2())
+ pdata.supports_sleepmode = true;
+
dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
IRQ_DB8500_GPIO0, &pdata);
}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 285edcd..9e6b93b 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = {
},
};
-static void __init v2m_init_early(void)
-{
- ct_desc->init_early();
- versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
-}
-
static void __init v2m_timer_init(void)
{
u32 scctrl;
@@ -365,6 +359,13 @@ static struct clk_lookup v2m_lookups[] = {
},
};
+static void __init v2m_init_early(void)
+{
+ ct_desc->init_early();
+ clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
+ versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
+}
+
static void v2m_power_off(void)
{
if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
@@ -418,8 +419,6 @@ static void __init v2m_init(void)
{
int i;
- clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
-
platform_device_register(&v2m_pcie_i2c_device);
platform_device_register(&v2m_ddc_i2c_device);
platform_device_register(&v2m_flash_device);
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 8bfae96..b0ee9ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
/*
* We fork()ed a process, and we need a new context for the child
- * to run in.
+ * to run in. We reserve version 0 for initial tasks so we will
+ * always allocate an ASID. The ASID 0 is reserved for the TTBR
+ * register changing sequence.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
- u32 ttb;
- /* Copy TTBR1 into TTBR0 */
- asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
- "mcr p15, 0, %0, c2, c0, 0"
- : "=r" (ttb));
+ /* set the reserved ASID before flushing the TLB */
+ asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@@ -94,7 +93,7 @@ static void reset_context(void *info)
return;
smp_rmb();
- asid = cpu_last_asid + cpu;
+ asid = cpu_last_asid + cpu + 1;
flush_context();
set_mm_context(mm, asid);
@@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
- asid = cpu_last_asid + smp_processor_id();
+ asid = cpu_last_asid + smp_processor_id() + 1;
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
#endif
- cpu_last_asid += NR_CPUS - 1;
+ cpu_last_asid += NR_CPUS;
}
set_mm_context(mm, asid);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2c2cce9..c19571c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -331,6 +331,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size &&
+ !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
+ phys_initrd_start, phys_initrd_size);
+ phys_initrd_start = phys_initrd_size = 0;
+ }
+ if (phys_initrd_size &&
memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
phys_initrd_start, phys_initrd_size);
@@ -635,7 +641,8 @@ void __init mem_init(void)
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
@@ -657,7 +664,8 @@ void __init mem_init(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_sdata, _edata));
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(__bss_start, __bss_stop));
#undef MLK
#undef MLM
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index e4c165c..537ffcb 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
.long 0
.long 0
.long v4_cache_fns
- .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info
+ .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
.type __triscenda7_proc_info, #object
__triscenda7_proc_info:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 7b7ebd4..546b54d 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
.long 0
.long 0
.long v4_cache_fns
- .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info
+ .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
.type __p2001_proc_info, #object
__p2001_proc_info:
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3b566e..089c0b5 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
- mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
- mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
+#ifdef CONFIG_ARM_ERRATA_754322
+ dsb
+#endif
+ mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
+ isb
+1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
- mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
- isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
@@ -208,19 +210,21 @@ cpu_v7_name:
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
-.equ cpu_v7_suspend_size, 4 * 8
+.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 1 @ Context ID
+ mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID
+ stmia r0!, {r4 - r6}
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 0 @ TTB 0
mrc p15, 0, r8, c2, c0, 1 @ TTB 1
mrc p15, 0, r9, c1, c0, 0 @ Control register
mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control
- stmia r0, {r4 - r11}
+ stmia r0, {r6 - r11}
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
@@ -228,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- ldmia r0, {r4 - r11}
+ ldmia r0!, {r4 - r6}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 1 @ Context ID
+ mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID
+ ldmia r0, {r6 - r11}
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
mcr p15, 0, r7, c2, c0, 0 @ TTB 0
mcr p15, 0, r8, c2, c0, 1 @ TTB 1
@@ -416,9 +422,9 @@ ENTRY(v7_processor_functions)
.word cpu_v7_dcache_clean_area
.word cpu_v7_switch_mm
.word cpu_v7_set_pte_ext
- .word 0
- .word 0
- .word 0
+ .word cpu_v7_suspend_size
+ .word cpu_v7_do_suspend
+ .word cpu_v7_do_resume
.size v7_processor_functions, . - v7_processor_functions
.section ".rodata"
diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c
index 9612a87..bab73e2 100644
--- a/arch/arm/plat-iop/cp6.c
+++ b/arch/arm/plat-iop/cp6.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
#include <asm/traps.h>
+#include <asm/ptrace.h>
static int cp6_trap(struct pt_regs *regs, unsigned int instr)
{
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 3538b85e..b130f60 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = {
#endif
#ifdef CONFIG_SOC_IMX51
-static struct sdma_script_start_addrs addr_imx51_to1 = {
+static struct sdma_script_start_addrs addr_imx51 = {
.ap_2_ap_addr = 642,
.uart_2_mcu_addr = 817,
.mcu_2_app_addr = 747,
@@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void)
#if defined(CONFIG_SOC_IMX51)
if (cpu_is_mx51()) {
- imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1;
+ int to_version = mx51_revision() >> 4;
+ imx51_imx_sdma_data.pdata.to_version = to_version;
+ imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51;
ret = imx_add_imx_sdma(&imx51_imx_sdma_data);
} else
#endif
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index ea19a5b..d5d7e65 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -90,6 +90,7 @@ struct nmk_gpio_platform_data {
int num_gpio;
u32 (*get_secondary_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
+ bool supports_sleepmode;
};
#endif /* __ASM_PLAT_GPIO_H */
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index a37b8eb..49fc0df 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -84,6 +84,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/pm_runtime.h>
#include <plat/omap_device.h>
#include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ ret = pm_generic_runtime_suspend(dev);
+
+ if (!ret)
+ omap_device_idle(pdev);
+
+ return ret;
+}
- return omap_device_idle(pdev);
+static int _od_runtime_idle(struct device *dev)
+{
+ return pm_generic_runtime_idle(dev);
}
static int _od_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- return omap_device_enable(pdev);
+ omap_device_enable(pdev);
+
+ return pm_generic_runtime_resume(dev);
}
static struct dev_power_domain omap_device_power_domain = {
.ops = {
.runtime_suspend = _od_runtime_suspend,
+ .runtime_idle = _od_runtime_idle,
.runtime_resume = _od_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
}
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index c10d10c..2abf966 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1199,7 +1199,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
#ifdef CONFIG_PM
-static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp)
+static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
{
printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index 9aee7e1..fc8c5f8 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
@@ -668,3 +669,8 @@ void __init s3c24xx_init_irq(void)
irqdbf("s3c2410: registered interrupt handlers\n");
}
+
+struct syscore_ops s3c24xx_irq_syscore_ops = {
+ .suspend = s3c24xx_irq_suspend,
+ .resume = s3c24xx_irq_resume,
+};
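
A syscore_ops structure like the one added above only takes effect once it is registered; a one-line sketch of the typical registration (done elsewhere, not in this hunk):

	/* sketch only: typically called from the machine/CPU init code */
	register_syscore_ops(&s3c24xx_irq_syscore_ops);
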
diff --git a/arch/arm/plat-s5p/dev-onenand.c b/arch/arm/plat-s5p/dev-onenand.c
index 6db9262..20336c8 100644
--- a/arch/arm/plat-s5p/dev-onenand.c
+++ b/arch/arm/plat-s5p/dev-onenand.c
@@ -15,8 +15,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/onenand.h>
#include <mach/irqs.h>
#include <mach/map.h>
@@ -45,13 +43,3 @@ struct platform_device s5p_device_onenand = {
.num_resources = ARRAY_SIZE(s5p_onenand_resources),
.resource = s5p_onenand_resources,
};
-
-void s5p_onenand_set_platdata(struct onenand_platform_data *pdata)
-{
- struct onenand_platform_data *pd;
-
- pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
- if (!pd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- s5p_device_onenand.dev.platform_data = pd;
-}
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index a6c3d32..d973d39 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -39,7 +39,7 @@
#define S5P_VA_TWD S5P_VA_COREPERI(0x600)
#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
-#define S5P_VA_USB_HSPHY S3C_ADDR(0x02900000)
+#define S3C_VA_USB_HSPHY S3C_ADDR(0x02900000)
#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000))
#define VA_VIC0 VA_VIC(0)
diff --git a/arch/arm/plat-samsung/dev-onenand.c b/arch/arm/plat-samsung/dev-onenand.c
index 45ec732..f54ae71 100644
--- a/arch/arm/plat-samsung/dev-onenand.c
+++ b/arch/arm/plat-samsung/dev-onenand.c
@@ -13,8 +13,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/onenand.h>
#include <mach/irqs.h>
#include <mach/map.h>
@@ -43,13 +41,3 @@ struct platform_device s3c_device_onenand = {
.num_resources = ARRAY_SIZE(s3c_onenand_resources),
.resource = s3c_onenand_resources,
};
-
-void s3c_onenand_set_platdata(struct onenand_platform_data *pdata)
-{
- struct onenand_platform_data *pd;
-
- pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
- if (!pd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- s3c_device_onenand.dev.platform_data = pd;
-}
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index b61b8ee..4af108f 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -75,10 +75,8 @@ extern struct platform_device s5pc100_device_spi1;
extern struct platform_device s5pc100_device_spi2;
extern struct platform_device s5pv210_device_spi0;
extern struct platform_device s5pv210_device_spi1;
-extern struct platform_device s5p6440_device_spi0;
-extern struct platform_device s5p6440_device_spi1;
-extern struct platform_device s5p6450_device_spi0;
-extern struct platform_device s5p6450_device_spi1;
+extern struct platform_device s5p64x0_device_spi0;
+extern struct platform_device s5p64x0_device_spi1;
extern struct platform_device s3c_device_hwmon;
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index c151c5f..116edfe 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -224,6 +224,8 @@
#define S5PV210_UFSTAT_RXMASK (255<<0)
#define S5PV210_UFSTAT_RXSHIFT (0)
+#define NO_NEED_CHECK_CLKSRC 1
+
#ifndef __ASSEMBLY__
/* struct s3c24xx_uart_clksrc
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 6f9ca56..a06bfcc 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 7eece0a..d8f1fe8 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 387eb9d..d4c5b19 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 19f6cee..77ca4f9 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_SLUB_DEBUG is not set
@@ -109,7 +110,7 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=m
CONFIG_RTC_DRV_AT32AP700X=m
CONFIG_DMADEVICES=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index f0fe237..6e0dca4 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index e4a7c1d..7f2a344 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 6f37f70..085eeba 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 4fb01f5..d1a887e 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 9faaf9b..956f281 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 3d2a5d8..40c69f3 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 1ed8f22..511eb8a 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index aeadc95..19973b0 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -6,6 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 1692bee..6f45681 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 8b670a6..3befab9 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
CONFIG_MODULES=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 5a51f2e..1bee51f 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 49a88f5..108502b 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -131,7 +131,6 @@ struct thread_struct {
*/
#define start_thread(regs, new_pc, new_sp) \
do { \
- set_fs(USER_DS); \
memset(regs, 0, sizeof(*regs)); \
regs->sr = MODE_USER; \
regs->pc = new_pc & ~1; \
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index aa677e2..7fbf0dc 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1043,8 +1043,9 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
data->regs = (void __iomem *)pdev->resource[0].start;
}
+ pdev->id = line;
pdata = pdev->dev.platform_data;
- pdata->num = portnr;
+ pdata->num = line;
at32_usarts[line] = pdev;
}
diff --git a/arch/avr32/mach-at32ap/include/mach/cpu.h b/arch/avr32/mach-at32ap/include/mach/cpu.h
index 9c96a13..8181293 100644
--- a/arch/avr32/mach-at32ap/include/mach/cpu.h
+++ b/arch/avr32/mach-at32ap/include/mach/cpu.h
@@ -31,8 +31,20 @@
#define cpu_is_at91sam9263() (0)
#define cpu_is_at91sam9rl() (0)
#define cpu_is_at91cap9() (0)
+#define cpu_is_at91cap9_revB() (0)
+#define cpu_is_at91cap9_revC() (0)
#define cpu_is_at91sam9g10() (0)
+#define cpu_is_at91sam9g20() (0)
#define cpu_is_at91sam9g45() (0)
#define cpu_is_at91sam9g45es() (0)
+#define cpu_is_at91sam9m10() (0)
+#define cpu_is_at91sam9g46() (0)
+#define cpu_is_at91sam9m11() (0)
+#define cpu_is_at91sam9x5() (0)
+#define cpu_is_at91sam9g15() (0)
+#define cpu_is_at91sam9g35() (0)
+#define cpu_is_at91sam9x35() (0)
+#define cpu_is_at91sam9g25() (0)
+#define cpu_is_at91sam9x25() (0)
#endif /* __ASM_ARCH_CPU_H */
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index 3e36461..c9ac2f8 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -167,14 +167,12 @@ static int intc_suspend(void)
return 0;
}
-static int intc_resume(void)
+static void intc_resume(void)
{
int i;
for (i = 0; i < 64; i++)
intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
-
- return 0;
}
#else
#define intc_suspend NULL
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 31d9542..9f1d084 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
CONFIG_MMC=m
CONFIG_SDH_BFIN=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_BFIN=m
CONFIG_EXT2_FS=m
# CONFIG_DNOTIFY is not set
diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h
index 9f3b5ac..115ced3 100644
--- a/arch/m32r/include/asm/mmzone.h
+++ b/arch/m32r/include/asm/mmzone.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
-})
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
int node;
for (node = 0 ; node < MAX_NUMNODES ; node++)
- if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+ if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
break;
return node;
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index fc98f9b..b004dc1 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -14,6 +14,33 @@ config GENERIC_CLOCKEVENTS
bool
default n
+config M68000
+ bool
+ help
+ The Freescale (formerly Motorola) 68000 CPU is the first generation of
+ the well-known M68K family of processors. As well as being available
+ as a stand-alone CPU, the core was also used in many System-On-Chip
+ devices (e.g. 68328, 68302). It does not contain a paging MMU.
+
+config MCPU32
+ bool
+ help
+ The Freescale (formerly Motorola) CPU32 is a CPU core based on the
+ 68020 processor. For the most part it is used in System-On-Chip
+ parts, and does not contain a paging MMU.
+
+config COLDFIRE
+ bool
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ help
+ The Freescale ColdFire family of processors is a modern derivative
+ of the 68000 processor family. They are mainly targeted at embedded
+ applications, and are all System-On-Chip (SOC) devices, as opposed
+ to stand-alone CPUs. They implement a subset of the original 68000
+ processor instruction set.
+
config COLDFIRE_SW_A7
bool
default n
@@ -36,26 +63,31 @@ choice
config M68328
bool "MC68328"
+ select M68000
help
Motorola 68328 processor support.
config M68EZ328
bool "MC68EZ328"
+ select M68000
help
Motorola 68EZ328 processor support.
config M68VZ328
bool "MC68VZ328"
+ select M68000
help
Motorola 68VZ328 processor support.
config M68360
bool "MC68360"
+ select MCPU32
help
Motorola 68360 processor support.
config M5206
bool "MCF5206"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -63,6 +95,7 @@ config M5206
config M5206e
bool "MCF5206e"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -70,6 +103,7 @@ config M5206e
config M520x
bool "MCF520x"
+ select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
help
@@ -77,6 +111,7 @@ config M520x
config M523x
bool "MCF523x"
+ select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -85,6 +120,7 @@ config M523x
config M5249
bool "MCF5249"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -92,6 +128,7 @@ config M5249
config M5271
bool "MCF5271"
+ select COLDFIRE
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
help
@@ -99,6 +136,7 @@ config M5271
config M5272
bool "MCF5272"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -106,6 +144,7 @@ config M5272
config M5275
bool "MCF5275"
+ select COLDFIRE
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
help
@@ -113,6 +152,7 @@ config M5275
config M528x
bool "MCF528x"
+ select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -121,6 +161,7 @@ config M528x
config M5307
bool "MCF5307"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_CACHE_CB
select HAVE_MBAR
@@ -129,12 +170,14 @@ config M5307
config M532x
bool "MCF532x"
+ select COLDFIRE
select HAVE_CACHE_CB
help
Freescale (Motorola) ColdFire 532x processor support.
config M5407
bool "MCF5407"
+ select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_CACHE_CB
select HAVE_MBAR
@@ -143,6 +186,7 @@ config M5407
config M547x
bool "MCF547x"
+ select COLDFIRE
select HAVE_CACHE_CB
select HAVE_MBAR
help
@@ -150,6 +194,7 @@ config M547x
config M548x
bool "MCF548x"
+ select COLDFIRE
select HAVE_CACHE_CB
select HAVE_MBAR
help
@@ -168,13 +213,6 @@ config M54xx
depends on (M548x || M547x)
default y
-config COLDFIRE
- bool
- depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx)
- select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
- default y
-
config CLOCK_SET
bool "Enable setting the CPU clock frequency"
default n
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 33f8276..1b7a14d 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,8 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
-#if !defined(__mc68020__) && !defined(__mc68030__) && \
- !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
/*
* Simpler 68k and ColdFire parts also need a few other gcc functions.
*/
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index f4d715cd..7dc4087 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -84,52 +84,52 @@ SECTIONS {
/* Kernel symbol table: Normal symbols */
. = ALIGN(4);
__start___ksymtab = .;
- *(__ksymtab)
+ *(SORT(___ksymtab+*))
__stop___ksymtab = .;
/* Kernel symbol table: GPL-only symbols */
__start___ksymtab_gpl = .;
- *(__ksymtab_gpl)
+ *(SORT(___ksymtab_gpl+*))
__stop___ksymtab_gpl = .;
/* Kernel symbol table: Normal unused symbols */
__start___ksymtab_unused = .;
- *(__ksymtab_unused)
+ *(SORT(___ksymtab_unused+*))
__stop___ksymtab_unused = .;
/* Kernel symbol table: GPL-only unused symbols */
__start___ksymtab_unused_gpl = .;
- *(__ksymtab_unused_gpl)
+ *(SORT(___ksymtab_unused_gpl+*))
__stop___ksymtab_unused_gpl = .;
/* Kernel symbol table: GPL-future symbols */
__start___ksymtab_gpl_future = .;
- *(__ksymtab_gpl_future)
+ *(SORT(___ksymtab_gpl_future+*))
__stop___ksymtab_gpl_future = .;
/* Kernel symbol table: Normal symbols */
__start___kcrctab = .;
- *(__kcrctab)
+ *(SORT(___kcrctab+*))
__stop___kcrctab = .;
/* Kernel symbol table: GPL-only symbols */
__start___kcrctab_gpl = .;
- *(__kcrctab_gpl)
+ *(SORT(___kcrctab_gpl+*))
__stop___kcrctab_gpl = .;
/* Kernel symbol table: Normal unused symbols */
__start___kcrctab_unused = .;
- *(__kcrctab_unused)
+ *(SORT(___kcrctab_unused+*))
__stop___kcrctab_unused = .;
/* Kernel symbol table: GPL-only unused symbols */
__start___kcrctab_unused_gpl = .;
- *(__kcrctab_unused_gpl)
+ *(SORT(___kcrctab_unused_gpl+*))
__stop___kcrctab_unused_gpl = .;
/* Kernel symbol table: GPL-future symbols */
__start___kcrctab_gpl_future = .;
- *(__kcrctab_gpl_future)
+ *(SORT(___kcrctab_gpl_future+*))
__stop___kcrctab_gpl_future = .;
/* Kernel symbol table: strings */
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index 62182c8..0648893 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -34,8 +34,10 @@ void *memcpy(void *to, const void *from, size_t n)
if (temp) {
long *lto = to;
const long *lfrom = from;
-#if defined(__mc68020__) || defined(__mc68030__) || \
- defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+ for (; temp; temp--)
+ *lto++ = *lfrom++;
+#else
asm volatile (
" movel %2,%3\n"
" andw #7,%3\n"
@@ -56,9 +58,6 @@ void *memcpy(void *to, const void *from, size_t n)
" jpl 4b"
: "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
: "0" (lfrom), "1" (lto), "2" (temp));
-#else
- for (; temp; temp--)
- *lto++ = *lfrom++;
#endif
to = lto;
from = lfrom;
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
index f649e6a..8a7639f 100644
--- a/arch/m68k/lib/memset.c
+++ b/arch/m68k/lib/memset.c
@@ -32,8 +32,10 @@ void *memset(void *s, int c, size_t count)
temp = count >> 2;
if (temp) {
long *ls = s;
-#if defined(__mc68020__) || defined(__mc68030__) || \
- defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+ for (; temp; temp--)
+ *ls++ = c;
+#else
size_t temp1;
asm volatile (
" movel %1,%2\n"
@@ -55,9 +57,6 @@ void *memset(void *s, int c, size_t count)
" jpl 1b"
: "=a" (ls), "=d" (temp), "=&d" (temp1)
: "d" (c), "0" (ls), "1" (temp));
-#else
- for (; temp; temp--)
- *ls++ = c;
#endif
s = ls;
}
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 079bafca..79e928a 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,17 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#if defined(__mc68020__) || defined(__mc68030__) || \
- defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
-
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("mulu%.l %3,%1:%0" \
- : "=d" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
- : "%0" ((USItype)(u)), \
- "dmi" ((USItype)(v)))
-
-#else
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
@@ -61,6 +51,15 @@ Boston, MA 02111-1307, USA. */
(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
} while (0)
+#else
+
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "dmi" ((USItype)(v)))
+
#endif
#define __umulsidi3(u, v) \
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 37862b2..807c97e 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_TEST=m
CONFIG_RTC_DRV_DS1307=m
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 3d6e60d..780560b 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -15,6 +15,7 @@
* User space memory access functions
*/
#include <linux/thread_info.h>
+#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index 9608d2c..e67eb9c 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
#define NODE_DATA(nid) (&node_data[nid].pg_data)
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
-})
-
/* We have these possible memory map layouts:
* Astro: 0-3.75, 67.75-68, 4-64
* zx1: 0-1, 257-260, 4-256
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 3d80c3e..12da77e 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,5 +1,4 @@
addnote
-dtc
empty.c
hack-coff
infblock.c
diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
deleted file mode 100644
index a7c3f94..0000000
--- a/arch/powerpc/boot/dtc-src/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-dtc-lexer.lex.c
-dtc-parser.tab.c
-dtc-parser.tab.h
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 4f685a7..98d9426 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -209,8 +209,10 @@
wm8776:codec@1a {
compatible = "wlf,wm8776";
reg = <0x1a>;
- /* MCLK source is a stand-alone oscillator */
- clock-frequency = <12288000>;
+ /*
+ * clock-frequency will be set by U-Boot if
+ * the clock is enabled.
+ */
};
};
@@ -280,7 +282,8 @@
codec-handle = <&wm8776>;
fsl,playback-dma = <&dma00>;
fsl,capture-dma = <&dma01>;
- fsl,fifo-depth = <16>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-asynchronous;
};
dma@c300 {
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 7f7e4a8..22e7195 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 6472322..185c292 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PS3=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index c9f212b..80bc5de 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -148,7 +148,6 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_BE2ISCSI=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index fd3fd58..7b58917 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
-
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index d902abd..b1d2dec 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,7 +14,7 @@
#define ASM_PPC_RIO_H
extern void platform_rio_init(void);
-#ifdef CONFIG_RAPIDIO
+#ifdef CONFIG_FSL_RIO
extern int fsl_rio_mcheck_exception(struct pt_regs *);
#else
static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 34d2722..9fb9332 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_value = 0x80240000,
.cpu_name = "e5500",
.cpu_features = CPU_FTRS_E5500,
- .cpu_user_features = COMMON_USER_BOOKE,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b8e6189..174e1e9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -83,11 +83,29 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+/*
+ * overlaps_initrd - check for overlap with page aligned extension of
+ * initrd.
+ */
+static inline int overlaps_initrd(unsigned long start, unsigned long size)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start)
+ return 0;
+
+ return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+ start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+#else
+ return 0;
+#endif
+}
+
/**
* move_device_tree - move tree to an unused area, if needed.
*
* The device tree may be allocated beyond our memory limit, or inside the
- * crash kernel region for kdump. If so, move it out of the way.
+ * crash kernel region for kdump, or within the page aligned range of initrd.
+ * If so, move it out of the way.
*/
static void __init move_device_tree(void)
{
@@ -100,7 +118,8 @@ static void __init move_device_tree(void)
size = be32_to_cpu(initial_boot_params->totalsize);
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
- overlaps_crashkernel(start, size)) {
+ overlaps_crashkernel(start, size) ||
+ overlaps_initrd(start, size)) {
p = __va(memblock_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = (struct boot_param_header *)p;
@@ -556,7 +575,9 @@ static void __init early_reserve_mem(void)
#ifdef CONFIG_BLK_DEV_INITRD
/* then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start))
- memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
+ memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+ _ALIGN_UP(initrd_end, PAGE_SIZE) -
+ _ALIGN_DOWN(initrd_start, PAGE_SIZE));
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_PPC32
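
The early_reserve_mem() change above reserves the whole page-aligned extent of the initrd rather than its exact byte range. A minimal sketch of that calculation, not part of the patch, using the same helpers as the hunk:

	/* sketch only: same expression as the hunk, split out for readability */
	unsigned long base = _ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE);
	unsigned long size = _ALIGN_UP(initrd_end, PAGE_SIZE) -
			     _ALIGN_DOWN(initrd_start, PAGE_SIZE);

	memblock_reserve(base, size);	/* covers every page the initrd touches */
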
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 77578c0..c57c193 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -4,6 +4,7 @@
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void)
}
} while (wait_time && (get_tb() < max_wait_tb));
- if (error != 0 && printk_ratelimit()) {
- printk(KERN_WARNING "error: reading the clock failed (%d)\n",
- error);
+ if (error != 0) {
+ printk_ratelimited(KERN_WARNING
+ "error: reading the clock failed (%d)\n",
+ error);
return 0;
}
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
wait_time = rtas_busy_delay_time(error);
if (wait_time) {
- if (in_interrupt() && printk_ratelimit()) {
+ if (in_interrupt()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
- printk(KERN_WARNING "error: reading clock"
- " would delay interrupt\n");
+ printk_ratelimited(KERN_WARNING
+ "error: reading clock "
+ "would delay interrupt\n");
return; /* delay not allowed */
}
msleep(wait_time);
}
} while (wait_time && (get_tb() < max_wait_tb));
- if (error != 0 && printk_ratelimit()) {
- printk(KERN_WARNING "error: reading the clock failed (%d)\n",
- error);
+ if (error != 0) {
+ printk_ratelimited(KERN_WARNING
+ "error: reading the clock failed (%d)\n",
+ error);
return;
}
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm)
}
} while (wait_time && (get_tb() < max_wait_tb));
- if (error != 0 && printk_ratelimit())
- printk(KERN_WARNING "error: setting the clock failed (%d)\n",
- error);
+ if (error != 0)
+ printk_ratelimited(KERN_WARNING
+ "error: setting the clock failed (%d)\n",
+ error);
return 0;
}
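
The rtas-rtc.c hunks above, and the signal, traps and fault hunks below, all apply the same conversion: an open-coded "if (printk_ratelimit()) printk(...)" pair becomes a single printk_ratelimited() call, which keeps its own per-call-site ratelimit state instead of sharing the global one. A minimal sketch of the pattern, not part of the patch; the helper is hypothetical:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	/* sketch only: hypothetical helper showing the conversion */
	static void report_clock_error(int error)
	{
		/* before:
		 *	if (printk_ratelimit())
		 *		printk(KERN_WARNING "error: reading the clock failed (%d)\n", error);
		 * after:
		 */
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n", error);
	}
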
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index b96a3a0..78b76dc 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -25,6 +25,7 @@
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
+#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
@@ -892,11 +893,12 @@ badframe:
printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
#endif
- if (show_unhandled_signals && printk_ratelimit())
- printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- addr, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(KERN_INFO
+ "%s[%d]: bad frame in handle_rt_signal32: "
+ "%p nip %08lx lr %08lx\n",
+ current->comm, current->pid,
+ addr, regs->nip, regs->link);
force_sigsegv(sig, current);
return 0;
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
return 0;
bad:
- if (show_unhandled_signals && printk_ratelimit())
- printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- rt_sf, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(KERN_INFO
+ "%s[%d]: bad frame in sys_rt_sigreturn: "
+ "%p nip %08lx lr %08lx\n",
+ current->comm, current->pid,
+ rt_sf, regs->nip, regs->link);
force_sig(SIGSEGV, current);
return 0;
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(ctx, regs, 1)) {
- if (show_unhandled_signals && printk_ratelimit())
- printk(KERN_INFO "%s[%d]: bad frame in "
- "sys_debug_setcontext: %p nip %08lx "
- "lr %08lx\n",
- current->comm, current->pid,
- ctx, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
+ "sys_debug_setcontext: %p nip %08lx "
+ "lr %08lx\n",
+ current->comm, current->pid,
+ ctx, regs->nip, regs->link);
force_sig(SIGSEGV, current);
goto out;
@@ -1236,11 +1239,12 @@ badframe:
printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
#endif
- if (show_unhandled_signals && printk_ratelimit())
- printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- frame, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(KERN_INFO
+ "%s[%d]: bad frame in handle_signal32: "
+ "%p nip %08lx lr %08lx\n",
+ current->comm, current->pid,
+ frame, regs->nip, regs->link);
force_sigsegv(sig, current);
return 0;
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
return 0;
badframe:
- if (show_unhandled_signals && printk_ratelimit())
- printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- addr, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(KERN_INFO
+ "%s[%d]: bad frame in sys_sigreturn: "
+ "%p nip %08lx lr %08lx\n",
+ current->comm, current->pid,
+ addr, regs->nip, regs->link);
force_sig(SIGSEGV, current);
return 0;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index da989ff..e91c736 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -24,6 +24,7 @@
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
@@ -380,10 +381,10 @@ badframe:
printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
regs, uc, &uc->uc_mcontext);
#endif
- if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, "rt_sigreturn",
- (long)uc, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+ current->comm, current->pid, "rt_sigreturn",
+ (long)uc, regs->nip, regs->link);
force_sig(SIGSEGV, current);
return 0;
@@ -468,10 +469,10 @@ badframe:
printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
#endif
- if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, "setup_rt_frame",
- (long)frame, regs->nip, regs->link);
+ if (show_unhandled_signals)
+ printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+ current->comm, current->pid, "setup_rt_frame",
+ (long)frame, regs->nip, regs->link);
force_sigsegv(signr, current);
return 0;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0ff4ab9..1a01414 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -34,6 +34,7 @@
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
if (die("Exception in kernel mode", regs, signr))
return;
} else if (show_unhandled_signals &&
- unhandled_signal(current, signr) &&
- printk_ratelimit()) {
- printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, signr,
- addr, regs->nip, regs->link, code);
- }
+ unhandled_signal(current, signr)) {
+ printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+ current->comm, current->pid, signr,
+ addr, regs->nip, regs->link, code);
+ }
memset(&info, 0, sizeof(info));
info.si_signo = signr;
@@ -425,7 +425,7 @@ int machine_check_e500mc(struct pt_regs *regs)
unsigned long reason = mcsr;
int recoverable = 1;
- if (reason & MCSR_BUS_RBERR) {
+ if (reason & MCSR_LD) {
recoverable = fsl_rio_mcheck_exception(regs);
if (recoverable == 1)
goto silent_out;
@@ -1342,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs)
} else {
/* didn't recognize the instruction */
/* XXX quick hack for now: set the non-Java bit in the VSCR */
- if (printk_ratelimit())
- printk(KERN_ERR "Unrecognized altivec instruction "
- "in %s at %lx\n", current->comm, regs->nip);
+ printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
+ "in %s at %lx\n", current->comm, regs->nip);
current->thread.vscr.u[3] |= 0x10000;
}
}
@@ -1548,9 +1547,8 @@ u32 ppc_warn_emulated;
void ppc_warn_emulated_print(const char *type)
{
- if (printk_ratelimit())
- pr_warning("%s used emulated %s instruction\n", current->comm,
- type);
+ pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
+ type);
}
static int __init ppc_warn_emulated_init(void)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 54f4fb9..ad35f66 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -31,6 +31,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
+#include <linux/ratelimit.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -346,11 +347,10 @@ bad_area_nosemaphore:
return 0;
}
- if (is_exec && (error_code & DSISR_PROTFAULT)
- && printk_ratelimit())
- printk(KERN_CRIT "kernel tried to execute NX-protected"
- " page (%lx) - exploit attempt? (uid: %d)\n",
- address, current_uid());
+ if (is_exec && (error_code & DSISR_PROTFAULT))
+ printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
+ " page (%lx) - exploit attempt? (uid: %d)\n",
+ address, current_uid());
return SIGSEGV;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d65b591..5de0f25 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -223,21 +223,6 @@ void free_initmem(void)
#undef FREESEC
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
-}
-#endif
-
-
#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6374b21..f6dbb4c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -99,20 +99,6 @@ void free_initmem(void)
((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
-}
-#endif
-
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 097b288..89cbfef 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -383,6 +383,25 @@ void __init mem_init(void)
mem_init_done = 1;
}
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start >= end)
+ return;
+
+ start = _ALIGN_DOWN(start, PAGE_SIZE);
+ end = _ALIGN_UP(end, PAGE_SIZE);
+ pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 0608b16..d917573 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
out_be32(&lbc->lteccr, LTECCR_CLEAR);
out_be32(&lbc->ltedr, LTEDR_ENABLE);
- /* Enable interrupts for any detected events */
- out_be32(&lbc->lteir, LTEIR_ENABLE);
-
/* Set the monitor timeout value to the maximum for erratum A001 */
if (of_device_is_compatible(node, "fsl,elbc"))
clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
@@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
goto err;
}
+ /* Enable interrupts for any detected events */
+ out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
+
return 0;
err:
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 5b206a2..b3fd081 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -283,23 +283,24 @@ static void __iomem *rio_regs_win;
#ifdef CONFIG_E500
int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
- const struct exception_table_entry *entry = NULL;
- unsigned long reason = mfspr(SPRN_MCSR);
-
- if (reason & MCSR_BUS_RBERR) {
- reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
- if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
- /* Check if we are prepared to handle this fault */
- entry = search_exception_tables(regs->nip);
- if (entry) {
- pr_debug("RIO: %s - MC Exception handled\n",
- __func__);
- out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
- 0);
- regs->msr |= MSR_RI;
- regs->nip = entry->fixup;
- return 1;
- }
+ const struct exception_table_entry *entry;
+ unsigned long reason;
+
+ if (!rio_regs_win)
+ return 0;
+
+ reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
+ if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
+ /* Check if we are prepared to handle this fault */
+ entry = search_exception_tables(regs->nip);
+ if (entry) {
+ pr_debug("RIO: %s - MC Exception handled\n",
+ __func__);
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
+ 0);
+ regs->msr |= MSR_RI;
+ regs->nip = entry->fixup;
+ return 1;
}
}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index d3bc7e5..d5d3ff3 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
+#include <linux/ratelimit.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
@@ -1612,9 +1613,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
return NO_IRQ;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
- if (printk_ratelimit())
- printk(KERN_WARNING "%s: Got protected source %d !\n",
- mpic->name, (int)src);
+ printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+ mpic->name, (int)src);
mpic_eoi(mpic);
return NO_IRQ;
}
@@ -1652,9 +1652,8 @@ unsigned int mpic_get_coreint_irq(void)
return NO_IRQ;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
- if (printk_ratelimit())
- printk(KERN_WARNING "%s: Got protected source %d !\n",
- mpic->name, (int)src);
+ printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+ mpic->name, (int)src);
return NO_IRQ;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 90d77bd..c03fef7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -579,6 +579,7 @@ config S390_GUEST
def_bool y
prompt "s390 guest support for KVM (EXPERIMENTAL)"
depends on 64BIT && EXPERIMENTAL
+ select VIRTUALIZATION
select VIRTIO
select VIRTIO_RING
select VIRTIO_CONSOLE
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 52420d2..1d55c95 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
memset(&parms.orvals, 0, sizeof(parms.orvals));
memset(&parms.andvals, 0xff, sizeof(parms.andvals));
- parms.orvals[cr] = 1 << bit;
+ parms.orvals[cr] = 1UL << bit;
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
memset(&parms.orvals, 0, sizeof(parms.orvals));
memset(&parms.andvals, 0xff, sizeof(parms.andvals));
- parms.andvals[cr] = ~(1L << bit);
+ parms.andvals[cr] = ~(1UL << bit);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 5995e9b..0e358c2 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#include "hwsampler.h"
-#define DEFAULT_INTERVAL 4096
+#define DEFAULT_INTERVAL 4127518
#define DEFAULT_SDBT_BLOCKS 1
#define DEFAULT_SDB_BLOCKS 511
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
if (oprofile_max_interval == 0)
return -ENODEV;
+ /* The initial value should be sane */
+ if (oprofile_hw_interval < oprofile_min_interval)
+ oprofile_hw_interval = oprofile_min_interval;
+ if (oprofile_hw_interval > oprofile_max_interval)
+ oprofile_hw_interval = oprofile_max_interval;
+
if (oprofile_timer_init(ops))
return -ENODEV;
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3a32741..513cb1a 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/r8a66597.h>
+#include <linux/usb/renesas_usbhs.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
@@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = {
.resource = usb1_common_resources,
};
+/*
+ * USBHS
+ */
+static int usbhs_get_id(struct platform_device *pdev)
+{
+ return gpio_get_value(GPIO_PTB3);
+}
+
+static struct renesas_usbhs_platform_info usbhs_info = {
+ .platform_callback = {
+ .get_id = usbhs_get_id,
+ },
+ .driver_param = {
+ .buswait_bwait = 4,
+ .detection_delay = 5,
+ },
+};
+
+static struct resource usbhs_resources[] = {
+ [0] = {
+ .start = 0xa4d90000,
+ .end = 0xa4d90124 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 66,
+ .end = 66,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbhs_device = {
+ .name = "renesas_usbhs",
+ .id = 1,
+ .dev = {
+ .dma_mask = NULL, /* not use dma */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbhs_info,
+ },
+ .num_resources = ARRAY_SIZE(usbhs_resources),
+ .resource = usbhs_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_USB1,
+ },
+};
+
/* LCDC */
const static struct fb_videomode ecovec_lcd_modes[] = {
{
@@ -897,6 +944,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
&sh_eth_device,
&usb0_host_device,
&usb1_common_device,
+ &usbhs_device,
&lcdc_device,
&ceu0_device,
&ceu1_device,
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 780e083..23bc849 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -27,8 +27,6 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
$(CONFIG_BOOT_LINK_OFFSET)]')
endif
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
ifeq ($(CONFIG_MCOUNT),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@@ -37,7 +35,25 @@ endif
LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
-T $(obj)/../../kernel/vmlinux.lds
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
+#
+# Pull in the necessary libgcc bits from the in-kernel implementation.
+#
+lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
+ lshrsi3.S
+lib1funcs-obj := \
+ $(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
+
+lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
+ifeq ($(BITS),64)
+ lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
+endif
+
+KBUILD_CFLAGS += -I$(lib1funcs-dir)
+
+$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
+ $(call cmd,shipped)
+
+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
$(call if_changed,ld)
@:
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 0f55891..e2cbd92 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_PL2303=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SH=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index 4676bf5..f848dec 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
" mov.l %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
- "+r" (m)
- : "r" (val)
+ "+r" (m),
+ "+r" (val) /* inhibit r15 overloading */
+ :
: "memory", "r0", "r1");
return retval;
@@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
" mov.b %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
- "+r" (m)
- : "r" (val)
+ "+r" (m),
+ "+r" (val) /* inhibit r15 overloading */
+ :
: "memory" , "r0", "r1");
return retval;
@@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-8, r15 \n\t" /* LOGIN */
- " mov.l @%1, %0 \n\t" /* load old value */
- " cmp/eq %0, %2 \n\t"
+ " mov.l @%3, %0 \n\t" /* load old value */
+ " cmp/eq %0, %1 \n\t"
" bf 1f \n\t" /* if not equal */
- " mov.l %3, @%1 \n\t" /* store new value */
+ " mov.l %2, @%3 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (retval)
- : "r" (m), "r" (old), "r" (new)
+ : "=&r" (retval),
+ "+r" (old), "+r" (new) /* old or new can be r15 */
+ : "r" (m)
: "memory" , "r0", "r1", "t");
return retval;
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 8887baf..15a8496 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -9,10 +9,6 @@
extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
- NODE_DATA(nid)->node_spanned_pages)
-
static inline int pfn_to_nid(unsigned long pfn)
{
int nid;
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 2a541dd..e25c4c7 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -150,7 +150,6 @@ struct thread_struct {
#define SR_USER (SR_MMU | SR_FD)
#define start_thread(_regs, new_pc, new_sp) \
- set_fs(USER_DS); \
_regs->sr = SR_USER; /* User mode. */ \
_regs->pc = new_pc - 4; /* Compensate syscall exit */ \
_regs->pc |= 1; /* Set SHmedia ! */ \
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 3daef8e..cbc47e6 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -298,6 +298,14 @@ enum {
SHDMA_SLAVE_SCIF4_RX,
SHDMA_SLAVE_SCIF5_TX,
SHDMA_SLAVE_SCIF5_RX,
+ SHDMA_SLAVE_USB0D0_TX,
+ SHDMA_SLAVE_USB0D0_RX,
+ SHDMA_SLAVE_USB0D1_TX,
+ SHDMA_SLAVE_USB0D1_RX,
+ SHDMA_SLAVE_USB1D0_TX,
+ SHDMA_SLAVE_USB1D0_RX,
+ SHDMA_SLAVE_USB1D1_TX,
+ SHDMA_SLAVE_USB1D1_RX,
SHDMA_SLAVE_SDHI0_TX,
SHDMA_SLAVE_SDHI0_RX,
SHDMA_SLAVE_SDHI1_TX,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 0333fe9..134a397 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -93,6 +93,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x36,
}, {
+ .slave_id = SHDMA_SLAVE_USB0D0_TX,
+ .addr = 0xA4D80100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D0_RX,
+ .addr = 0xA4D80100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_TX,
+ .addr = 0xA4D80120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_RX,
+ .addr = 0xA4D80120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_TX,
+ .addr = 0xA4D90100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_RX,
+ .addr = 0xA4D90100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_TX,
+ .addr = 0xA4D90120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_RX,
+ .addr = 0xA4D90120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,
.chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b473f0c..aaf6d59 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread);
void start_thread(struct pt_regs *regs, unsigned long new_pc,
unsigned long new_sp)
{
- set_fs(USER_DS);
-
regs->pr = 0;
regs->sr = SR_FD;
regs->pc = new_pc;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 5241146..1157251 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter)
{
unsigned int cache_type = (unsigned int)file->private;
struct cache_info *cache;
- unsigned int waysize, way, cache_size;
- unsigned long ccr, base;
- static unsigned long addrstart = 0;
+ unsigned int waysize, way;
+ unsigned long ccr;
+ unsigned long addrstart = 0;
/*
* Go uncached immediately so we don't skew the results any
@@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter)
}
if (cache_type == CACHE_TYPE_DCACHE) {
- base = CACHE_OC_ADDRESS_ARRAY;
+ addrstart = CACHE_OC_ADDRESS_ARRAY;
cache = &current_cpu_data.dcache;
} else {
- base = CACHE_IC_ADDRESS_ARRAY;
+ addrstart = CACHE_IC_ADDRESS_ARRAY;
cache = &current_cpu_data.icache;
}
- /*
- * Due to the amount of data written out (depending on the cache size),
- * we may be iterated over multiple times. In this case, keep track of
- * the entry position in addrstart, and rewind it when we've hit the
- * end of the cache.
- *
- * Likewise, the same code is used for multiple caches, so care must
- * be taken for bouncing addrstart back and forth so the appropriate
- * cache is hit.
- */
- cache_size = cache->ways * cache->sets * cache->linesz;
- if (((addrstart & 0xff000000) != base) ||
- (addrstart & 0x00ffffff) > cache_size)
- addrstart = base;
-
waysize = cache->sets;
/*
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index af32e17..253986b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,7 +26,6 @@ config SPARC
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
select USE_GENERIC_SMP_HELPERS if SMP
@@ -528,6 +527,23 @@ config PCI_DOMAINS
config PCI_SYSCALL
def_bool PCI
+config PCIC_PCI
+ bool
+ depends on PCI && SPARC32 && !SPARC_LEON
+ default y
+
+config LEON_PCI
+ bool
+ depends on PCI && SPARC_LEON
+ default y
+
+config GRPCI2
+ bool "GRPCI2 Host Bridge Support"
+ depends on LEON_PCI
+ default y
+ help
+ Say Y here to include the GRPCI2 Host Bridge Driver.
+
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 482c79e..7440915 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port)
return sun_fdc->data_82072;
case 7: /* FD_DIR */
return sun_read_dir();
- };
+ }
panic("sun_82072_fd_inb: How did I get here?");
}
@@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
case 4: /* FD_STATUS */
sun_fdc->status_82072 = value;
break;
- };
+ }
return;
}
@@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port)
return sun_fdc->data_82077;
case 7: /* FD_DIR */
return sun_read_dir();
- };
+ }
panic("sun_82077_fd_inb: How did I get here?");
}
@@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
case 3: /* FD_TDR */
sun_fdc->tapectl_82077 = value;
break;
- };
+ }
return;
}
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 6597ce8..bcef1f5 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port)
case 7: /* FD_DIR */
/* XXX: Is DCL on 0x80 in sun4m? */
return sbus_readb(&sun_fdc->dir_82077);
- };
+ }
panic("sun_82072_fd_inb: How did I get here?");
}
@@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
case 4: /* FD_STATUS */
sbus_writeb(value, &sun_fdc->status_82077);
break;
- };
+ }
return;
}
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 6bdaf1e..a4e457f 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -318,6 +318,9 @@ struct device_node;
extern unsigned int leon_build_device_irq(unsigned int real_irq,
irq_flow_handler_t flow_handler,
const char *name, int do_ack);
+extern void leon_update_virq_handling(unsigned int virq,
+ irq_flow_handler_t flow_handler,
+ const char *name, int do_ack);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_init_timers(irq_handler_t counter_fn);
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
new file mode 100644
index 0000000..42b4b31
--- /dev/null
+++ b/arch/sparc/include/asm/leon_pci.h
@@ -0,0 +1,21 @@
+/*
+ * asm/leon_pci.h
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ */
+
+#ifndef _ASM_LEON_PCI_H_
+#define _ASM_LEON_PCI_H_
+
+/* PCI related definitions */
+struct leon_pci_info {
+ struct pci_ops *ops;
+ struct resource io_space;
+ struct resource mem_space;
+ int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
+};
+
+extern void leon_pci_init(struct platform_device *ofdev,
+ struct leon_pci_info *info);
+
+#endif /* _ASM_LEON_PCI_H_ */
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index e8c6487..99d9b9f 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -8,8 +8,6 @@
extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 332ac9a..862e3ce 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -47,7 +47,31 @@ extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
#endif /* __KERNEL__ */
+#ifndef CONFIG_LEON_PCI
/* generic pci stuff */
#include <asm-generic/pci.h>
+#else
+/*
+ * On LEON, PCI memory space is mapped 1:1 with the physical address space.
+ *
+ * I/O space is located in the low 64Kbytes of PCI I/O space. The I/O
+ * addresses are converted into virtual CPU addresses that are mapped by the
+ * MMU to the PCI host's I/O space window, which the host controller
+ * translates to the low 64Kbytes.
+ */
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region);
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return PCI_IRQ_NONE;
+}
+#endif
#endif /* __SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h
index 7eb5d78..6676cbc 100644
--- a/arch/sparc/include/asm/pcic.h
+++ b/arch/sparc/include/asm/pcic.h
@@ -29,7 +29,7 @@ struct linux_pcic {
int pcic_imdim;
};
-#ifdef CONFIG_PCI
+#ifdef CONFIG_PCIC_PCI
extern int pcic_present(void);
extern int pcic_probe(void);
extern void pci_time_init(void);
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 47a7e86..aba1609 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -220,7 +220,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
switch (size) {
case 4:
return xchg_u32(ptr, x);
- };
+ }
__xchg_called_with_bad_pointer();
return x;
}
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 3c96d3b..10bcabc 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -234,7 +234,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
return xchg32(ptr, x);
case 8:
return xchg64(ptr, x);
- };
+ }
__xchg_called_with_bad_pointer();
return x;
}
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 9cff270..b90b4a1 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -73,7 +73,9 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
obj-y += dma.o
-obj-$(CONFIG_SPARC32_PCI) += pcic.o
+obj-$(CONFIG_PCIC_PCI) += pcic.o
+obj-$(CONFIG_LEON_PCI) += leon_pci.o
+obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o
obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 1e34f29..caef9de 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 8505e0a..acf5151 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
break;
default:
panic("Can't set AUXIO register on this machine.");
- };
+ }
spin_unlock_irqrestore(&auxio_lock, flags);
}
EXPORT_SYMBOL(set_auxio);
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 668c7be..5f45026 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -664,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va
case 0x0:
bp->interleave = 16;
break;
- };
+ }
/* UK[10] is reserved, and UK[11] is not set for the SDRAM
* bank size definition.
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 8341963..9fe08a1 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -229,7 +229,7 @@ real_irq_entry:
#ifdef CONFIG_SMP
.globl patchme_maybe_smp_msg
- cmp %l7, 12
+ cmp %l7, 11
patchme_maybe_smp_msg:
bgu maybe_smp4m_msg
nop
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
- sll %o2, 28, %o2 ! shift for simpler checks below
+ sll %o3, 28, %o2 ! shift for simpler checks below
maybe_smp4m_msg_check_single:
andcc %o2, 0x1, %g0
beq,a maybe_smp4m_msg_check_mask
@@ -1604,7 +1604,7 @@ restore_current:
retl
nop
-#ifdef CONFIG_PCI
+#ifdef CONFIG_PCIC_PCI
#include <asm/pcic.h>
.align 4
@@ -1650,7 +1650,7 @@ pcic_nmi_trap_patch:
rd %psr, %l0
.word 0
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCIC_PCI */
.globl flushw_all
flushw_all:
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 2f538ac..d17255a 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -236,6 +236,21 @@ static unsigned int _leon_build_device_irq(struct platform_device *op,
return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}
+void leon_update_virq_handling(unsigned int virq,
+ irq_flow_handler_t flow_handler,
+ const char *name, int do_ack)
+{
+ unsigned long mask = (unsigned long)irq_get_chip_data(virq);
+
+ mask &= ~LEON_DO_ACK_HW;
+ if (do_ack)
+ mask |= LEON_DO_ACK_HW;
+
+ irq_set_chip_and_handler_name(virq, &leon_irq,
+ flow_handler, name);
+ irq_set_chip_data(virq, (void *)mask);
+}
+
void __init leon_init_timers(irq_handler_t counter_fn)
{
int irq, eirq;
@@ -361,6 +376,22 @@ void __init leon_init_timers(irq_handler_t counter_fn)
prom_halt();
}
+#ifdef CONFIG_SMP
+ {
+ unsigned long flags;
+
+ /*
+ * In SMP, sun4m adds an IPI handler to the IRQ trap handler that
+ * LEON must never take; sun4d and LEON overwrite the branch
+ * with a NOP.
+ */
+ local_irq_save(flags);
+ patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
+ local_flush_cache_all();
+ local_irq_restore(flags);
+ }
+#endif
+
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
LEON3_GPTIMER_EN |
LEON3_GPTIMER_RL |
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
new file mode 100644
index 0000000..a8a9a27
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci.c
@@ -0,0 +1,253 @@
+/*
+ * leon_pci.c: LEON Host PCI support
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ *
+ * Code is partially derived from pcic.c
+ */
+
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/leon.h>
+#include <asm/leon_pci.h>
+
+/* The LEON architecture does not rely on a BIOS or bootloader to set up
+ * PCI for us. The Linux generic routines are used to set up resources;
+ * reset values of configuration-space register settings are preserved.
+ */
+void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
+{
+ struct pci_bus *root_bus;
+
+ root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info);
+ if (root_bus) {
+ root_bus->resource[0] = &info->io_space;
+ root_bus->resource[1] = &info->mem_space;
+ root_bus->resource[2] = NULL;
+
+ /* Init all PCI devices into PCI tree */
+ pci_bus_add_devices(root_bus);
+
+ /* Setup IRQs of all devices using custom routines */
+ pci_fixup_irqs(pci_common_swizzle, info->map_irq);
+
+ /* Assign devices with resources */
+ pci_assign_unassigned_resources();
+ }
+}
+
+/* PCI memory and prefetchable memory are direct-mapped. However, I/O space is
+ * accessed through a window which is translated to the low 64KB of PCI space;
+ * the first 4KB is not used, so 60KB is available.
+ *
+ * This function is used by generic code to translate resource addresses into
+ * PCI addresses.
+ */
+void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct leon_pci_info *info = dev->bus->sysdata;
+
+ region->start = res->start;
+ region->end = res->end;
+
+ if (res->flags & IORESOURCE_IO) {
+ region->start -= (info->io_space.start - 0x1000);
+ region->end -= (info->io_space.start - 0x1000);
+ }
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+/* see pcibios_resource_to_bus() comment */
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region)
+{
+ struct leon_pci_info *info = dev->bus->sysdata;
+
+ res->start = region->start;
+ res->end = region->end;
+
+ if (res->flags & IORESOURCE_IO) {
+ res->start += (info->io_space.start - 0x1000);
+ res->end += (info->io_space.start - 0x1000);
+ }
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
+
+void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
+{
+ struct leon_pci_info *info = pbus->sysdata;
+ struct pci_dev *dev;
+ int i, has_io, has_mem;
+ u16 cmd;
+
+ /* Generic PCI bus probing sets these to point at
+ * &io{port,mem}_resource, which is wrong for us.
+ */
+ if (pbus->self == NULL) {
+ pbus->resource[0] = &info->io_space;
+ pbus->resource[1] = &info->mem_space;
+ pbus->resource[2] = NULL;
+ }
+
+ list_for_each_entry(dev, &pbus->devices, bus_list) {
+ /*
+ * We cannot rely on the bootloader having enabled I/O
+ * or memory access to PCI devices. Instead we enable it here
+ * if the device has BARs of the respective type.
+ */
+ has_io = has_mem = 0;
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ unsigned long f = dev->resource[i].flags;
+ if (f & IORESOURCE_IO)
+ has_io = 1;
+ else if (f & IORESOURCE_MEM)
+ has_mem = 1;
+ }
+ /* ROM BARs are mapped into 32-bit memory space */
+ if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
+ dev->resource[PCI_ROM_RESOURCE].flags |=
+ IORESOURCE_ROM_ENABLE;
+ has_mem = 1;
+ }
+ pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
+ if (has_io && !(cmd & PCI_COMMAND_IO)) {
+#ifdef CONFIG_PCI_DEBUG
+ printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
+ pci_name(dev));
+#endif
+ cmd |= PCI_COMMAND_IO;
+ pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
+ cmd);
+ }
+ if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
+#ifdef CONFIG_PCI_DEBUG
+ printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
+ "%s\n", pci_name(dev));
+#endif
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
+ cmd);
+ }
+ }
+}
+
+/*
+ * Other archs parse arguments here.
+ */
+char * __devinit pcibios_setup(char *str)
+{
+ return str;
+}
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ return pci_enable_resources(dev, mask);
+}
+
+struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+ /*
+ * Currently the OpenBoot nodes are not connected with the PCI device,
+ * this is because the LEON PROM does not create PCI nodes. Eventually
+ * this will change and the same approach as pcic.c can be used to
+ * match PROM nodes with pci devices.
+ */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_device_to_OF_node);
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+#ifdef CONFIG_PCI_DEBUG
+ printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq,
+ pci_name(dev));
+#endif
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/* in/out routines taken from pcic.c
+ *
+ * This probably belongs here rather than ioport.c because
+ * we do not want this crud linked into SBus kernels.
+ * Also, think for a moment about likes of floppy.c that
+ * include architecture specific parts. They may want to redefine ins/outs.
+ *
+ * We do not use horrible macros here because we want to
+ * advance pointer by sizeof(size).
+ */
+void outsb(unsigned long addr, const void *src, unsigned long count)
+{
+ while (count) {
+ count -= 1;
+ outb(*(const char *)src, addr);
+ src += 1;
+ /* addr += 1; */
+ }
+}
+EXPORT_SYMBOL(outsb);
+
+void outsw(unsigned long addr, const void *src, unsigned long count)
+{
+ while (count) {
+ count -= 2;
+ outw(*(const short *)src, addr);
+ src += 2;
+ /* addr += 2; */
+ }
+}
+EXPORT_SYMBOL(outsw);
+
+void outsl(unsigned long addr, const void *src, unsigned long count)
+{
+ while (count) {
+ count -= 4;
+ outl(*(const long *)src, addr);
+ src += 4;
+ /* addr += 4; */
+ }
+}
+EXPORT_SYMBOL(outsl);
+
+void insb(unsigned long addr, void *dst, unsigned long count)
+{
+ while (count) {
+ count -= 1;
+ *(unsigned char *)dst = inb(addr);
+ dst += 1;
+ /* addr += 1; */
+ }
+}
+EXPORT_SYMBOL(insb);
+
+void insw(unsigned long addr, void *dst, unsigned long count)
+{
+ while (count) {
+ count -= 2;
+ *(unsigned short *)dst = inw(addr);
+ dst += 2;
+ /* addr += 2; */
+ }
+}
+EXPORT_SYMBOL(insw);
+
+void insl(unsigned long addr, void *dst, unsigned long count)
+{
+ while (count) {
+ count -= 4;
+ /*
+ * XXX I am sure we are in for an unaligned trap here.
+ */
+ *(unsigned long *)dst = inl(addr);
+ dst += 4;
+ /* addr += 4; */
+ }
+}
+EXPORT_SYMBOL(insl);
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
new file mode 100644
index 0000000..44dc093
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -0,0 +1,897 @@
+/*
+ * leon_pci_grpci2.c: GRPCI2 Host PCI driver
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ *
+ */
+
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/leon.h>
+#include <asm/vaddrs.h>
+#include <asm/sections.h>
+#include <asm/leon_pci.h>
+
+#include "irq.h"
+
+struct grpci2_barcfg {
+ unsigned long pciadr; /* PCI Space Address */
+ unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */
+};
+
+/* Device Node Configuration options:
+ * - barcfgs : Custom Configuration of Host's 6 target BARs
+ * - irq_mask : Limit which PCI interrupts are enabled
+ * - do_reset : Force PCI Reset on startup
+ *
+ * barcfgs
+ * =======
+ *
+ * Optional custom Target BAR configuration (see struct grpci2_barcfg). All
+ * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes)
+ *
+ * -1 means not configured (let host driver do default setup).
+ *
+ * [i*2+0] = PCI Address of BAR[i] on target interface
+ * [i*2+1] = Accessing the PCI address of BAR[i] results in this AMBA address
+ *
+ *
+ * irq_mask
+ * ========
+ *
+ * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
+ * all are enabled. Use this when PCI interrupt pins are floating on PCB.
+ * int, len=4.
+ * bit0 = PCI INTA#
+ * bit1 = PCI INTB#
+ * bit2 = PCI INTC#
+ * bit3 = PCI INTD#
+ *
+ *
+ * reset
+ * =====
+ *
+ * Force PCI reset on startup. int, len=4
+ */
+
+/* Enable Debugging Configuration Space Access */
+#undef GRPCI2_DEBUG_CFGACCESS
+
+/*
+ * GRPCI2 APB Register MAP
+ */
+struct grpci2_regs {
+ unsigned int ctrl; /* 0x00 Control */
+ unsigned int sts_cap; /* 0x04 Status / Capabilities */
+ int res1; /* 0x08 */
+ unsigned int io_map; /* 0x0C I/O Map address */
+ unsigned int dma_ctrl; /* 0x10 DMA */
+ unsigned int dma_bdbase; /* 0x14 DMA */
+ int res2[2]; /* 0x18 */
+ unsigned int bars[6]; /* 0x20 read-only PCI BARs */
+ int res3[2]; /* 0x38 */
+ unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */
+
+ /* PCI Trace Buffer Registers (OPTIONAL) */
+ unsigned int t_ctrl; /* 0x80 */
+ unsigned int t_cnt; /* 0x84 */
+ unsigned int t_adpat; /* 0x88 */
+ unsigned int t_admask; /* 0x8C */
+ unsigned int t_sigpat; /* 0x90 */
+ unsigned int t_sigmask; /* 0x94 */
+ unsigned int t_adstate; /* 0x98 */
+ unsigned int t_sigstate; /* 0x9C */
+};
+
+#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+
+#define CTRL_BUS_BIT 16
+
+#define CTRL_RESET (1<<31)
+#define CTRL_SI (1<<27)
+#define CTRL_PE (1<<26)
+#define CTRL_EI (1<<25)
+#define CTRL_ER (1<<24)
+#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
+#define CTRL_HOSTINT 0xf
+
+#define STS_HOST_BIT 31
+#define STS_MST_BIT 30
+#define STS_TAR_BIT 29
+#define STS_DMA_BIT 28
+#define STS_DI_BIT 27
+#define STS_HI_BIT 26
+#define STS_IRQMODE_BIT 24
+#define STS_TRACE_BIT 23
+#define STS_CFGERRVALID_BIT 20
+#define STS_CFGERR_BIT 19
+#define STS_INTTYPE_BIT 12
+#define STS_INTSTS_BIT 8
+#define STS_FDEPTH_BIT 2
+#define STS_FNUM_BIT 0
+
+#define STS_HOST (1<<STS_HOST_BIT)
+#define STS_MST (1<<STS_MST_BIT)
+#define STS_TAR (1<<STS_TAR_BIT)
+#define STS_DMA (1<<STS_DMA_BIT)
+#define STS_DI (1<<STS_DI_BIT)
+#define STS_HI (1<<STS_HI_BIT)
+#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
+#define STS_TRACE (1<<STS_TRACE_BIT)
+#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
+#define STS_CFGERR (1<<STS_CFGERR_BIT)
+#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT)
+#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
+#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
+#define STS_FNUM (0x3<<STS_FNUM_BIT)
+
+#define STS_ISYSERR (1<<17)
+#define STS_IDMA (1<<16)
+#define STS_IDMAERR (1<<15)
+#define STS_IMSTABRT (1<<14)
+#define STS_ITGTABRT (1<<13)
+#define STS_IPARERR (1<<12)
+
+#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
+
+struct grpci2_bd_chan {
+ unsigned int ctrl; /* 0x00 DMA Control */
+ unsigned int nchan; /* 0x04 Next DMA Channel Address */
+ unsigned int nbd; /* 0x08 Next Data Descriptor in chan */
+ unsigned int res; /* 0x0C Reserved */
+};
+
+#define BD_CHAN_EN 0x80000000
+#define BD_CHAN_TYPE 0x00300000
+#define BD_CHAN_BDCNT 0x0000ffff
+#define BD_CHAN_EN_BIT 31
+#define BD_CHAN_TYPE_BIT 20
+#define BD_CHAN_BDCNT_BIT 0
+
+struct grpci2_bd_data {
+ unsigned int ctrl; /* 0x00 DMA Data Control */
+ unsigned int pci_adr; /* 0x04 PCI Start Address */
+ unsigned int ahb_adr; /* 0x08 AHB Start address */
+ unsigned int next; /* 0x0C Next Data Descriptor in chan */
+};
+
+#define BD_DATA_EN 0x80000000
+#define BD_DATA_IE 0x40000000
+#define BD_DATA_DR 0x20000000
+#define BD_DATA_TYPE 0x00300000
+#define BD_DATA_ER 0x00080000
+#define BD_DATA_LEN 0x0000ffff
+#define BD_DATA_EN_BIT 31
+#define BD_DATA_IE_BIT 30
+#define BD_DATA_DR_BIT 29
+#define BD_DATA_TYPE_BIT 20
+#define BD_DATA_ER_BIT 19
+#define BD_DATA_LEN_BIT 0
+
+/* GRPCI2 Capability */
+struct grpci2_cap_first {
+ unsigned int ctrl;
+ unsigned int pci2ahb_map[6];
+ unsigned int ext2ahb_map;
+ unsigned int io_map;
+ unsigned int pcibar_size[6];
+};
+#define CAP9_CTRL_OFS 0
+#define CAP9_BAR_OFS 0x4
+#define CAP9_IOMAP_OFS 0x20
+#define CAP9_BARSIZE_OFS 0x24
+
+struct grpci2_priv {
+ struct leon_pci_info info; /* must be on top of this structure */
+ struct grpci2_regs *regs;
+ char irq;
+ char irq_mode; /* IRQ Mode from CAPSTS REG */
+ char bt_enabled;
+ char do_reset;
+ char irq_mask;
+ u32 pciid; /* PCI ID of Host */
+ unsigned char irq_map[4];
+
+ /* Virtual IRQ numbers */
+ unsigned int virq_err;
+ unsigned int virq_dma;
+
+ /* AHB PCI Windows */
+ unsigned long pci_area; /* MEMORY */
+ unsigned long pci_area_end;
+ unsigned long pci_io; /* I/O */
+ unsigned long pci_conf; /* CONFIGURATION */
+ unsigned long pci_conf_end;
+ unsigned long pci_io_va;
+
+ struct grpci2_barcfg tgtbars[6];
+};
+
+DEFINE_SPINLOCK(grpci2_dev_lock);
+struct grpci2_priv *grpci2priv;
+
+int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct grpci2_priv *priv = dev->bus->sysdata;
+ int irq_group;
+
+ /* Use default IRQ decoding on PCI BUS0 according to slot numbering */
+ irq_group = slot & 0x3;
+ pin = ((pin - 1) + irq_group) & 0x3;
+
+ return priv->irq_map[pin];
+}
+
+static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 *val)
+{
+ unsigned int *pci_conf;
+ unsigned long flags;
+ u32 tmp;
+
+ if (where & 0x3)
+ return -EINVAL;
+
+ if (bus == 0 && PCI_SLOT(devfn) != 0)
+ devfn += (0x8 * 6);
+
+ /* Select bus */
+ spin_lock_irqsave(&grpci2_dev_lock, flags);
+ REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
+ (bus << 16));
+ spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+
+ /* clear old status */
+ REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
+
+ pci_conf = (unsigned int *) (priv->pci_conf |
+ (devfn << 8) | (where & 0xfc));
+ tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
+
+ /* Wait until GRPCI2 signals that the CFG access is done; it should be
+ * done instantaneously unless a DMA operation is ongoing...
+ */
+ while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
+ ;
+
+ if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
+ *val = 0xffffffff;
+ } else {
+ /* Bus always little endian (unaffected by byte-swapping) */
+ *val = flip_dword(tmp);
+ }
+
+ return 0;
+}
+
+static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 *val)
+{
+ u32 v;
+ int ret;
+
+ if (where & 0x1)
+ return -EINVAL;
+ ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+ *val = 0xffff & (v >> (8 * (where & 0x3)));
+ return ret;
+}
+
+static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 *val)
+{
+ u32 v;
+ int ret;
+
+ ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+ *val = 0xff & (v >> (8 * (where & 3)));
+
+ return ret;
+}
+
+static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 val)
+{
+ unsigned int *pci_conf;
+ unsigned long flags;
+
+ if (where & 0x3)
+ return -EINVAL;
+
+ if (bus == 0 && PCI_SLOT(devfn) != 0)
+ devfn += (0x8 * 6);
+
+ /* Select bus */
+ spin_lock_irqsave(&grpci2_dev_lock, flags);
+ REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
+ (bus << 16));
+ spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+
+ /* clear old status */
+ REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
+
+ pci_conf = (unsigned int *) (priv->pci_conf |
+ (devfn << 8) | (where & 0xfc));
+ LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+
+ /* Wait until GRPCI2 signals that the CFG access is done; it should be
+ * done instantaneously unless a DMA operation is ongoing...
+ */
+ while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
+ ;
+
+ return 0;
+}
+
+static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 val)
+{
+ int ret;
+ u32 v;
+
+ if (where & 0x1)
+ return -EINVAL;
+ ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
+ if (ret)
+ return ret;
+ v = (v & ~(0xffff << (8 * (where & 0x3)))) |
+ ((0xffff & val) << (8 * (where & 0x3)));
+ return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
+}
+
+static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
+ unsigned int devfn, int where, u32 val)
+{
+ int ret;
+ u32 v;
+
+ ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+ if (ret != 0)
+ return ret;
+ v = (v & ~(0xff << (8 * (where & 0x3)))) |
+ ((0xff & val) << (8 * (where & 0x3)));
+ return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
+}
+
+/* Read from Configuration Space. When entering here the PCI layer has taken
+ * the pci_lock spinlock and IRQ is off.
+ */
+static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct grpci2_priv *priv = grpci2priv;
+ unsigned int busno = bus->number;
+ int ret;
+
+ if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
+ *val = ~0;
+ return 0;
+ }
+
+ switch (size) {
+ case 1:
+ ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
+ break;
+ case 2:
+ ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
+ break;
+ case 4:
+ ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+#ifdef GRPCI2_DEBUG_CFGACCESS
+ printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
+ "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+ *val, size);
+#endif
+
+ return ret;
+}
+
+/* Write to Configuration Space. When entering here the PCI layer has taken
+ * the pci_lock spinlock and IRQ is off.
+ */
+static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct grpci2_priv *priv = grpci2priv;
+ unsigned int busno = bus->number;
+
+ if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
+ return 0;
+
+#ifdef GRPCI2_DEBUG_CFGACCESS
+ printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
+ "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ where, size, val);
+#endif
+
+ switch (size) {
+ default:
+ return -EINVAL;
+ case 1:
+ return grpci2_cfg_w8(priv, busno, devfn, where, val);
+ case 2:
+ return grpci2_cfg_w16(priv, busno, devfn, where, val);
+ case 4:
+ return grpci2_cfg_w32(priv, busno, devfn, where, val);
+ }
+}
+
+static struct pci_ops grpci2_ops = {
+ .read = grpci2_read_config,
+ .write = grpci2_write_config,
+};
+
+/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
+ * 3, where all PCI interrupts have a separate IRQ on the system IRQ controller,
+ * this is not needed and the standard IRQ controller can be used.
+ */
+
+static void grpci2_mask_irq(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned int irqidx;
+ struct grpci2_priv *priv = grpci2priv;
+
+ irqidx = (unsigned int)data->chip_data - 1;
+ if (irqidx > 3) /* only mask PCI interrupts here */
+ return;
+
+ spin_lock_irqsave(&grpci2_dev_lock, flags);
+ REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
+ spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+}
+
+static void grpci2_unmask_irq(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned int irqidx;
+ struct grpci2_priv *priv = grpci2priv;
+
+ irqidx = (unsigned int)data->chip_data - 1;
+ if (irqidx > 3) /* only unmask PCI interrupts here */
+ return;
+
+ spin_lock_irqsave(&grpci2_dev_lock, flags);
+ REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
+ spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+}
+
+static unsigned int grpci2_startup_irq(struct irq_data *data)
+{
+ grpci2_unmask_irq(data);
+ return 0;
+}
+
+static void grpci2_shutdown_irq(struct irq_data *data)
+{
+ grpci2_mask_irq(data);
+}
+
+static struct irq_chip grpci2_irq = {
+ .name = "grpci2",
+ .irq_startup = grpci2_startup_irq,
+ .irq_shutdown = grpci2_shutdown_irq,
+ .irq_mask = grpci2_mask_irq,
+ .irq_unmask = grpci2_unmask_irq,
+};
+
+/* Handle one or multiple IRQs from the PCI core */
+static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct grpci2_priv *priv = grpci2priv;
+ int i, ack = 0;
+ unsigned int ctrl, sts_cap, pci_ints;
+
+ ctrl = REGLOAD(priv->regs->ctrl);
+ sts_cap = REGLOAD(priv->regs->sts_cap);
+
+ /* Error Interrupt? */
+ if (sts_cap & STS_ERR_IRQ) {
+ generic_handle_irq(priv->virq_err);
+ ack = 1;
+ }
+
+ /* PCI Interrupt? */
+ pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
+ if (pci_ints) {
+ /* Call respective PCI Interrupt handler */
+ for (i = 0; i < 4; i++) {
+ if (pci_ints & (1 << i))
+ generic_handle_irq(priv->irq_map[i]);
+ }
+ ack = 1;
+ }
+
+ /*
+ * Decode the DMA interrupt only when shared with Err and PCI INTX#; when
+ * the DMA has a unique IRQ the DMA interrupts don't end up here, they
+ * go directly to the DMA ISR.
+ */
+ if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
+ generic_handle_irq(priv->virq_dma);
+ ack = 1;
+ }
+
+ /*
+ * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
+ * Controller, this must be done after IRQ sources have been handled to
+ * avoid double IRQ generation
+ */
+ if (ack)
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
+}
+
+/* Create a virtual IRQ */
+static unsigned int grpci2_build_device_irq(unsigned int irq)
+{
+ unsigned int virq = 0, pil;
+
+ pil = 1 << 8;
+ virq = irq_alloc(irq, pil);
+ if (virq == 0)
+ goto out;
+
+ irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
+ "pcilvl");
+ irq_set_chip_data(virq, (void *)irq);
+
+out:
+ return virq;
+}
+
+void grpci2_hw_init(struct grpci2_priv *priv)
+{
+ u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
+ struct grpci2_regs *regs = priv->regs;
+ int i;
+ struct grpci2_barcfg *barcfg = priv->tgtbars;
+
+ /* Reset any earlier setup */
+ if (priv->do_reset) {
+ printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
+ REGSTORE(regs->ctrl, CTRL_RESET);
+ ssleep(1); /* Wait for boards to settle */
+ }
+ REGSTORE(regs->ctrl, 0);
+ REGSTORE(regs->sts_cap, ~0); /* Clear Status */
+ REGSTORE(regs->dma_ctrl, 0);
+ REGSTORE(regs->dma_bdbase, 0);
+
+ /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
+ REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
+
+ /* Set a 1:1 mapping between AHB -> PCI memory space, for all masters.
+ * Each AHB master has its own mapping registers. Max 16 AHB masters.
+ */
+ for (i = 0; i < 16; i++)
+ REGSTORE(regs->ahbmst_map[i], priv->pci_area);
+
+ /* Get the GRPCI2 Host PCI ID */
+ grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
+
+ /* Get address to first (always defined) capability structure */
+ grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
+
+ /* Enable/Disable Byte twisting */
+ grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
+ io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
+ grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
+
+ /* Setup the Host's PCI Target BARs for other peripherals to access,
+ * and do DMA to the host's memory. The target BARs can be sized and
+ * enabled individually.
+ *
+ * User may set custom target BARs, but default is:
+ * The first BAR is used to map kernel low main memory (DMA is part of the
+ * normal region on sparc, which is SRMMU_MAXMEM big) 1:1 to the
+ * PCI bus; the other BARs are disabled. We assume that the first BAR
+ * is always available.
+ */
+ for (i = 0; i < 6; i++) {
+ if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
+ /* Target BARs must have the proper alignment */
+ ahbadr = barcfg[i].ahbadr;
+ pciadr = barcfg[i].pciadr;
+ bar_sz = ((pciadr - 1) & ~pciadr) + 1;
+ } else {
+ if (i == 0) {
+ /* Map main memory */
+ bar_sz = 0xf0000008; /* 256MB prefetchable */
+ ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
+ (unsigned long) &_end));
+ pciadr = ahbadr;
+ } else {
+ bar_sz = 0;
+ ahbadr = 0;
+ pciadr = 0;
+ }
+ }
+ grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
+ grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
+ grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+ printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
+ i, pciadr, ahbadr);
+ }
+
+ /* set as bus master and enable pci memory responses */
+ grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
+ data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
+
+ /* Enable Error response (CPU-TRAP) on illegal memory access. */
+ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
+}
+
+static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
+{
+ printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
+ return IRQ_NONE;
+}
+
+/* Handle GRPCI2 Error Interrupt */
+static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
+{
+ struct grpci2_priv *priv = arg;
+ struct grpci2_regs *regs = priv->regs;
+ unsigned int status;
+
+ status = REGLOAD(regs->sts_cap);
+ if ((status & STS_ERR_IRQ) == 0)
+ return IRQ_NONE;
+
+ if (status & STS_IPARERR)
+ printk(KERN_ERR "GRPCI2: Parity Error\n");
+
+ if (status & STS_ITGTABRT)
+ printk(KERN_ERR "GRPCI2: Target Abort\n");
+
+ if (status & STS_IMSTABRT)
+ printk(KERN_ERR "GRPCI2: Master Abort\n");
+
+ if (status & STS_ISYSERR)
+ printk(KERN_ERR "GRPCI2: System Error\n");
+
+ /* Clear handled INT TYPE IRQs */
+ REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit grpci2_of_probe(struct platform_device *ofdev)
+{
+ struct grpci2_regs *regs;
+ struct grpci2_priv *priv;
+ int err, i, len;
+ const int *tmp;
+ unsigned int capability;
+
+ if (grpci2priv) {
+ printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
+ return -ENODEV;
+ }
+
+ if (ofdev->num_resources < 3) {
+ printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
+ return -EIO;
+ }
+
+ /* Find Device Address */
+ regs = of_ioremap(&ofdev->resource[0], 0,
+ resource_size(&ofdev->resource[0]),
+ "grlib-grpci2 regs");
+ if (regs == NULL) {
+ printk(KERN_ERR "GRPCI2: ioremap failed\n");
+ return -EIO;
+ }
+
+ /*
+ * Check that we're in Host Slot and that we can act as a Host Bridge
+ * and not only as a target.
+ */
+ capability = REGLOAD(regs->sts_cap);
+ if ((capability & STS_HOST) || !(capability & STS_MST)) {
+ printk(KERN_INFO "GRPCI2: not in host system slot\n");
+ err = -EIO;
+ goto err1;
+ }
+
+ priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
+ if (grpci2priv == NULL) {
+ err = -ENOMEM;
+ goto err1;
+ }
+ memset(grpci2priv, 0, sizeof(*grpci2priv));
+ priv->regs = regs;
+ priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
+ priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
+
+ printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
+
+ /* Byte twisting should be made configurable from kernel command line */
+ priv->bt_enabled = 1;
+
+ /* Let user do custom Target BAR assignment */
+ tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
+ if (tmp && (len == 2*4*6))
+ memcpy(priv->tgtbars, tmp, 2*4*6);
+ else
+ memset(priv->tgtbars, -1, 2*4*6);
+
+ /* Limit IRQ unmasking in irq_mode 2 and 3 */
+ tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
+ if (tmp && (len == 4))
+ priv->irq_mask = *tmp;
+ else
+ priv->irq_mask = 0xf;
+
+ /* Optional PCI reset. Force PCI reset on startup */
+ tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
+ if (tmp && (len == 4))
+ priv->do_reset = *tmp;
+ else
+ priv->do_reset = 0;
+
+ /* Find PCI Memory, I/O and Configuration Space Windows */
+ priv->pci_area = ofdev->resource[1].start;
+ priv->pci_area_end = ofdev->resource[1].end+1;
+ priv->pci_io = ofdev->resource[2].start;
+ priv->pci_conf = ofdev->resource[2].start + 0x10000;
+ priv->pci_conf_end = priv->pci_conf + 0x10000;
+ priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
+ if (!priv->pci_io_va) {
+ err = -EIO;
+ goto err2;
+ }
+
+ printk(KERN_INFO
+ "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
+ " I/O SPACE [0x%08lx - 0x%08lx]\n"
+ " CONFIG SPACE [0x%08lx - 0x%08lx]\n",
+ priv->pci_area, priv->pci_area_end-1,
+ priv->pci_io, priv->pci_conf-1,
+ priv->pci_conf, priv->pci_conf_end-1);
+
+ /*
+ * I/O Space resources in the I/O Window mapped into Virtual Address Space.
+ * We never use the low 4KB because some devices seem to have problems
+ * using address 0.
+ */
+ memset(&priv->info.io_space, 0, sizeof(struct resource));
+ priv->info.io_space.name = "GRPCI2 PCI I/O Space";
+ priv->info.io_space.start = priv->pci_io_va + 0x1000;
+ priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
+ priv->info.io_space.flags = IORESOURCE_IO;
+
+ /*
+ * GRPCI2 has no prefetchable memory, map everything as
+ * non-prefetchable memory
+ */
+ memset(&priv->info.mem_space, 0, sizeof(struct resource));
+ priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
+ priv->info.mem_space.start = priv->pci_area;
+ priv->info.mem_space.end = priv->pci_area_end - 1;
+ priv->info.mem_space.flags = IORESOURCE_MEM;
+
+ if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
+ goto err3;
+ if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
+ goto err4;
+
+ grpci2_hw_init(priv);
+
+ /*
+ * Get the PCI Interrupt to System IRQ mapping and set up IRQ handling.
+ * The Error IRQ is always on PCI INTA.
+ */
+ if (priv->irq_mode < 2) {
+ /* All PCI interrupts are shared using the same system IRQ */
+ leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
+ "pcilvl", 0);
+
+ priv->irq_map[0] = grpci2_build_device_irq(1);
+ priv->irq_map[1] = grpci2_build_device_irq(2);
+ priv->irq_map[2] = grpci2_build_device_irq(3);
+ priv->irq_map[3] = grpci2_build_device_irq(4);
+
+ priv->virq_err = grpci2_build_device_irq(5);
+ if (priv->irq_mode & 1)
+ priv->virq_dma = ofdev->archdata.irqs[1];
+ else
+ priv->virq_dma = grpci2_build_device_irq(6);
+
+ /* Enable IRQs on LEON IRQ controller */
+ err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
+ "GRPCI2_JUMP", priv);
+ if (err)
+ printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
+ } else {
+ /* Each PCI interrupt has its own unique system IRQ */
+ for (i = 0; i < 4; i++) {
+ /* Make LEON IRQ layer handle level IRQ by acking */
+ leon_update_virq_handling(ofdev->archdata.irqs[i],
+ handle_fasteoi_irq, "pcilvl",
+ 1);
+ priv->irq_map[i] = ofdev->archdata.irqs[i];
+ }
+ priv->virq_err = priv->irq_map[0];
+ if (priv->irq_mode & 1)
+ priv->virq_dma = ofdev->archdata.irqs[4];
+ else
+ priv->virq_dma = priv->irq_map[0];
+
+ /* Unmask all PCI interrupts, request_irq will not do that */
+ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
+ }
+
+ /* Setup IRQ handler for non-configuration space access errors */
+ err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
+ "GRPCI2_ERR", priv);
+ if (err) {
+ printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
+ goto err5;
+ }
+
+ /*
+ * Enable Error Interrupts. PCI interrupts are unmasked once request_irq
+ * is called by the PCI device drivers.
+ */
+ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
+
+ /* Init common layer and scan buses */
+ priv->info.ops = &grpci2_ops;
+ priv->info.map_irq = grpci2_map_irq;
+ leon_pci_init(ofdev, &priv->info);
+
+ return 0;
+
+err5:
+ release_resource(&priv->info.io_space);
+err4:
+ release_resource(&priv->info.mem_space);
+err3:
+ err = -ENOMEM;
+ iounmap((void *)priv->pci_io_va);
+err2:
+ kfree(priv);
+err1:
+ of_iounmap(&ofdev->resource[0], regs,
+ resource_size(&ofdev->resource[0]));
+ return err;
+}
+
+static struct of_device_id grpci2_of_match[] = {
+ {
+ .name = "GAISLER_GRPCI2",
+ },
+ {
+ .name = "01_07c",
+ },
+ {},
+};
+
+static struct platform_driver grpci2_of_driver = {
+ .driver = {
+ .name = "grpci2",
+ .owner = THIS_MODULE,
+ .of_match_table = grpci2_of_match,
+ },
+ .probe = grpci2_of_probe,
+};
+
+static int __init grpci2_init(void)
+{
+ return platform_driver_register(&grpci2_of_driver);
+}
+
+subsys_initcall(grpci2_init);
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 8d348c4..99ba5ba 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -214,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
me->name,
(int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
return -ENOEXEC;
- };
+ }
}
return 0;
}
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6e3874b..a689598 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
case 4:
*value = ret & 0xffffffff;
break;
- };
+ }
return PCIBIOS_SUCCESSFUL;
@@ -456,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
default:
break;
- };
+ }
}
if (!saw_io || !saw_mem) {
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 283fbc3..f030b02 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
default:
type_string = "ECC Error";
break;
- };
+ }
printk("%s: IOMMU Error, type[%s]\n",
pbm->name, type_string);
@@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
default:
type_string = "ECC Error";
break;
- };
+ }
printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
"sz(%dK) vpg(%08lx)]\n",
pbm->name, i, type_string,
@@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
default:
chipset_name = "SCHIZO";
break;
- };
+ }
/* For SCHIZO, three OBP regs:
* 1) PBM controller regs
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index 570b98f..40e4936 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
- };
+ }
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index fe2af66..8db48e8 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm,
default:
type_str = "ECC Error";
break;
- };
+ }
printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
pbm->name, type_str);
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ca32d1..a161b9c 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
default:
return;
- };
+ }
val = upa_readq(cfg_reg);
if (val & (1UL << 14UL)) {
@@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
- };
+ }
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 3249d3f..d26e1f6 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -267,7 +267,7 @@ void __init setup_arch(char **cmdline_p)
default:
printk("UNKNOWN!\n");
break;
- };
+ }
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f3b6850..c4dd099 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -209,7 +209,7 @@ void __init per_cpu_patch(void)
default:
prom_printf("Unknown cpu type, halting.\n");
prom_halt();
- };
+ }
*(unsigned int *) (addr + 0) = insns[0];
wmb();
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index d5b3958..21b1253 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -114,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
printk("UNKNOWN!\n");
BUG();
break;
- };
+ }
}
void cpu_panic(void)
@@ -374,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
printk("UNKNOWN!\n");
BUG();
break;
- };
+ }
}
/* Set this up early so that things like the scheduler can init
@@ -447,7 +447,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
printk("UNKNOWN!\n");
BUG();
break;
- };
+ }
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index a9ea60e..1d13c5b 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,10 +103,9 @@ static void sun4d_sbus_handler_irq(int sbusl)
sbil = (sbusl << 2);
/* Loop for each pending SBI */
- for (sbino = 0; bus_mask; sbino++) {
+ for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
unsigned int idx, mask;
- bus_mask >>= 1;
if (!(bus_mask & 1))
continue;
/* XXX This seems to ACK the irq twice. acquire_sbi()
@@ -118,19 +117,16 @@ static void sun4d_sbus_handler_irq(int sbusl)
mask &= (0xf << sbil);
/* Loop for each pending SBI slot */
- idx = 0;
slot = (1 << sbil);
- while (mask != 0) {
+ for (idx = 0; mask != 0; idx++, slot <<= 1) {
unsigned int pil;
struct irq_bucket *p;
- idx++;
- slot <<= 1;
if (!(mask & slot))
continue;
mask &= ~slot;
- pil = sun4d_encode_irq(sbino, sbil, idx);
+ pil = sun4d_encode_irq(sbino, sbusl, idx);
p = irq_map[pil];
while (p) {
@@ -218,10 +214,10 @@ static void sun4d_unmask_irq(struct irq_data *data)
#ifdef CONFIG_SMP
spin_lock_irqsave(&sun4d_imsk_lock, flags);
- cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq));
+ cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
- cc_set_imsk(cc_get_imsk() | ~(1 << real_irq));
+ cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
#endif
}
@@ -299,26 +295,68 @@ static void __init sun4d_load_profile_irqs(void)
}
}
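+/*
+ * Allocate a virtual IRQ for (real_irq, pil) and attach sun4d handler
+ * data routing it to the CPU that owns @board.
+ */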
+unsigned int _sun4d_build_device_irq(unsigned int real_irq,
+ unsigned int pil,
+ unsigned int board)
+{
+ struct sun4d_handler_data *handler_data;
+ unsigned int irq;
+
+ irq = irq_alloc(real_irq, pil);
+ if (irq == 0) {
+ prom_printf("IRQ: allocate for %d %d %d failed\n",
+ real_irq, pil, board);
+ goto err_out;
+ }
+
+ handler_data = irq_get_handler_data(irq);
+ if (unlikely(handler_data))
+ goto err_out;
+
+ handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
+ prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
+ prom_halt();
+ }
+ handler_data->cpuid = board_to_cpu[board];
+ handler_data->real_irq = real_irq;
+ irq_set_chip_and_handler_name(irq, &sun4d_irq,
+ handle_level_irq, "level");
+ irq_set_handler_data(irq, handler_data);
+
+err_out:
+ return irq;
+}
+
+
+
unsigned int sun4d_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
struct device_node *dp = op->dev.of_node;
- struct device_node *io_unit, *sbi = dp->parent;
+ struct device_node *board_parent, *bus = dp->parent;
+ char *bus_connection;
const struct linux_prom_registers *regs;
- struct sun4d_handler_data *handler_data;
unsigned int pil;
unsigned int irq;
int board, slot;
int sbusl;
- irq = 0;
- while (sbi) {
- if (!strcmp(sbi->name, "sbi"))
+ irq = real_irq;
+ while (bus) {
+ if (!strcmp(bus->name, "sbi")) {
+ bus_connection = "io-unit";
+ break;
+ }
+
+ if (!strcmp(bus->name, "bootbus")) {
+ bus_connection = "cpu-unit";
break;
+ }
- sbi = sbi->parent;
+ bus = bus->parent;
}
- if (!sbi)
+ if (!bus)
goto err_out;
regs = of_get_property(dp, "reg", NULL);
@@ -328,17 +366,19 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
slot = regs->which_io;
/*
- * If SBI's parent is not io-unit or the io-unit lacks
- * a "board#" property, something is very wrong.
+ * If the bus node's parent is not io-unit/cpu-unit, or the io-unit/cpu-unit
+ * lacks a "board#" property, something is very wrong.
*/
- if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
- printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
+ if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
+ printk(KERN_ERR "%s: Error, parent is not %s.\n",
+ bus->full_name, bus_connection);
goto err_out;
}
- io_unit = sbi->parent;
- board = of_getintprop_default(io_unit, "board#", -1);
+ board_parent = bus->parent;
+ board = of_getintprop_default(board_parent, "board#", -1);
if (board == -1) {
- printk("%s: Error, lacks board# property.\n", io_unit->full_name);
+ printk(KERN_ERR "%s: Error, lacks board# property.\n",
+ board_parent->full_name);
goto err_out;
}
@@ -348,29 +388,17 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
else
pil = real_irq;
- irq = irq_alloc(real_irq, pil);
- if (irq == 0)
- goto err_out;
-
- handler_data = irq_get_handler_data(irq);
- if (unlikely(handler_data))
- goto err_out;
-
- handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
- if (unlikely(!handler_data)) {
- prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
- prom_halt();
- }
- handler_data->cpuid = board_to_cpu[board];
- handler_data->real_irq = real_irq;
- irq_set_chip_and_handler_name(irq, &sun4d_irq,
- handle_level_irq, "level");
- irq_set_handler_data(irq, handler_data);
-
+ irq = _sun4d_build_device_irq(real_irq, pil, board);
err_out:
- return real_irq;
+ return irq;
}
+unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
+{
+ return _sun4d_build_device_irq(real_irq, real_irq, board);
+}
+
+
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
@@ -402,6 +430,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
unsigned int irq;
const u32 *reg;
int err;
+ int board;
dp = of_find_node_by_name(NULL, "cpu-unit");
if (!dp) {
@@ -414,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
* bootbus.
*/
reg = of_get_property(dp, "reg", NULL);
- of_node_put(dp);
if (!reg) {
prom_printf("sun4d_init_timers: No reg property\n");
prom_halt();
}
+ board = of_getintprop_default(dp, "board#", -1);
+ if (board == -1) {
+ prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
+ prom_halt();
+ }
+
+ of_node_put(dp);
+
res.start = reg[1];
res.end = reg[2] - 1;
res.flags = reg[0] & 0xff;
@@ -434,7 +470,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
master_l10_counter = &sun4d_timers->l10_cur_count;
- irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ);
+ irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 6db18c6..170cd8e 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -109,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa
default:
return -ENOSYS;
- };
+ }
return -ENOSYS;
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 96082d3..908b47a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -460,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
default:
err = -ENOSYS;
goto out;
- };
+ }
}
if (call <= MSGCTL) {
switch (call) {
@@ -481,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
default:
err = -ENOSYS;
goto out;
- };
+ }
}
if (call <= SHMCTL) {
switch (call) {
@@ -507,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
default:
err = -ENOSYS;
goto out;
- };
+ }
} else {
err = -ENOSYS;
}
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2b8d54b..1db6b18 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
case CLOCK_EVT_MODE_UNUSED:
WARN_ON(1);
break;
- };
+ }
}
static struct clock_event_device sparc64_clockevent = {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1ed547b..0cbdaa4 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type)
return "warning resumable";
default:
return "unknown";
- };
+ }
}
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index c752c4c..b2b019e 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
default:
BUG();
break;
- };
+ }
}
return __do_int_store(dst_addr, size, src_val, asi);
}
@@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
case ASI_SNFL:
asi &= ~0x08;
break;
- };
+ }
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
@@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
default:
BUG();
break;
- };
+ }
*reg_addr = val_in;
}
break;
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 531d54f..489fc15 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index)
default:
BUG();
- };
+ }
}
static unsigned long index_to_divisor(unsigned int index)
@@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index)
default:
BUG();
- };
+ }
}
static unsigned long estar_to_divisor(unsigned long estar)
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar)
break;
default:
BUG();
- };
+ }
return ret;
}
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 9a8ceb7..eb1624b 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
break;
default:
BUG();
- };
+ }
return ret;
}
@@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
default:
BUG();
- };
+ }
reg = read_safari_cfg();
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index aa6ac70..29348ea 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
default:
return handshake_failure(vio);
- };
+ }
}
static int process_attr(struct vio_driver_state *vio, void *pkt)
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 9dfd2eb..3635771 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
break;
- };
+ }
if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
rd_val = right & left;
@@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
regs->tstate = tstate | (ccr << 32UL);
}
- };
+ }
}
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
case ARRAY32_OPF:
rd_val <<= 2;
- };
+ }
store_reg(regs, rd_val, RD(insn));
}
@@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
- };
+ }
}
static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
- };
+ }
}
static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
rd_val |= 1 << i;
}
break;
- };
+ }
maybe_flush_windows(0, 0, RD(insn), 0);
store_reg(regs, rd_val, RD(insn));
@@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
case BSHUFFLE_OPF:
bshuffle(regs, insn);
break;
- };
+ }
regs->tpc = regs->tnpc;
regs->tnpc += 4;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b10ac4d..7543ddb 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
default:
break;
- };
+ }
memset(&regs, 0, sizeof (regs));
regs.pc = pc;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index ca21732..7b00de6 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -340,7 +340,7 @@ void __init paging_init(void)
prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
prom_printf("paging_init: Halting...\n");
prom_halt();
- };
+ }
/* Initialize the protection map with non-constant, MMU dependent values. */
protection_map[0] = PAGE_NONE;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e10cd03..3fd8e18 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1625,7 +1625,7 @@ static void __init sun4v_ktsb_init(void)
ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
break;
- };
+ }
ktsb_descr[0].assoc = 1;
ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
@@ -2266,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz)
return _PAGE_SZ512K_4V;
case 4 * 1024 * 1024:
return _PAGE_SZ4MB_4V;
- };
+ }
} else {
switch (sz) {
case 8 * 1024:
@@ -2278,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz)
return _PAGE_SZ512K_4U;
case 4 * 1024 * 1024:
return _PAGE_SZ4MB_4U;
- };
+ }
}
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index fe09fd8..cbef74e 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1665,7 +1665,7 @@ static void __init init_swift(void)
default:
srmmu_modtype = Swift_ok;
break;
- };
+ }
BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
@@ -2069,7 +2069,7 @@ static void __init get_srmmu_type(void)
/* Some other Cypress revision, assume a 605. */
init_cypress_605(mod_rev);
break;
- };
+ }
return;
}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a2350b5..1cf4f19 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void)
prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
sun4c_vacinfo.linesize);
prom_halt();
- };
+ }
sun4c_flush_all();
sun4c_enable_vac();
@@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void)
prom_printf("Unhandled number of segmaps: %d\n",
num_segmaps);
prom_halt();
- };
+ }
switch (num_contexts) {
case 8:
/* Default, nothing to do. */
@@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void)
prom_printf("Unhandled number of contexts: %d\n",
num_contexts);
prom_halt();
- };
+ }
if (sun4c_vacinfo.do_hwflushes != 0) {
PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
@@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void)
prom_printf("Impossible VAC linesize %d, halting...\n",
sun4c_vacinfo.linesize);
prom_halt();
- };
+ }
}
}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9484615..a5f51b2 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -180,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
current->comm, current->pid, tsb_bytes);
do_exit(SIGSEGV);
- };
+ }
tte |= pte_sz_bits(page_sz);
if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
@@ -215,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
#endif
default:
BUG();
- };
+ }
hp->assoc = 1;
hp->num_ttes = tsb_bytes / 16;
hp->ctx_idx = 0;
@@ -230,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
#endif
default:
BUG();
- };
+ }
hp->tsb_base = tsb_paddr;
hp->resv = 0;
}
diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
index b05e3db..a00f47b 100644
--- a/arch/sparc/prom/console_32.c
+++ b/arch/sparc/prom/console_32.c
@@ -38,7 +38,7 @@ static int prom_nbputchar(const char *buf)
break;
default:
break;
- };
+ }
restore_current();
spin_unlock_irqrestore(&prom_lock, flags);
return i; /* Ugh, we could spin forever on unsupported proms ;( */
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 0a601b3..26c64ce 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -53,7 +53,7 @@ void __init prom_init(struct linux_romvec *rp)
romvec->pv_romvers);
prom_halt();
break;
- };
+ }
prom_rev = romvec->pv_plugin_revision;
prom_prev = romvec->pv_printrev;
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
index 97c44c9..0da8256 100644
--- a/arch/sparc/prom/mp.c
+++ b/arch/sparc/prom/mp.c
@@ -35,7 +35,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
case PROM_V3:
ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
break;
- };
+ }
restore_current();
spin_unlock_irqrestore(&prom_lock, flags);
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h
index c6344c4..9d3dbce 100644
--- a/arch/tile/include/asm/mmzone.h
+++ b/arch/tile/include/asm/mmzone.h
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn)
return highbits_to_node[__pfn_to_highbits(pfn)];
}
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
-})
-
#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
static inline int pfn_valid(int pfn)
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h
new file mode 100644
index 0000000..efe7508
--- /dev/null
+++ b/arch/um/include/asm/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __UM_PERCPU_H
+#define __UM_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __UM_PERCPU_H */
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index d3a3032..e57dcce 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -231,10 +231,6 @@ config PUV3_PWM
help
Enable support for NB0916 PWM controllers
-config PUV3_RTC
- tristate "PKUnity v3 RTC Support"
- depends on !ARCH_FPGA
-
if PUV3_NB0916
menu "PKUnity NetBook-0916 Features"
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
index 76a8bee..6af4bc4 100644
--- a/arch/unicore32/Makefile
+++ b/arch/unicore32/Makefile
@@ -40,42 +40,10 @@ core-y += arch/unicore32/mm/
libs-y += arch/unicore32/lib/
-ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated
-LINUXINCLUDE += -I$(ASM_GENERATED_DIR)
-
-ASM_GENERIC_HEADERS := atomic.h auxvec.h
-ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h
-ASM_GENERIC_HEADERS += cputime.h current.h
-ASM_GENERIC_HEADERS += device.h div64.h
-ASM_GENERIC_HEADERS += emergency-restart.h errno.h
-ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h futex.h
-ASM_GENERIC_HEADERS += hardirq.h hw_irq.h
-ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h
-ASM_GENERIC_HEADERS += kdebug.h kmap_types.h
-ASM_GENERIC_HEADERS += local.h
-ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h
-ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h
-ASM_GENERIC_HEADERS += resource.h
-ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h
-ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h
-ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h
-ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
-ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h
-ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h
-ASM_GENERIC_HEADERS += vga.h
-ASM_GENERIC_HEADERS += xor.h
-
-archprepare:
-ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
- $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
- $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \
- echo '#include <asm-generic/$a>' \
- > $(ASM_GENERATED_DIR)/asm/$a; )
-endif
-
boot := arch/unicore32/boot
-# Default target when executing plain make
+# Default defconfig and target when executing plain make
+KBUILD_DEFCONFIG := $(ARCH)_defconfig
KBUILD_IMAGE := zImage
all: $(KBUILD_IMAGE)
@@ -83,8 +51,6 @@ all: $(KBUILD_IMAGE)
zImage Image uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-MRPROPER_DIRS += $(ASM_GENERATED_DIR)
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 9537342..b0954a2 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -59,7 +59,7 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
# We now have a PIC decompressor implementation. Decompressors running
# from RAM should not define ZTEXTADDR. Decompressors running directly
# from ROM or Flash must define ZTEXTADDR (preferably via the config)
-ZTEXTADDR := 0
+ZTEXTADDR := 0x03000000
ZBSSADDR := ALIGN(4)
SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/unicore32_defconfig
index b5fbde9..c9dd319 100644
--- a/arch/unicore32/configs/debug_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -1,6 +1,6 @@
### General setup
CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-debug"
+CONFIG_LOCALVERSION="-unicore32"
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -64,7 +64,6 @@ CONFIG_I2C_BATTERY_BQ27200=n
CONFIG_I2C_EEPROM_AT24=n
CONFIG_LCD_BACKLIGHT=n
-CONFIG_PUV3_RTC=y
CONFIG_PUV3_UMAL=y
CONFIG_PUV3_MUSB=n
CONFIG_PUV3_AC97=n
@@ -167,8 +166,9 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# Real Time Clock
-CONFIG_RTC_LIB=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PUV3=y
### File systems
CONFIG_EXT2_FS=m
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index b200fda..ca113d6 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -1,2 +1,61 @@
include include/asm-generic/Kbuild.asm
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += syscalls.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
index ec23a2f..aeb0f18 100644
--- a/arch/unicore32/kernel/Makefile
+++ b/arch/unicore32/kernel/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o
obj-$(CONFIG_PUV3_GPIO) += gpio.o
-obj-$(CONFIG_PUV3_RTC) += rtc.o
obj-$(CONFIG_PUV3_PWM) += pwm.o
obj-$(CONFIG_PUV3_PM) += pm.o sleep.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 9bf7f7a..77e407e 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -30,7 +30,7 @@ SECTIONS
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
_stext = .;
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14b..0cd3800 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
#define ARCH_DISCARD_MEMBLOCK
u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a41..224e8c5 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
#endif
}
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
-})
-
static inline int pfn_valid(int pfn)
{
int nid = pfn_to_nid(pfn);
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7..129d9aa 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
- NODE_DATA(nid)->node_spanned_pages)
#endif
#endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84ac..a518c0a 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
u64 product;
#ifdef __i386__
u32 tmp1, tmp2;
+#else
+ ulong tmp;
#endif
if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
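+ /* 128-bit multiply: rdx:rax = delta * mul_frac; shrd then leaves
+  * (product >> 32) in rax.
+  */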
__asm__ (
- "mul %%rdx ; shrd $32,%%rdx,%%rax"
- : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+ "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+ : [lo]"=a"(product),
+ [hi]"=d"(tmp)
+ : "0"(delta),
+ [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af8..b9338b8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
/*
* If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
*/
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a01..adc66c3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
int domain, bus, rc;
- PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
- pdev->devfn, decode, command_bits, change_bridge);
+ PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+ pdev->devfn, decode, command_bits, flags);
- if (!change_bridge)
+ if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
return 0;
if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc84..9aeb78a 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of_pci.h>
+#include <linux/initrd.h>
#include <asm/hpet.h>
#include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
+#ifdef CONFIG_BLK_DEV_INITRD
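+/* Record the initrd range passed in the device tree as virtual
+ * addresses for the generic initrd code.
+ */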
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d..e1ba8cb2 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif
@@ -399,7 +399,7 @@ void default_idle(void)
cpu_relax();
}
}
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d12878..a3d0dc5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
set_user_gs(regs, 0);
regs->fs = 0;
- set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd92..ca6f7ab 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
regs->cs = _cs;
regs->ss = _ss;
regs->flags = X86_EFLAGS_IF;
- set_fs(USER_DS);
/*
* Free the old FP and other extended state
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11..9fd3137 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
x86_platform.nmi_init();
+ /*
+ * Wait until the cpu which brought this one up marked it
+ * online before enabling interrupts. If we don't do that then
+ * we can end up waking up the softirq thread before this cpu
+ * reached the active state, which makes the scheduler unhappy
+ * and schedule the softirq thread on the wrong cpu. This is
+ * only observable with forced threaded interrupts, but in
+ * theory it could also happen w/o them. It's just way harder
+ * to achieve.
+ */
+ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+ cpu_relax();
+
/* enable local interrupts */
local_irq_enable();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6df88c7..adc9867 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3372,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
struct opcode opcode;
- struct operand memop = { .type = OP_NONE };
+ struct operand memop = { .type = OP_NONE }, *memopp = NULL;
c->eip = ctxt->eip;
c->fetch.start = c->eip;
@@ -3547,9 +3547,6 @@ done_prefixes:
if (memop.type == OP_MEM && c->ad_bytes != 8)
memop.addr.mem.ea = (u32)memop.addr.mem.ea;
- if (memop.type == OP_MEM && c->rip_relative)
- memop.addr.mem.ea += c->eip;
-
/*
* Decode and fetch the source operand: register, memory
* or immediate.
@@ -3571,6 +3568,7 @@ done_prefixes:
c->op_bytes;
srcmem_common:
c->src = memop;
+ memopp = &c->src;
break;
case SrcImmU16:
rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3667,6 +3665,7 @@ done_prefixes:
case DstMem:
case DstMem64:
c->dst = memop;
+ memopp = &c->dst;
if ((c->d & DstMask) == DstMem64)
c->dst.bytes = 8;
else
@@ -3700,10 +3699,13 @@ done_prefixes:
/* Special instructions do their own operand decoding. */
default:
c->dst.type = OP_NONE; /* Disable writeback. */
- return 0;
+ break;
}
done:
+ if (memopp && memopp->type == OP_MEM && c->rip_relative)
+ memopp->addr.mem.ea += c->eip;
+
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4..aee3862 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
- return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+ return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010..9d03ad4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gva_t addr, u32 access)
{
pt_element_t pte;
- pt_element_t __user *ptep_user;
+ pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f..d48ec60 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
{
- vmx_decache_cr3(vcpu);
+ if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ vmx_decache_cr3(vcpu);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa11693..992da5e 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
#include <linux/range.h>
/* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
struct memblock_region *r;
u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
if (addr >= ei_last)
continue;
*sizep = ei_last - addr;
- while (check_with_memblock_reserved_size(&addr, sizep, align))
+ while (memblock_x86_check_reserved_size(&addr, sizep, align))
;
if (*sizep)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a56..9cbb710 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
return 0;
}
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * set up the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset. This then updates
+ * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
+ * the IBS interrupt vector is called from op_amd_setup_ctrs()/
+ * op_amd_cpu_shutdown() using the new offset.
+ */
static int force_ibs_eilvt_setup(void)
{
int offset;
int ret;
- /*
- * find the next free available EILVT entry, skip offset 0,
- * pin search to this cpu
- */
preempt_disable();
+ /* find the next free available EILVT entry, skip offset 0 */
for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
if (get_eilvt(offset))
break;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315..68c3c13 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
return false;
}
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
int i, j;
struct resource *res1, *res2;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa..474356b 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
- unsigned long long start = md->phys_addr;
- unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 start = md->phys_addr;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
-
- memblock_x86_reserve_range(start, start + size, "EFI Boot");
+ /* Only reserve where possible:
+ * - Not within any already allocated areas
+ * - Not over any memory area (really needed, if above?)
+ * - Not within any part of the kernel
+ * - Not the bios reserved area
+ */
+ if ((start+size >= virt_to_phys(_text)
+ && start <= virt_to_phys(_end)) ||
+ !e820_all_mapped(start, start+size, E820_RAM) ||
+ memblock_x86_check_reserved_size(&start, &size,
+ 1<<EFI_PAGE_SHIFT)) {
+ /* Could not reserve, skip it */
+ md->num_pages = 0;
+ memblock_dbg(PFX "Could not reserve boot range "
+ "[0x%010llx-0x%010llx]\n",
+ start, start+size-1);
+ } else
+ memblock_x86_reserve_range(start, start+size,
+ "EFI Boot");
}
}
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
md->type != EFI_BOOT_SERVICES_DATA)
continue;
+ /* Could not reserve boot area */
+ if (!size)
+ continue;
+
free_bootmem_late(start, size);
}
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f..5525163 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
xen_reboot(SHUTDOWN_poweroff);
}
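+/* Run any registered pm_power_off handler (e.g. ACPI) before telling
+ * the hypervisor to power off the domain.
+ */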
+static void xen_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ xen_reboot(SHUTDOWN_poweroff);
+}
+
static void xen_crash_shutdown(struct pt_regs *regs)
{
xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
static const struct machine_ops xen_machine_ops __initconst = {
.restart = xen_restart,
.halt = xen_machine_halt,
- .power_off = xen_machine_halt,
+ .power_off = xen_machine_power_off,
.shutdown = xen_machine_halt,
.crash_shutdown = xen_crash_shutdown,
.emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dc..673e968 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
+#include <asm/smp.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
{
struct {
struct mmuext_op op;
- DECLARE_BITMAP(mask, NR_CPUS);
+ DECLARE_BITMAP(mask, num_processors);
} *args;
struct multicall_space mcs;
@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
+#ifdef CONFIG_X86_32
+ if (pfn > max_pfn_mapped)
+ max_pfn_mapped = pfn;
+#endif
+
if (!pte_none(pte_page[pteidx]))
continue;
@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
initial_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+ xen_start_info->nr_pt_frames * PAGE_SIZE +
+ 512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464..60aeeb5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
-#ifdef CONFIG_X86_32
xen_extra_mem_start = mem_end;
-#else
- xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
if (map[i].size > 0)
e820_add_region(map[i].addr, map[i].size, map[i].type);
}
+ /* Align the balloon area so that max_low_pfn does not get set
+ * to be at the _end_ of the PCI gap at the far end (fee01000).
+ * Note that xen_extra_mem_start gets set in the loop above to be
+ * past the last E820 region. */
+ if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+ xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c0..b4533a8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned cpu;
+ unsigned int i;
xen_init_lock_cpu(0);
smp_store_cpu_info(0);
cpu_data(0).x86_max_cores = 1;
+
+ for_each_possible_cpu(i) {
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+ }
set_cpu_sibling_map(0);
if (xen_smp_intr_init(0))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a62be8d..3689f83 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
bio_list_init(&bio_list_on_stack);
- throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
+ throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
total_nr_queued(td), td->nr_queued[READ],
td->nr_queued[WRITE]);
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
}
queue_bio:
- throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+ throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
" iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W',
tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c7b537..f379943 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
- " sect=%u", used_sl, cfqq->slice_dispatch, charge,
- iops_mode(cfqd), cfqq->nr_sectors);
+ cfq_log_cfqq(cfqq->cfqd, cfqq,
+ "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+ used_sl, cfqq->slice_dispatch, charge,
+ iops_mode(cfqd), cfqq->nr_sectors);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
unaccounted_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
*/
if (sample_valid(cic->ttime_samples) &&
(cfqq->slice_end - jiffies < cic->ttime_mean)) {
- cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
- cic->ttime_mean);
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+ cic->ttime_mean);
return;
}
@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
smp_wmb();
cic->key = cfqd_dead_key(cfqd);
- if (ioc->ioc_data == cic)
+ if (rcu_dereference(ioc->ioc_data) == cic) {
+ spin_lock(&ioc->lock);
rcu_assign_pointer(ioc->ioc_data, NULL);
+ spin_unlock(&ioc->lock);
+ }
if (cic->cfqq[BLK_RW_ASYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
diff --git a/block/genhd.c b/block/genhd.c
index 95822ae..3608289 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1371,6 +1371,7 @@ struct disk_events {
struct gendisk *disk; /* the associated disk */
spinlock_t lock;
+ struct mutex block_mutex; /* protects blocking */
int block; /* event blocking depth */
unsigned int pending; /* events already sent out */
unsigned int clearing; /* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
return msecs_to_jiffies(intv_msecs);
}
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events(). Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
{
struct disk_events *ev = disk->ev;
unsigned long flags;
bool cancel;
+ if (!ev)
+ return;
+
+ /*
+ * Outer mutex ensures that the first blocker completes canceling
+ * the event work before further blockers are allowed to finish.
+ */
+ mutex_lock(&ev->block_mutex);
+
spin_lock_irqsave(&ev->lock, flags);
cancel = !ev->block++;
spin_unlock_irqrestore(&ev->lock, flags);
- if (cancel) {
- if (sync)
- cancel_delayed_work_sync(&disk->ev->dwork);
- else
- cancel_delayed_work(&disk->ev->dwork);
- }
+ if (cancel)
+ cancel_delayed_work_sync(&disk->ev->dwork);
+
+ mutex_unlock(&ev->block_mutex);
}
static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1484,6 @@ out_unlock:
}
/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events(). Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
- if (disk->ev)
- __disk_block_events(disk, true);
-}
-
-/**
* disk_unblock_events - unblock disk event checking
* @disk: disk to unblock events for
*
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
*/
void disk_check_events(struct gendisk *disk)
{
- if (disk->ev) {
- __disk_block_events(disk, false);
- __disk_unblock_events(disk, true);
+ struct disk_events *ev = disk->ev;
+ unsigned long flags;
+
+ if (!ev)
+ return;
+
+ spin_lock_irqsave(&ev->lock, flags);
+ if (!ev->block) {
+ cancel_delayed_work(&ev->dwork);
+ queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
}
+ spin_unlock_irqrestore(&ev->lock, flags);
}
EXPORT_SYMBOL_GPL(disk_check_events);
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
spin_unlock_irq(&ev->lock);
/* unconditionally schedule event check and wait for it to finish */
- __disk_block_events(disk, true);
+ disk_block_events(disk);
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
flush_delayed_work(&ev->dwork);
__disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
INIT_LIST_HEAD(&ev->node);
ev->disk = disk;
spin_lock_init(&ev->lock);
+ mutex_init(&ev->block_mutex);
ev->block = 1;
ev->poll_msecs = -1;
INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
if (!disk->ev)
return;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
mutex_lock(&disk_events_mutex);
list_del_init(&disk->ev->node);
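
The genhd.c change above pairs a nesting counter (ev->block) with an outer mutex so that only the 0 -> 1 transition performs the synchronous cancel while later blockers merely bump the depth. A minimal userspace sketch of that pattern, assuming pthreads as a stand-in for the kernel's spinlock/mutex primitives (illustrative only, not the kernel code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct events {
        pthread_mutex_t block_mutex;   /* serializes blockers, like ev->block_mutex */
        pthread_mutex_t lock;          /* protects 'block', like ev->lock */
        int block;                     /* event blocking depth */
    };

    /* Stand-in for cancel_delayed_work_sync(): flush whatever is pending. */
    static void cancel_pending_work(void)
    {
        puts("first blocker: cancelled and flushed pending event work");
    }

    static void block_events(struct events *ev)
    {
        bool cancel;

        pthread_mutex_lock(&ev->block_mutex);
        pthread_mutex_lock(&ev->lock);
        cancel = (ev->block++ == 0);       /* only the 0 -> 1 transition cancels */
        pthread_mutex_unlock(&ev->lock);
        if (cancel)
            cancel_pending_work();
        pthread_mutex_unlock(&ev->block_mutex);
    }

    static void unblock_events(struct events *ev)
    {
        pthread_mutex_lock(&ev->lock);
        if (--ev->block == 0)
            puts("last unblock: event checking may run again");
        pthread_mutex_unlock(&ev->lock);
    }

    int main(void)
    {
        struct events ev = {
            .block_mutex = PTHREAD_MUTEX_INITIALIZER,
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .block = 0,
        };

        block_events(&ev);    /* cancels and flushes */
        block_events(&ev);    /* nested: just increments the depth */
        unblock_events(&ev);
        unblock_events(&ev);  /* depth back to zero */
        return 0;
    }
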
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d38c40f..41223c7 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
}
if (mask_port_map) {
- dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+ dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
port_map,
port_map & mask_port_map);
port_map &= mask_port_map;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 736bee5..000d03a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* End Marker */
{ }
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dfb6e9d..7f099d6 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2802,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
/*
- * Some controllers can't be frozen very well and may set
- * spuruious error conditions during reset. Clear accumulated
- * error information. As reset is the final recovery action,
- * nothing is lost by doing this.
+ * Some controllers can't be frozen very well and may set spurious
+ * error conditions during reset. Clear accumulated error
+ * information and re-thaw the port if frozen. As reset is the
+ * final recovery action and we cross check link onlineness against
+ * device classification later, no hotplug event is lost by this.
*/
spin_lock_irqsave(link->ap->lock, flags);
memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2814,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
spin_unlock_irqrestore(link->ap->lock, flags);
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ ata_eh_thaw_port(ap);
+
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d51f979..927f968 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
*/
int ata_sas_port_start(struct ata_port *ap)
{
+ /*
+ * the port is marked as frozen at allocation time, but if we don't
+ * have new eh, we won't thaw it
+ */
+ if (!ap->ops->error_handler)
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 75a6a0c..5d7f58a 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6121), },
{ PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
+ { PCI_DEVICE(0x1B4B, 0x91A0), },
+ { PCI_DEVICE(0x1B4B, 0x91A4), },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 1c4b3aa..dc88a39 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
/*
* Function: get_burst_length_encode
* arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
* This value is effectively the log(base 2) of the length
*/
static int get_burst_length_encode(int datalength)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af..6040717 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
struct device *parent,
const char *name, int id,
const struct resource *res, unsigned int num,
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eaa8a85..ad367c4 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
enable_clock(dev, NULL);
}
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa632020..06f09bf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (async_error)
- goto End;
+ goto Unlock;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
- goto End;
+ goto Unlock;
}
if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = !error;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
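
The power/main.c change above replaces the single in_suspend flag with is_prepared and is_suspended, so resume can skip devices whose suspend callback never ran while complete() still clears the prepare state. A minimal sketch of the two-flag bookkeeping, with a hypothetical userspace struct standing in for dev_pm_info (illustrative assumptions only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the two flags added to struct dev_pm_info. */
    struct dev_pm {
        bool is_prepared;    /* set during prepare, cleared on resume/complete */
        bool is_suspended;   /* set only if the suspend callback succeeded */
    };

    static int suspend_device(struct dev_pm *pm, int error)
    {
        pm->is_suspended = !error;    /* mirrors "is_suspended = !error" above */
        return error;
    }

    static void resume_device(struct dev_pm *pm)
    {
        pm->is_prepared = false;      /* allow new children below this device */
        if (!pm->is_suspended) {
            puts("skip resume: device was never suspended");
            return;
        }
        puts("run resume callbacks");
        pm->is_suspended = false;
    }

    int main(void)
    {
        struct dev_pm ok = { .is_prepared = true };
        struct dev_pm failed = { .is_prepared = true };

        suspend_device(&ok, 0);
        suspend_device(&failed, -1);  /* pretend its suspend callback failed */
        resume_device(&ok);           /* resumes normally */
        resume_device(&failed);       /* skipped, nothing to undo */
        return 0;
    }
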
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305b..8ecf4c6 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscfgcmd = result;
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.psmode = result;
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.pscmd = result;
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 16, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.gpio_gap = result;
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscmd = result;
if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hsmode = result;
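
Each btmrvl hunk above adds the same guard: check the conversion result before storing it. The shape of that check can be shown with standard strtol as a userspace stand-in for strict_strtol (the kernel helper's exact error reporting differs; this is only a sketch):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Convert buf in the given base; return 0 on success, negative errno on error. */
    static int parse_long(const char *buf, int base, long *result)
    {
        char *end;

        errno = 0;
        *result = strtol(buf, &end, base);
        if (errno == ERANGE)
            return -ERANGE;
        if (end == buf || (*end != '\0' && *end != '\n'))
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        long value;
        int ret = parse_long("42\n", 10, &value);

        if (ret)                       /* same shape as the added "if (ret)" checks */
            fprintf(stderr, "parse failed: %d\n", ret);
        else
            printf("parsed %ld\n", value);
        return 0;
    }
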
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 051474c..34d6a1c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
- unsigned long m, t;
+ unsigned long m, t, mc, base, k;
+ struct hpet __iomem *hpet = devp->hd_hpet;
+ struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
m = read_counter(&devp->hd_timer->hpet_compare);
- write_counter(t + m, &devp->hd_timer->hpet_compare);
+ mc = read_counter(&hpet->hpet_mc);
+ /* The time for the next interrupt would logically be t + m,
+ * however, if we are very unlucky and the interrupt is delayed
+ * for longer than t then we will completely miss the next
+ * interrupt if we set t + m and an application will hang.
+ * Therefore we need to make a more complex computation assuming
+ * that there exists a k for which the following is true:
+ * k * t + base < mc + delta
+ * (k + 1) * t + base > mc + delta
+ * where t is the interval in hpet ticks for the given freq,
+ * base is the theoretical start value 0 < base < t,
+ * mc is the main counter value at the time of the interrupt,
+ * delta is the time it takes to write a value to the
+ * comparator.
+ * k may then be computed as (mc - base + delta) / t .
+ */
+ base = mc % t;
+ k = (mc - base + hpetp->hp_delta) / t;
+ write_counter(t * (k + 1) + base,
+ &devp->hd_timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ)
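
To make the comparator arithmetic described in the comment above concrete, here is a standalone computation with sample numbers chosen purely for illustration: with t = 1000 ticks, an interrupt serviced late at mc = 5230 and delta = 50, base = mc % t and k = (mc - base + delta) / t give a comparator value that is always ahead of the main counter, whereas the old t + m target could already be in the past:

    #include <stdio.h>

    int main(void)
    {
        unsigned long t = 1000;       /* ticks per period for the requested freq */
        unsigned long mc = 5230;      /* main counter value when the irq is handled */
        unsigned long delta = 50;     /* ticks needed to write the comparator */
        unsigned long base, k, next;

        base = mc % t;                /* 230: theoretical start value, 0 <= base < t */
        k = (mc - base + delta) / t;  /* 5: whole periods already elapsed */
        next = t * (k + 1) + base;    /* 6230: strictly after mc + delta */

        printf("base=%lu k=%lu next=%lu (counter is at %lu)\n", base, k, next, mc);
        return 0;
    }
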
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 219d88a..dde6a0f 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
+ err = 0;
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index b60a4c2..faf7c52 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
old_index = stat->last_index;
new_index = freq_table_get_index(stat, freq->new);
- cpufreq_stats_update(freq->cpu);
- if (old_index == new_index)
+ /* We can't do stat->time_in_state[-1]= .. */
+ if (old_index == -1 || new_index == -1)
return 0;
- if (old_index == -1 || new_index == -1)
+ cpufreq_stats_update(freq->cpu);
+
+ if (old_index == new_index)
return 0;
spin_lock(&cpufreq_stats_lock);
@@ -387,6 +389,7 @@ static void __exit cpufreq_stats_exit(void)
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
cpufreq_stats_free_table(cpu);
+ cpufreq_stats_free_sysfs(cpu);
}
}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 83479b6..bce576d 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
}
res = transition_fid_vid(data, fid, vid);
+ if (res)
+ return res;
+
freqs.new = find_khz_freq_from_fid(data->currfid);
for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
/* get MSR index for hardware pstate transition */
pstate = index & HW_PSTATE_MASK;
if (pstate > data->max_hw_pstate)
- return 0;
+ return -EINVAL;
+
freqs.old = find_khz_freq_from_pstate(data->powernow_table,
data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d0e65d6..676d957 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -238,9 +238,9 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
/* build shared descriptor for this session */
sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
- keys_fit_inline ?
- ctx->split_key_pad_len + ctx->enckeylen :
- CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
+ (keys_fit_inline ?
+ ctx->split_key_pad_len + ctx->enckeylen :
+ CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
if (!sh_desc) {
dev_err(jrdev, "could not allocate shared descriptor\n");
return -ENOMEM;
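
The added parentheses above matter because C's conditional operator binds more loosely than '+' and '*', so without them the whole 'CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + keys_fit_inline' expression becomes the condition and the allocation size is wrong. A tiny standalone demonstration with made-up constants:

    #include <stdio.h>

    int main(void)
    {
        int cmd_sz = 4, text_len = 10;     /* made-up stand-ins for the CAAM macros */
        int fits = 1, key_len = 8, ptr_len = 16;

        /* Without parentheses: (cmd_sz*text_len + fits) ? key_len : ptr_len == 8 */
        int wrong = cmd_sz * text_len + fits ? key_len : ptr_len;

        /* With parentheses: cmd_sz*text_len + (fits ? key_len : ptr_len) == 48 */
        int right = cmd_sz * text_len + (fits ? key_len : ptr_len);

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
    }
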
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2a638f9..0283300 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1221,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
} else {
do {
for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
if ((errirq_res->flags & IORESOURCE_BITS) ==
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1230,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
"Found IRQ %d for channel %d\n",
i, irq_cnt);
chan_irq[irq_cnt++] = i;
-
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
- break;
}
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
- irq_cap = 1;
+ if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
break;
- }
+
chanirq_res = platform_get_resource(pdev,
IORESOURCE_IRQ, ++irqres);
} while (irq_cnt < pdata->channel_num && chanirq_res);
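
The shdma.c rework above moves the capacity test to the top of the loop so the chan_irq[] slot is checked before it is written rather than after. A generic sketch of that test-before-store loop shape (array size and IRQ numbers are invented for illustration):

    #include <stdio.h>

    #define MAX_CHANNELS 4            /* stand-in for SH_DMAC_MAX_CHANNELS */

    int main(void)
    {
        int chan_irq[MAX_CHANNELS];
        int irq_cnt = 0, capped = 0;

        for (int irq = 100; irq < 110; irq++) {
            if (irq_cnt >= MAX_CHANNELS) {    /* test first, as the patch does */
                capped = 1;
                break;
            }
            chan_irq[irq_cnt++] = irq;        /* store only when there is room */
        }

        printf("recorded %d IRQs%s\n", irq_cnt, capped ? " (capped)" : "");
        return 0;
    }
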
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 87096b6..2f21b0b 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
depends on ACPI && DMI
+ select EFI
select EFI_VARS
help
Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index f032e44..bfe7232 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -108,7 +108,9 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
+#ifdef CONFIG_ACPI
int i;
+#endif
ibft_addr = NULL;
#ifdef CONFIG_ACPI
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 4961ef9..2c212c7 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008,2009 STMicroelectronics
* Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -49,6 +50,7 @@ struct nmk_gpio_chip {
u32 (*get_secondary_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
+ bool sleepmode;
/* Keep track of configured edges */
u32 edge_rising;
u32 edge_falling;
@@ -393,14 +395,25 @@ EXPORT_SYMBOL(nmk_config_pins_sleep);
* @gpio: pin number
* @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE,
*
- * Sets the sleep mode of a pin. If @mode is NMK_GPIO_SLPM_INPUT, the pin is
- * changed to an input (with pullup/down enabled) in sleep and deep sleep. If
- * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
- * configured even when in sleep and deep sleep.
+ * This register is actually in the pinmux layer, not the GPIO block itself.
+ * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
+ * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
+ * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
+ * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
+ * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
+ * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
*
- * On DB8500v2 onwards, this setting loses the previous meaning and instead
- * indicates if wakeup detection is enabled on the pin. Note that
- * enable_irq_wake() will automatically enable wakeup detection.
+ * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
+ * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
+ * entered) regardless of the altfunction selected. Also wake-up detection is
+ * ENABLED.
+ *
+ * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
+ * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
+ * (for altfunction GPIO) or respective on-chip peripherals (for other
+ * altfuncs) when IOFORCE is HIGH. Also wake-up detection is DISABLED.
+ *
+ * Note that enable_irq_wake() will automatically enable wakeup detection.
*/
int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
{
@@ -551,6 +564,12 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
int gpio, bool on)
{
+ if (nmk_chip->sleepmode) {
+ __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
+ on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
+ : NMK_GPIO_SLPM_WAKEUP_DISABLE);
+ }
+
__nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
}
@@ -901,7 +920,7 @@ void nmk_gpio_wakeups_suspend(void)
writel(chip->fwimsc & chip->real_wake,
chip->addr + NMK_GPIO_FWIMSC);
- if (cpu_is_u8500v2()) {
+ if (chip->sleepmode) {
chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
/* 0 -> wakeup enable */
@@ -923,7 +942,7 @@ void nmk_gpio_wakeups_resume(void)
writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
- if (cpu_is_u8500v2())
+ if (chip->sleepmode)
writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
}
}
@@ -1010,6 +1029,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
nmk_chip->secondary_parent_irq = secondary_irq;
nmk_chip->get_secondary_status = pdata->get_secondary_status;
nmk_chip->set_ioforce = pdata->set_ioforce;
+ nmk_chip->sleepmode = pdata->supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
@@ -1065,5 +1085,3 @@ core_initcall(nmk_gpio_init);
MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
MODULE_DESCRIPTION("Nomadik GPIO Driver");
MODULE_LICENSE("GPL");
-
-
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 01f74a8..35bebde 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -469,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ OMAP24XX_GPIO_CLEARWKUENA);
}
}
- /* This part needs to be executed always for OMAP34xx */
- if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+ /* This part needs to be executed always for OMAP{34xx, 44xx} */
+ if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
+ (bank->non_wakeup_gpios & gpio_bit)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3e257a5..61e1ef9 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -46,10 +46,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
list_for_each_entry(entry, &dev->maplist, head) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
+ * while PCI resources may live above that, we only compare the
+ * lower 32 bits of the map offset for maps of type
+ * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+ * It is assumed that if a driver has more than one resource
+ * of each type, the lower 32 bits are different.
*/
if (!entry->map ||
map->type != entry->map->type ||
@@ -59,9 +60,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
+ return entry;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- return entry;
+ if ((entry->map->offset & 0xffffffff) ==
+ (map->offset & 0xffffffff))
+ return entry;
default: /* Make gcc happy */
;
}
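
Because map offsets cross a fixed 32-bit ABI while PCI resources may sit above 4 GiB, the hunk above compares only the low 32 bits of the two offsets. A hedged sketch of just that comparison (not the DRM structures themselves):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Two offsets match across the 32-bit ABI iff their low 32 bits agree. */
    static bool offsets_match(uint64_t a, uint64_t b)
    {
        return (a & 0xffffffffULL) == (b & 0xffffffffULL);
    }

    int main(void)
    {
        uint64_t pci_bar = 0x0000008fd0000000ULL;  /* resource above 4 GiB */
        uint64_t abi_off = 0x00000000d0000000ULL;  /* offset seen through the ABI */

        printf("match: %d\n", offsets_match(pci_bar, abi_off));  /* prints 1 */
        return 0;
    }
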
@@ -183,9 +187,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return -EINVAL;
}
#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
/* Some drivers preinitialize some maps, without the X Server
* needing to be aware of it. Therefore, we just return success
* when the server tries to create a duplicate map.
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 872747c..21058e6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1113,7 +1113,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
ret = -EFAULT;
goto out;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a9357c..0929219 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
bad:
if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
+ printk(KERN_ERR "Raw EDID:\n");
print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
+ printk(KERN_ERR "\n");
}
return 0;
}
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
return ret == 2 ? 0 : -1;
}
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
@@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
goto out;
if (drm_edid_block_valid(block))
break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
}
if (i == 4)
goto carp;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff5..4012fe4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include "drmP.h"
/** @file drm_gem.c
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d61d185..4a058c7 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -28,6 +28,7 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
+#include <linux/ratelimit.h>
#include "drmP.h"
#include "drm_core.h"
@@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle && printk_ratelimit())
- printk(KERN_ERR "compat_drm_addmap truncated handle"
- " %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
+ if (m32.handle != (unsigned long)handle)
+ printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
+ " %p for type %d offset %x\n",
+ handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e1aee4f..b6a19cb 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -251,7 +251,7 @@ err:
}
-int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
@@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
+ .irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2c3fcbd..5db96d45 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -526,7 +526,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+ return dev->hose->dense_mem_base;
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e99..2b79588 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- i915_gem_free_all_phys_object(dev);
-
mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd42..609358f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
} else switch (INTEL_INFO(dev)->gen) {
case 6:
ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+ if (atomic_read(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee16..eddabf6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12d3257..5c0d124 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
if (do_bit17_swizzling) {
slow_shmem_bit17_copy(page,
@@ -795,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -905,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1217,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
@@ -2078,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(ring->dev);
- i915_driver_irq_postinstall(ring->dev);
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5..4934cf8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- /* blow away mappings if mapped through GTT */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
- i915_gem_release_mmap(obj);
-
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b9fafe3..ae2b499 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1740,6 +1740,17 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
+ if (IS_GEN6(dev)) {
+ /* Workaround stalls observed on Sandy Bridge GPUs by
+ * making the blitter command streamer generate a
+ * write to the Hardware Status Page for
+ * MI_USER_INTERRUPT. This appears to serialize the
+ * previous seqno write out before the interrupt
+ * happens.
+ */
+ I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
+ }
/* XXX hotplug from PCH */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af..5d5def7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2..e8152d2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev)
}
/* VGA state */
+ mutex_lock(&dev->struct_mutex);
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_restore_display(struct drm_device *dev)
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
+ mutex_lock(&dev->struct_mutex);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev)
udelay(150);
i915_restore_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059..21b6f93 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- if (!HAS_PCH_SPLIT(dev))
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto out;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto out;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+out:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags, offset;
- int pipe = intel_crtc->pipe;
- u32 pf, pipesrc;
+ unsigned long flags;
int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
- if (ret)
- goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- u32 flip_mask;
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- ret = BEGIN_LP_RING(2);
- if (ret)
- goto cleanup_objs;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
- ret = BEGIN_LP_RING(4);
- if (ret)
- goto cleanup_objs;
-
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 3:
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 4:
- case 5:
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
-
- case 6:
- case 7:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
-
- pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
- }
- ADVANCE_LP_RING();
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
mutex_unlock(&dev->struct_mutex);
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
+cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
-cleanup_work:
mutex_unlock(&dev->struct_mutex);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
}
/*
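
The intel_display.c rework above replaces the per-flip switch on INTEL_INFO(dev)->gen with a queue_flip hook selected once in intel_init_display(). A minimal sketch of that init-time function-pointer dispatch, using invented structure and function names rather than the driver's own:

    #include <stdio.h>

    /* Invented stand-ins for the driver structures; gen numbers are illustrative. */
    struct display {
        int gen;
        int (*queue_flip)(struct display *disp);
    };

    static int queue_flip_gen2(struct display *disp) { puts("gen2 flip"); return 0; }
    static int queue_flip_gen6(struct display *disp) { puts("gen6 flip"); return 0; }
    static int queue_flip_default(struct display *disp) { return -1; /* unsupported */ }

    /* Pick the hook once at init time instead of switching on every flip request. */
    static void init_display(struct display *disp)
    {
        disp->queue_flip = queue_flip_default;
        switch (disp->gen) {
        case 2: disp->queue_flip = queue_flip_gen2; break;
        case 6: disp->queue_flip = queue_flip_gen6; break;
        }
    }

    int main(void)
    {
        struct display disp = { .gen = 6 };

        init_display(&disp);
        return disp.queue_flip(&disp);   /* hot path is one indirect call */
    }
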
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d3b903b..d98cee6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
- if (IS_GEN2(dev))
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c00..56a8e2a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1416,6 +1416,8 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = reg_bo;
+ mutex_lock(&dev->struct_mutex);
+
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
@@ -1440,6 +1442,8 @@ void intel_setup_overlay(struct drm_device *dev)
}
}
+ mutex_unlock(&dev->struct_mutex);
+
/* init all values */
overlay->color_key = 0x0101fe;
overlay->brightness = -19;
@@ -1464,6 +1468,7 @@ out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
+ mutex_unlock(&dev->struct_mutex);
out_free:
kfree(overlay);
return;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 1084fa4..54558a0 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -195,29 +195,10 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
-#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
-
-#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
-#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
-
-#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
-#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
-
-static inline u32 _MGA_READ(u32 *addr)
-{
- DRM_MEMORYBARRIER();
- return *(volatile u32 *)addr;
-}
-#else
#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
-#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f0d459b..525744d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
vga_count++;
retval = nouveau_dsm_pci_probe(pdev);
- printk("ret val is %d\n", retval);
if (retval & NOUVEAU_DSM_HAS_MUX)
has_dsm |= 1;
if (retval & NOUVEAU_DSM_HAS_OPT)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4b9f449..7347075 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 3);
+ ret = RING_SPACE(chan, 4);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
+ OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
- ret = RING_SPACE(chan, 5);
+ ret = RING_SPACE(chan, 7);
if (ret)
return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 5);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
+ OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
- ret = RING_SPACE(chan, 5);
+ ret = RING_SPACE(chan, 7);
if (ret)
return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
- if (dev_priv->card_type >= NV_C0)
- goto out_initialised;
+ if (dev_priv->card_type < NV_C0) {
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
+ if (ret)
+ return ret;
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
- if (ret)
- return ret;
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
- /* we leave subchannel empty for nvc0 */
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
- /* Create a DMA object for the shared cross-channel sync area. */
+ /* Setup area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- } else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle); /* whole VM */
}
- FIRE_RING(chan);
-
-out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 922fb6b..ef9dec0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
entries = perf[2];
}
+ if (entries > NOUVEAU_PM_MAX_LEVEL) {
+ NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+ entries = NOUVEAU_PM_MAX_LEVEL;
+ }
+
entry = perf + headerlen;
for (i = 0; i < entries; i++) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
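
The nouveau_perf.c hunk above clamps the entry count read from the vbios before the loop indexes the fixed-size perflvl[] array. A small standalone sketch of the same clamp-before-index pattern (MAX_LEVELS and the bogus count are illustrative):

    #include <stdio.h>

    #define MAX_LEVELS 8              /* stand-in for NOUVEAU_PM_MAX_LEVEL */

    int main(void)
    {
        int levels[MAX_LEVELS];
        unsigned int entries = 40;    /* bogus count as read from a broken table */

        if (entries > MAX_LEVELS) {   /* clamp before the loop, as the patch does */
            fprintf(stderr, "too many entries, clamping to %d\n", MAX_LEVELS);
            entries = MAX_LEVELS;
        }
        for (unsigned int i = 0; i < entries; i++)
            levels[i] = (int)i;       /* never writes past the array now */

        printf("kept %u levels (last=%d)\n", entries, levels[entries - 1]);
        return 0;
    }
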
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 8021888..144f79a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -881,8 +881,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
#ifdef __BIG_ENDIAN
/* Put the card in BE mode if it's not */
- if (nv_rd32(dev, NV03_PMC_BOOT_1))
- nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+ if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
DRM_MEMORYBARRIER();
#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 74a3f68..08da478 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *evo = dispc->sync;
int ret;
- ret = RING_SPACE(evo, 24);
+ ret = RING_SPACE(evo, chan ? 25 : 27);
if (unlikely(ret))
return ret;
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* queue the flip on the crtc's "display sync" channel */
BEGIN_RING(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
- BEGIN_RING(evo, 0, 0x0084, 5);
- OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ if (chan) {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000100);
+ } else {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000010);
+ /* allows gamma somehow, PDISP will bitch at you if
+ * you don't wait for vblank before changing this..
+ */
+ BEGIN_RING(evo, 0, 0x00e0, 1);
+ OUT_RING (evo, 0x40000000);
+ }
+ BEGIN_RING(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 49611e2..1b50ad8 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
// ucConfig
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 84a69e7..9541995 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
}
+ if (radeon_encoder_is_dp_bridge(encoder)) {
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
+ } else
+ args.v3.sInput.ucExtTransmitterID = 0;
+
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 98ea597..12d2fdc 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,7 +88,8 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
- u32 temp, toffset, actual_temp = 0;
+ u32 temp, toffset;
+ int actual_temp = 0;
if (rdev->family == CHIP_JUNIPER) {
toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
@@ -139,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if (voltage->type == VOLTAGE_SW) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->vddci == 0xff01)
+ return;
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
rdev->pm.current_vddci = voltage->vddci;
@@ -2006,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |= (3 << 0);
break;
}
- /* num banks is 8 on all fusion asics */
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 8 << 4;
+ rdev->config.evergreen.tile_config |= 1 << 4;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2694,28 +2701,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
int evergreen_irq_process(struct radeon_device *rdev)
{
- u32 wptr = evergreen_get_ih_wptr(rdev);
- u32 rptr = rdev->ih.rptr;
+ u32 wptr;
+ u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
- spin_lock_irqsave(&rdev->ih.lock, flags);
+ wptr = evergreen_get_ih_wptr(rdev);
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
- if (rdev->shutdown) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-
restart_ih:
/* display interrupts */
evergreen_irq_ack(rdev);
@@ -2944,7 +2948,7 @@ restart_ih:
radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
- DRM_DEBUG("IH: CP EOP\n");
+ DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 2fef9de..686f9dc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -63,7 +63,7 @@ struct r100_cs_track {
unsigned num_arrays;
unsigned max_indx;
unsigned color_channel_mask;
- struct r100_cs_track_array arrays[11];
+ struct r100_cs_track_array arrays[16];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
struct r100_cs_track_cb aa;
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d74d4d7..f79d2cc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -590,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
@@ -3294,27 +3297,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
int r600_irq_process(struct radeon_device *rdev)
{
- u32 wptr = r600_get_ih_wptr(rdev);
- u32 rptr = rdev->ih.rptr;
+ u32 wptr;
+ u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
+ wptr = r600_get_ih_wptr(rdev);
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
- if (rdev->shutdown) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
restart_ih:
/* display interrupts */
@@ -3444,7 +3446,7 @@ restart_ih:
radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
- DRM_DEBUG("IH: CP EOP\n");
+ DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ba643b5..ef0e0e0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -165,6 +165,7 @@ struct radeon_clock {
uint32_t default_sclk;
uint32_t default_dispclk;
uint32_t dp_extclk;
+ uint32_t max_pixel_clock;
};
/*
@@ -178,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 9bd162f..b244962 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -938,6 +938,13 @@ static struct radeon_asic cayman_asic = {
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
+
+ /* set the number of crtcs */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ rdev->num_crtc = 1;
+ else
+ rdev->num_crtc = 2;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1017,6 +1024,11 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CEDAR)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
break;
case CHIP_PALM:
@@ -1027,10 +1039,17 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CAICOS)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
rdev->asic = &btc_asic;
break;
case CHIP_CAYMAN:
rdev->asic = &cayman_asic;
+ /* set num crtcs */
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
@@ -1042,18 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic->set_memory_clock = NULL;
}
- /* set the number of crtcs */
- if (rdev->flags & RADEON_SINGLE_CRTC)
- rdev->num_crtc = 1;
- else {
- if (ASIC_IS_DCE41(rdev))
- rdev->num_crtc = 2;
- else if (ASIC_IS_DCE4(rdev))
- rdev->num_crtc = 6;
- else
- rdev->num_crtc = 2;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 90dfb2b..bf2b615 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1246,6 +1246,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
}
*dcpll = *p1pll;
+ rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+ if (rdev->clock.max_pixel_clock == 0)
+ rdev->clock.max_pixel_clock = 40000;
+
return true;
}
@@ -2316,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->r600.usVDDC);
}
+ /* patch up vddc if necessary */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+ u16 vddc;
+
+ if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+ }
+
if (rdev->flags & RADEON_IS_IGP) {
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2603,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage_level == 0xff01)
+ return;
+
switch (crev) {
case 1:
args.v1.ucVoltageType = voltage_type;
@@ -2622,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+ u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 1aba85c..3fc5fa1 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
- int size = 64 * 1024;
+ int size = 256 * 1024;
int i;
if (!radeon_atrm_supported(rdev->pdev))
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 5249af8..2d48e7a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
if (p1pll->reference_div < 2)
p1pll->reference_div = 12;
- p2pll->reference_div = p1pll->reference_div;
+ p2pll->reference_div = p1pll->reference_div;
/* These aren't in the device-tree */
if (rdev->family >= CHIP_R420) {
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
p2pll->pll_out_min = 12500;
p2pll->pll_out_max = 35000;
}
+ /* not sure what the max should be in all cases */
+ rdev->clock.max_pixel_clock = 35000;
spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
spll->reference_div = mpll->reference_div =
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
else
rdev->clock.default_sclk =
radeon_legacy_get_engine_clock(rdev);
-
+
val = of_get_property(dp, "ATY,MCLK", NULL);
if (val && *val)
rdev->clock.default_mclk = (*val) / 10;
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
radeon_legacy_get_memory_clock(rdev);
DRM_INFO("Using device-tree clock info\n");
-
+
return true;
}
#else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5b991f7..e459467 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -866,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
rdev->clock.default_sclk = sclk;
rdev->clock.default_mclk = mclk;
+ if (RBIOS32(pll_info + 0x16))
+ rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+ else
+ rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
+
return true;
}
return false;
@@ -1548,10 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
- } else if ((rdev->pdev->device == 0x4150) &&
+ } else if ((of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3")) &&
+ (rdev->pdev->device == 0x4150) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4150)) {
- /* Mac G5 9600 */
+ /* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee1dccb..cbfca3a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,6 +44,8 @@ extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -626,8 +628,14 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
/* XXX check mode bandwidth */
- /* XXX verify against max DAC output frequency */
+
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -830,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
drm_get_connector_name(connector));
+ /* rs690 seems to have a problem with connectors not existing and always
+ * returning a block of 0's. If we see this, just stop polling on this output */
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ ret = connector_status_disconnected;
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+ radeon_connector->ddc_bus = NULL;
+ }
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1015,6 +1030,11 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
} else
return MODE_CLOCK_HIGH;
}
+
+ /* check against the max pixel clock */
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -1052,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int ret;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- struct drm_encoder *encoder;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
if (!radeon_dig_connector->edp_on)
@@ -1067,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
ATOM_TRANSMITTER_ACTION_POWER_OFF);
if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
@@ -1091,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
- } else
+ } else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
ret = radeon_ddc_get_modes(radeon_connector);
+ }
return ret;
}
@@ -1176,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1203,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1217,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
}
+
+ if ((ret == connector_status_disconnected) &&
+ radeon_connector->dac_load_detect) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ if (encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
}
radeon_connector_update_scratch_regs(connector, ret);
@@ -1231,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1241,7 +1284,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- /* AVIVO hardware supports downscaling modes larger than the panel
+ /* AVIVO hardware supports downscaling modes larger than the panel
* to the panel size, but I'm not sure this is desirable.
*/
if ((mode->hdisplay > native_mode->hdisplay) ||
@@ -1390,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
default:
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1411,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e680501..7cfaa7e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -215,6 +215,8 @@ int radeon_wb_init(struct radeon_device *rdev)
return r;
}
+ /* clear wb memory */
+ memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 03f124d..b293487 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
if (ASIC_IS_DCE3(rdev) &&
- (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ radeon_encoder_is_dp_bridge(encoder))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
+ /* DVO is always DVO */
+ if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+ return ATOM_ENCODER_MODE_DVO;
+
connector = radeon_get_connector_for_encoder(encoder);
- if (!connector) {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- return ATOM_ENCODER_MODE_DVI;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- default:
- return ATOM_ENCODER_MODE_CRT;
- }
- }
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
@@ -1094,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
break;
}
- if (is_dp)
+ if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ args.v2.acConfig.fDPConnector = 1;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
@@ -1435,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1526,26 +1527,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
if (ext_encoder) {
- int action;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
- if (ASIC_IS_DCE41(rdev))
- action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
- else
- action = ATOM_ENABLE;
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev))
- action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
- else
- action = ATOM_DISABLE;
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
- atombios_external_encoder_setup(encoder, ext_encoder, action);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -2004,6 +2008,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
return connector_status_disconnected;
}
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ u32 bios_0_scratch;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return connector_status_unknown;
+
+ if (!ext_encoder)
+ return connector_status_unknown;
+
+ if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+ return connector_status_unknown;
+
+ /* load detect on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+ if (ext_encoder)
+ /* ddc_setup on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2167,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
- /* no detect for TMDS/LVDS yet */
+ .detect = radeon_atom_dig_detect,
};
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f82294..021d2b6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
#include "radeon.h"
#include "radeon_trace.h"
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+{
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+ } else
+ WREG32(rdev->fence_drv.scratch_reg, seq);
+}
+
+static u32 radeon_fence_read(struct radeon_device *rdev)
+{
+ u32 seq;
+
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+ } else
+ seq = RREG32(rdev->fence_drv.scratch_reg);
+ return seq;
+}
+
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready) {
+ if (!rdev->cp.ready)
/* FIXME: cp is not running assume everythings is done right
* away
*/
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- } else
+ radeon_fence_write(rdev, fence->seq);
+ else
radeon_fence_ring_emit(rdev, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
- } else
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ seq = radeon_fence_read(rdev);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ radeon_fence_write(rdev, fence->seq);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
- WREG32(rdev->fence_drv.scratch_reg, 0);
+ radeon_fence_write(rdev, 0);
atomic_set(&rdev->fence_drv.seq, 0);
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct radeon_fence *fence;
seq_printf(m, "Last signaled fence 0x%08X\n",
- RREG32(rdev->fence_drv.scratch_reg));
+ radeon_fence_read(rdev));
if (!list_empty(&rdev->fence_drv.emited)) {
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 977a341..6df4e3c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -483,6 +483,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ef8a5bab..6f508ff 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bf5f83e..cb1ee4e 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -647,9 +647,6 @@ int savage_driver_firstopen(struct drm_device *dev)
ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
&dev_priv->aperture);
- if (ret)
- return ret;
-
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90e23e0..58c271e 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,6 +31,7 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
- from_page = read_mapping_page(swap_space, i, NULL);
+ from_page = shmem_read_mapping_page(swap_space, i);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = read_mapping_page(swap_space, i, NULL);
+ to_page = shmem_read_mapping_page(swap_space, i);
if (unlikely(IS_ERR(to_page))) {
ret = PTR_ERR(to_page);
goto out_err;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 67d2a75..36ca465 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
- 3M PCT touch screens
- ActionStar dual touch panels
- Cando dual touch panels
+ - Chunghwa panels
- CVTouch panels
- Cypress TrueTouch panels
- Elo TouchSystems IntelliTouch Plus panels
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c957c4b..6f3289a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1422,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0b374a6..a756ee6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,9 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+#define USB_VENDOR_ID_CHUNGHWAT 0x2247
+#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+
#define USB_VENDOR_ID_CIDC 0x1677
#define USB_VENDOR_ID_CMEDIA 0x0d8c
@@ -446,6 +449,7 @@
#define USB_VENDOR_ID_LUMIO 0x202e
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -622,6 +626,7 @@
#define USB_VENDOR_ID_UCLOGIC 0x5543
#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a5eda4c..0ec91c1 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- /*
- * The device reponds with 'invalid report id' when feature
- * report switching it into multitouch mode is sent to it.
- *
- * This results in -EIO from the _raw low-level transport callback,
- * but there seems to be no other way of switching the mode.
- * Thus the super-ugly hacky success check below.
- */
ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
HID_FEATURE_REPORT);
- if (ret != -EIO) {
+ if (ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ecd4d2d..62cac4d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -64,6 +64,7 @@ struct mt_device {
struct mt_class *mtclass; /* our mt device class */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
+ int last_mt_collection; /* last known mt-related collection */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_move);
/* touchscreen emulation */
set_abs(hi->input, ABS_X, field, cls->sn_move);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_GD_Y:
if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_move);
/* touchscreen emulation */
set_abs(hi->input, ABS_Y, field, cls->sn_move);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
}
return 0;
@@ -246,31 +251,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONFIDENCE:
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_TIPSWITCH:
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONTACTID:
+ if (!td->maxcontacts)
+ td->maxcontacts = MT_DEFAULT_MAXCONTACT;
input_mt_init_slots(hi->input, td->maxcontacts);
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
+ td->last_mt_collection = usage->collection_index;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TOUCH_MAJOR);
set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
cls->sn_width);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_HEIGHT:
hid_map_usage(hi, usage, bit, max,
@@ -279,8 +295,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_height);
input_set_abs_params(hi->input,
ABS_MT_ORIENTATION, 0, 1, 0, 0);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_TIPPRESSURE:
if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +310,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
set_abs(hi->input, ABS_PRESSURE, field,
cls->sn_pressure);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONTACTCOUNT:
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index)
+ td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index)
+ td->last_field_index = field->index;
return -1;
}
/* let hid-input decide for the others */
@@ -516,6 +538,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
td->mtclass = mtclass;
td->inputmode = -1;
+ td->last_mt_collection = -1;
hid_set_drvdata(hdev, td);
ret = hid_parse(hdev);
@@ -526,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto fail;
- if (!td->maxcontacts)
- td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -593,6 +613,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
+ /* Chunghwa Telecom touch panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+ USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
@@ -651,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
USB_DEVICE_ID_CRYSTALTOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+ USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -681,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
USB_DEVICE_ID_MTP)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
/* Touch International panels */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0e30b14..621959d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ff3c644..7c1188b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
usbhid_close(list->hiddev->hid);
usbhid_put_power(list->hiddev->hid);
} else {
+ mutex_unlock(&list->hiddev->existancelock);
kfree(list->hiddev);
+ kfree(list);
+ return 0;
}
}
- kfree(list);
mutex_unlock(&list->hiddev->existancelock);
+ kfree(list);
return 0;
}
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
usb_deregister_dev(usbhid->intf, &hiddev_class);
if (hiddev->open) {
+ mutex_unlock(&hiddev->existancelock);
usbhid_close(hiddev->hid);
wake_up_interruptible(&hiddev->wait);
} else {
+ mutex_unlock(&hiddev->existancelock);
kfree(hiddev);
}
- mutex_unlock(&hiddev->existancelock);
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e8920..dcb78a7 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
static void atk_init_attribute(struct device_attribute *attr, char *name,
sysfs_show_func show)
{
+ sysfs_attr_init(&attr->attr);
attr->attr.name = name;
attr->attr.mode = 0444;
attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
int err;
list_for_each_entry(s, &data->sensor_list, list) {
- sysfs_attr_init(&s->input_attr.attr);
err = device_create_file(data->hwmon_dev, &s->input_attr);
if (err)
return err;
- sysfs_attr_init(&s->label_attr.attr);
err = device_create_file(data->hwmon_dev, &s->label_attr);
if (err)
return err;
- sysfs_attr_init(&s->limit1_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit1_attr);
if (err)
return err;
- sysfs_attr_init(&s->limit2_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit2_attr);
if (err)
return err;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 85e9379..0070d54 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -97,9 +97,7 @@ struct platform_data {
struct pdev_entry {
struct list_head list;
struct platform_device *pdev;
- unsigned int cpu;
u16 phys_proc_id;
- u16 cpu_core_id;
};
static LIST_HEAD(pdev_list);
@@ -653,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
}
pdev_entry->pdev = pdev;
- pdev_entry->cpu = cpu;
pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
- pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d..1a409c5 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
/* Set up read-only sensors */
while (ro->label) {
+ sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = ro->label;
sensors->dev_attr.attr.mode = S_IRUGO;
sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
/* Set up read-write sensors */
while (rw->label) {
+ sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = rw->label;
sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eaf..41dbf81 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
else if (type == POWER_SENSOR)
sprintf(n, power_sensor_name_templates[func], "power", counter);
+ sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
data->sensors[sensor].attr[func].dev_attr.attr.name = n;
data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db..b39f52e 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr = &attrs->in;
attr->index = channel;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attrs->in_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr = &attrs->label;
attr->index = channel;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attrs->label_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = s3c_hwmon_label_show;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index dd39c1e..26c352a 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
if (taos->state != TAOS_STATE_IDLE) {
err = -ENODEV;
- dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+ dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
"pos=%d)\n", taos->state, taos->pos);
goto exit_close;
}
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
msecs_to_jiffies(250));
if (taos->state != TAOS_STATE_IDLE) {
err = -ENODEV;
- dev_err(&adapter->dev, "Echo off failed "
+ dev_err(&serio->dev, "TAOS EVM echo off failed "
"(state=%d)\n", taos->state);
goto exit_close;
}
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
err = i2c_add_adapter(adapter);
if (err)
goto exit_close;
- dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+ dev_info(&serio->dev, "Connected to TAOS EVM\n");
taos->client = taos_instantiate_device(adapter);
return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
serio_set_drvdata(serio, NULL);
kfree(taos);
- dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+ dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
}
static struct serio_device_id taos_serio_ids[] = {
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 54e1ce7..6f89536 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
- /* Read the mux register at addr to verify
- * that the mux is in fact present.
+ /* Write the mux register at addr to verify
+ * that the mux is in fact present. This also
+ * initializes the mux to disconnected state.
*/
- if (i2c_smbus_read_byte(client) < 0) {
+ if (i2c_smbus_write_byte(client, 0) < 0) {
dev_warn(&client->dev, "probe failed\n");
goto exit_free;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 144d272..04b0956 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -778,7 +778,8 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
sector_t block)
{
struct ide_cmd cmd;
- int uptodate = 0, nsectors;
+ int uptodate = 0;
+ unsigned int nsectors;
ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
rq->cmd[0], (unsigned long long)block);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd0..31fb440 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- int abort = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
+ int ret;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
attrs.next_state = C4IW_QP_STATE_CLOSING;
- abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- disconnect = 1;
+ if (ret != -ECONNRESET) {
+ peer_close_upcall(ep);
+ disconnect = 1;
+ }
break;
case ABORTING:
disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- mutex_unlock(&ep->com.mutex);
if (close) {
- if (abrupt)
- ret = abort_connection(ep, NULL, gfp);
- else
+ if (abrupt) {
+ close_complete_upcall(ep);
+ ret = send_abort(ep, NULL, gfp);
+ } else
ret = send_halfclose(ep, gfp);
if (ret)
fatal = 1;
}
+ mutex_unlock(&ep->com.mutex);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ kfree_skb(skb);
+ return 0;
+ }
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ sched(dev, skb);
+ return 0;
+}
+
/*
* Most upcalls from the T4 Core go to sched() to
* schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_ESTABLISH] = sched,
[CPL_PEER_CLOSE] = sched,
[CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_ABORT_REQ_RSS] = peer_abort_intr,
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8ad..1720dc7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
+ while (hwentries > T4_MAX_IQ_SIZE) {
+ memsize -= PAGE_SIZE;
+ hwentries = memsize / sizeof *chp->cq.queue;
+ }
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe4..0347eed 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
+ mhp->attr.len = length;
err = register_mem(rhp, php, mhp, shift);
if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b0..a41578e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
c4iw_get_ep(&qhp->ep->com);
}
ret = rdma_fini(rhp, qhp, ep);
- if (ret) {
- if (internal)
- c4iw_get_ep(&qhp->ep->com);
+ if (ret)
goto err;
- }
break;
case C4IW_QP_STATE_TERMINATE:
set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68..8ec5237 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
+#define IB_7322_LT_STATE_CFGWAITENH 0x13
/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITENH] =
+ IB_PHYSPORTSTATE_CFG_WAIT_ENH,
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
break;
}
- if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+ ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+ ibclt == IB_7322_LT_STATE_LINKUP) &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
- ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
- if (enable)
+ u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+ if (enable && !state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+ ppd->dd->unit, ppd->port);
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- else
+ } else if (!enable && state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+ ppd->dd->unit, ppd->port);
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ }
qib_write_kreg_port(ppd, krp_serdesctrl, data);
}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56..6ae57d2 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
* states, or if it transitions from any of the up (INIT or better)
* states into any of the down states (except link recovery), then
* call the chip-specific code to take appropriate actions.
+ *
+ * ppd->lflags could be 0 if this is the first time the interrupt
+ * handler has been called but the link is already up.
*/
- if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ if (lstate >= IB_PORT_INIT &&
+ (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
ltstate == IB_PHYSPORTSTATE_LINKUP) {
/* transitioned to UP */
if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921e..4cf2534 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
rcu_read_unlock();
- wake_up_interruptible(&evdev->wait);
+ if (type == EV_SYN && code == SYN_REPORT)
+ wake_up_interruptible(&evdev->wait);
}
static int evdev_fasync(int fd, struct file *file, int on)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7..da38d97 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
- clamp(mt_slots, 2, 32);
+ mt_slots = clamp(mt_slots, 2, 32);
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
mt_slots = 2;
} else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743..33d0bdc 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
#endif
}
}
+ input_sync(omap_kp_data->input);
memcpy(keypad_state, new_state, sizeof(keypad_state));
if (key_down) {
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98..6876700 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
[SH_KEYSC_MODE_3] = { 2, 4, 7 },
[SH_KEYSC_MODE_4] = { 3, 6, 6 },
[SH_KEYSC_MODE_5] = { 4, 6, 7 },
- [SH_KEYSC_MODE_6] = { 5, 7, 7 },
+ [SH_KEYSC_MODE_6] = { 5, 8, 8 },
};
struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033..0110b5a 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
if (size == 0)
size = xres ? : 1;
- clamp(value, min, max);
+ value = clamp(value, min, max);
mousedev->packet.x = ((value - min) * xres) / size;
mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
if (size == 0)
size = yres ? : 1;
- clamp(value, min, max);
+ value = clamp(value, min, max);
mousedev->packet.y = yres - ((value - min) * yres) / size;
mousedev->packet.abs_event = 1;
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638..e35058b 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
if (!cs || !try_module_get(cs->driver->owner))
return -ENODEV;
- if (mutex_lock_interruptible(&cs->mutex))
+ if (mutex_lock_interruptible(&cs->mutex)) {
+ module_put(cs->driver->owner);
return -ERESTARTSYS;
+ }
tty->driver_data = cs;
++cs->open_count;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 3ccbff1..71a8eb6 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
sizeof(struct ph_info_dch) + dch->dev.nrbchan *
sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+ kfree(phi);
}
/*
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23f0d5e..713d43b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,3 +1,10 @@
+config LEDS_GPIO_REGISTER
+ bool
+ help
+ This option provides the function gpio_led_register_device.
+ As this function is used by arch code it must not be compiled as a
+ module.
+
menuconfig NEW_LEDS
bool "LED Support"
help
@@ -7,22 +14,14 @@ menuconfig NEW_LEDS
This is not related to standard keyboard LEDs which are controlled
via the input system.
+if NEW_LEDS
+
config LEDS_CLASS
bool "LED Class Support"
- depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
-config LEDS_GPIO_REGISTER
- bool
- help
- This option provides the function gpio_led_register_device.
- As this function is used by arch code it must not be compiled as a
- module.
-
-if NEW_LEDS
-
comment "LED drivers"
config LEDS_88PM860X
@@ -391,6 +390,7 @@ config LEDS_NETXBIG
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
+ depends on LEDS_CLASS
depends on MFD_ASIC3
default y
help
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index c0cff64..cc1dc48 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
&lp5521_led_attribute_group);
}
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
struct i2c_client *client,
int chan, struct lp5521_platform_data *pdata)
{
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
return 0;
}
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5521_chip *chip;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e19fed2..5971e309 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
return 0;
}
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
int chan, struct lp5523_platform_data *pdata)
{
char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
static struct i2c_driver lp5523_driver;
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5523_chip *chip;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bd738..574b09a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
kunmap_atomic(sb, KM_USER0);
}
+/*
+ * bitmap_new_disk_sb
+ * @bitmap
+ *
+ * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
+ * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
+ * This function verifies 'bitmap_info' and populates the on-disk bitmap
+ * structure, which is to be written to disk.
+ *
+ * Returns: 0 on success, -Exxx on error
+ */
+static int bitmap_new_disk_sb(struct bitmap *bitmap)
+{
+ bitmap_super_t *sb;
+ unsigned long chunksize, daemon_sleep, write_behind;
+ int err = -EINVAL;
+
+ bitmap->sb_page = alloc_page(GFP_KERNEL);
+ if (IS_ERR(bitmap->sb_page)) {
+ err = PTR_ERR(bitmap->sb_page);
+ bitmap->sb_page = NULL;
+ return err;
+ }
+ bitmap->sb_page->index = 0;
+
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+
+ sb->magic = cpu_to_le32(BITMAP_MAGIC);
+ sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
+
+ chunksize = bitmap->mddev->bitmap_info.chunksize;
+ BUG_ON(!chunksize);
+ if (!is_power_of_2(chunksize)) {
+ kunmap_atomic(sb, KM_USER0);
+ printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ return -EINVAL;
+ }
+ sb->chunksize = cpu_to_le32(chunksize);
+
+ daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
+ if (!daemon_sleep ||
+ (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+ printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ daemon_sleep = 5 * HZ;
+ }
+ sb->daemon_sleep = cpu_to_le32(daemon_sleep);
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+ /*
+ * FIXME: write_behind for RAID1. If not specified, what
+ * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
+ */
+ write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+ if (write_behind > COUNTER_MAX)
+ write_behind = COUNTER_MAX / 2;
+ sb->write_behind = cpu_to_le32(write_behind);
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+
+ /* keep the array size field of the bitmap superblock up to date */
+ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
+
+ memcpy(sb->uuid, bitmap->mddev->uuid, 16);
+
+ bitmap->flags |= BITMAP_STALE;
+ sb->state |= cpu_to_le32(BITMAP_STALE);
+ bitmap->events_cleared = bitmap->mddev->events;
+ sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+
+ bitmap->flags |= BITMAP_HOSTENDIAN;
+ sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
+
+ kunmap_atomic(sb, KM_USER0);
+
+ return 0;
+}
+
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
reason = "unrecognized superblock version";
else if (chunksize < 512)
reason = "bitmap chunksize too small";
- else if ((1 << ffz(~chunksize)) != chunksize)
+ else if (!is_power_of_2(chunksize))
reason = "bitmap chunksize not a power of 2";
else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu/%lu pages, set %lu bits\n",
- bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+ "read %lu/%lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
return 0;
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
return 0;
}
- if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+ if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
DEFINE_WAIT(__wait);
/* note that it is safe to do the prepare_to_wait
* after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
}
- if (!success && ! (*bmc & NEEDED_MASK))
+ if (!success && !NEEDED(*bmc))
*bmc |= NEEDED_MASK;
- if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+ if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
(*bmc)--;
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
- if (!mddev->bitmap_info.external)
- err = bitmap_read_sb(bitmap);
- else {
+ if (!mddev->bitmap_info.external) {
+ /*
+ * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
+ * instructing us to create a new on-disk bitmap instance.
+ */
+ if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
+ err = bitmap_new_disk_sb(bitmap);
+ else
+ err = bitmap_read_sb(bitmap);
+ } else {
err = 0;
if (mddev->bitmap_info.chunksize == 0 ||
mddev->bitmap_info.daemon_sleep == 0)
@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunks = chunks;
bitmap->pages = pages;
bitmap->missing_pages = pages;
- bitmap->counter_bits = COUNTER_BITS;
-
- bitmap->syncchunk = ~0UL;
#ifdef INJECT_FATAL_FAULT_1
bitmap->bp = NULL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index d0aeaf4..b2a127e 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -85,7 +85,6 @@
typedef __u16 bitmap_counter_t;
#define COUNTER_BITS 16
#define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
mddev_t *mddev; /* the md device that the bitmap is for */
- int counter_bits; /* how many bits per block counter */
-
/* bitmap chunksize -- how much data does each bit represent? */
unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
unsigned long chunks; /* total number of data chunks for the array */
- /* We hold a count on the chunk currently being synced, and drop
- * it when the last block is started. If the resync is aborted
- * midway, we need to be able to drop that count, so we remember
- * the counted chunk..
- */
- unsigned long syncchunk;
-
__u64 events_cleared;
int need_sync;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aa640a8..91e31e2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
+
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
},
};
+static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ if (mddev->sync_super) {
+ mddev->sync_super(mddev, rdev);
+ return;
+ }
+
+ BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
+
+ super_types[mddev->major_version].sync_super(mddev, rdev);
+}
+
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
mdk_rdev_t *rdev, *rdev2;
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
- if (blk_get_integrity(mddev->gendisk))
- return 0; /* already registered */
+ if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+ return 0; /* shouldn't register, or already is */
list_for_each_entry(rdev, &mddev->disks, same_set) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
- super_types[mddev->major_version].
- sync_super(mddev, rdev);
+ sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
- if (rdev->mddev->pers->hot_add_disk == NULL)
+ if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- md_wakeup_thread(mddev->thread);
- md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
-
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
bitmap_destroy(mddev);
goto out;
}
+
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (!err)
+ md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return err;
}
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
- window/2,(unsigned long long) max_sectors/2);
+ printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_do_sync);
-
static int remove_and_add_spares(mddev_t *mddev)
{
mdk_rdev_t *rdev;
@@ -7072,6 +7088,7 @@ static int remove_and_add_spares(mddev_t *mddev)
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
!test_bit(Blocked, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
@@ -7157,6 +7174,9 @@ static void reap_sync_thread(mddev_t *mddev)
*/
void md_check_recovery(mddev_t *mddev)
{
+ if (mddev->suspended)
+ return;
+
if (mddev->bitmap)
bitmap_daemon_work(mddev);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0b1fd3f..1c26c7a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -124,6 +124,7 @@ struct mddev_s
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
int suspended;
atomic_t active_io;
@@ -330,6 +331,7 @@ struct mddev_s
atomic_t flush_pending;
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
+ void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d09609..f7431b6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return best_disk;
}
-static int raid1_congested(void *data, int bits)
+int md_raid1_congested(mddev_t *mddev, int bits)
{
- mddev_t *mddev = data;
conf_t *conf = mddev->private;
int i, ret = 0;
- if (mddev_congested(mddev, bits))
- return 1;
-
rcu_read_lock();
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
+ BUG_ON(!q);
+
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
*/
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL_GPL(md_raid1_congested);
+static int raid1_congested(void *data, int bits)
+{
+ mddev_t *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid1_congested(mddev, bits);
+}
static void flush_pending_writes(conf_t *conf)
{
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
return PTR_ERR(conf);
list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (!mddev->gendisk)
+ continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- mddev->queue->backing_dev_info.congested_fn = raid1_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
+ if (mddev->queue) {
+ mddev->queue->backing_dev_info.congested_fn = raid1_congested;
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ }
return md_integrity_register(mddev);
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5fc4ca1..e743a64 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -126,4 +126,6 @@ struct r1bio_s {
*/
#define R1BIO_Returned 6
+extern int md_raid1_congested(mddev_t *mddev, int bits);
+
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 346e69b..b72edf3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
- bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
+ bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
/* Find first data disk in a raid6 stripe */
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi = &sh->dev[i].req;
bi->bi_rw = rw;
- if (rw == WRITE)
+ if (rw & WRITE)
bi->bi_end_io = raid5_end_write_request;
else
bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
- if (rw == WRITE &&
+ if ((rw & WRITE) &&
test_bit(R5_ReWrite, &sh->dev[i].flags))
atomic_add(STRIPE_SECTORS,
&rdev->corrected_errors);
generic_make_request(bi);
} else {
- if (rw == WRITE)
+ if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
bio_for_each_segment(bvl, bio, i) {
- int len = bio_iovec_idx(bio, i)->bv_len;
+ int len = bvl->bv_len;
int clen;
int b_offset = 0;
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bio_iovec_idx(bio, i)->bv_offset;
- bio_page = bio_iovec_idx(bio, i)->bv_page;
+ b_offset += bvl->bv_offset;
+ bio_page = bvl->bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
printk(KERN_INFO "md/raid:%s: device %s operational as raid"
" disk %d\n",
mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
- } else
+ } else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
}
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2354336..934185c 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -25,8 +25,8 @@
#include <linux/delay.h>
#include <media/cx25840.h>
#include <linux/firmware.h>
-#include <staging/altera.h>
+#include "../../../staging/altera-stapl/altera.h"
#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-init.h"
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 200311f..e2a52e5 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -609,6 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
return ret;
}
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
static int apds990x_chip_on(struct apds990x_chip *chip)
{
int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -624,6 +625,7 @@ static int apds990x_chip_on(struct apds990x_chip *chip)
apds990x_mode_on(chip);
return 0;
}
+#endif
static int apds990x_chip_off(struct apds990x_chip *chip)
{
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d019746..2a40d0e 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
static inline bool needs_unaligned_copy(const void *ptr)
{
-#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
return ((ptr - NULL) & 3) != 0;
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index e01e08c..bc685bfc 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -174,7 +174,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
timer_nr = t < max ? (int) t : -1;
} else {
/* check if the requested timer's available */
- if (test_bit(timer_nr, mfgpt->avail))
+ if (!test_bit(timer_nr, mfgpt->avail))
timer_nr = -1;
}
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 668d41e..df03dd3 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
return IOC4_VARIANT_PCI_RT;
}
-static void __devinit
+static void
ioc4_load_modules(struct work_struct *work)
{
request_module("sgiioc4");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 81d7fa4..150cd70 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
static int lkdtm_parse_commandline(void)
{
int i;
+ unsigned long flags;
if (cpoint_count < 1 || recur_count < 1)
return -EINVAL;
+ spin_lock_irqsave(&count_lock, flags);
count = cpoint_count;
+ spin_unlock_irqrestore(&count_lock, flags);
/* No special parameters */
if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
static void lkdtm_handler(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&count_lock, flags);
count--;
printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
lkdtm_do_action(cptype);
count = cpoint_count;
}
+ spin_unlock_irqrestore(&count_lock, flags);
}
static int lkdtm_register_cpoint(enum cname which)
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index bb6f925..374dfcf 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -317,7 +317,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
* a master, channel ID address
* used to write to PTI HW.
*
- * @mc: master, channel apeture ID address to be released.
+ * @mc: master, channel aperture ID address to be released. This
+ * will de-allocate the structure via kfree().
*/
void pti_release_masterchannel(struct pti_masterchannel *mc)
{
@@ -475,8 +476,10 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
else
pti_tty_data->mc = pti_request_masterchannel(2);
- if (pti_tty_data->mc == NULL)
+ if (pti_tty_data->mc == NULL) {
+ kfree(pti_tty_data);
return -ENXIO;
+ }
tty->driver_data = pti_tty_data;
}
@@ -495,7 +498,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
if (pti_tty_data == NULL)
return;
pti_release_masterchannel(pti_tty_data->mc);
- kfree(tty->driver_data);
+ kfree(pti_tty_data);
tty->driver_data = NULL;
}
@@ -581,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
static int pti_char_release(struct inode *inode, struct file *filp)
{
pti_release_masterchannel(filp->private_data);
- kfree(filp->private_data);
+ filp->private_data = NULL;
return 0;
}
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a..42f0673 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_kfree_skb(skb);
kfree(queued_msg);
}
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index 7aded90..cfbddbe 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -845,7 +845,7 @@ err_iounmap:
err_iounmap_app:
iounmap(config->va_app_base);
err_kzalloc:
- kfree(config);
+ kfree(target);
err_rel_res:
release_mem_region(res1->start, resource_size(res1));
err_rel_res0:
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index f91f82e..54c91ff 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
pr_debug("%s: %d ", __func__, proto->chnl_id);
st_kim_ref(&st_gdata, 0);
- if (proto->chnl_id >= ST_MAX_CHANNELS) {
+ if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
pr_err(" chnl_id %d not supported", proto->chnl_id);
return -EPROTONOSUPPORT;
}
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5da93ee..38fd2f0 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
pr_err("invalid action after change remote baud command");
} else {
*ptr = *ptr + sizeof(struct bts_action) +
- ((struct bts_action *)nxt_action)->size;
+ ((struct bts_action *)cur_action)->size;
*len = *len - (sizeof(struct bts_action) +
- ((struct bts_action *)nxt_action)->size);
+ ((struct bts_action *)cur_action)->size);
/* warn user on not commenting these in firmware */
pr_warn("skipping the wait event of change remote baud");
}
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
pdev = st_get_plat_device(id);
+ if (!pdev) {
+ *core_data = NULL;
+ return;
+ }
kim_gdata = dev_get_drvdata(&pdev->dev);
*core_data = kim_gdata->core_data;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da564..f85e422 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c07322c..6413afa 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
* @mq: mmc queue
* @card: mmc card to attach this queue
* @lock: queue lock
+ * @subname: partition subname
*
* Initialise a MMC card request queue.
*/
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
mq->queue->limits.max_discard_sectors = UINT_MAX;
if (card->erased_byte == 0)
mq->queue->limits.discard_zeroes_data = 1;
- if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
- mq->queue->limits.discard_granularity =
- card->erase_size << 9;
- mq->queue->limits.discard_alignment =
- card->erase_size << 9;
- }
+ mq->queue->limits.discard_granularity = card->pref_erase << 9;
if (mmc_can_secure_erase_trim(card))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
sema_init(&mq->thread_sem, 1);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
- host->index);
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+ host->index, subname ? subname : "");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0..6223ef8 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
unsigned int bounce_sg_len;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+ const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dd..7843efe 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d0c15b..262fff0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
+ u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ *
+ * With these steps taken, mmc_select_voltage() is also required to
+ * restore the correct voltage setting of the card.
+ */
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ ret = mmc_send_io_op_cond(host, 0, &ocr);
+ if (ret)
+ goto out;
+
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+ host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+ if (!host->ocr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
+
+out:
mmc_release_host(host);
return ret;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d29b9c3..d2565df 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
out:
return ret;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5da5bea0..7721de9 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1144,9 +1144,17 @@ static int __devinit mmci_probe(struct amba_device *dev,
else if (ret != -ENOSYS)
goto err_gpio_cd;
+ /*
+ * A gpio pin that will detect cards when inserted and removed
+ * will most likely want to trigger on the edges if it is
+ * 0 when ejected and 1 when inserted (or mutatis mutandis
+ * for the inverted case) so we request triggers on both
+ * edges.
+ */
ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
- mmci_cd_irq, 0,
- DRIVER_NAME " (cd)", host);
+ mmci_cd_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ DRIVER_NAME " (cd)", host);
if (ret >= 0)
host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index e2aecb7..ab66f24 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -25,6 +25,11 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
MODULE_LICENSE("GPL");
enum {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5b2e215..dedf3da 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
return -EINVAL;
}
}
- mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
/* Allow an aux regulator */
reg = regulator_get(host->dev, "vmmc_aux");
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock(&host->irq_lock);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
+ dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
+ host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
}
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index b365429..ce500f0 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
- if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
p->pdata = NULL;
+ tmio_mmc_host_remove(host);
+
for (i = 0; i < 3; i++) {
irq = platform_get_irq(pdev, i);
if (irq >= 0)
free_irq(irq, host);
}
- tmio_mmc_host_remove(host);
clk_disable(priv->clk);
clk_put(priv->clk);
kfree(priv);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index ad6347b..0b09e82 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cbb0330..d4455ff 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
static int vub300_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{ /* NOT irq */
- struct vub300_mmc_host *vub300 = NULL;
+ struct vub300_mmc_host *vub300;
struct usb_host_interface *iface_desc;
struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_out_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_out_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_out_urb\n");
goto error0;
}
command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_res_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_res_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_res_urb\n");
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
if (!mmc) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the mmc_host\n");
+ dev_err(&udev->dev, "not enough memory for the mmc_host\n");
goto error4;
}
/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 0bb254c..33d8aad 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* 5 bytes for manuf, device and exts */
- out_be32(&lbc->fbcr, 5);
- elbc_fcm_ctrl->read_bytes = 5;
+ /* nand_get_flash_type() reads 8 bytes of entire ID string */
+ out_be32(&lbc->fbcr, 8);
+ elbc_fcm_ctrl->read_bytes = 8;
elbc_fcm_ctrl->use_mdr = 1;
elbc_fcm_ctrl->mdr = 0;
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index d84f6e8..5b73298 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
msleep(1);
- free_irq(*irqp, el2_probe_interrupt);
+ free_irq(*irqp, &seen);
if (!seen)
continue;
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
continue;
if (retval < 0)
goto err_disable;
+ break;
} while (*++irqp);
if (*irqp == 0) {
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c9217f..7b3e23f 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -50,7 +50,7 @@ static const char version[] =
#ifdef __arm__
static void write_rreg(u_long base, u_int reg, u_int val)
{
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #-4] @ NET_RDP"
:
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
static inline unsigned short read_rreg(u_long base_addr, u_int reg)
{
unsigned short v;
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"ldr%?h %0, [%2, #-4] @ NET_RDP"
: "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
static inline void write_ireg(u_long base, u_int reg, u_int val)
{
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #8] @ NET_IDP"
:
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
static inline unsigned short read_ireg(u_long base_addr, u_int reg)
{
u_short v;
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NAT_RAP\n\t"
"ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
: "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
-static inline void
+static void
am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
- __asm__ __volatile__("str%?h %2, [%0], #4"
+ asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
while (length > 8) {
- unsigned int tmp, tmp2;
- __asm__ __volatile__(
- "ldm%?ia %1!, {%2, %3}\n\t"
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldm%?ia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "str%?h %1, [%0], #4\n\t"
+ "mov%? %1, %1, lsr #16\n\t"
+ "str%?h %1, [%0], #4\n\t"
"str%?h %2, [%0], #4\n\t"
"mov%? %2, %2, lsr #16\n\t"
- "str%?h %2, [%0], #4\n\t"
- "str%?h %3, [%0], #4\n\t"
- "mov%? %3, %3, lsr #16\n\t"
- "str%?h %3, [%0], #4"
- : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
- : "0" (offset), "1" (buf));
- length -= 8;
+ "str%?h %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
}
while (length > 0) {
- __asm__ __volatile__("str%?h %2, [%0], #4"
+ asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
}
-static inline void
+static void
am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
unsigned int tmp;
- __asm__ __volatile__(
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
length -= 2;
}
while (length > 8) {
- unsigned int tmp, tmp2, tmp3;
- __asm__ __volatile__(
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
"ldr%?h %3, [%0], #4\n\t"
- "orr%? %2, %2, %3, lsl #16\n\t"
- "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %4, lsl #16\n\t"
"ldr%?h %4, [%0], #4\n\t"
"orr%? %3, %3, %4, lsl #16\n\t"
"stm%?ia %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
}
while (length > 0) {
unsigned int tmp;
- __asm__ __volatile__(
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
return errorcount;
}
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ if (addr[0] & 0x01) {
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+ }
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
static void
am79c961_init_for_open(struct net_device *dev)
{
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
unsigned long flags;
unsigned char *p;
u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i;
/*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
for (i = LADRL; i <= LADRH; i++)
- write_rreg (dev->base_addr, i, 0);
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
- i = MODE_PORT_10BT;
- if (dev->flags & IFF_PROMISC)
- i |= MODE_PROMISC;
-
- write_rreg (dev->base_addr, MODE, i);
+ write_rreg (dev->base_addr, MODE, mode);
write_rreg (dev->base_addr, POLLINT, 0);
write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
-static void am79c961_mc_hash(char *addr, unsigned short *hash)
-{
- if (addr[0] & 0x01) {
- int idx, bit;
- u32 crc;
-
- crc = ether_crc_le(ETH_ALEN, addr);
-
- idx = crc >> 30;
- bit = (crc >> 26) & 15;
-
- hash[idx] |= 1 << bit;
- }
-}
-
/*
* Set or clear promiscuous/multicast mode filter for this adapter.
*/
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
{
struct dev_priv *priv = netdev_priv(dev);
unsigned long flags;
- unsigned short multi_hash[4], mode;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i, stopped;
- mode = MODE_PORT_10BT;
-
- if (dev->flags & IFF_PROMISC) {
- mode |= MODE_PROMISC;
- } else if (dev->flags & IFF_ALLMULTI) {
- memset(multi_hash, 0xff, sizeof(multi_hash));
- } else {
- struct netdev_hw_addr *ha;
-
- memset(multi_hash, 0x00, sizeof(multi_hash));
-
- netdev_for_each_mc_addr(ha, dev)
- am79c961_mc_hash(ha->addr, multi_hash);
- }
-
spin_lock_irqsave(&priv->chip_lock, flags);
stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001..0b46b8e 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb = dev_alloc_skb(length + 2);
if (likely(skb != NULL)) {
+ struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
skb_reserve(skb, 2);
- dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+ dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+ dma_sync_single_for_device(dev->dev.parent,
+ rxd->buf_addr, length,
+ DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
@@ -348,6 +352,7 @@ poll_some_more:
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
+ struct ep93xx_tdesc *txd;
int entry;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->tx_pointer;
ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
- ep->descs->tdesc[entry].tdesc1 =
- TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+ txd = &ep->descs->tdesc[entry];
+
+ txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+ dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
- dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
- skb->len, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
spin_lock_irq(&ep->tx_pending_lock);
@@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
+ struct device *dev = ep->dev->dev.parent;
int i;
- for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->rdesc[i].buf_addr;
if (d)
- dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (ep->rx_buf[i] != NULL)
- free_page((unsigned long)ep->rx_buf[i]);
+ kfree(ep->rx_buf[i]);
}
- for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->tdesc[i].buf_addr;
if (d)
- dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
if (ep->tx_buf[i] != NULL)
- free_page((unsigned long)ep->tx_buf[i]);
+ kfree(ep->tx_buf[i]);
}
- dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+ dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
}
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
+ struct device *dev = ep->dev->dev.parent;
int i;
- ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
- &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+ ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+ &ep->descs_dma_addr, GFP_KERNEL);
if (ep->descs == NULL)
return 1;
- for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
- void *page;
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ void *buf;
dma_addr_t d;
- page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (page == NULL)
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
goto err;
- d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(NULL, d)) {
- free_page((unsigned long)page);
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
goto err;
}
- ep->rx_buf[i] = page;
+ ep->rx_buf[i] = buf;
ep->descs->rdesc[i].buf_addr = d;
ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
- ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
- ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
- ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
}
- for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
- void *page;
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ void *buf;
dma_addr_t d;
- page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (page == NULL)
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
goto err;
- d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(NULL, d)) {
- free_page((unsigned long)page);
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
goto err;
}
- ep->tx_buf[i] = page;
+ ep->tx_buf[i] = buf;
ep->descs->tdesc[i].buf_addr = d;
-
- ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
- ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
}
return 0;
@@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
}
ep = netdev_priv(dev);
ep->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba..6c019e1 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");
#if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
-# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
+# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
+# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
#else
-# define bfin_mac_alloc(dma_handle, size) \
- dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr) \
- dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+# define bfin_mac_alloc(dma_handle, size, num) \
+ dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr, num) \
+ dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
#endif
#define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
t = t->next;
}
}
- bfin_mac_free(dma_handle, tx_desc);
+ bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
}
if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
r = r->next;
}
}
- bfin_mac_free(dma_handle, rx_desc);
+ bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
}
}
@@ -126,13 +126,13 @@ static int desc_list_init(void)
#endif
tx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_tx) *
+ sizeof(struct net_dma_desc_tx),
CONFIG_BFIN_TX_DESC_NUM);
if (tx_desc == NULL)
goto init_error;
rx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_rx) *
+ sizeof(struct net_dma_desc_rx),
CONFIG_BFIN_RX_DESC_NUM);
if (rx_desc == NULL)
goto init_error;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 17b4dd9..eafe44a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -388,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
+#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -400,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
skb->priority = 1;
+
+ skb->queue_mapping = bond_queue_mapping(skb);
+
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
@@ -1292,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
goto out;
np->dev = slave->dev;
+ strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
err = __netpoll_setup(np);
if (err) {
kfree(np);
@@ -4206,6 +4212,7 @@ static inline int bond_slave_override(struct bonding *bond,
return res;
}
+
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
{
/*
@@ -4216,6 +4223,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
*/
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+ bond_queue_mapping(skb) = skb->queue_mapping;
+
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
txq -= dev->real_num_tx_queues;
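The bonding hunks above stash the queue index the stack originally chose in the first two bytes of skb->cb (via the new bond_queue_mapping() macro), so bond_select_queue() can clamp its own txq without losing that value and bond_dev_queue_xmit() can restore it before handing the skb to the slave device. A minimal userspace sketch of that stash-and-restore pattern — the struct and helper names below are made up for illustration and are not the kernel's — might look like this:

#include <stdint.h>
#include <stdio.h>

struct fake_skb {
	uint16_t queue_mapping;   /* queue chosen by the stack */
	uint8_t  cb[48];          /* per-packet scratch space, like skb->cb */
};

#define FAKE_QUEUE_MAPPING(skb) (*(uint16_t *)((skb)->cb))

static void select_queue(struct fake_skb *skb, uint16_t real_num_tx_queues)
{
	/* remember what the stack asked for before clamping it */
	FAKE_QUEUE_MAPPING(skb) = skb->queue_mapping;
	if (skb->queue_mapping >= real_num_tx_queues)
		skb->queue_mapping %= real_num_tx_queues;
}

static void queue_xmit(struct fake_skb *skb)
{
	/* restore the original mapping for the underlying device */
	skb->queue_mapping = FAKE_QUEUE_MAPPING(skb);
	printf("xmit on queue %u\n", (unsigned)skb->queue_mapping);
}

int main(void)
{
	struct fake_skb skb = { .queue_mapping = 7 };

	select_queue(&skb, 4);   /* the bond itself has only 4 tx queues */
	queue_xmit(&skb);        /* prints the original queue, 7 */
	return 0;
}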
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c445457..23179db 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
/* Check CRC */
crc = ~ether_crc_le (256 - 4, sromdata);
- if (psrom->crc != crc) {
+ if (psrom->crc != cpu_to_le32(crc)) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n",
dev->name);
return -1;
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45..7583a95 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
- fpi->dpram_offset = cpm_dpalloc(128, 8);
+ fpi->dpram_offset = cpm_dpalloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ff60b23..2dfcc80 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -10,7 +10,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -476,9 +476,6 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
-unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
-
void lock_rx_qs(struct gfar_private *priv)
{
int i = 0x0;
@@ -868,28 +865,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
rqfar--;
rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
- ftp_rqfpr[rqfar] = rqfpr;
- ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_NOMATCH;
- ftp_rqfpr[rqfar] = rqfpr;
- ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
rqfpr = class;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
rqfpr = class;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
return rqfar;
@@ -904,8 +901,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Default rule */
rqfcr = RQFCR_CMP_MATCH;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@@ -921,8 +918,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Rest are masked rules */
rqfcr = RQFCR_CMP_NOMATCH;
for (i = 0; i < rqfar; i++) {
- ftp_rqfcr[i] = rqfcr;
- ftp_rqfpr[i] = rqfpr;
+ priv->ftp_rqfcr[i] = rqfcr;
+ priv->ftp_rqfpr[i] = rqfpr;
gfar_write_filer(priv, i, rqfcr, rqfpr);
}
}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fc86f51..ba36dc7 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -1107,10 +1107,12 @@ struct gfar_private {
/* HW time stamping enabled flag */
int hwts_rx_en;
int hwts_tx_en;
+
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
};
-extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
static inline int gfar_has_errata(struct gfar_private *priv,
enum gfar_errata err)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 493d743..239e333 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
*
* This software may be used and distributed according to
* the terms of the GNU Public License, Version 2, incorporated herein
@@ -609,15 +609,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -626,16 +626,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -643,8 +643,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -652,8 +652,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -661,8 +661,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -670,8 +670,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -705,12 +705,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
- local_rqfpr[j] = ftp_rqfpr[i];
- local_rqfcr[j] = ftp_rqfcr[i];
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+ if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
RQFCR_CLE |RQFCR_AND)) &&
- (ftp_rqfpr[i] == cmp_rqfpr))
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
* if it was already programmed, we need to overwrite these rules
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
- if ((ftp_rqfcr[l] & RQFCR_CLE) &&
- !(ftp_rqfcr[l] & RQFCR_AND)) {
- ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
- ftp_rqfpr[l] = FPR_FILER_MASK;
- gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
break;
}
- if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+ if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ (priv->ftp_rqfcr[l] & RQFCR_AND))
continue;
else {
- local_rqfpr[j] = ftp_rqfpr[l];
- local_rqfcr[j] = ftp_rqfcr[l];
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
j--;
}
}
@@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
/* Write back the popped out rules again */
for (k = j+1; k < MAX_FILER_IDX; k++) {
- ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
- ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f..c3ecb11 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
lp->txrcommit++;
- spin_unlock_irqrestore(&lp->lock, flags);
- /* Update statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
drop:
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7..a900d5b 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
}
/* Initialise a single lance board at the given DIO device */
-static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
+static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 18fccf9..2c28621 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2373,6 +2373,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+ /* The i350 cannot do RSS and SR-IOV at the same time */
+ if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
+ adapter->rss_queues = 1;
/*
* if rss_queues > 4 or vfs are going to be allocated with rss_queues
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b644383..c0788a3 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netxen_tso_check(netdev, tx_ring, first_desc, skb);
- netxen_nic_update_cmd_producer(adapter, tx_ring);
-
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
return NETDEV_TX_OK;
drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4..a702443 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
config BCM63XX_PHY
tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+ depends on BCM63XX
---help---
Currently supports the 6348 and 6358 PHYs.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522..2cd8dc5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
/* time stamping methods */
-static void decode_evnt(struct dp83640_private *dp83640,
- struct phy_txts *phy_txts, u16 ests)
+static int decode_evnt(struct dp83640_private *dp83640,
+ void *data, u16 ests)
{
+ struct phy_txts *phy_txts;
struct ptp_clock_event event;
int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
+ u16 ext_status = 0;
+
+ if (ests & MULT_EVNT) {
+ ext_status = *(u16 *) data;
+ data += sizeof(ext_status);
+ }
+
+ phy_txts = data;
switch (words) { /* fall through in every case */
case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
event.timestamp = phy2txts(&dp83640->edata);
ptp_clock_event(dp83640->clock->ptp_clock, &event);
+
+ words = ext_status ? words + 2 : words + 1;
+ return words * sizeof(u16);
}
static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
- phy_txts = (struct phy_txts *) ptr;
- decode_evnt(dp83640, phy_txts, ests);
- size = sizeof(*phy_txts);
+ size = decode_evnt(dp83640, ptr, ests);
} else {
size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
- /* Let the stack drop this frame. */
- return false;
+ kfree_skb(skb);
+ return true;
}
SKB_PTP_TYPE(skb) = type;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index a1b82c9..c554a39 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
#define PUT_BYTE(ap, buf, c, islcp) do { \
if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
*buf++ = PPP_ESCAPE; \
- *buf++ = c ^ 0x20; \
+ *buf++ = c ^ PPP_TRANS; \
} else \
*buf++ = c; \
} while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
sp = skb_put(skb, n);
memcpy(sp, buf, n);
if (ap->state & SC_ESCAPE) {
- sp[0] ^= 0x20;
+ sp[0] ^= PPP_TRANS;
ap->state &= ~SC_ESCAPE;
}
}
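For context on the PPP_TRANS change above: PPP async framing escapes control characters and any byte flagged in the async control-character map (xaccm) by emitting PPP_ESCAPE (0x7d) followed by the byte XORed with PPP_TRANS (0x20), and the receiver undoes the XOR; the patch simply replaces the literal 0x20 with the named constant on both paths. A standalone sketch of the transmit-side stuffing, assuming only the standard constants, could be:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define PPP_ESCAPE 0x7d
#define PPP_TRANS  0x20

/* escape one byte into out[], returning how many bytes were written */
static size_t ppp_stuff_byte(uint8_t c, int islcp, const uint32_t xaccm[8],
			     uint8_t *out)
{
	if ((islcp && c < 0x20) || (xaccm[c >> 5] & (1u << (c & 0x1f)))) {
		out[0] = PPP_ESCAPE;
		out[1] = c ^ PPP_TRANS;	/* receiver XORs with PPP_TRANS again */
		return 2;
	}
	out[0] = c;
	return 1;
}

int main(void)
{
	/* default map: escape all control characters 0x00-0x1f */
	const uint32_t xaccm[8] = { 0xffffffff, 0, 0, 0, 0, 0, 0, 0 };
	uint8_t out[2];
	size_t n = ppp_stuff_byte(0x11 /* XON */, 0, xaccm, out);

	printf("wrote %zu byte(s): %02x %02x\n",
	       n, out[0], n > 1 ? out[1] : 0);
	return 0;
}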
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 89f7540..5f597ca5 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += length;
stats->tx_packets++;
dev->trans_start = jiffies;
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e965661..a5d9fbf 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
for (loop = 0; loop < que->no_ops; loop++) {
QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
+ addr = que->read_addr;
for (i = 0; i < cnt; i++) {
QLCNIC_RD_DUMP_REG(addr, base, &data);
*buffer++ = cpu_to_le32(data);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 3ab7d2c..0f6af5c 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ pbuf->skb = NULL;
}
static inline void
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef1ce2e..05d8178 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
*
* (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
- static const struct {
+ static const struct rtl_mac_info {
u32 mask;
u32 val;
int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
/* Catch-all */
{ 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
- }, *p = mac_info;
+ };
+ const struct rtl_mac_info *p = mac_info;
u32 reg;
reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
- static const struct {
+ static const struct rtl_cfg2_info {
u32 mac_version;
u32 clk;
u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- }, *p = cfg2_info;
+ };
+ const struct rtl_cfg2_info *p = cfg2_info;
unsigned int i;
u32 clk;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index dc4805f..f628574 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
{ .compatible = "smsc,lan91c94", },
{ .compatible = "smsc,lan91c111", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, smc91x_match);
+#else
+#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
-#ifdef CONFIG_OF
.of_match_table = smc91x_match,
-#endif
},
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 74e9405..5235f48 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
-
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tun_poll_controller(struct net_device *dev)
+{
+ /*
+ * Tun only receives frames when:
+ * 1) the char device endpoint gets data from user space
+ * 2) the tun socket gets a sendmsg call from user space
+ * Since both of those are synchronous operations, we are guaranteed
+ * never to have pending data when we poll for it,
+ * so there's nothing to do here but return.
+ * We need this though so netpoll recognizes us as an interface that
+ * supports polling, which enables bridge devices in virt setups to
+ * still use netconsole.
+ */
+ return;
+}
+#endif
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tun_poll_controller,
+#endif
};
static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_multicast_list = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tun_poll_controller,
+#endif
};
/* Initialize net device. */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f911..84d4608 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
router with USB ethernet port. This driver is for routers only,
it will not work with ADSL modems (use cxacru driver instead).
+config USB_NET_KALMIA
+ tristate "Samsung Kalmia based LTE USB modem"
+ depends on USB_USBNET
+ help
+ Choose this option if you have a Samsung Kalmia based USB modem,
+ such as the Samsung GT-B3730.
+
+ To compile this driver as a module, choose M here: the
+ module will be called kalmia.
+
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5..c203fa2 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
+obj-$(CONFIG_USB_NET_KALMIA) += kalmia.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 0000000..d965fb1
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
+/*
+ * USB network interface driver for Samsung Kalmia based LTE USB modems such as
+ * the Samsung GT-B3730 and GT-B3710.
+ *
+ * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
+ *
+ * Sponsored by Quicklink Video Distribution Services Ltd.
+ *
+ * Based on the cdc_eem module.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/gfp.h>
+
+/*
+ * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
+ * handled by the "option" module and an ethernet data port handled by this
+ * module.
+ *
+ * The stick must first be switched into modem mode by usb_modeswitch
+ * or a similar tool. The module then sends the modem two initialization
+ * packets, whose reply provides the MAC address of the device. User space can
+ * then connect the modem using AT commands through the ACM port and use
+ * DHCP on the network interface exposed by this module. Network packets are
+ * sent to and from the modem in a proprietary format discovered by watching
+ * the behavior of the Windows driver for the modem.
+ *
+ * More information about the use of the modem is available in usb_modeswitch
+ * forum and the project page:
+ *
+ * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
+ * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE */
+
+#define KALMIA_HEADER_LENGTH 6
+#define KALMIA_ALIGN_SIZE 4
+#define KALMIA_USB_TIMEOUT 10000
+
+/*-------------------------------------------------------------------------*/
+
+static int
+kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
+ u8 *buffer, u8 expected_len)
+{
+ int act_len;
+ int status;
+
+ netdev_dbg(dev->net, "Sending init packet");
+
+ status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
+ init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
+ if (status != 0) {
+ netdev_err(dev->net,
+ "Error sending init packet. Status %i, length %i\n",
+ status, act_len);
+ return status;
+ }
+ else if (act_len != init_msg_len) {
+ netdev_err(dev->net,
+ "Did not send all of init packet. Bytes sent: %i",
+ act_len);
+ }
+ else {
+ netdev_dbg(dev->net, "Successfully sent init packet.");
+ }
+
+ status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
+ buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
+
+ if (status != 0)
+ netdev_err(dev->net,
+ "Error receiving init result. Status %i, length %i\n",
+ status, act_len);
+ else if (act_len != expected_len)
+ netdev_err(dev->net, "Unexpected init result length: %i\n",
+ act_len);
+
+ return status;
+}
+
+static int
+kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+{
+ char init_msg_1[] =
+ { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x00 };
+ char init_msg_2[] =
+ { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
+ 0x00, 0x00 };
+ char receive_buf[28];
+ int status;
+
+ status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
+ / sizeof(init_msg_1[0]), receive_buf, 24);
+ if (status != 0)
+ return status;
+
+ status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
+ / sizeof(init_msg_2[0]), receive_buf, 28);
+ if (status != 0)
+ return status;
+
+ memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
+
+ return status;
+}
+
+static int
+kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u8 status;
+ u8 ethernet_addr[ETH_ALEN];
+
+ /* Don't bind to AT command interface */
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ return -EINVAL;
+
+ dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
+ dev->status = NULL;
+
+ dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
+ dev->hard_mtu = 1400;
+ dev->rx_urb_size = dev->hard_mtu * 10; // found to be optimal in testing
+
+ status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
+
+ if (status < 0) {
+ usb_set_intfdata(intf, NULL);
+ usb_driver_release_interface(driver_of(intf), intf);
+ return status;
+ }
+
+ memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+ memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
+
+ return status;
+}
+
+static struct sk_buff *
+kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb2 = NULL;
+ u16 content_len;
+ unsigned char *header_start;
+ unsigned char ether_type_1, ether_type_2;
+ u8 remainder, padlen = 0;
+
+ if (!skb_cloned(skb)) {
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+
+ if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
+ >= KALMIA_HEADER_LENGTH))
+ goto done;
+
+ if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
+ + KALMIA_ALIGN_SIZE)) {
+ skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
+ skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ goto done;
+ }
+ }
+
+ skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
+ KALMIA_ALIGN_SIZE, flags);
+ if (!skb2)
+ return NULL;
+
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+
+ done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
+ ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
+ ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
+
+ netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
+ ether_type_2);
+
+ /* According to empirical data observed for data packets */
+ header_start[0] = 0x57;
+ header_start[1] = 0x44;
+ content_len = skb->len - KALMIA_HEADER_LENGTH;
+ header_start[2] = (content_len & 0xff); /* low byte */
+ header_start[3] = (content_len >> 8); /* high byte */
+
+ header_start[4] = ether_type_1;
+ header_start[5] = ether_type_2;
+
+ /* Align to 4 bytes by padding with zeros */
+ remainder = skb->len % KALMIA_ALIGN_SIZE;
+ if (remainder > 0) {
+ padlen = KALMIA_ALIGN_SIZE - remainder;
+ memset(skb_put(skb, padlen), 0, padlen);
+ }
+
+ netdev_dbg(
+ dev->net,
+ "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
+ content_len, padlen, header_start[0], header_start[1],
+ header_start[2], header_start[3], header_start[4],
+ header_start[5]);
+
+ return skb;
+}
+
+static int
+kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ /*
+ * Our task here is to strip off framing, leaving skb with one
+ * data frame for the usbnet framework code to process.
+ */
+ const u8 HEADER_END_OF_USB_PACKET[] =
+ { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
+ const u8 EXPECTED_UNKNOWN_HEADER_1[] =
+ { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
+ const u8 EXPECTED_UNKNOWN_HEADER_2[] =
+ { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
+ u8 i = 0;
+
+ /* incomplete header? */
+ if (skb->len < KALMIA_HEADER_LENGTH)
+ return 0;
+
+ do {
+ struct sk_buff *skb2 = NULL;
+ u8 *header_start;
+ u16 usb_packet_length, ether_packet_length;
+ int is_last;
+
+ header_start = skb->data;
+
+ if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
+ if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
+ sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
+ header_start, EXPECTED_UNKNOWN_HEADER_2,
+ sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
+ netdev_dbg(
+ dev->net,
+ "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ }
+ else {
+ netdev_err(
+ dev->net,
+ "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ return 0;
+ }
+ }
+ else
+ netdev_dbg(
+ dev->net,
+ "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1], header_start[2],
+ header_start[3], header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+
+ /* subtract start header and end header */
+ usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
+ ether_packet_length = header_start[2] + (header_start[3] << 8);
+ skb_pull(skb, KALMIA_HEADER_LENGTH);
+
+ /* Some small packets miss the end marker */
+ if (usb_packet_length < ether_packet_length) {
+ ether_packet_length = usb_packet_length
+ + KALMIA_HEADER_LENGTH;
+ is_last = true;
+ }
+ else {
+ netdev_dbg(dev->net, "Correct package length #%i", i
+ + 1);
+
+ is_last = (memcmp(skb->data + ether_packet_length,
+ HEADER_END_OF_USB_PACKET,
+ sizeof(HEADER_END_OF_USB_PACKET)) == 0);
+ if (!is_last) {
+ header_start = skb->data + ether_packet_length;
+ netdev_dbg(
+ dev->net,
+ "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ }
+ }
+
+ if (is_last) {
+ skb2 = skb;
+ }
+ else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ return 0;
+ }
+
+ skb_trim(skb2, ether_packet_length);
+
+ if (is_last) {
+ return 1;
+ }
+ else {
+ usbnet_skb_return(dev, skb2);
+ skb_pull(skb, ether_packet_length);
+ }
+
+ i++;
+ }
+ while (skb->len);
+
+ return 1;
+}
+
+static const struct driver_info kalmia_info = {
+ .description = "Samsung Kalmia LTE USB dongle",
+ .flags = FLAG_WWAN,
+ .bind = kalmia_bind,
+ .rx_fixup = kalmia_rx_fixup,
+ .tx_fixup = kalmia_tx_fixup
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct usb_device_id products[] = {
+ /* The unswitched USB ID, to get the module auto loaded: */
+ { USB_DEVICE(0x04e8, 0x689a) },
+ /* The stick switched into modem mode (by e.g. usb_modeswitch): */
+ { USB_DEVICE(0x04e8, 0x6889),
+ .driver_info = (unsigned long) &kalmia_info, },
+ { /* EMPTY == end of list */} };
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver kalmia_driver = {
+ .name = "kalmia",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume
+};
+
+static int __init kalmia_init(void)
+{
+ return usb_register(&kalmia_driver);
+}
+module_init(kalmia_init);
+
+static void __exit kalmia_exit(void)
+{
+ usb_deregister(&kalmia_driver);
+}
+module_exit(kalmia_exit);
+
+MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
+MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
+MODULE_LICENSE("GPL");
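Piecing together kalmia_tx_fixup() above: each transmitted Ethernet frame is prefixed with a 6-byte header (magic bytes 0x57 0x44, a little-endian payload length, and a copy of the frame's EtherType bytes), and the whole transfer is zero-padded to a 4-byte boundary. A hedged userspace sketch of that framing, based only on the constants visible in the driver and using hypothetical helper names, might be:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KALMIA_HEADER_LENGTH 6
#define KALMIA_ALIGN_SIZE    4

/* out must have room for header, payload and padding; returns total length */
static size_t kalmia_frame(uint8_t *out, const uint8_t *eth, size_t eth_len)
{
	size_t total = KALMIA_HEADER_LENGTH + eth_len;
	size_t pad = (KALMIA_ALIGN_SIZE - total % KALMIA_ALIGN_SIZE)
			% KALMIA_ALIGN_SIZE;

	out[0] = 0x57;			/* framing magic */
	out[1] = 0x44;
	out[2] = eth_len & 0xff;	/* payload length, low byte */
	out[3] = eth_len >> 8;		/* payload length, high byte */
	out[4] = eth[12];		/* EtherType as carried in the frame */
	out[5] = eth[13];
	memcpy(out + KALMIA_HEADER_LENGTH, eth, eth_len);
	memset(out + total, 0, pad);	/* zero-pad to a 4-byte boundary */
	return total + pad;
}

int main(void)
{
	uint8_t eth[60] = { [12] = 0x08, [13] = 0x00 };	/* IPv4 EtherType */
	uint8_t out[sizeof(eth) + KALMIA_HEADER_LENGTH + KALMIA_ALIGN_SIZE];
	size_t n = kalmia_frame(out, eth, sizeof(eth));

	printf("framed %zu bytes, header %02x:%02x len=%u\n",
	       n, (unsigned)out[0], (unsigned)out[1],
	       (unsigned)(out[2] | (out[3] << 8)));
	return 0;
}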
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e050bd6..777d1a4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
- if (err)
+ if (err) {
+ module_put(THIS_MODULE);
return err;
+ }
}
fst_openport(port);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 2204762..b6c5d37 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -72,6 +72,11 @@ static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
+static int modparam_fastchanswitch;
+module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
+MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
@@ -2686,6 +2691,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
+ bool fast;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
@@ -2705,7 +2711,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
ath5k_drain_tx_buffs(sc);
if (chan)
sc->curchan = chan;
- ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
+
+ fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
+
+ ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
skip_pcu);
if (ret) {
ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 3510de2..126a4ea 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1124,8 +1124,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Non fatal, can happen eg.
* on mode change */
ret = 0;
- } else
+ } else {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ "fast chan change successful\n");
return 0;
+ }
}
/*
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f9db25b..facc94e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (priv->switch_rxon.switch_in_progress &&
- (priv->switch_rxon.channel != ctx->staging.channel)) {
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ (priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_rxon.channel));
+ le16_to_cpu(priv->switch_channel));
iwl_legacy_chswitch_done(priv, false);
}
@@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
- return 0;
+ goto set_tx_power;
}
/* If we are currently associated and the new config requires
@@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
+set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
return rc;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
-
return iwl_legacy_send_cmd_pdu(priv,
REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 42df832..3be76bd 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (priv->switch_rxon.switch_in_progress) {
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
- mutex_lock(&priv->mutex);
- priv->switch_rxon.switch_in_progress = false;
- mutex_unlock(&priv->mutex);
- }
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
- if (priv->switch_rxon.switch_in_progress) {
- if (!le32_to_cpu(csa->status) &&
- (csa->channel == priv->switch_rxon.channel)) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
+ iwl_legacy_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, false);
}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index bc66c60..c5fbda0 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
-
+#define STATUS_CHANNEL_SWITCH_PENDING 18
static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
{
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index be0106c..ea30122 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -855,17 +855,6 @@ struct traffic_stats {
};
/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
- bool switch_in_progress;
- __le16 channel;
-};
-
-/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
*/
@@ -1115,7 +1104,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
- struct iwl_switch_rxon switch_rxon;
+ __le16 switch_channel;
/* 1st responses from initialize and runtime uCode images.
* _4965's initialize alive response contains some calibration data. */
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index af2ae22..7157ba5 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_legacy_is_associated_ctx(ctx))
goto out;
- /* channel switch in progress */
- if (priv->switch_rxon.switch_in_progress == true)
- goto out;
-
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
- if (priv->cfg->ops->lib->set_channel_switch(priv,
- ch_switch))
- priv->switch_rxon.switch_in_progress = false;
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
}
}
out:
mutex_unlock(&priv->mutex);
- if (!priv->switch_rxon.switch_in_progress)
- ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 86feec8..2282279 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -177,79 +177,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
return 0;
}
-static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- /*
- * MULTI-FIXME
- * See iwl_mac_channel_switch.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl6000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- struct iwl_host_cmd hcmd = {
- .id = REPLY_CHANNEL_SWITCH,
- .len = { sizeof(cmd), },
- .flags = CMD_SYNC,
- .data = { &cmd, },
- };
-
- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
- ch = ch_switch->channel->hw_value;
- IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
- ctx->active.channel, ch);
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the ucode channel switch time
- * adding TSF as one of the factor for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
-
- return iwl_send_cmd_sync(priv, &hcmd);
-}
-
static struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.rx_handler_setup = iwlagn_rx_handler_setup,
@@ -258,7 +185,6 @@ static struct iwl_lib_ops iwl2000_lib = {
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
- .set_channel_switch = iwl2030_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
.config = iwl2000_nic_config,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a70b8cf..f99f9c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -331,8 +331,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
@@ -425,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
};
#define IWL_DEVICE_5000 \
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fda6fe0..fbe565c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -270,8 +270,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index b12c72d..23fa93d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
__le16 fc, __le32 *tx_flags)
{
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
- info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
- return;
- }
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
- *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
- return;
- }
}
/* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a95ad84..09f679d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+ /*
+ * force CTS-to-self frame protection if RTS-CTS is not the preferred
+ * aggregation protection method
+ */
+ if (!(priv->cfg->ht_params &&
+ priv->cfg->ht_params->use_rts_for_aggregation))
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -342,10 +350,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (priv->switch_rxon.switch_in_progress &&
- (priv->switch_rxon.channel != ctx->staging.channel)) {
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ (priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_rxon.channel));
+ le16_to_cpu(priv->switch_channel));
iwl_chswitch_done(priv, false);
}
@@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active, &ctx->staging, sizeof(*active));
+ /*
+ * We do not commit tx power settings while the channel is changing;
+ * do it now that the settings have changed.
+ */
+ iwl_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index a662adc..8e1942e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2843,16 +2843,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
goto out;
- /* channel switch in progress */
- if (priv->switch_rxon.switch_in_progress == true)
- goto out;
-
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@@ -2901,15 +2898,19 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->ops->lib->set_channel_switch(priv,
- ch_switch))
- priv->switch_rxon.switch_in_progress = false;
+ ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
}
}
out:
mutex_unlock(&priv->mutex);
- if (!priv->switch_rxon.switch_in_progress)
- ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4653dea..213c80c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -843,12 +843,8 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (priv->switch_rxon.switch_in_progress) {
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
- mutex_lock(&priv->mutex);
- priv->switch_rxon.switch_in_progress = false;
- mutex_unlock(&priv->mutex);
- }
}
#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3bb76f6..a54d416 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -560,6 +560,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_DEVICE_ENABLED 18
+#define STATUS_CHANNEL_SWITCH_PENDING 19
static inline int iwl_is_ready(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 22a6e3e..c8de236 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -982,17 +982,6 @@ struct traffic_stats {
};
/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
- bool switch_in_progress;
- __le16 channel;
-};
-
-/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
*/
@@ -1287,7 +1276,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
- struct iwl_switch_rxon switch_rxon;
+ __le16 switch_channel;
struct {
u32 error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0053e9e..b774517 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,19 +250,19 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
- if (priv->switch_rxon.switch_in_progress) {
- if (!le32_to_cpu(csa->status) &&
- (csa->channel == priv->switch_rxon.channel)) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, false);
- }
+ iwl_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, false);
}
}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index a7b5cb0..224e985 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -907,7 +907,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
card = sdio_get_drvdata(func);
cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
- if (ret)
+ if (ret || !cause)
goto out;
lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
@@ -1008,10 +1008,6 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto release;
- ret = sdio_claim_irq(func, if_sdio_interrupt);
- if (ret)
- goto disable;
-
/* For 1-bit transfers to the 8686 model, we need to enable the
* interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
* bit to allow access to non-vendor registers. */
@@ -1083,6 +1079,21 @@ static int if_sdio_probe(struct sdio_func *func,
card->rx_unit = 0;
/*
+ * Set up the interrupt handler late.
+ *
+ * If we set it up earlier, the (buggy) hardware generates a spurious
+ * interrupt, even before the interrupt has been enabled, with
+ * CCCR_INTx = 0.
+ *
+ * We register the interrupt handler late so that we can handle any
+ * spurious interrupts, and also to avoid generation of that known
+ * spurious interrupt in the first place.
+ */
+ ret = sdio_claim_irq(func, if_sdio_interrupt);
+ if (ret)
+ goto disable;
+
+ /*
* Enable interrupts now that everything is set up
*/
sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831c..687c1f2 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
*(unsigned long *) wdev_priv = (unsigned long) priv;
+ set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
+
ret = wiphy_register(wdev->wiphy);
if (ret < 0) {
dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 3226118..aeac3cc 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
* faster client.
*/
#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
+#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
- MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY);
+ MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
+ MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 555180d..b704e5b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
rt2x00link_reset_tuner(rt2x00dev, false);
- if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+ if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+ test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
(ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
(conf->flags & IEEE80211_CONF_PS)) {
beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index c018d67a..939821b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
struct rt2x00_dev *rt2x00dev =
container_of(work, struct rt2x00_dev, autowakeup_work.work);
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
ERROR(rt2x00dev, "Device failed to wakeup.\n");
clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
+ cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
if (rt2x00_is_usb(rt2x00dev)) {
del_timer_sync(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
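Editorial note: the two rt2x00 hunks above follow a common pattern for deferred work that may fire after the device is gone — the work handler bails out unless the device is still marked present, and teardown cancels the work synchronously before freeing anything the handler touches. A minimal sketch of that pattern, with hypothetical driver and flag names (not the rt2x00 code itself):

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MY_DEV_PRESENT 0

struct my_dev {
	unsigned long flags;			/* bit MY_DEV_PRESENT */
	struct delayed_work wakeup_work;
};

static void my_wakeup_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  wakeup_work.work);

	/* The work may still be queued while the device is torn down. */
	if (!test_bit(MY_DEV_PRESENT, &dev->flags))
		return;

	/* ... wake the hardware ... */
}

static void my_dev_remove(struct my_dev *dev)
{
	clear_bit(MY_DEV_PRESENT, &dev->flags);
	/* Wait for a running handler and drop any pending instance
	 * before the resources it uses are freed. */
	cancel_delayed_work_sync(&dev->wakeup_work);
}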
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 89100e7..9f8ccae93 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,6 +669,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
&rx_status,
(u8 *) pdesc, skb);
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+ DBG_DMESG,
+ ("can't alloc skb for rx\n"));
+ goto done;
+ }
+
+ pci_unmap_single(rtlpci->pdev,
+ *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
false,
HW_DESC_RXPKT_LEN));
@@ -685,22 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
- /* try for new buffer - if allocation fails, drop
- * frame and reuse old buffer
- */
- new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (unlikely(!new_skb)) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
- ("can't alloc skb for rx\n"));
- goto done;
- }
- pci_unmap_single(rtlpci->pdev,
- *((dma_addr_t *) skb->cb),
- rtlpci->rxbuffersize,
- PCI_DMA_FROMDEVICE);
-
- if (!stats.crc || !stats.hwerror) {
+ if (!stats.crc && !stats.hwerror) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a3984f4..f34b5b2 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
+static void free_all_tasks(void)
+{
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+}
+
int sync_start(void)
{
int err;
@@ -148,8 +155,6 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- mutex_lock(&buffer_mutex);
-
err = task_handoff_register(&task_free_nb);
if (err)
goto out1;
@@ -166,7 +171,6 @@ int sync_start(void)
start_cpu_work();
out:
- mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -174,6 +178,7 @@ out3:
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
task_handoff_unregister(&task_free_nb);
+ free_all_tasks();
out1:
free_cpumask_var(marked_cpus);
goto out;
@@ -182,20 +187,16 @@ out1:
void sync_stop(void)
{
- /* flush buffers */
- mutex_lock(&buffer_mutex);
end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- mutex_unlock(&buffer_mutex);
- flush_cpu_work();
+ barrier(); /* do all of the above first */
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
+ flush_cpu_work();
+ free_all_tasks();
free_cpumask_var(marked_cpus);
}
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index c85f744..094308e 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_MN10300) += setup-bus.o
obj-$(CONFIG_MICROBLAZE) += setup-bus.o
obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
#
# ACPI Related PCI FW Functions
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df16..46767c5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
* system from the sleep state, we'll have to prevent it from signaling
* wake-up.
*/
- pm_runtime_resume(dev);
+ pm_runtime_get_sync(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+
+ pm_runtime_put_sync(dev);
}
#else /* !CONFIG_PM_SLEEP */
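Editorial note: the prepare/complete change above swaps a one-shot resume for a get/put pair, so the runtime-PM usage count stays elevated for the whole system-sleep transition and runtime suspend cannot race with it. A rough sketch of the pairing, assuming a generic driver's own prepare/complete callbacks rather than the PCI core:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_prepare(struct device *dev)
{
	/* Resume the device and hold a usage reference across suspend. */
	pm_runtime_get_sync(dev);
	return 0;
}

static void my_complete(struct device *dev)
{
	/* Drop the reference taken in ->prepare(). */
	pm_runtime_put_sync(dev);
}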
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 56098b3..2c5b9b9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,11 +3271,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
}
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
if (arch_set_vga_state)
return arch_set_vga_state(dev, decode, command_bits,
- change_bridge);
+ flags);
return 0;
}
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
* CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ff..bafb3c3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+ mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e8a1406..02145e9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 435002d..712baab 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index a8d03ae..e7f301da2 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -46,7 +46,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.n_ext_ts = ptp->info->n_ext_ts;
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
- err = copy_to_user((void __user *)arg, &caps, sizeof(caps));
+ if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+ err = -EFAULT;
break;
case PTP_EXTTS_REQUEST:
@@ -129,8 +130,10 @@ ssize_t ptp_read(struct posix_clock *pc,
return -ERESTARTSYS;
}
- if (ptp->defunct)
+ if (ptp->defunct) {
+ mutex_unlock(&ptp->tsevq_mux);
return -ENODEV;
+ }
spin_lock_irqsave(&queue->lock, flags);
@@ -150,10 +153,8 @@ ssize_t ptp_read(struct posix_clock *pc,
mutex_unlock(&ptp->tsevq_mux);
- if (copy_to_user(buf, event, cnt)) {
- mutex_unlock(&ptp->tsevq_mux);
+ if (copy_to_user(buf, event, cnt))
return -EFAULT;
- }
return cnt;
}
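Editorial note: all three PTP hunks above are about error paths — copy_to_user() returns the number of bytes it could NOT copy, so any non-zero result maps to -EFAULT, and every early return must drop the lock taken on entry. A minimal sketch of the resulting shape, with hypothetical structure and field names:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct my_queue {
	struct mutex lock;
	bool defunct;
	char event[16];
};

static ssize_t my_read(struct my_queue *q, char __user *buf, size_t cnt)
{
	if (cnt > sizeof(q->event))
		cnt = sizeof(q->event);

	mutex_lock(&q->lock);

	if (q->defunct) {
		/* Every early return must drop the lock it took. */
		mutex_unlock(&q->lock);
		return -ENODEV;
	}

	/* ... dequeue into q->event under the lock ... */

	mutex_unlock(&q->lock);

	/* copy_to_user() returns the number of bytes NOT copied. */
	if (copy_to_user(buf, q->event, cnt))
		return -EFAULT;

	return cnt;
}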
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f822e13..ce2aabf 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1051,4 +1051,13 @@ config RTC_DRV_TILE
Enable support for the Linux driver side of the Tilera
hypervisor's real-time clock interface.
+config RTC_DRV_PUV3
+ tristate "PKUnity v3 RTC support"
+ depends on ARCH_PUV3
+ help
+ This enables support for the RTC in the PKUnity-v3 SoCs.
+
+	  This driver can also be built as a module. If so, the module
+ will be called rtc-puv3.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 213d725..0ffefe8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
+obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index ef6316a..df68618 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -318,7 +318,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
-int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
long now, scheduled;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d0e06ed..cace6d3 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -421,7 +421,8 @@ static long rtc_dev_ioctl(struct file *file,
err = ops->ioctl(rtc->dev.parent, cmd, arg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
- }
+ } else
+ err = -ENOTTY;
break;
}
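Editorial note: the rtc-dev change above makes an unknown ioctl report -ENOTTY even when the underlying driver provides no ->ioctl hook at all, which is the conventional answer for "unsupported ioctl". A tiny sketch of that convention, using a hypothetical ops structure:

#include <linux/errno.h>

struct my_ops {
	long (*ioctl)(unsigned int cmd, unsigned long arg);
};

static long my_ioctl(struct my_ops *ops, unsigned int cmd, unsigned long arg)
{
	long err;

	if (ops->ioctl) {
		err = ops->ioctl(cmd, arg);
		/* Drivers may still say "not mine" internally. */
		if (err == -ENOIOCTLCMD)
			err = -ENOTTY;
	} else {
		/* No driver hook at all: same answer for userspace. */
		err = -ENOTTY;
	}

	return err;
}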
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4724ba3..b2005b4 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
{ "m41t00", m41t00 },
+ { "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ }
};
diff --git a/arch/unicore32/kernel/rtc.c b/drivers/rtc/rtc-puv3.c
index 8cad70b..46f14b8 100644
--- a/arch/unicore32/kernel/rtc.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -1,7 +1,5 @@
/*
- * linux/arch/unicore32/kernel/rtc.c
- *
- * Code specific to PKUnity SoC and UniCore ISA
+ * RTC driver code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
@@ -36,7 +34,6 @@ static int puv3_rtc_tickno = IRQ_RTC;
static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
/* IRQ Handlers */
-
static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
{
struct rtc_device *rdev = id;
@@ -89,7 +86,6 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
}
/* Time read/write */
-
static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
@@ -196,7 +192,6 @@ static void puv3_rtc_release(struct device *dev)
struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
/* do not clear AIE here, it may be needed for wake */
-
puv3_rtc_setpie(dev, 0);
free_irq(puv3_rtc_alarmno, rtc_dev);
free_irq(puv3_rtc_tickno, rtc_dev);
@@ -218,7 +213,6 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
} else {
/* re-enable the device, and check it is ok */
-
if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
@@ -251,7 +245,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
pr_debug("%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
-
puv3_rtc_tickno = platform_get_irq(pdev, 1);
if (puv3_rtc_tickno < 0) {
dev_err(&pdev->dev, "no irq for rtc tick\n");
@@ -268,7 +261,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
puv3_rtc_tickno, puv3_rtc_alarmno);
/* get the memory region */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "failed to get memory region resource\n");
@@ -288,7 +280,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
puv3_rtc_enable(pdev, 1);
/* register RTC and exit */
-
rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
THIS_MODULE);
@@ -315,8 +306,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
-/* RTC Power management control */
-
static int ticnt_save;
static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
@@ -368,4 +357,3 @@ module_exit(puv3_rtc_exit);
MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
MODULE_AUTHOR("Hu Dongliang");
MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index b8bc862..efd6066 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -78,7 +78,6 @@ struct vt8500_rtc {
void __iomem *regbase;
struct resource *res;
int irq_alarm;
- int irq_hz;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
};
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
if (isr & 1)
events |= RTC_AF | RTC_IRQF;
- /* Only second/minute interrupts are supported */
- if (isr & 2)
- events |= RTC_UF | RTC_IRQF;
-
rtc_update_irq(vt8500_rtc->rtc, 1, events);
return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
-
- if (enabled)
- tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
- else
- tmp &= ~VT8500_RTC_CR_SM_ENABLE;
-
- writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
- return 0;
-}
-
static const struct rtc_class_ops vt8500_rtc_ops = {
.read_time = vt8500_rtc_read_time,
.set_time = vt8500_rtc_set_time,
.read_alarm = vt8500_rtc_read_alarm,
.set_alarm = vt8500_rtc_set_alarm,
.alarm_irq_enable = vt8500_alarm_irq_enable,
- .update_irq_enable = vt8500_update_irq_enable,
};
static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_free;
}
- vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
- if (vt8500_rtc->irq_hz < 0) {
- dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
- ret = -ENXIO;
- goto err_free;
- }
-
vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
resource_size(vt8500_rtc->res),
"vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_release;
}
- /* Enable the second/minute interrupt generation and enable RTC */
- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
- | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+ /* Enable RTC and set it to 24-hour mode */
+ writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
vt8500_rtc->regbase + VT8500_RTC_CR);
vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_unmap;
}
- ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
- "rtc 1Hz", vt8500_rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get irq %i, err %d\n",
- vt8500_rtc->irq_hz, ret);
- goto err_unreg;
- }
-
ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
"rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_free_hz;
+ goto err_unreg;
}
return 0;
-err_free_hz:
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
err_unreg:
rtc_device_unregister(vt8500_rtc->rtc);
err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
rtc_device_unregister(vt8500_rtc->rtc);
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 4f64183..7e9c399 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -635,7 +635,7 @@ static void clks_core_resume(void)
struct clk *clkp;
list_for_each_entry(clkp, &clock_list, node) {
- if (likely(clkp->ops)) {
+ if (likely(clkp->usecount && clkp->ops)) {
unsigned long rate = clkp->rate;
if (likely(clkp->ops->set_parent))
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 6a9e58d..d18ce9e 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1861,6 +1861,7 @@ static int pl022_setup(struct spi_device *spi)
}
if ((clk_freq.cpsdvsr < CPSDVR_MIN)
|| (clk_freq.cpsdvsr > CPSDVR_MAX)) {
+ status = -EINVAL;
dev_err(&spi->dev,
"cpsdvsr is configured incorrectly\n");
goto err_config_params;
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f706dba..cc880c9 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
- if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+ bits_per_word = transfer->bits_per_word ? :
+ message->spi->bits_per_word ? : 8;
+ if (bits_per_word % 16 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
- } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+ } else if (bits_per_word % 8 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = transfer->len;
cr_width = 0;
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 82feb34..2a20dab 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -539,10 +539,12 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
- /* Additional always once-executed workarounds */
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
+ /* Additional PCIe always once-executed workarounds */
+ if (dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfc16f9..196284d 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,23 +24,6 @@ menuconfig STAGING
if STAGING
-config STAGING_EXCLUDE_BUILD
- bool "Exclude Staging drivers from being built" if STAGING
- default y
- ---help---
- Are you sure you really want to build the staging drivers?
- They taint your kernel, don't live up to the normal Linux
- kernel quality standards, are a bit crufty around the edges,
- and might go off and kick your dog when you aren't paying
- attention.
-
- Say N here to be able to select and build the Staging drivers.
- This option is primarily here to prevent them from being built
- when selecting 'make allyesconfg' and 'make allmodconfig' so
- don't be all that put off, your dog will be just fine.
-
-if !STAGING_EXCLUDE_BUILD
-
source "drivers/staging/tty/Kconfig"
source "drivers/staging/generic_serial/Kconfig"
@@ -177,5 +160,4 @@ source "drivers/staging/mei/Kconfig"
source "drivers/staging/nvec/Kconfig"
-endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/altera-stapl/altera-jtag.c b/drivers/staging/altera-stapl/altera-jtag.c
index 8763088..8b1620b 100644
--- a/drivers/staging/altera-stapl/altera-jtag.c
+++ b/drivers/staging/altera-stapl/altera-jtag.c
@@ -26,7 +26,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 05aad35..9cd5e76 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -28,7 +28,7 @@
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/include/staging/altera.h b/drivers/staging/altera-stapl/altera.h
index 94c0c61..94c0c61 100644
--- a/include/staging/altera.h
+++ b/drivers/staging/altera-stapl/altera.h
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index 1f15e1f..afd6cc1 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -1,6 +1,7 @@
config ATH6K_LEGACY
tristate "Atheros AR6003 support (non mac80211)"
depends on MMC && WLAN
+ depends on CFG80211
select WIRELESS_EXT
select WEXT_PRIV
help
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index 77dfb40..d3a774d 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -870,7 +870,8 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
if(ar->scan_request)
{
/* Translate data to cfg80211 mgmt format */
- wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
+ if (ar->arWmi)
+ wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
cfg80211_scan_done(ar->scan_request,
((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
diff --git a/drivers/staging/brcm80211/Kconfig b/drivers/staging/brcm80211/Kconfig
index f4cf9b2..379cf16 100644
--- a/drivers/staging/brcm80211/Kconfig
+++ b/drivers/staging/brcm80211/Kconfig
@@ -7,6 +7,7 @@ config BRCMSMAC
default n
depends on PCI
depends on WLAN && MAC80211
+ depends on X86 || MIPS
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
default n
depends on MMC
depends on WLAN && CFG80211
+ depends on X86 || MIPS
select BRCMUTIL
select FW_LOADER
select WIRELESS_EXT
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 929ceaf..15e1b05 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -64,8 +64,6 @@ wl_iw_extra_params_t g_wl_iw_params;
extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
u32 reason, char *stringBuf, uint buflen);
-uint wl_msg_level = WL_ERROR_VAL;
-
#define MAX_WLIW_IOCTL_LEN 1024
#ifdef CONFIG_WIRELESS_EXT
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 1502d80..20008a4 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,6 +2,7 @@ config COMEDI
tristate "Data acquisition support (comedi)"
default N
depends on m
+ depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
---help---
Enable support a wide range of data acquisition devices
for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
config COMEDI_DT282X
tristate "Data Translation DT2821 series and DT-EZ ISA card support"
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
depends on COMEDI_NI_COMMON
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
config COMEDI_ADDI_APCI_035
tristate "ADDI-DATA APCI_035 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
config COMEDI_ADDI_APCI_1500
tristate "ADDI-DATA APCI_1500 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
config COMEDI_ADDI_APCI_1516
tristate "ADDI-DATA APCI_1516 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
config COMEDI_ADDI_APCI_1564
tristate "ADDI-DATA APCI_1564 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
config COMEDI_ADDI_APCI_16XX
tristate "ADDI-DATA APCI_16xx support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
config COMEDI_ADDI_APCI_2016
tristate "ADDI-DATA APCI_2016 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
config COMEDI_ADDI_APCI_2032
tristate "ADDI-DATA APCI_2032 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
config COMEDI_ADDI_APCI_2200
tristate "ADDI-DATA APCI_2200 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
config COMEDI_ADDI_APCI_3001
tristate "ADDI-DATA APCI_3001 support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
config COMEDI_ADDI_APCI_3120
tristate "ADDI-DATA APCI_3520 support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
config COMEDI_ADDI_APCI_3501
tristate "ADDI-DATA APCI_3501 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
config COMEDI_ADDI_APCI_3XXX
tristate "ADDI-DATA APCI_3xxx support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1287,6 +1308,7 @@ config COMEDI_NI_LABPC
depends on COMEDI_MITE
select COMEDI_8255
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for National Instruments Lab-PC and compatibles
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 1c45c11..aa87b1b 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -542,6 +542,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
unsigned long irqflags;
int ret = -ENOMEM;
uint32_t tt_pages;
+ struct drm_connector *connector;
+ struct psb_intel_output *psb_intel_output;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
@@ -663,7 +665,18 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_kms_helper_poll_init(dev);
}
- ret = psb_backlight_init(dev);
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ psb_intel_output = to_psb_intel_output(connector);
+
+ switch (psb_intel_output->type) {
+ case INTEL_OUTPUT_LVDS:
+ ret = psb_backlight_init(dev);
+ break;
+ }
+ }
+
if (ret)
return ret;
#if 0
diff --git a/drivers/staging/gma500/psb_fb.c b/drivers/staging/gma500/psb_fb.c
index 99c03a2..084c36b 100644
--- a/drivers/staging/gma500/psb_fb.c
+++ b/drivers/staging/gma500/psb_fb.c
@@ -441,6 +441,16 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->screen_size = size;
memset(info->screen_base, 0, size);
+ if (dev_priv->pg->stolen_size) {
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
+ }
+
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/staging/gma500/psb_intel_bios.c b/drivers/staging/gma500/psb_intel_bios.c
index 48ac8ba..417965d 100644
--- a/drivers/staging/gma500/psb_intel_bios.c
+++ b/drivers/staging/gma500/psb_intel_bios.c
@@ -154,10 +154,15 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
- dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
-
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+ } else {
+ DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
+ dev_priv->lvds_vbt = 0;
+ kfree(panel_fixed_mode);
+ }
return;
}
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index f96d5b5..d329635 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -4,7 +4,7 @@
menuconfig IIO
tristate "Industrial I/O support"
- depends on !S390
+ depends on GENERIC_HARDIRQS
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index 0b9b854..4cc1a5b 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -81,7 +81,6 @@ struct adis16201_state {
int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16201_scan {
ADIS16201_SCAN_SUPPLY,
ADIS16201_SCAN_ACC_X,
@@ -92,6 +91,7 @@ enum adis16201_scan {
ADIS16201_SCAN_INCLI_Y,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16201_remove_trigger(struct iio_dev *indio_dev);
int adis16201_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 8bb8ce5..175e21b 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -76,7 +76,6 @@ struct adis16203_state {
int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16203_scan {
ADIS16203_SCAN_SUPPLY,
ADIS16203_SCAN_AUX_ADC,
@@ -85,6 +84,7 @@ enum adis16203_scan {
ADIS16203_SCAN_INCLI_Y,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16203_remove_trigger(struct iio_dev *indio_dev);
int adis16203_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
index 5310a42..1690c0d 100644
--- a/drivers/staging/iio/accel/adis16204.h
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -84,7 +84,6 @@ struct adis16204_state {
int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16204_scan {
ADIS16204_SCAN_SUPPLY,
ADIS16204_SCAN_ACC_X,
@@ -93,6 +92,7 @@ enum adis16204_scan {
ADIS16204_SCAN_TEMP,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16204_remove_trigger(struct iio_dev *indio_dev);
int adis16204_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 58d08db..3153cbe 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -121,8 +121,6 @@ struct adis16209_state {
int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
-
#define ADIS16209_SCAN_SUPPLY 0
#define ADIS16209_SCAN_ACC_X 1
#define ADIS16209_SCAN_ACC_Y 2
@@ -132,6 +130,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16209_SCAN_INCLI_Y 6
#define ADIS16209_SCAN_ROT 7
+#ifdef CONFIG_IIO_RING_BUFFER
+
void adis16209_remove_trigger(struct iio_dev *indio_dev);
int adis16209_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index 881768d..2fe34d2 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -195,7 +195,7 @@ static const struct iio_info max517_info = {
};
static const struct iio_info max518_info = {
- .attrs = &max517_attribute_group,
+ .attrs = &max518_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 702dc98..24bf70e 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -104,7 +104,6 @@ struct adis16260_state {
int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -115,6 +114,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16260_SCAN_TEMP 3
#define ADIS16260_SCAN_ANGL 4
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16260_remove_trigger(struct iio_dev *indio_dev);
int adis16260_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index db184d1..e87715b9 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -158,7 +158,6 @@ struct adis16400_state {
int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -182,6 +181,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16300_SCAN_INCLI_X 12
#define ADIS16300_SCAN_INCLI_Y 13
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16400_remove_trigger(struct iio_dev *indio_dev);
int adis16400_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 2589a7e..3612373 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -137,13 +137,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
if (st->variant->flags & ADIS16400_NO_BURST) {
ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
if (ret < 0)
- return ret;
+ goto err;
for (; i < ring->scan_count; i++)
data[i] = *(s16 *)(st->rx + i*2);
} else {
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
- return ret;
+ goto err;
for (; i < indio_dev->ring->scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
@@ -158,9 +158,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
- kfree(data);
+ kfree(data);
return IRQ_HANDLED;
+
+err:
+ kfree(data);
+ return ret;
}
void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 6159023..d504aa2 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -294,6 +294,7 @@ struct iio_poll_func
pf->h = h;
pf->thread = thread;
pf->type = type;
+ pf->private_data = private;
return pf;
}
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 2818851..685fcf6 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -189,7 +189,7 @@ int mei_hw_init(struct mei_device *dev)
mutex_lock(&dev->device_lock);
}
- if (!err && !dev->recvd_msg) {
+ if (err <= 0 && !dev->recvd_msg) {
dev->mei_state = MEI_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed"
@@ -205,10 +205,10 @@ int mei_hw_init(struct mei_device *dev)
"host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
- if (!(dev->host_hw_state & H_RDY) != H_RDY)
+ if (!(dev->host_hw_state & H_RDY))
dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
- if (!(dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
+ if (!(dev->me_hw_state & ME_RDY_HRA))
dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
printk(KERN_ERR "mei: link layer initialization failed.\n");
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 2564b03..fff53d0 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -169,10 +169,15 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
dev->wd_stopped, 10 * HZ);
mutex_lock(&dev->device_lock);
- if (!dev->wd_stopped)
- dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n");
- else
- dev_dbg(&dev->pdev->dev, "stop wd complete.\n");
+ if (dev->wd_stopped) {
+ dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
+ ret = 0;
+ } else {
+ if (!ret)
+ ret = -ETIMEDOUT;
+ dev_warn(&dev->pdev->dev,
+ "stop wd failed to complete ret=%d.\n", ret);
+ }
if (preserve)
dev->wd_timeout = wd_timeout;
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index b053067..fe40e0b 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -2,6 +2,7 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
select I2C
+ select BACKLIGHT_CLASS_DEVICE
---help---
Add support for the OLPC XO DCON controller. This controller is
only available on OLPC platforms. Unless you have one of these
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index bddb031..cdae497 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -2328,7 +2328,7 @@ Switch_Fail:
retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5);
if (retval == STATUS_SUCCESS) {
- int func_num = (rsp[1] >> 4) && 0x07;
+ int func_num = (rsp[1] >> 4) & 0x07;
if (func_num) {
RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
chip->sd_io = 1;
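Editorial note: the one-character fix above is the classic logical-vs-bitwise AND bug — (rsp[1] >> 4) && 0x07 collapses to 0 or 1, while the intended (rsp[1] >> 4) & 0x07 extracts the three-bit function-number field. A self-contained illustration in plain user-space C (not the driver code):

#include <stdio.h>

int main(void)
{
	unsigned char rsp1 = 0x37;		/* example response byte */

	int wrong = (rsp1 >> 4) && 0x07;	/* logical AND: 0 or 1  */
	int right = (rsp1 >> 4) & 0x07;		/* bitwise AND: 0 .. 7  */

	printf("wrong=%d right=%d\n", wrong, right);	/* wrong=1 right=3 */
	return 0;
}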
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 6e99ec8..8cbea42 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -26,6 +26,8 @@
static int stub_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void stub_disconnect(struct usb_interface *interface);
+static int stub_pre_reset(struct usb_interface *interface);
+static int stub_post_reset(struct usb_interface *interface);
/*
* Define device IDs here if you want to explicitly limit exportable devices.
@@ -59,6 +61,8 @@ struct usb_driver stub_driver = {
.probe = stub_probe,
.disconnect = stub_disconnect,
.id_table = stub_table,
+ .pre_reset = stub_pre_reset,
+ .post_reset = stub_post_reset,
};
/*
@@ -541,3 +545,20 @@ static void stub_disconnect(struct usb_interface *interface)
del_match_busid((char *)udev_busid);
}
}
+
+/*
+ * Presence of pre_reset and post_reset prevents the driver from being unbound
+ * when the device is being reset
+ */
+
+int stub_pre_reset(struct usb_interface *interface)
+{
+ dev_dbg(&interface->dev, "pre_reset\n");
+ return 0;
+}
+
+int stub_post_reset(struct usb_interface *interface)
+{
+ dev_dbg(&interface->dev, "post_reset\n");
+ return 0;
+}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index a5c1fa1..bc57844 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -175,16 +175,18 @@ static int tweak_reset_device_cmd(struct urb *urb)
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
/*
- * usb_lock_device_for_reset caused a deadlock: it causes the driver
- * to unbind. In the shutdown the rx thread is signalled to shut down
- * but this thread is pending in the usb_lock_device_for_reset.
- *
- * Instead queue the reset.
- *
- * Unfortunatly an existing usbip connection will be dropped due to
- * driver unbinding.
+ * With the implementation of pre_reset and post_reset the driver no
+ * longer unbinds. This allows the use of synchronous reset.
*/
- usb_queue_reset_device(sdev->interface);
+
+	if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
+ dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+ return 0;
+ }
+ usb_reset_device(sdev->udev);
+ usb_unlock_device(sdev->udev);
+
return 0;
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dee2a2c..70c2e7f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
*/
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
TMR_LUN_RESET);
- if (!se_cmd->se_tmr_req)
+ if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus(
struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
+ int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus(
* Initialize the struct se_session pointer
*/
tl_nexus->se_sess = transport_init_session();
- if (!tl_nexus->se_sess)
+ if (IS_ERR(tl_nexus->se_sess)) {
+ ret = PTR_ERR(tl_nexus->se_sess);
goto out;
+ }
/*
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus(
out:
kfree(tl_nexus);
- return -ENOMEM;
+ return ret;
}
static int tcm_loop_drop_nexus(
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
- if (strlen(page) > TL_WWN_ADDR_LEN) {
+ if (strlen(page) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
return ERR_PTR(-EINVAL);
check_len:
- if (strlen(name) > TL_WWN_ADDR_LEN) {
+ if (strlen(name) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
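Editorial note: several hunks above replace NULL checks with IS_ERR()/PTR_ERR(), because transport_init_session() and core_tmr_alloc_req() report failure by encoding an errno into the returned pointer, so a plain !ptr test never sees the error. A minimal sketch of consuming an ERR_PTR-style API, with a hypothetical allocator name:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_session { int id; };

/* Hypothetical helper that reports failure via ERR_PTR(). */
static struct my_session *my_session_create(void)
{
	struct my_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return ERR_PTR(-ENOMEM);
	return s;
}

static int my_session_setup(struct my_session **out)
{
	struct my_session *s = my_session_create();

	/* NULL is never returned here, so check with IS_ERR(). */
	if (IS_ERR(s))
		return PTR_ERR(s);

	*out = s;
	return 0;
}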
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ee6fad9..25c1f49 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
printk(KERN_ERR "Unable to locate passed fabric name\n");
return NULL;
}
- if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+ if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!(tf))
- return ERR_PTR(-ENOMEM);
+ return NULL;
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
return -EOPNOTSUPP;
}
- if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+ if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if ((len + strlen(buf) >= PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if ((len + strlen(buf) >= PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+ if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
printk(KERN_ERR "APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(isid) > PR_REG_ISID_LEN) {
+ if (strlen(isid) >= PR_REG_ISID_LEN) {
printk(KERN_ERR "APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+ if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
printk(KERN_ERR "APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
int ret;
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
- if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+ if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
printk(KERN_ERR "Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
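Editorial note: the > to >= changes above all guard fixed-size destination buffers — a string of exactly LEN characters still needs LEN + 1 bytes once the terminating NUL is counted, so strlen(s) >= LEN is the correct "too long" test for a LEN-byte buffer. A tiny sketch of the boundary case, with a hypothetical buffer size:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8			/* hypothetical buffer size */

static int store_name(char dst[NAME_LEN], const char *src)
{
	/* NAME_LEN characters would need NAME_LEN + 1 bytes with the NUL. */
	if (strlen(src) >= NAME_LEN)
		return -1;

	strcpy(dst, src);		/* now guaranteed to fit */
	return 0;
}

int main(void)
{
	char buf[NAME_LEN];

	printf("%d\n", store_name(buf, "12345678"));	/* -1: no room for NUL */
	printf("%d\n", store_name(buf, "1234567"));	/* 0: fits */
	return 0;
}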
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8407f9c..ba698ea 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr(
&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
- dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ dev = se_lun->lun_se_dev;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr(
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
+ se_tmr->tmr_dev = dev;
spin_lock(&dev->se_tmr_lock);
list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_lun_acl *lacl;
struct se_node_acl *nacl;
- if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+ if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
TPG_TFO(tpg)->get_fabric_name());
*ret = -EOVERFLOW;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518..b662db3 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
pr_reg->pr_res_mapped_lun);
}
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
printk(KERN_ERR "Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
TPG_TFO(tpg)->tpg_get_tag(tpg),
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
printk(KERN_ERR "Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
- if (strlen(&wwn->unit_serial[0]) > 512) {
+ if (strlen(&wwn->unit_serial[0]) >= 512) {
printk(KERN_ERR "WWN value for struct se_device does not fit"
" into path buffer\n");
return -1;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 59b8b9c..179063d 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,10 +75,16 @@ void core_tmr_release_req(
{
struct se_device *dev = tmr->tmr_dev;
+ if (!dev) {
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ return;
+ }
+
spin_lock(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
- kmem_cache_free(se_tmr_req_cache, tmr);
spin_unlock(&dev->se_tmr_lock);
+
+ kmem_cache_free(se_tmr_req_cache, tmr);
}
static void core_tmr_handle_tas_abort(
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4dafeb8..4b9b716 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
-
+ unsigned long flags;
/*
* Used by struct se_node_acl's under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
if ((se_nacl)) {
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
list_del(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl->acl_sess_list.prev,
struct se_session, sess_acl_list);
}
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
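Editorial note: the switch from spin_lock_irq() to spin_lock_irqsave() above matters because this path can be entered with interrupts already disabled — the irqsave variant records the caller's interrupt state and restores exactly that state, instead of unconditionally re-enabling interrupts on unlock. A short sketch of the pattern, with hypothetical lock and list names:

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_acl {
	spinlock_t sess_lock;
	struct list_head sess_list;
};

static void my_unlink_session(struct my_acl *acl, struct list_head *node)
{
	unsigned long flags;

	/* Safe whether the caller runs with IRQs enabled or disabled. */
	spin_lock_irqsave(&acl->sess_lock, flags);
	list_del(node);
	spin_unlock_irqrestore(&acl->sess_lock, flags);
}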
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32..7b82f1b 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -144,7 +144,7 @@ enum ft_cmd_state {
*/
struct ft_cmd {
enum ft_cmd_state state;
- u16 lun; /* LUN from request */
+ u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index c056a11..b2a1067 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
- u64 lun;
-
- lun = lunp[1];
- switch (lunp[0] >> 6) {
- case 0:
- break;
- case 1:
- lun |= (lunp[0] & 0x3f) << 8;
- break;
- default:
- return -1;
- }
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -1;
- cmd->lun = lun;
- return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
struct se_queue_obj *qobj;
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
{
struct se_tmr_req *tmr;
struct fcp_cmnd *fcp;
+ struct ft_sess *sess;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
- if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
- ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- ft_sess_put(cmd->sess);
- return;
- }
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
return;
}
cmd->se_cmd.se_tmr_req = tmr;
+
+ switch (fcp->fc_tm_flags) {
+ case FCP_TMF_LUN_RESET:
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+ /*
+			 * Make sure to clean up the newly allocated TMR
+			 * request, since we could not handle it because the
+			 * LUN lookup failed.
+ */
+ FT_TM_DBG("Failed to get LUN for TMR func %d, "
+ "se_cmd %p, unpacked_lun %d\n",
+ tm_func, &cmd->se_cmd, cmd->lun);
+ ft_dump_cmd(cmd, __func__);
+ sess = cmd->sess;
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ cmd->se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ ft_sess_put(sess);
+ return;
+ }
+ break;
+ case FCP_TMF_TGT_RESET:
+ case FCP_TMF_CLR_TASK_SET:
+ case FCP_TMF_ABT_TASK_SET:
+ case FCP_TMF_CLR_ACA:
+ break;
+ default:
+ return;
+ }
transport_generic_handle_tmr(&cmd->se_cmd);
}
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
- ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0ef..8c4a240 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
/* XXX For now, initiator will retry */
if (printk_ratelimit())
printk(KERN_ERR "%s: Failed to send frame %p, "
- "xid <0x%x>, remaining <0x%x>, "
+ "xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f..7491e21 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
return NULL;
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
lport = sess->tport->lport;
port_id = sess->port_id;
if (port_id == -1) {
- mutex_lock(&ft_lport_lock);
+ mutex_unlock(&ft_lport_lock);
return;
}
FT_SESS_DBG("port_id %x\n", port_id);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 09e8c7d..19b4ae0 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -875,7 +875,8 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
*dp++ = last << 7 | first << 6 | 1; /* EA */
len--;
}
- memcpy(dp, skb_pull(dlci->skb, len), len);
+ memcpy(dp, dlci->skb->data, len);
+ skb_pull(dlci->skb, len);
__gsm_data_queue(dlci, msg);
if (last)
dlci->skb = NULL;
@@ -984,10 +985,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
- u32 modem)
+ u32 modem, int clen)
{
int mlines = 0;
- u8 brk = modem >> 6;
+ u8 brk = 0;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+ either be 2 or 3 respectively. This is specified in section
+ 5.4.6.3.7 of the 27.010 mux spec. */
+
+ if (clen == 2)
+ modem = modem & 0x7f;
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+ };
/* Flow control/ready to communicate */
if (modem & MDM_FC) {
@@ -1061,7 +1074,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
return;
}
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem);
+ gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1482,12 +1495,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
* open we shovel the bits down it, if not we drop them.
*/
-static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
struct tty_struct *tty = tty_port_tty_get(port);
unsigned int modem = 0;
+ int len = clen;
if (debug & 16)
pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1521,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
if (len == 0)
return;
}
- gsm_process_modem(tty, dlci, modem);
+ gsm_process_modem(tty, dlci, modem, clen);
/* Line state will go via DLCI 0 controls only */
case 1:
default:
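
The hunks above rely on the TS 27.010 rule that a Modem Status Command carries either one octet (V.24 signals) or two octets (V.24 signals plus a break octet), indicated by a control length of 2 or 3. A minimal standalone sketch of that decode, mirroring the logic added to gsm_process_modem(); the struct and function names below are illustrative and not part of the patch:

/* msc_sketch.c - decode a 27.010 modem status payload (illustrative names) */
#include <stdint.h>
#include <stdio.h>

struct msc_signals {
	uint8_t v24;	/* V.24 signal octet, EA bit stripped */
	uint8_t brk;	/* break octet, 0 when absent */
};

static struct msc_signals decode_modem_status(uint32_t modem, int clen)
{
	struct msc_signals s = { 0, 0 };

	if (clen == 2) {
		/* address + signal octet: no break information */
		s.v24 = modem & 0x7f;
	} else {
		/* address + signals + break: break octet sits in the low bits */
		s.brk = modem & 0x7f;
		s.v24 = (modem >> 7) & 0x7f;
	}
	return s;
}

int main(void)
{
	struct msc_signals s = decode_modem_status(0x18d, 3);

	printf("v24=0x%02x brk=0x%02x\n", s.v24, s.brk);
	return 0;
}
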
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0ad3288..c3954fb 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1815,6 +1815,7 @@ do_it_again:
/* FIXME: does n_tty_set_room need locking ? */
n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
+ BUG_ON(!tty->read_buf);
continue;
}
__set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index b40f7b9..b4129f5 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
+ uart->capabilities = uart_config[uart->port.type].flags;
uart_add_one_port(&serial8250_reg, &uart->port);
} else {
uart->port.dev = NULL;
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 4b4968a..f41b425 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
static int
pci_omegapci_setup(struct serial_private *priv,
- struct pciserial_board *board,
+ const struct pciserial_board *board,
struct uart_port *port, int idx)
{
return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +994,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int pci_eg20t_init(struct pci_dev *dev)
+{
+#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
+ return -ENODEV;
+#else
+ return 0;
+#endif
+}
+
/* This should be in linux/pci_ids.h */
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
@@ -1446,6 +1455,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.init = pci_oxsemi_tornado_init,
.setup = pci_default_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8812,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8813,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8814,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8027,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8028,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8029,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800C,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800D,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800D,
+ .init = pci_eg20t_init,
+ },
/*
* Cronyx Omega PCI (PLX-chip based)
*/
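
The EG20T/ML7213 entries above work because the 8250 PCI probe path (presumably pciserial_init_ports() in the same file) runs a matching quirk's .init hook before registering ports and gives up when it returns a negative value, so returning -ENODEV when CONFIG_SERIAL_PCH_UART is built leaves these devices for the pch_uart driver. A small standalone sketch of that lookup-and-veto pattern, with made-up structure, table and function names:

/* quirk_sketch.c - vendor/device quirk lookup with an init veto */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct quirk {
	unsigned short vendor;
	unsigned short device;
	int (*init)(void);
};

static int eg20t_init(void)
{
	/* stand-in for the CONFIG_SERIAL_PCH_UART case in the patch */
	return -ENODEV;
}

static const struct quirk quirks[] = {
	{ 0x8086, 0x8811, eg20t_init },
	{ 0x8086, 0x8812, eg20t_init },
};

static int probe_device(unsigned short vendor, unsigned short device)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->vendor == vendor && q->device == device && q->init) {
			int rc = q->init();

			if (rc < 0)
				return rc;	/* let another driver claim it */
		}
	}
	return 0;	/* continue with generic setup */
}

int main(void)
{
	printf("probe 8086:8811 -> %d\n", probe_device(0x8086, 0x8811));
	return 0;
}
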
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8dc0541..f5f6831 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -50,6 +50,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/sizes.h>
@@ -65,6 +66,30 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+
+#define UART_WA_SAVE_NR 14
+
+static void pl011_lockup_wa(unsigned long data);
+static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
+ ST_UART011_DMAWM,
+ ST_UART011_TIMEOUT,
+ ST_UART011_LCRH_RX,
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_TX,
+ UART011_IFLS,
+ ST_UART011_XFCR,
+ ST_UART011_XON1,
+ ST_UART011_XON2,
+ ST_UART011_XOFF1,
+ ST_UART011_XOFF2,
+ UART011_CR,
+ UART011_IMSC
+};
+
+static u32 uart_wa_regdata[UART_WA_SAVE_NR];
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
unsigned int ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
unsigned int lcrh_tx;
unsigned int lcrh_rx;
bool oversampling;
+ bool interrupt_may_hang; /* vendor-specific */
bool dma_threshold;
};
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
.lcrh_tx = ST_UART011_LCRH_TX,
.lcrh_rx = ST_UART011_LCRH_RX,
.oversampling = true,
+ .interrupt_may_hang = true,
.dma_threshold = true,
};
+static struct uart_amba_port *amba_ports[UART_NR];
+
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
unsigned int lcrh_rx; /* vendor-specific */
bool autorts;
char type[12];
+ bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
bool using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#endif
+/*
+ * pl011_lockup_wa
+ * This workaround breaks the deadlock that can occur after a long
+ * transfer over the UART with hardware flow control, when the UART
+ * interrupt registers can no longer be cleared and further transfers
+ * stall.
+ *
+ * In that deadlocked state the ICR is not cleared even by repeated
+ * writes, so pass_counter keeps decreasing until it reaches zero.
+ * Reaching zero is used as the trigger to run this UART_BT_WA.
+ *
+ */
+static void pl011_lockup_wa(unsigned long data)
+{
+ struct uart_amba_port *uap = amba_ports[0];
+ void __iomem *base = uap->port.membase;
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ struct tty_struct *tty = uap->port.state->port.tty;
+ int buf_empty_retries = 200;
+ int loop;
+
+ /* Stop HCI layer from submitting data for tx */
+ tty->hw_stopped = 1;
+ while (!uart_circ_empty(xmit)) {
+ if (buf_empty_retries-- == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Backup registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
+
+ /* Disable UART so that FIFO data is flushed out */
+ writew(0x00, uap->port.membase + UART011_CR);
+
+ /* Soft reset UART module */
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->reset)
+ plat->reset();
+ }
+
+ /* Restore registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ writew(uart_wa_regdata[loop] ,
+ uap->port.membase + uart_wa_reg[loop]);
+
+ /* Initialise the old status of the modem signals */
+ uap->old_status = readw(uap->port.membase + UART01x_FR) &
+ UART01x_FR_MODEM_ANY;
+
+ if (readl(base + UART011_MIS) & 0x2)
+ printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
+
+ /* Start Tx/Rx */
+ tty->hw_stopped = 0;
+}
+
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (status & UART011_TXIS)
pl011_tx_chars(uap);
- if (pass_counter-- == 0)
+ if (pass_counter-- == 0) {
+ if (uap->interrupt_may_hang)
+ tasklet_schedule(&pl011_lockup_tlet);
break;
+ }
status = readw(uap->port.membase + UART011_MIS);
} while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
return 0;
clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
* Shut down the clock producer
*/
clk_disable(uap->clk);
+
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
}
static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
uap->port.uartclk = clk_get_rate(uap->clk);
if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->fifosize = vendor->fifosize;
+ uap->interrupt_may_hang = vendor->interrupt_may_hang;
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55..c0b68b9 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
/* get overrun/fifo empty information from ier
* register */
iestat = bcm_uart_readl(port, UART_IR_REG);
+
+ if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+ unsigned int val;
+
+ /* fifo reset is required to clear
+ * interrupt */
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= UART_CTL_RSTRXFIFO_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
break;
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
if (uart_handle_sysrq_char(port, c))
continue;
- if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
- port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- }
if ((cstat & port->ignore_status_mask) == 0)
tty_insert_flip_char(tty, c, flag);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f5484..96da178 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
brd->bd_uart_offset = 0x200;
brd->bd_dividend = 921600;
- brd->re_map_membase = ioremap(brd->membase, 0x1000);
+ brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
if (!brd->re_map_membase) {
dev_err(&pdev->dev,
"card has no PCI Memory resources, "
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd2845..a764bf9 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
int ret = 0;
struct circ_buf *xmit = &max->con_xmit;
- init_waitqueue_head(wq);
pr_info(PR_FMT "start main thread\n");
do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
res = RC_TAG;
ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
if (ret < 0 || res == 0 || res == 0xffff) {
- printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+ dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
res);
ret = -ENODEV;
goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
max->con_xmit.head = 0;
max->con_xmit.tail = 0;
+ init_waitqueue_head(&max->wq);
+
max->main_thread = kthread_run(max3110_main_thread,
max, "max3110_main");
if (IS_ERR(max->main_thread)) {
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index f2cb750..4652109 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1397,6 +1397,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
int fifosize, base_baud;
int port_type;
struct pch_uart_driver_data *board;
+ const char *board_name;
board = &drv_dat[id->driver_data];
port_type = board->port_type;
@@ -1412,7 +1413,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
base_baud = 1843200; /* 1.8432MHz */
/* quirk for CM-iTC board */
- if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC"))
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_name && strstr(board_name, "CM-iTC"))
base_baud = 192000000; /* 192.0MHz */
switch (port_type) {
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index fb2619f9..dd194dc 100644
--- a/drivers/tty/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
@@ -30,7 +30,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
struct s3c2410_uartcfg *cfg = port->dev->platform_data;
unsigned long ucon = rd_regl(port, S3C2410_UCON);
- if ((cfg->clocks_size) == 1)
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
return 0;
if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +55,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
clk->divisor = 1;
- if ((cfg->clocks_size) == 1)
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
return 0;
switch (ucon & S5PV210_UCON_CLKMASK) {
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5d01d32..ef925d5 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
static int tty_ldisc_wait_idle(struct tty_struct *tty)
{
int ret;
- ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+ ret = wait_event_timeout(tty_ldisc_idle,
atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
if (ret < 0)
return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
if (IS_ERR(ld))
return -1;
+ WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
+
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a176..34e3da5 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && !intf->dev.power.in_suspend) {
+ } else if (!error && !intf->dev.power.is_prepared) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- if (!intf->dev.power.in_suspend) {
+ if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+ if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
for (i = n - 1; i >= 0; --i) {
intf = udev->actconfig->interface[i];
status = usb_suspend_interface(udev, intf, msg);
+
+ /* Ignore errors during system sleep transitions */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
if (status != 0)
break;
}
}
- if (status == 0)
+ if (status == 0) {
status = usb_suspend_device(udev, msg);
+ /* Again, ignore errors during system sleep transitions */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
+ }
+
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
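
The PM_EVENT_AUTO tests above separate runtime autosuspend, where a device that refuses to suspend should veto the whole operation, from system sleep, where the suspend is forced through regardless of per-device errors. A standalone sketch of that filtering rule; the function name is illustrative and the flag value is a local stand-in rather than the kernel's definition:

/* pm_filter_sketch.c - ignore suspend errors except for autosuspend */
#include <stdio.h>

#define PM_EVENT_AUTO	0x0400	/* illustrative value, not from the kernel headers */

static int filter_suspend_status(int status, int msg_event)
{
	/* Errors only block runtime (automatic) suspend attempts;
	 * system sleep transitions proceed anyway. */
	if (!(msg_event & PM_EVENT_AUTO))
		return 0;
	return status;
}

int main(void)
{
	printf("autosuspend: %d\n", filter_suspend_status(-16, PM_EVENT_AUTO));
	printf("system sleep: %d\n", filter_suspend_status(-16, 0));
	return 0;
}
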
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 90ae175..a428aa0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
{
struct usb_device *udev = *pdev;
int i;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!udev) {
pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+ mutex_lock(hcd->bandwidth_mutex);
usb_disable_device(udev, 0);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_hcd_synchronize_unlinks(udev);
usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
USB_DEVICE_REMOTE_WAKEUP, 0,
NULL, 0,
USB_CTRL_SET_TIMEOUT);
+
+ /* System sleep transitions should never fail */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
} else {
/* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
struct usb_device *hdev = hub->hdev;
unsigned port1;
- /* fail if children aren't already suspended */
+ /* Warn if children aren't already suspended */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_device *udev;
udev = hdev->children [port1-1];
if (udev && udev->can_submit) {
- if (!(msg.event & PM_EVENT_AUTO))
- dev_dbg(&intf->dev, "port %d nyet suspended\n",
- port1);
- return -EBUSY;
+ dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+ if (msg.event & PM_EVENT_AUTO)
+ return -EBUSY;
}
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 5701e85..64c7ab4 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1135,10 +1135,13 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
* Deallocates hcd/hardware state for the endpoints (nuking all or most
* pending urbs) and usbcore state for the interfaces, so that usbcore
* must usb_set_configuration() before any interfaces could be used.
+ *
+ * Must be called with hcd->bandwidth_mutex held.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
@@ -1172,6 +1175,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
+ if (hcd->driver->check_bandwidth) {
+ /* First pass: Cancel URBs, leave endpoint pointers intact. */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, false);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ /* Second pass: remove endpoint pointers */
+ }
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1727,6 +1740,7 @@ free_interfaces:
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
+ mutex_lock(hcd->bandwidth_mutex);
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
@@ -1739,7 +1753,6 @@ free_interfaces:
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
- mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index 98cc8a1..aa248c2 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct platform_device *pdev = to_platform_device(hcd->self.controller);
const struct platform_device_id *id;
- int hclength;
int ret;
id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
return -EINVAL;
}
- hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
switch (id->driver_data) {
case EHCI_ATH79_IP_V1:
ehci->has_synopsys_hc_bug = 1;
ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + hclength;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci,
+ ehci_readl(ehci, &ehci->caps->hc_capbase));
break;
case EHCI_ATH79_IP_V2:
hcd->has_tt = 1;
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 + hclength;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci,
+ ehci_readl(ehci, &ehci->caps->hc_capbase));
break;
default:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b435ed6..f8030ee 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
/*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
* Copyright (c) 2000-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c9e6e45..55d3d58 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
/* We need to forcefully reclaim the slot since some transfers never
return, e.g. interrupt transfers and NAKed bulk transfers. */
- if (usb_pipebulk(urb->pipe)) {
+ if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
skip_map |= (1 << qh->slot);
reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9aa10bd..f9cf3f0 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
/*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index db6f8b9..4586369 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&r8a66597->child_device);
hcd->rsrc_start = res->start;
+ hcd->has_tt = 1;
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
if (ret != 0) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0f8e1d2..fcb7f7e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
max_packet = ep->ss_ep_comp.bMaxBurst;
- if (!max_packet)
- xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
break;
case USB_SPEED_HIGH:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17541d0..cb16de2 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -29,6 +29,9 @@
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_VENDOR_ID_ETRON 0x1b6f
+#define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
static const char hcd_name[] = "xhci_hcd";
/* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 800f417..70cacbb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1733,6 +1733,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
frame->status = -EOVERFLOW;
skip_td = true;
break;
+ case COMP_DEV_ERR:
case COMP_STALL:
frame->status = -EPROTO;
skip_td = true;
@@ -1767,9 +1768,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
}
- if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
- *status = 0;
-
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
@@ -1787,8 +1785,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
- /* The transfer is partly done */
- *status = -EXDEV;
+ /* The transfer is partly done. */
frame->status = -EXDEV;
/* calc actual length */
@@ -2016,6 +2013,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
+ case COMP_DEV_ERR:
+ xhci_warn(xhci, "WARN: detect an incompatible device");
+ status = -EPROTO;
+ break;
case COMP_MISSED_INT:
/*
* When encounter missed service error, one or more isoc tds
@@ -2063,6 +2064,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
+
+ /*
+ * Skip the Force Stopped Event. The event_trb(event_dma) of the
+ * FSE is not in the current TD pointed to by ep_ring->dequeue,
+ * because the hardware dequeue pointer is still at the TRB before
+ * the current TD. That TRB may be a Link TRB or the last TRB of
+ * the previous TD. The command completion handler will take care
+ * of the rest.
+ */
+ if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+ ret = 0;
+ goto cleanup;
+ }
+
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2158,6 +2173,11 @@ cleanup:
urb->transfer_buffer_length,
status);
spin_unlock(&xhci->lock);
+ /* EHCI, UHCI, and OHCI always unconditionally set the
+ * urb->status of an isochronous endpoint to 0.
+ */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ status = 0;
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 06e7023..f5fe1ac 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -759,6 +759,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
msleep(100);
spin_lock_irq(&xhci->lock);
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ hibernated = true;
if (!hibernated) {
/* step 1: restore register */
@@ -1401,6 +1403,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
u32 added_ctxs;
unsigned int last_ctx;
u32 new_add_flags, new_drop_flags, new_slot_info;
+ struct xhci_virt_device *virt_dev;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1428,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- in_ctx = xhci->devs[udev->slot_id]->in_ctx;
- out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+ virt_dev = xhci->devs[udev->slot_id];
+ in_ctx = virt_dev->in_ctx;
+ out_ctx = virt_dev->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+
+ /* If this endpoint is already in use, and the upper layers are trying
+ * to add it again without dropping it, reject the addition.
+ */
+ if (virt_dev->eps[ep_index].ring &&
+ !(le32_to_cpu(ctrl_ctx->drop_flags) &
+ xhci_get_endpoint_flag(&ep->desc))) {
+ xhci_warn(xhci, "Trying to add endpoint 0x%x "
+ "without dropping it.\n",
+ (unsigned int) ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
*/
@@ -1445,8 +1462,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* process context, not interrupt context (or so documentation
* for usb_set_interface() and usb_set_configuration() claim).
*/
- if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_NOIO) < 0) {
+ if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
@@ -1537,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+ "configure command.\n");
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
ret = 0;
@@ -1571,6 +1592,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+ "context command.\n");
+ ret = -ENODEV;
+ break;
case COMP_MEL_ERR:
/* Max Exit Latency too large error */
dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -2853,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
dev_warn(&udev->dev, "Device not responding to set address.\n");
ret = -EPROTO;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for address "
+ "device command.\n");
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
xhci_dbg(xhci, "Successful Address Device command\n");
break;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7d1ea3b..d8bbf5c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
#define COMP_PING_ERR 20
/* Event Ring is full */
#define COMP_ER_FULL 21
+/* Incompatible Device Error */
+#define COMP_DEV_ERR 22
/* Missed Service Error - HC couldn't service an isoc ep within interval */
#define COMP_MISSED_INT 23
/* Successfully stopped command ring */
@@ -1308,6 +1310,7 @@ struct xhci_hcd {
*/
#define XHCI_EP_LIMIT_QUIRK (1 << 5)
#define XHCI_BROKEN_MSI (1 << 6)
+#define XHCI_RESET_ON_RESUME (1 << 7)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 0a50a35..6aeb363 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ /*
+ * Setting both TXPKTRDY and FLUSHFIFO makes the controller
+ * interrupt the current FIFO loading, but it does not flush
+ * the packets that are already loaded.
+ */
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
musb_writew(epio, MUSB_TXCSR, csr);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 7295e31..8b2473f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
- if (d->status != EILSEQ && d->status != -EOVERFLOW)
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
if (++qh->iso_idx >= urb->number_of_packets)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1627289..2e06b90 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -179,6 +179,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +849,8 @@ static const char *ftdi_chip_name[] = {
[FT2232C] = "FT2232C",
[FT232RL] = "FT232RL",
[FT2232H] = "FT2232H",
- [FT4232H] = "FT4232H"
+ [FT4232H] = "FT4232H",
+ [FT232H] = "FT232H"
};
@@ -1168,6 +1170,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
break;
case FT2232H: /* FT2232H chip */
case FT4232H: /* FT4232H chip */
+ case FT232H: /* FT232H chip */
if ((baud <= 12000000) & (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
@@ -1429,9 +1432,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
} else if (version < 0x600) {
/* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
- } else {
- /* Assume it's an FT232R */
+ } else if (version < 0x900) {
+ /* Assume it's an FT232RL */
priv->chip_type = FT232RL;
+ } else {
+ /* Assume it's an FT232H */
+ priv->chip_type = FT232H;
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
}
@@ -1559,7 +1565,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
priv->chip_type == FT2232C ||
priv->chip_type == FT232RL ||
priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H)) {
+ priv->chip_type == FT4232H ||
+ priv->chip_type == FT232H)) {
retval = device_create_file(&port->dev,
&dev_attr_latency_timer);
}
@@ -1580,7 +1587,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
priv->chip_type == FT2232C ||
priv->chip_type == FT232RL ||
priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H) {
+ priv->chip_type == FT4232H ||
+ priv->chip_type == FT232H) {
device_remove_file(&port->dev, &dev_attr_latency_timer);
}
}
@@ -2212,6 +2220,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
case FT232RL:
case FT2232H:
case FT4232H:
+ case FT232H:
len = 2;
break;
default:
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 213fe3d..19584fa 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
FT2232C = 4,
FT232RL = 5,
FT2232H = 6,
- FT4232H = 7
+ FT4232H = 7,
+ FT232H = 8
};
enum ftdi_sio_baudrate {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ab1fcdf..19156d1 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,6 +22,7 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
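
The detection change above keys the chip type off bcdDevice: in the tail of the ftdi_determine_type() chain, anything below 0x600 is assumed to be a BM part, 0x600 up to (but not including) 0x900 an FT232RL, and 0x900 or above the new FT232H. A standalone sketch of that tail mapping; the helper name is illustrative, the thresholds come from the hunk:

/* ftdi_type_sketch.c - map bcdDevice to a chip name (tail of the chain) */
#include <stdio.h>

static const char *ftdi_chip_from_bcd(unsigned int version)
{
	if (version < 0x600)
		return "FT232BM";	/* or FT245BM */
	if (version < 0x900)
		return "FT232RL";
	return "FT232H";
}

int main(void)
{
	printf("0x0700 -> %s\n", ftdi_chip_from_bcd(0x0700));
	printf("0x0900 -> %s\n", ftdi_chip_from_bcd(0x0900));
	return 0;
}
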
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c6d92a5..ea84456 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
}
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
+ release_firmware(fw_p);
return -ENOENT;
}
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index ebb893c..d7aaec5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -248,10 +248,6 @@ static int atyfb_sync(struct fb_info *info);
static int aty_init(struct fb_info *info);
-#ifdef CONFIG_ATARI
-static int store_video_par(char *videopar, unsigned char m64_num);
-#endif
-
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -2268,11 +2264,13 @@ error:
return;
}
+#ifdef CONFIG_PCI
static void aty_bl_exit(struct backlight_device *bd)
{
backlight_device_unregister(bd);
printk("aty: Backlight unloaded\n");
}
+#endif /* CONFIG_PCI */
#endif /* CONFIG_FB_ATY_BACKLIGHT */
@@ -2789,7 +2787,7 @@ aty_init_exit:
return ret;
}
-#ifdef CONFIG_ATARI
+#if defined(CONFIG_ATARI) && !defined(MODULE)
static int __devinit store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
@@ -2818,7 +2816,7 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
phys_vmembase[m64_num] = 0;
return -1;
}
-#endif /* CONFIG_ATARI */
+#endif /* CONFIG_ATARI && !MODULE */
/*
* Blank the display.
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c9373b..2d93c8d 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -302,6 +302,18 @@ config BACKLIGHT_ADP8860
To compile this driver as a module, choose M here: the module will
be called adp8860_bl.
+config BACKLIGHT_ADP8870
+ tristate "Backlight Driver for ADP8870 using WLED"
+ depends on BACKLIGHT_CLASS_DEVICE && I2C
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+	  If you have an LCD backlight connected to the ADP8870,
+ say Y here to enable this driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp8870_bl.
+
config BACKLIGHT_88PM860X
tristate "Backlight Driver for 88PM8606 using WLED"
depends on MFD_88PM860X
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index b9ca849..ee72adb 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
new file mode 100644
index 0000000..05a8832
--- /dev/null
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -0,0 +1,1012 @@
+/*
+ * Backlight driver for Analog Devices ADP8870 Backlight Devices
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/i2c/adp8870.h>
+#define ADP8870_EXT_FEATURES
+#define ADP8870_USE_LEDS
+
+
+#define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */
+#define ADP8870_MDCR 0x01 /* Device mode and status */
+#define ADP8870_INT_STAT 0x02 /* Interrupts status */
+#define ADP8870_INT_EN 0x03 /* Interrupts enable */
+#define ADP8870_CFGR 0x04 /* Configuration register */
+#define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */
+#define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */
+#define ADP8870_BLOFF 0x07 /* Backlight off timeout */
+#define ADP8870_BLDIM 0x08 /* Backlight dim timeout */
+#define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */
+#define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */
+#define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */
+#define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */
+#define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */
+#define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */
+#define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */
+#define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */
+#define ADP8870_BLDM4 0x11 /* Backlight (Brightness Level 4-indoor) dim current */
+#define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */
+#define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */
+#define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */
+#define ADP8870_ISCC 0x1B /* Independent sink current control register */
+#define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */
+#define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */
+#define ADP8870_ISCF 0x1E /* Independent sink current fade register */
+#define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */
+#define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */
+#define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */
+#define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */
+#define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */
+#define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */
+#define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
+#define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */
+#define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */
+#define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
+#define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */
+#define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */
+#define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */
+#define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */
+#define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */
+#define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */
+#define ADP8870_L2TRP 0x32 /* L2 comparator reference */
+#define ADP8870_L2HYS 0x33 /* L2 hysteresis */
+#define ADP8870_L3TRP 0x34 /* L3 comparator reference */
+#define ADP8870_L3HYS 0x35 /* L3 hysteresis */
+#define ADP8870_L4TRP 0x36 /* L4 comparator reference */
+#define ADP8870_L4HYS 0x37 /* L4 hysteresis */
+#define ADP8870_L5TRP 0x38 /* L5 comparator reference */
+#define ADP8870_L5HYS 0x39 /* L5 hysteresis */
+#define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */
+#define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */
+#define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */
+#define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */
+
+#define ADP8870_MANUFID 0x3 /* Analog Devices AD8870 Manufacturer and device ID */
+#define ADP8870_DEVID(x) ((x) & 0xF)
+#define ADP8870_MANID(x) ((x) >> 4)
+
+/* MDCR Device mode and status */
+#define D7ALSEN (1 << 7)
+#define INT_CFG (1 << 6)
+#define NSTBY (1 << 5)
+#define DIM_EN (1 << 4)
+#define GDWN_DIS (1 << 3)
+#define SIS_EN (1 << 2)
+#define CMP_AUTOEN (1 << 1)
+#define BLEN (1 << 0)
+
+/* ADP8870_ALS1_EN Main ALS comparator level enable */
+#define L5_EN (1 << 3)
+#define L4_EN (1 << 2)
+#define L3_EN (1 << 1)
+#define L2_EN (1 << 0)
+
+#define CFGR_BLV_SHIFT 3
+#define CFGR_BLV_MASK 0x7
+#define ADP8870_FLAG_LED_MASK 0xFF
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
+#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
+
+struct adp8870_bl {
+ struct i2c_client *client;
+ struct backlight_device *bl;
+ struct adp8870_led *led;
+ struct adp8870_backlight_platform_data *pdata;
+ struct mutex lock;
+ unsigned long cached_daylight_max;
+ int id;
+ int revid;
+ int current_brightness;
+};
+
+struct adp8870_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct i2c_client *client;
+ enum led_brightness new_brightness;
+ int id;
+ int flags;
+};
+
+static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = ret;
+ return 0;
+}
+
+
+static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret)
+ dev_err(&client->dev, "failed to write\n");
+
+ return ret;
+}
+
+static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8870_read(client, reg, &reg_val);
+
+ if (!ret && ((reg_val & bit_mask) == 0)) {
+ reg_val |= bit_mask;
+ ret = adp8870_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8870_read(client, reg, &reg_val);
+
+ if (!ret && (reg_val & bit_mask)) {
+ reg_val &= ~bit_mask;
+ ret = adp8870_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * Independent sink / LED
+ */
+#if defined(ADP8870_USE_LEDS)
+static void adp8870_led_work(struct work_struct *work)
+{
+ struct adp8870_led *led = container_of(work, struct adp8870_led, work);
+ adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
+ led->new_brightness >> 1);
+}
+
+static void adp8870_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct adp8870_led *led;
+
+ led = container_of(led_cdev, struct adp8870_led, cdev);
+ led->new_brightness = value;
+ /*
+ * Use workqueue for IO since I2C operations can sleep.
+ */
+ schedule_work(&led->work);
+}
+
+static int adp8870_led_setup(struct adp8870_led *led)
+{
+ struct i2c_client *client = led->client;
+ int ret = 0;
+
+ ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
+ if (ret)
+ return ret;
+
+ ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
+ if (ret)
+ return ret;
+
+ if (led->id > 4)
+ ret = adp8870_set_bits(client, ADP8870_ISCT1,
+ (led->flags & 0x3) << ((led->id - 5) * 2));
+ else
+ ret = adp8870_set_bits(client, ADP8870_ISCT2,
+ (led->flags & 0x3) << ((led->id - 1) * 2));
+
+ return ret;
+}
+
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ struct adp8870_led *led, *led_dat;
+ struct led_info *cur_led;
+ int ret, i;
+
+
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
+ if (ret)
+ goto err_free;
+
+ ret = adp8870_write(client, ADP8870_ISCT1,
+ (pdata->led_on_time & 0x3) << 6);
+ if (ret)
+ goto err_free;
+
+ ret = adp8870_write(client, ADP8870_ISCF,
+ FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
+ if (ret)
+ goto err_free;
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ cur_led = &pdata->leds[i];
+ led_dat = &led[i];
+
+ led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
+
+ if (led_dat->id > 7 || led_dat->id < 1) {
+ dev_err(&client->dev, "Invalid LED ID %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
+ dev_err(&client->dev, "LED %d used by Backlight\n",
+ led_dat->id);
+ goto err;
+ }
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->cdev.brightness_set = adp8870_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
+ led_dat->client = client;
+ led_dat->new_brightness = LED_OFF;
+ INIT_WORK(&led_dat->work, adp8870_led_work);
+
+ ret = led_classdev_register(&client->dev, &led_dat->cdev);
+ if (ret) {
+ dev_err(&client->dev, "failed to register LED %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ ret = adp8870_led_setup(led_dat);
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ i++;
+ goto err;
+ }
+ }
+
+ data->led = led;
+
+ return 0;
+
+ err:
+ for (i = i - 1; i >= 0; --i) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ err_free:
+ kfree(led);
+
+ return ret;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&data->led[i].cdev);
+ cancel_work_sync(&data->led[i].work);
+ }
+
+ kfree(data->led);
+ return 0;
+}
+#else
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+ return 0;
+}
+#endif
+
+static int adp8870_bl_set(struct backlight_device *bl, int brightness)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (data->pdata->en_ambl_sens) {
+ if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
+ /* Disable Ambient Light auto adjust */
+ ret = adp8870_clr_bits(client, ADP8870_MDCR,
+ CMP_AUTOEN);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
+ * restore daylight l1 sysfs brightness
+ */
+ ret = adp8870_write(client, ADP8870_BLMX1,
+ data->cached_daylight_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_set_bits(client, ADP8870_MDCR,
+ CMP_AUTOEN);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+ if (ret)
+ return ret;
+ }
+
+ if (data->current_brightness && brightness == 0)
+ ret = adp8870_set_bits(client,
+ ADP8870_MDCR, DIM_EN);
+ else if (data->current_brightness == 0 && brightness)
+ ret = adp8870_clr_bits(client,
+ ADP8870_MDCR, DIM_EN);
+
+ if (!ret)
+ data->current_brightness = brightness;
+
+ return ret;
+}
+
+static int adp8870_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return adp8870_bl_set(bl, brightness);
+}
+
+static int adp8870_bl_get_brightness(struct backlight_device *bl)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+
+ return data->current_brightness;
+}
+
+static const struct backlight_ops adp8870_bl_ops = {
+ .update_status = adp8870_bl_update_status,
+ .get_brightness = adp8870_bl_get_brightness,
+};
+
+static int adp8870_bl_setup(struct backlight_device *bl)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ struct adp8870_backlight_platform_data *pdata = data->pdata;
+ int ret = 0;
+
+ ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
+ if (ret)
+ return ret;
+
+ if (pdata->en_ambl_sens) {
+ data->cached_daylight_max = pdata->l1_daylight_max;
+ ret = adp8870_write(client, ADP8870_BLMX2,
+ pdata->l2_bright_max);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLDM2,
+ pdata->l2_bright_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX3,
+ pdata->l3_office_max);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLDM3,
+ pdata->l3_office_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX4,
+ pdata->l4_indoor_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM4,
+ pdata->l4_indor_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX5,
+ pdata->l5_dark_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM5,
+ pdata->l5_dark_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
+ L3_EN | L2_EN);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_CMP_CTL,
+ ALS_CMPR_CFG_VAL(pdata->abml_filt));
+ if (ret)
+ return ret;
+ }
+
+ ret = adp8870_write(client, ADP8870_CFGR,
+ BL_CFGR_VAL(pdata->bl_fade_law, 0));
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
+ pdata->bl_fade_out));
+ if (ret)
+ return ret;
+ /*
+ * ADP8870 Rev0 requires GDWN_DIS bit set
+ */
+
+ ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
+ (data->revid == 0 ? GDWN_DIS : 0));
+
+ return ret;
+}
+
+static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, reg, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n", reg_val);
+}
+
+static ssize_t adp8870_store(struct device *dev, const char *buf,
+ size_t count, int reg)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ adp8870_write(data->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX5);
+}
+
+static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX5);
+}
+static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
+ adp8870_bl_l5_dark_max_store);
+
+
+static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX4);
+}
+static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
+ adp8870_bl_l4_indoor_max_store);
+
+
+static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX3);
+}
+
+static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX3);
+}
+
+static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
+ adp8870_bl_l3_office_max_store);
+
+static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX2);
+}
+
+static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX2);
+}
+static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
+ adp8870_bl_l2_bright_max_store);
+
+static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
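+ /* parse straight into the cached L1 (daylight) maximum before writing BLMX1 */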
+ int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ if (ret)
+ return ret;
+
+ return adp8870_store(dev, buf, count, ADP8870_BLMX1);
+}
+static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
+ adp8870_bl_l1_daylight_max_store);
+
+static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM5);
+}
+
+static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM5);
+}
+static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
+ adp8870_bl_l5_dark_dim_store);
+
+static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM4);
+}
+static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
+ adp8870_bl_l4_indoor_dim_store);
+
+static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM3);
+}
+
+static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM3);
+}
+static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
+ adp8870_bl_l3_office_dim_store);
+
+static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM2);
+}
+
+static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM2);
+}
+static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
+ adp8870_bl_l2_bright_dim_store);
+
+static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM1);
+}
+static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
+ adp8870_bl_l1_daylight_dim_store);
+
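+/*
+ * The attributes below are only built when ADP8870_EXT_FEATURES is defined;
+ * they expose the raw ambient light reading and the comparator zone in use.
+ */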
+#ifdef ADP8870_EXT_FEATURES
+static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+ uint16_t ret_val;
+
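+ /* the 13-bit level is split across PH1LEVL (low byte) and PH1LEVH (upper 5 bits) */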
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val);
+ if (error < 0) {
+ mutex_unlock(&data->lock);
+ return error;
+ }
+ ret_val = reg_val;
+ error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ /* Return 13-bit conversion value for the first light sensor */
+ ret_val += (reg_val & 0x1F) << 8;
+
+ return sprintf(buf, "%u\n", ret_val);
+}
+static DEVICE_ATTR(ambient_light_level, 0444,
+ adp8870_bl_ambient_light_level_show, NULL);
+
+static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n",
+ ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
+}
+
+static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ uint8_t reg_val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
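+ /* zone 0 re-enables the automatic comparator; zones 1..5 force a fixed level */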
+ if (val == 0) {
+ /* Enable automatic ambient light sensing */
+ adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+ } else if ((val > 0) && (val < 6)) {
+ /* Disable automatic ambient light sensing */
+ adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+
+ /* Set user supplied ambient light zone */
+ mutex_lock(&data->lock);
+ adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+ reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
+ reg_val |= (val - 1) << CFGR_BLV_SHIFT;
+ adp8870_write(data->client, ADP8870_CFGR, reg_val);
+ mutex_unlock(&data->lock);
+ }
+
+ return count;
+}
+static DEVICE_ATTR(ambient_light_zone, 0664,
+ adp8870_bl_ambient_light_zone_show,
+ adp8870_bl_ambient_light_zone_store);
+#endif
+
+static struct attribute *adp8870_bl_attributes[] = {
+ &dev_attr_l5_dark_max.attr,
+ &dev_attr_l5_dark_dim.attr,
+ &dev_attr_l4_indoor_max.attr,
+ &dev_attr_l4_indoor_dim.attr,
+ &dev_attr_l3_office_max.attr,
+ &dev_attr_l3_office_dim.attr,
+ &dev_attr_l2_bright_max.attr,
+ &dev_attr_l2_bright_dim.attr,
+ &dev_attr_l1_daylight_max.attr,
+ &dev_attr_l1_daylight_dim.attr,
+#ifdef ADP8870_EXT_FEATURES
+ &dev_attr_ambient_light_level.attr,
+ &dev_attr_ambient_light_zone.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group adp8870_bl_attr_group = {
+ .attrs = adp8870_bl_attributes,
+};
+
+static int __devinit adp8870_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct adp8870_bl *data;
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ uint8_t reg_val;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
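+ /* read the manufacturer/device ID register and verify the part before going further */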
+ ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
+ if (ret < 0)
+ return -EIO;
+
+ if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
+ dev_err(&client->dev, "failed to probe\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->revid = ADP8870_DEVID(reg_val);
+ data->client = client;
+ data->pdata = pdata;
+ data->id = id->driver_data;
+ data->current_brightness = 0;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
+ bl = backlight_device_register(dev_driver_string(&client->dev),
+ &client->dev, data, &adp8870_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ ret = PTR_ERR(bl);
+ goto out2;
+ }
+
+ data->bl = bl;
+
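+ /* the ambient light sysfs group is only created when the platform enables the sensor */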
+ if (pdata->en_ambl_sens)
+ ret = sysfs_create_group(&bl->dev.kobj,
+ &adp8870_bl_attr_group);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to register sysfs\n");
+ goto out1;
+ }
+
+ ret = adp8870_bl_setup(bl);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ backlight_update_status(bl);
+
+ dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);
+
+ if (pdata->num_leds)
+ adp8870_led_probe(client);
+
+ return 0;
+
+out:
+ if (data->pdata->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8870_bl_attr_group);
+out1:
+ backlight_device_unregister(bl);
+out2:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit adp8870_remove(struct i2c_client *client)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+
+ adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+ if (data->led)
+ adp8870_led_remove(client);
+
+ if (data->pdata->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8870_bl_attr_group);
+
+ backlight_device_unregister(data->bl);
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
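+/*
+ * Legacy I2C PM callbacks: clearing NSTBY places the part in standby on
+ * suspend, setting it again brings it back up on resume.
+ */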
+#ifdef CONFIG_PM
+static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+ return 0;
+}
+
+static int adp8870_i2c_resume(struct i2c_client *client)
+{
+ adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
+
+ return 0;
+}
+#else
+#define adp8870_i2c_suspend NULL
+#define adp8870_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id adp8870_id[] = {
+ { "adp8870", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp8870_id);
+
+static struct i2c_driver adp8870_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = adp8870_probe,
+ .remove = __devexit_p(adp8870_remove),
+ .suspend = adp8870_i2c_suspend,
+ .resume = adp8870_i2c_resume,
+ .id_table = adp8870_id,
+};
+
+static int __init adp8870_init(void)
+{
+ return i2c_add_driver(&adp8870_driver);
+}
+module_init(adp8870_init);
+
+static void __exit adp8870_exit(void)
+{
+ i2c_del_driver(&adp8870_driver);
+}
+module_exit(adp8870_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP8870 Backlight driver");
+MODULE_ALIAS("platform:adp8870-backlight");
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 69c49df..784139a 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -541,7 +541,7 @@ static int __init efifb_init(void)
*/
ret = platform_driver_probe(&efifb_driver, efifb_probe);
if (ret) {
- platform_device_unregister(&efifb_driver);
+ platform_device_unregister(&efifb_device);
return ret;
}
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0352afa..4aecf21 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -235,13 +235,12 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct s3c_fb_win *win = info->par;
- struct s3c_fb_pd_win *windata = win->windata;
struct s3c_fb *sfb = win->parent;
dev_dbg(sfb->dev, "checking parameters\n");
- var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres);
- var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres);
+ var->xres_virtual = max(var->xres_virtual, var->xres);
+ var->yres_virtual = max(var->yres_virtual, var->yres);
if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
@@ -558,6 +557,13 @@ static int s3c_fb_set_par(struct fb_info *info)
vidosd_set_alpha(win, alpha);
vidosd_set_size(win, data);
+ /* Enable DMA channel for this window */
+ if (sfb->variant.has_shadowcon) {
+ data = readl(sfb->regs + SHADOWCON);
+ data |= SHADOWCON_CHx_ENABLE(win_no);
+ writel(data, sfb->regs + SHADOWCON);
+ }
+
data = WINCONx_ENWIN;
/* note, since we have to round up the bits-per-pixel, we end up
@@ -637,13 +643,6 @@ static int s3c_fb_set_par(struct fb_info *info)
writel(data, regs + sfb->variant.wincon + (win_no * 4));
writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
- /* Enable DMA channel for this window */
- if (sfb->variant.has_shadowcon) {
- data = readl(sfb->regs + SHADOWCON);
- data |= SHADOWCON_CHx_ENABLE(win_no);
- writel(data, sfb->regs + SHADOWCON);
- }
-
shadow_protect_win(win, 0);
return 0;
@@ -1487,11 +1486,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
- kfree(sfb);
-
pm_runtime_put_sync(sfb->dev);
pm_runtime_disable(sfb->dev);
+ kfree(sfb);
return 0;
}
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 6ae40b6..7d54e2c 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1127,23 +1127,16 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
struct fb_info *info = hdmi->info;
unsigned long parent_rate = 0, hdmi_rate;
- /* A device has been plugged in */
- pm_runtime_get_sync(hdmi->dev);
-
ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
- if (ret < 0) {
- pm_runtime_put(hdmi->dev);
+ if (ret < 0)
goto out;
- }
hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
/* Reconfigure the clock */
ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
- if (ret < 0) {
- pm_runtime_put(hdmi->dev);
+ if (ret < 0)
goto out;
- }
msleep(10);
sh_hdmi_configure(hdmi);
@@ -1191,7 +1184,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
fb_set_suspend(hdmi->info, 1);
console_unlock();
- pm_runtime_put(hdmi->dev);
}
out:
@@ -1312,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn);
pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* Product and revision IDs are 0 in sh-mobile version */
dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
@@ -1340,7 +1332,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
ecodec:
free_irq(irq, hdmi);
ereqirq:
- pm_runtime_suspend(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(hdmi->base);
emap:
@@ -1377,7 +1369,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
free_irq(irq, hdmi);
/* Wait for already scheduled work */
cancel_delayed_work_sync(&hdmi->edid_work);
- pm_runtime_suspend(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable(hdmi->hdmi_clk);
clk_put(hdmi->hdmi_clk);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 00d615d..979d6ee 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
config W1_MASTER_DS1WM
tristate "Maxim DS1WM 1-wire busmaster"
- depends on W1
+ depends on W1 && GENERIC_HARDIRQS
help
Say Y here to enable the DS1WM 1-wire driver, such as that
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 553da68..30df85d 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
+#ifdef CONFIG_SMP
struct irq_desc *desc = irq_to_desc(irq);
-#ifdef CONFIG_SMP
/* By default all event channels notify CPU#0. */
cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 65ea21a..6e8c15a 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -147,9 +147,15 @@ void __init xen_swiotlb_init(int verbose)
{
unsigned long bytes;
int rc;
-
- xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
- xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+ unsigned long nr_tbl;
+
+ nr_tbl = swioltb_nr_tbl();
+ if (nr_tbl)
+ xen_io_tlb_nslabs = nr_tbl;
+ else {
+ xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 20c106f..1b0b195 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -584,11 +584,11 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
success:
d_add(dentry, inode);
- _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
+ _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
fid.vnode,
fid.unique,
dentry->d_inode->i_ino,
- (unsigned long long)dentry->d_inode->i_version);
+ dentry->d_inode->i_generation);
return NULL;
}
@@ -671,10 +671,10 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
* been deleted and replaced, and the original vnode ID has
* been reused */
if (fid.unique != vnode->fid.unique) {
- _debug("%s: file deleted (uq %u -> %u I:%llu)",
+ _debug("%s: file deleted (uq %u -> %u I:%u)",
dentry->d_name.name, fid.unique,
vnode->fid.unique,
- (unsigned long long)dentry->d_inode->i_version);
+ dentry->d_inode->i_generation);
spin_lock(&vnode->lock);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
spin_unlock(&vnode->lock);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4bd0218..346e328 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -89,7 +89,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
i_size_write(&vnode->vfs_inode, size);
vnode->vfs_inode.i_uid = status->owner;
vnode->vfs_inode.i_gid = status->group;
- vnode->vfs_inode.i_version = vnode->fid.unique;
+ vnode->vfs_inode.i_generation = vnode->fid.unique;
vnode->vfs_inode.i_nlink = status->nlink;
mode = vnode->vfs_inode.i_mode;
@@ -102,6 +102,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
+ vnode->vfs_inode.i_version = data_version;
}
expected_version = status->data_version;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index db66c52..0fdab6e 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -75,7 +75,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
- inode->i_version = vnode->fid.unique;
+ inode->i_generation = vnode->fid.unique;
+ inode->i_version = vnode->status.data_version;
inode->i_mapping->a_ops = &afs_fs_aops;
/* check to see whether a symbolic link is really a mountpoint */
@@ -100,7 +101,7 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
struct afs_iget_data *data = opaque;
return inode->i_ino == data->fid.vnode &&
- inode->i_version == data->fid.unique;
+ inode->i_generation == data->fid.unique;
}
/*
@@ -122,7 +123,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
struct afs_vnode *vnode = AFS_FS_I(inode);
inode->i_ino = data->fid.vnode;
- inode->i_version = data->fid.unique;
+ inode->i_generation = data->fid.unique;
vnode->fid = data->fid;
vnode->volume = data->volume;
@@ -380,8 +381,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
inode = dentry->d_inode;
- _enter("{ ino=%lu v=%llu }", inode->i_ino,
- (unsigned long long)inode->i_version);
+ _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
generic_fillattr(inode, stat);
return 0;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index fb240e8..356dcf0 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -31,8 +31,8 @@
static void afs_i_init_once(void *foo);
static struct dentry *afs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
+static void afs_kill_super(struct super_block *sb);
static struct inode *afs_alloc_inode(struct super_block *sb);
-static void afs_put_super(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -40,7 +40,7 @@ struct file_system_type afs_fs_type = {
.owner = THIS_MODULE,
.name = "afs",
.mount = afs_mount,
- .kill_sb = kill_anon_super,
+ .kill_sb = afs_kill_super,
.fs_flags = 0,
};
@@ -50,7 +50,6 @@ static const struct super_operations afs_super_ops = {
.drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.evict_inode = afs_evict_inode,
- .put_super = afs_put_super,
.show_options = generic_show_options,
};
@@ -282,19 +281,25 @@ static int afs_parse_device_name(struct afs_mount_params *params,
*/
static int afs_test_super(struct super_block *sb, void *data)
{
- struct afs_mount_params *params = data;
+ struct afs_super_info *as1 = data;
struct afs_super_info *as = sb->s_fs_info;
- return as->volume == params->volume;
+ return as->volume == as1->volume;
+}
+
+static int afs_set_super(struct super_block *sb, void *data)
+{
+ sb->s_fs_info = data;
+ return set_anon_super(sb, NULL);
}
/*
* fill in the superblock
*/
-static int afs_fill_super(struct super_block *sb, void *data)
+static int afs_fill_super(struct super_block *sb,
+ struct afs_mount_params *params)
{
- struct afs_mount_params *params = data;
- struct afs_super_info *as = NULL;
+ struct afs_super_info *as = sb->s_fs_info;
struct afs_fid fid;
struct dentry *root = NULL;
struct inode *inode = NULL;
@@ -302,23 +307,13 @@ static int afs_fill_super(struct super_block *sb, void *data)
_enter("");
- /* allocate a superblock info record */
- as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
- if (!as) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-
- afs_get_volume(params->volume);
- as->volume = params->volume;
-
/* fill in the superblock */
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
- sb->s_fs_info = as;
sb->s_bdi = &as->volume->bdi;
+ strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));
/* allocate the root inode and dentry */
fid.vid = as->volume->vid;
@@ -326,7 +321,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
fid.unique = 1;
inode = afs_iget(sb, params->key, &fid, NULL, NULL);
if (IS_ERR(inode))
- goto error_inode;
+ return PTR_ERR(inode);
if (params->autocell)
set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
@@ -342,16 +337,8 @@ static int afs_fill_super(struct super_block *sb, void *data)
_leave(" = 0");
return 0;
-error_inode:
- ret = PTR_ERR(inode);
- inode = NULL;
error:
iput(inode);
- afs_put_volume(as->volume);
- kfree(as);
-
- sb->s_fs_info = NULL;
-
_leave(" = %d", ret);
return ret;
}
@@ -367,6 +354,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
struct afs_volume *vol;
struct key *key;
char *new_opts = kstrdup(options, GFP_KERNEL);
+ struct afs_super_info *as;
int ret;
_enter(",,%s,%p", dev_name, options);
@@ -399,12 +387,22 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
ret = PTR_ERR(vol);
goto error;
}
- params.volume = vol;
+
+ /* allocate a superblock info record */
+ as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+ if (!as) {
+ ret = -ENOMEM;
+ afs_put_volume(vol);
+ goto error;
+ }
+ as->volume = vol;
/* allocate a deviceless superblock */
- sb = sget(fs_type, afs_test_super, set_anon_super, &params);
+ sb = sget(fs_type, afs_test_super, afs_set_super, as);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
+ afs_put_volume(vol);
+ kfree(as);
goto error;
}
@@ -422,16 +420,16 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
} else {
_debug("reuse");
ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+ afs_put_volume(vol);
+ kfree(as);
}
- afs_put_volume(params.volume);
afs_put_cell(params.cell);
kfree(new_opts);
_leave(" = 0 [%p]", sb);
return dget(sb->s_root);
error:
- afs_put_volume(params.volume);
afs_put_cell(params.cell);
key_put(params.key);
kfree(new_opts);
@@ -439,18 +437,12 @@ error:
return ERR_PTR(ret);
}
-/*
- * finish the unmounting process on the superblock
- */
-static void afs_put_super(struct super_block *sb)
+static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = sb->s_fs_info;
-
- _enter("");
-
+ kill_anon_super(sb);
afs_put_volume(as->volume);
-
- _leave("");
+ kfree(as);
}
/*
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 789b3af..b806285 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,23 +84,21 @@ void afs_put_writeback(struct afs_writeback *wb)
* partly or wholly fill a page that's under preparation for writing
*/
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
- loff_t pos, unsigned len, struct page *page)
+ loff_t pos, struct page *page)
{
loff_t i_size;
- unsigned eof;
int ret;
+ int len;
- _enter(",,%llu,%u", (unsigned long long)pos, len);
-
- ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
+ _enter(",,%llu", (unsigned long long)pos);
i_size = i_size_read(&vnode->vfs_inode);
- if (pos + len > i_size)
- eof = i_size;
+ if (pos + PAGE_CACHE_SIZE > i_size)
+ len = i_size - pos;
else
- eof = PAGE_CACHE_SIZE;
+ len = PAGE_CACHE_SIZE;
- ret = afs_vnode_fetch_data(vnode, key, 0, eof, page);
+ ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
if (ret < 0) {
if (ret == -ENOENT) {
_debug("got NOENT from server"
@@ -153,9 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
*pagep = page;
/* page won't leak in error case: it eventually gets cleaned off LRU */
- if (!PageUptodate(page)) {
- _debug("not up to date");
- ret = afs_fill_page(vnode, key, pos, len, page);
+ if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
+ ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
if (ret < 0) {
kfree(candidate);
_leave(" = %d [prep]", ret);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9ad2369..bfcb18f 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
{
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
return -EIO;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1a2421f..610e8e0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
if (!disk)
return ERR_PTR(-ENXIO);
- whole = bdget_disk(disk, 0);
+ /*
+ * Normally, @bdev should equal what's returned from bdget_disk()
+ * if partno is 0; however, some drivers (floppy) use multiple
+ * bdev's for the same physical device and @bdev may be one of the
+ * aliases. Keep @bdev if partno is 0. This means claimer
+ * tracking is broken for those devices but it has always been that
+ * way.
+ */
+ if (partno)
+ whole = bdget_disk(disk, 0);
+ else
+ whole = bdgrab(bdev);
+
module_put(disk->fops->owner);
put_disk(disk);
if (!whole)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d840893..2e66786 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
u32 nr;
u32 blocksize;
u32 nscan = 0;
+ bool map = true;
if (level != 1)
return;
@@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,
nritems = btrfs_header_nritems(node);
nr = slot;
+ if (node->map_token || path->skip_locking)
+ map = false;
+
while (1) {
- if (!node->map_token) {
+ if (map && !node->map_token) {
unsigned long offset = btrfs_node_key_ptr_offset(nr);
map_private_extent_buffer(node, offset,
sizeof(struct btrfs_key_ptr),
@@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
gen = btrfs_node_ptr_generation(node, nr);
- if (node->map_token) {
+ if (map && node->map_token) {
unmap_extent_buffer(node, node->map_token,
KM_USER1);
node->map_token = NULL;
@@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
if ((nread > 65536 || nscan > 32))
break;
}
- if (node->map_token) {
+ if (map && node->map_token) {
unmap_extent_buffer(node, node->map_token, KM_USER1);
node->map_token = NULL;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 378b5b4..f30ac05 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -19,7 +19,6 @@
#ifndef __BTRFS_CTREE__
#define __BTRFS_CTREE__
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
@@ -967,6 +966,12 @@ struct btrfs_fs_info {
struct srcu_struct subvol_srcu;
spinlock_t trans_lock;
+ /*
+ * the reloc mutex goes with the trans lock, it is taken
+ * during commit to protect us from the relocation code
+ */
+ struct mutex reloc_mutex;
+
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
@@ -1172,6 +1177,14 @@ struct btrfs_root {
u32 type;
u64 highest_objectid;
+
+ /* btrfs_record_root_in_trans is a multi-step process,
+ * and it can race with the balancing code. But the
+ * race window is very small, and it only happens the first
+ * time the root is added to each transaction. So in_trans_setup
+ * is used to tell us when more checks are required.
+ */
+ unsigned long in_trans_setup;
int ref_cows;
int track_dirty;
int in_radix;
@@ -1181,7 +1194,6 @@ struct btrfs_root {
struct btrfs_key defrag_max;
int defrag_running;
char *name;
- int in_sysfs;
/* the dirty list is only used by non-reference counted roots */
struct list_head dirty_list;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6462c29..98c68e6 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
return root->fs_info->delayed_root;
}
-static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
- struct inode *inode)
+static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
- struct btrfs_delayed_node *node;
struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
struct btrfs_root *root = btrfs_inode->root;
u64 ino = btrfs_ino(inode);
- int ret;
+ struct btrfs_delayed_node *node;
-again:
node = ACCESS_ONCE(btrfs_inode->delayed_node);
if (node) {
- atomic_inc(&node->refs); /* can be accessed */
+ atomic_inc(&node->refs);
return node;
}
@@ -102,8 +99,10 @@ again:
node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
if (node) {
if (btrfs_inode->delayed_node) {
+ atomic_inc(&node->refs); /* can be accessed */
+ BUG_ON(btrfs_inode->delayed_node != node);
spin_unlock(&root->inode_lock);
- goto again;
+ return node;
}
btrfs_inode->delayed_node = node;
atomic_inc(&node->refs); /* can be accessed */
@@ -113,6 +112,23 @@ again:
}
spin_unlock(&root->inode_lock);
+ return NULL;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+ struct inode *inode)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_root *root = btrfs_inode->root;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+again:
+ node = btrfs_get_delayed_node(inode);
+ if (node)
+ return node;
+
node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
if (!node)
return ERR_PTR(-ENOMEM);
@@ -297,7 +313,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
item->data_len = data_len;
item->ins_or_del = 0;
item->bytes_reserved = 0;
- item->block_rsv = NULL;
item->delayed_node = NULL;
atomic_set(&item->refs, 1);
}
@@ -549,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
return next;
}
-static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
- struct inode *inode)
-{
- struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
- struct btrfs_delayed_node *delayed_node;
-
- delayed_node = btrfs_inode->delayed_node;
- if (delayed_node)
- atomic_inc(&delayed_node->refs);
-
- return delayed_node;
-}
-
static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
u64 root_id)
{
@@ -593,10 +595,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
- if (!ret) {
+ if (!ret)
item->bytes_reserved = num_bytes;
- item->block_rsv = dst_rsv;
- }
return ret;
}
@@ -604,10 +604,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
struct btrfs_delayed_item *item)
{
+ struct btrfs_block_rsv *rsv;
+
if (!item->bytes_reserved)
return;
- btrfs_block_rsv_release(root, item->block_rsv,
+ rsv = &root->fs_info->global_block_rsv;
+ btrfs_block_rsv_release(root, rsv,
item->bytes_reserved);
}
@@ -1014,6 +1017,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
+ struct btrfs_block_rsv *block_rsv;
int ret = 0;
path = btrfs_alloc_path();
@@ -1021,6 +1025,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->leave_spinning = 1;
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &root->fs_info->global_block_rsv;
+
delayed_root = btrfs_get_delayed_root(root);
curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1045,6 +1052,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
}
btrfs_free_path(path);
+ trans->block_rsv = block_rsv;
return ret;
}
@@ -1052,6 +1060,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_delayed_node *node)
{
struct btrfs_path *path;
+ struct btrfs_block_rsv *block_rsv;
int ret;
path = btrfs_alloc_path();
@@ -1059,6 +1068,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->leave_spinning = 1;
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &node->root->fs_info->global_block_rsv;
+
ret = btrfs_insert_delayed_items(trans, path, node->root, node);
if (!ret)
ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1066,6 +1078,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
btrfs_free_path(path);
+ trans->block_rsv = block_rsv;
return ret;
}
@@ -1116,6 +1129,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
struct btrfs_path *path;
struct btrfs_delayed_node *delayed_node = NULL;
struct btrfs_root *root;
+ struct btrfs_block_rsv *block_rsv;
unsigned long nr = 0;
int need_requeue = 0;
int ret;
@@ -1134,6 +1148,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
if (IS_ERR(trans))
goto free_path;
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &root->fs_info->global_block_rsv;
+
ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
if (!ret)
ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1176,6 +1193,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
nr = trans->blocks_used;
+ trans->block_rsv = block_rsv;
btrfs_end_transaction_dmeta(trans, root);
__btrfs_btree_balance_dirty(root, nr);
free_path:
@@ -1222,6 +1240,13 @@ again:
return 0;
}
+void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+{
+ struct btrfs_delayed_root *delayed_root;
+ delayed_root = btrfs_get_delayed_root(root);
+ WARN_ON(btrfs_first_delayed_node(delayed_root));
+}
+
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
struct btrfs_delayed_root *delayed_root;
@@ -1382,8 +1407,7 @@ end:
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
- struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
- int ret = 0;
+ struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
if (!delayed_node)
return -ENOENT;
@@ -1393,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
* a new directory index is added into the delayed node and index_cnt
* is updated now. So we needn't lock the delayed node.
*/
- if (!delayed_node->index_cnt)
+ if (!delayed_node->index_cnt) {
+ btrfs_release_delayed_node(delayed_node);
return -EINVAL;
+ }
BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
- return ret;
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
}
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
@@ -1591,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
inode->i_ctime.tv_nsec);
}
+int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+{
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_inode_item *inode_item;
+ struct btrfs_timespec *tspec;
+
+ delayed_node = btrfs_get_delayed_node(inode);
+ if (!delayed_node)
+ return -ENOENT;
+
+ mutex_lock(&delayed_node->mutex);
+ if (!delayed_node->inode_dirty) {
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return -ENOENT;
+ }
+
+ inode_item = &delayed_node->inode_item;
+
+ inode->i_uid = btrfs_stack_inode_uid(inode_item);
+ inode->i_gid = btrfs_stack_inode_gid(inode_item);
+ btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+ inode->i_mode = btrfs_stack_inode_mode(inode_item);
+ inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
+ inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
+ BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+ BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+ inode->i_rdev = 0;
+ *rdev = btrfs_stack_inode_rdev(inode_item);
+ BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+
+ tspec = btrfs_inode_atime(inode_item);
+ inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
+ inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+ tspec = btrfs_inode_mtime(inode_item);
+ inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
+ inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+ tspec = btrfs_inode_ctime(inode_item);
+ inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
+ inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+ inode->i_generation = BTRFS_I(inode)->generation;
+ BTRFS_I(inode)->index_cnt = (u64)-1;
+
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
+}
+
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index eb7d240..8d27af4 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
struct list_head tree_list; /* used for batch insert/delete items */
struct list_head readdir_list; /* used for readdir items */
u64 bytes_reserved;
- struct btrfs_block_rsv *block_rsv;
struct btrfs_delayed_node *delayed_node;
atomic_t refs;
int ins_or_del;
@@ -120,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
+int btrfs_fill_inode(struct inode *inode, u32 *rdev);
/* Used for drop dead root */
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
@@ -138,4 +138,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
/* for init */
int __init btrfs_delayed_inode_init(void);
void btrfs_delayed_inode_exit(void);
+
+/* for debugging */
+void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
+
#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a203d36..1ac8db5d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->last_trans = 0;
root->highest_objectid = 0;
root->name = NULL;
- root->in_sysfs = 0;
root->inode_tree = RB_ROOT;
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
return root;
root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
- if (!root->free_ino_ctl)
- goto fail;
root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
GFP_NOFS);
- if (!root->free_ino_pinned)
+ if (!root->free_ino_pinned || !root->free_ino_ctl) {
+ ret = -ENOMEM;
goto fail;
+ }
btrfs_init_free_ino_ctl(root);
mutex_init(&root->fs_commit_mutex);
spin_lock_init(&root->cache_lock);
init_waitqueue_head(&root->cache_wait);
- set_anon_super(&root->anon_super, NULL);
+ ret = set_anon_super(&root->anon_super, NULL);
+ if (ret)
+ goto fail;
if (btrfs_root_refs(&root->root_item) == 0) {
ret = -ENOENT;
@@ -1618,6 +1619,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
+ mutex_init(&fs_info->reloc_mutex);
init_completion(&fs_info->kobj_unregister);
fs_info->tree_root = tree_root;
@@ -1668,8 +1670,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->scrub_pause_wait);
init_rwsem(&fs_info->scrub_super_lock);
fs_info->scrub_workers_refcnt = 0;
- btrfs_init_workers(&fs_info->scrub_workers, "scrub",
- fs_info->thread_pool_size, &fs_info->generic_worker);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
@@ -2911,9 +2911,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
INIT_LIST_HEAD(&splice);
- list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
spin_lock(&root->fs_info->delalloc_lock);
+ list_splice_init(&root->fs_info->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5b9b6b6..71cd456 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3089,6 +3089,13 @@ alloc:
}
goto again;
}
+
+ /*
+ * If we have less pinned bytes than we want to allocate then
+ * don't bother committing the transaction, it won't help us.
+ */
+ if (data_sinfo->bytes_pinned < bytes)
+ committed = 1;
spin_unlock(&data_sinfo->lock);
/* commit the current transaction and try again */
@@ -3307,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
if (reserved == 0)
return 0;
- /* nothing to shrink - nothing to reclaim */
- if (root->fs_info->delalloc_bytes == 0)
- return 0;
-
max_reclaim = min(reserved, to_reclaim);
while (loops < 1024) {
@@ -4839,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
u64 num_bytes, u64 empty_size,
u64 search_start, u64 search_end,
u64 hint_byte, struct btrfs_key *ins,
- int data)
+ u64 data)
{
int ret = 0;
struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4866,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
space_info = __find_space_info(root->fs_info, data);
if (!space_info) {
- printk(KERN_ERR "No space info for %d\n", data);
+ printk(KERN_ERR "No space info for %llu\n", data);
return -ENOSPC;
}
@@ -5211,9 +5214,7 @@ loop:
* LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
* again
*/
- if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
- (found_uncached_bg || empty_size || empty_cluster ||
- allowed_chunk_alloc)) {
+ if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
index = 0;
if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
found_uncached_bg = false;
@@ -5253,32 +5254,36 @@ loop:
goto search;
}
- if (loop < LOOP_CACHING_WAIT) {
- loop++;
- goto search;
- }
+ loop++;
if (loop == LOOP_ALLOC_CHUNK) {
- empty_size = 0;
- empty_cluster = 0;
- }
+ if (allowed_chunk_alloc) {
+ ret = do_chunk_alloc(trans, root, num_bytes +
+ 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_LIMITED);
+ allowed_chunk_alloc = 0;
+ if (ret == 1)
+ done_chunk_alloc = 1;
+ } else if (!done_chunk_alloc &&
+ space_info->force_alloc ==
+ CHUNK_ALLOC_NO_FORCE) {
+ space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+ }
- if (allowed_chunk_alloc) {
- ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data,
- CHUNK_ALLOC_LIMITED);
- allowed_chunk_alloc = 0;
- done_chunk_alloc = 1;
- } else if (!done_chunk_alloc &&
- space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
- space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+ /*
+ * We didn't allocate a chunk, so go ahead and drop the
+ * empty size and loop again.
+ */
+ if (!done_chunk_alloc)
+ loop = LOOP_NO_EMPTY_SIZE;
}
- if (loop < LOOP_NO_EMPTY_SIZE) {
- loop++;
- goto search;
+ if (loop == LOOP_NO_EMPTY_SIZE) {
+ empty_size = 0;
+ empty_cluster = 0;
}
- ret = -ENOSPC;
+
+ goto search;
} else if (!ins->objectid) {
ret = -ENOSPC;
} else if (ins->objectid) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a..a11a92e 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
unsigned long map_len;
struct page *first_page;
unsigned long bflags;
- atomic_t refs;
struct list_head leak_list;
struct rcu_head rcu_head;
+ atomic_t refs;
/* the spinlock is used to protect most operations */
spinlock_t lock;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ad14473..bf0d615 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
pgoff_t index = 0;
unsigned long first_page_offset;
int num_checksums;
- int ret = 0, ret2;
+ int ret = 0;
INIT_LIST_HEAD(&bitmaps);
@@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
goto free_cache;
}
spin_lock(&ctl->tree_lock);
- ret2 = link_free_space(ctl, e);
+ ret = link_free_space(ctl, e);
ctl->total_bitmaps++;
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
- list_add_tail(&e->list, &bitmaps);
if (ret) {
printk(KERN_ERR "Duplicate entries in "
"free space cache, dumping\n");
@@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
page_cache_release(page);
goto free_cache;
}
+ list_add_tail(&e->list, &bitmaps);
}
num_entries--;
@@ -1417,6 +1417,23 @@ again:
return 0;
}
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
+{
+ u64 bytes_to_set = 0;
+ u64 end;
+
+ end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+ bytes_to_set = min(end - offset, bytes);
+
+ bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+ return bytes_to_set;
+
+}
+
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
@@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
return true;
}
+static struct btrfs_free_space_op free_space_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_free_space *bitmap_info;
+ struct btrfs_block_group_cache *block_group = NULL;
int added = 0;
- u64 bytes, offset, end;
+ u64 bytes, offset, bytes_added;
int ret;
bytes = info->bytes;
@@ -1467,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
if (!ctl->op->use_bitmap(ctl, info))
return 0;
+ if (ctl->op == &free_space_op)
+ block_group = ctl->private;
again:
+ /*
+ * Since we link bitmaps right into the cluster, we need to see if we
+ * have a cluster here; if so, and it has our bitmap, we need to add
+ * the free space to that bitmap.
+ */
+ if (block_group && !list_empty(&block_group->cluster_list)) {
+ struct btrfs_free_cluster *cluster;
+ struct rb_node *node;
+ struct btrfs_free_space *entry;
+
+ cluster = list_entry(block_group->cluster_list.next,
+ struct btrfs_free_cluster,
+ block_group_list);
+ spin_lock(&cluster->lock);
+ node = rb_first(&cluster->root);
+ if (!node) {
+ spin_unlock(&cluster->lock);
+ goto no_cluster_bitmap;
+ }
+
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ if (!entry->bitmap) {
+ spin_unlock(&cluster->lock);
+ goto no_cluster_bitmap;
+ }
+
+ if (entry->offset == offset_to_bitmap(ctl, offset)) {
+ bytes_added = add_bytes_to_bitmap(ctl, entry,
+ offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ }
+ spin_unlock(&cluster->lock);
+ if (!bytes) {
+ ret = 1;
+ goto out;
+ }
+ }
+
+no_cluster_bitmap:
bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!bitmap_info) {
@@ -1475,19 +1540,10 @@ again:
goto new_bitmap;
}
- end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
- if (offset >= bitmap_info->offset && offset + bytes > end) {
- bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
- bytes -= end - offset;
- offset = end;
- added = 0;
- } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
- bitmap_set_bits(ctl, bitmap_info, offset, bytes);
- bytes = 0;
- } else {
- BUG();
- }
+ bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ added = 0;
if (!bytes) {
ret = 1;
@@ -1766,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
"\n", count);
}
-static struct btrfs_free_space_op free_space_op = {
- .recalc_thresholds = recalculate_thresholds,
- .use_bitmap = use_bitmap,
-};
-
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -1842,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
info = rb_entry(node, struct btrfs_free_space, offset_index);
- unlink_free_space(ctl, info);
- kfree(info->bitmap);
- kmem_cache_free(btrfs_free_space_cachep, info);
+ if (!info->bitmap) {
+ unlink_free_space(ctl, info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ } else {
+ free_bitmap(ctl, info);
+ }
if (need_resched()) {
spin_unlock(&ctl->tree_lock);
cond_resched();
@@ -2142,9 +2196,11 @@ again:
/*
* This searches the block group for just extents to fill the cluster with.
*/
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ struct list_head *bitmaps, u64 offset, u64 bytes,
+ u64 min_bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *first = NULL;
@@ -2166,6 +2222,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* extent entry.
*/
while (entry->bitmap) {
+ if (list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
node = rb_next(&entry->offset_index);
if (!node)
return -ENOSPC;
@@ -2185,8 +2243,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
return -ENOSPC;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- if (entry->bitmap)
+ if (entry->bitmap) {
+ if (list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
continue;
+ }
+
/*
* we haven't filled the empty size and the window is
* very large. reset and try again
@@ -2238,9 +2300,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* This specifically looks for bitmaps that may work in the cluster, we assume
* that we have already failed to find extents that will work.
*/
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ struct list_head *bitmaps, u64 offset, u64 bytes,
+ u64 min_bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
@@ -2250,10 +2314,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
if (ctl->total_bitmaps == 0)
return -ENOSPC;
+ /*
+ * First check our cached list of bitmaps and see if there is an entry
+ * here that will work.
+ */
+ list_for_each_entry(entry, bitmaps, list) {
+ if (entry->bytes < min_bytes)
+ continue;
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+ bytes, min_bytes);
+ if (!ret)
+ return 0;
+ }
+
+ /*
+ * If we do have entries on our list and we get here, then we didn't
+ * find anything above, so get the next entry after the last entry in
+ * this list and start the search from there.
+ */
+ if (!list_empty(bitmaps)) {
+ entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+ list);
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ goto search;
+ }
+
entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
if (!entry)
return -ENOSPC;
+search:
node = &entry->offset_index;
do {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2284,6 +2377,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct list_head bitmaps;
+ struct btrfs_free_space *entry, *tmp;
u64 min_bytes;
int ret;
@@ -2322,11 +2417,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
goto out;
}
- ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
- min_bytes);
+ INIT_LIST_HEAD(&bitmaps);
+ ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+ bytes, min_bytes);
if (ret)
- ret = setup_cluster_bitmap(block_group, cluster, offset,
- bytes, min_bytes);
+ ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+ offset, bytes, min_bytes);
+
+ /* Clear our temporary list */
+ list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+ list_del_init(&entry->list);
if (!ret) {
atomic_inc(&block_group->count);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ebf95f7..d340f63 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
- return 0;
+ goto good;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
int maybe_acls;
u32 rdev;
int ret;
+ bool filled = false;
+
+ ret = btrfs_fill_inode(inode, &rdev);
+ if (!ret)
+ filled = true;
path = btrfs_alloc_path();
BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
goto make_bad;
leaf = path->nodes[0];
+
+ if (filled)
+ goto cache_acl;
+
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->index_cnt = (u64)-1;
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
+cache_acl:
/*
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
}
btrfs_free_path(path);
- inode_item = NULL;
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
@@ -3076,6 +3084,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
+ btrfs_free_path(path);
return 0;
}
@@ -3646,7 +3655,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_i_size_write(inode, 0);
while (1) {
- trans = btrfs_start_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
trans->block_rsv = root->orphan_block_rsv;
@@ -4519,6 +4528,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode_tree_add(inode);
trace_btrfs_inode_new(inode);
+ btrfs_set_inode_last_trans(trans, inode);
return inode;
fail:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ac37040..a3c4751 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
BUG_ON(ret);
+ spin_lock(&root->fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
+ spin_unlock(&root->fs_info->trans_lock);
if (async_transid) {
*async_transid = trans->transid;
ret = btrfs_commit_transaction_async(trans,
@@ -2054,29 +2056,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
{
- struct btrfs_ioctl_fs_info_args fi_args;
+ struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
struct btrfs_device *next;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- fi_args.num_devices = fs_devices->num_devices;
- fi_args.max_id = 0;
- memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+ fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
+ if (!fi_args)
+ return -ENOMEM;
+
+ fi_args->num_devices = fs_devices->num_devices;
+ memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
- if (device->devid > fi_args.max_id)
- fi_args.max_id = device->devid;
+ if (device->devid > fi_args->max_id)
+ fi_args->max_id = device->devid;
}
mutex_unlock(&fs_devices->device_list_mutex);
- if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
- return -EFAULT;
+ if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
+ ret = -EFAULT;
- return 0;
+ kfree(fi_args);
+ return ret;
}
static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b1ef27c..5e0a3dc 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
int ret;
if (!root->reloc_root)
- return 0;
+ goto out;
reloc_root = root->reloc_root;
root_item = &reloc_root->root_item;
@@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&reloc_root->root_key, root_item);
BUG_ON(ret);
+
+out:
return 0;
}
@@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
u64 num_bytes = 0;
int ret;
- spin_lock(&root->fs_info->trans_lock);
+ mutex_lock(&root->fs_info->reloc_mutex);
rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
rc->merging_rsv_size += rc->nodes_relocated * 2;
- spin_unlock(&root->fs_info->trans_lock);
+ mutex_unlock(&root->fs_info->reloc_mutex);
+
again:
if (!err) {
num_bytes = rc->merging_rsv_size;
@@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
int ret;
again:
root = rc->extent_root;
- spin_lock(&root->fs_info->trans_lock);
+
+ /*
+	 * this serializes us with btrfs_record_root_in_transaction;
+ * we have to make sure nobody is in the middle of
+ * adding their roots to the list while we are
+ * doing this splice
+ */
+ mutex_lock(&root->fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- spin_unlock(&root->fs_info->trans_lock);
+ mutex_unlock(&root->fs_info->reloc_mutex);
while (!list_empty(&reloc_roots)) {
found = 1;
@@ -3590,17 +3600,19 @@ next:
static void set_reloc_control(struct reloc_control *rc)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- spin_lock(&fs_info->trans_lock);
+
+ mutex_lock(&fs_info->reloc_mutex);
fs_info->reloc_ctl = rc;
- spin_unlock(&fs_info->trans_lock);
+ mutex_unlock(&fs_info->reloc_mutex);
}
static void unset_reloc_control(struct reloc_control *rc)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- spin_lock(&fs_info->trans_lock);
+
+ mutex_lock(&fs_info->reloc_mutex);
fs_info->reloc_ctl = NULL;
- spin_unlock(&fs_info->trans_lock);
+ mutex_unlock(&fs_info->reloc_mutex);
}
static int check_extent_flags(u64 flags)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index df50fd1..a8d03d5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
* Boston, MA 021110-1307, USA.
*/
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
@@ -804,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
-
- l = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid != logical) {
- ret = btrfs_previous_item(root, path, 0,
- BTRFS_EXTENT_ITEM_KEY);
- if (ret < 0)
- goto out;
- }
+ goto out_noplug;
+ /*
+ * we might miss half an extent here, but that doesn't matter,
+ * as it's only the prefetch
+ */
while (1) {
l = path->nodes[0];
slot = path->slots[0];
@@ -824,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
if (ret == 0)
continue;
if (ret < 0)
- goto out;
+ goto out_noplug;
break;
}
@@ -906,15 +894,20 @@ again:
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
-
- l = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid != logical) {
+ if (ret > 0) {
ret = btrfs_previous_item(root, path, 0,
BTRFS_EXTENT_ITEM_KEY);
if (ret < 0)
goto out;
+ if (ret > 0) {
+ /* there's no smaller item, so stick with the
+ * larger one */
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(NULL, root, &key,
+ path, 0, 0);
+ if (ret < 0)
+ goto out;
+ }
}
while (1) {
@@ -989,6 +982,7 @@ next:
out:
blk_finish_plug(&plug);
+out_noplug:
btrfs_free_path(path);
return ret < 0 ? ret : 0;
}
@@ -1064,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- ret = 0;
+ break;
+ if (ret > 0) {
+ if (path->slots[0] >=
+ btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret)
+ break;
+ }
+ }
l = path->nodes[0];
slot = path->slots[0];
@@ -1075,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
if (found_key.objectid != sdev->dev->devid)
break;
- if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+ if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
break;
if (found_key.offset >= end)
@@ -1104,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
if (!cache) {
ret = -ENOENT;
- goto out;
+ break;
}
ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
chunk_offset, length);
@@ -1116,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
btrfs_release_path(path);
}
-out:
btrfs_free_path(path);
- return ret;
+
+ /*
+ * ret can still be 1 from search_slot or next_leaf,
+ * that's not an error
+ */
+ return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1155,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->scrub_lock);
- if (fs_info->scrub_workers_refcnt == 0)
+ if (fs_info->scrub_workers_refcnt == 0) {
+ btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+ fs_info->thread_pool_size, &fs_info->generic_worker);
+ fs_info->scrub_workers.idle_thresh = 4;
btrfs_start_workers(&fs_info->scrub_workers, 1);
+ }
++fs_info->scrub_workers_refcnt;
mutex_unlock(&fs_info->scrub_lock);
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c3c223a..daac9ae 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,152 +28,6 @@
#include "disk-io.h"
#include "transaction.h"
-static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)btrfs_root_used(&root->root_item));
-}
-
-static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)btrfs_root_limit(&root->root_item));
-}
-
-static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
-{
-
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
-}
-
-static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
-}
-
-static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
-}
-
-/* this is for root attrs (subvols/snapshots) */
-struct btrfs_root_attr {
- struct attribute attr;
- ssize_t (*show)(struct btrfs_root *, char *);
- ssize_t (*store)(struct btrfs_root *, const char *, size_t);
-};
-
-#define ROOT_ATTR(name, mode, show, store) \
-static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
- show, store)
-
-ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
-ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
-
-static struct attribute *btrfs_root_attrs[] = {
- &btrfs_root_attr_blocks_used.attr,
- &btrfs_root_attr_block_limit.attr,
- NULL,
-};
-
-/* this is for super attrs (actual full fs) */
-struct btrfs_super_attr {
- struct attribute attr;
- ssize_t (*show)(struct btrfs_fs_info *, char *);
- ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
-};
-
-#define SUPER_ATTR(name, mode, show, store) \
-static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
- show, store)
-
-SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
-SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
-SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
-
-static struct attribute *btrfs_super_attrs[] = {
- &btrfs_super_attr_blocks_used.attr,
- &btrfs_super_attr_total_blocks.attr,
- &btrfs_super_attr_blocksize.attr,
- NULL,
-};
-
-static ssize_t btrfs_super_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
- super_kobj);
- struct btrfs_super_attr *a = container_of(attr,
- struct btrfs_super_attr,
- attr);
-
- return a->show ? a->show(fs, buf) : 0;
-}
-
-static ssize_t btrfs_super_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t len)
-{
- struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
- super_kobj);
- struct btrfs_super_attr *a = container_of(attr,
- struct btrfs_super_attr,
- attr);
-
- return a->store ? a->store(fs, buf, len) : 0;
-}
-
-static ssize_t btrfs_root_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct btrfs_root *root = container_of(kobj, struct btrfs_root,
- root_kobj);
- struct btrfs_root_attr *a = container_of(attr,
- struct btrfs_root_attr,
- attr);
-
- return a->show ? a->show(root, buf) : 0;
-}
-
-static ssize_t btrfs_root_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t len)
-{
- struct btrfs_root *root = container_of(kobj, struct btrfs_root,
- root_kobj);
- struct btrfs_root_attr *a = container_of(attr,
- struct btrfs_root_attr,
- attr);
- return a->store ? a->store(root, buf, len) : 0;
-}
-
-static void btrfs_super_release(struct kobject *kobj)
-{
- struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
- super_kobj);
- complete(&fs->kobj_unregister);
-}
-
-static void btrfs_root_release(struct kobject *kobj)
-{
- struct btrfs_root *root = container_of(kobj, struct btrfs_root,
- root_kobj);
- complete(&root->kobj_unregister);
-}
-
-static const struct sysfs_ops btrfs_super_attr_ops = {
- .show = btrfs_super_attr_show,
- .store = btrfs_super_attr_store,
-};
-
-static const struct sysfs_ops btrfs_root_attr_ops = {
- .show = btrfs_root_attr_show,
- .store = btrfs_root_attr_store,
-};
-
/* /sys/fs/btrfs/ entry */
static struct kset *btrfs_kset;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dd71966..51dcec8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
* to make sure the old root from before we joined the transaction is deleted
* when the transaction commits
*/
-int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (root->ref_cows && root->last_trans < trans->transid) {
WARN_ON(root == root->fs_info->extent_root);
WARN_ON(root->commit_root != root->node);
+ /*
+ * see below for in_trans_setup usage rules
+ * we have the reloc mutex held now, so there
+ * is only one writer in this function
+ */
+ root->in_trans_setup = 1;
+
+ /* make sure readers find in_trans_setup before
+ * they find our root->last_trans update
+ */
+ smp_wmb();
+
spin_lock(&root->fs_info->fs_roots_radix_lock);
if (root->last_trans == trans->transid) {
spin_unlock(&root->fs_info->fs_roots_radix_lock);
return 0;
}
- root->last_trans = trans->transid;
radix_tree_tag_set(&root->fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&root->fs_info->fs_roots_radix_lock);
+ root->last_trans = trans->transid;
+
+ /* this is pretty tricky. We don't want to
+ * take the relocation lock in btrfs_record_root_in_trans
+ * unless we're really doing the first setup for this root in
+ * this transaction.
+ *
+ * Normally we'd use root->last_trans as a flag to decide
+ * if we want to take the expensive mutex.
+ *
+ * But, we have to set root->last_trans before we
+ * init the relocation root, otherwise, we trip over warnings
+ * in ctree.c. The solution used here is to flag ourselves
+ * with root->in_trans_setup. When this is 1, we're still
+ * fixing up the reloc trees and everyone must wait.
+ *
+ * When this is zero, they can trust root->last_trans and fly
+ * through btrfs_record_root_in_trans without having to take the
+ * lock. smp_wmb() makes sure that all the writes above are
+ * done before we pop in the zero below
+ */
btrfs_init_reloc_root(trans, root);
+ smp_wmb();
+ root->in_trans_setup = 0;
}
return 0;
}
+
+int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ if (!root->ref_cows)
+ return 0;
+
+ /*
+ * see record_root_in_trans for comments about in_trans_setup usage
+ * and barriers
+ */
+ smp_rmb();
+ if (root->last_trans == trans->transid &&
+ !root->in_trans_setup)
+ return 0;
+
+ mutex_lock(&root->fs_info->reloc_mutex);
+ record_root_in_trans(trans, root);
+ mutex_unlock(&root->fs_info->reloc_mutex);
+
+ return 0;
+}
+
/* wait for commit against the current transaction to become unblocked
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
@@ -349,7 +406,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
list) {
if (t->in_commit) {
if (t->commit_done)
- goto out;
+ break;
cur_trans = t;
atomic_inc(&cur_trans->use_count);
break;
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent = dget_parent(dentry);
parent_inode = parent->d_inode;
parent_root = BTRFS_I(parent_inode)->root;
- btrfs_record_root_in_trans(trans, parent_root);
+ record_root_in_trans(trans, parent_root);
/*
* insert the directory item
@@ -900,7 +957,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, parent_root, parent_inode);
BUG_ON(ret);
- btrfs_record_root_in_trans(trans, root);
+ /*
+ * pull in the delayed directory update
+ * and the delayed inode item
+ * otherwise we corrupt the FS during
+ * snapshot
+ */
+ ret = btrfs_run_delayed_items(trans, root);
+ BUG_ON(ret);
+
+ record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
btrfs_check_and_init_root_item(new_root_item);
@@ -961,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
int ret;
list_for_each_entry(pending, head, list) {
- /*
- * We must deal with the delayed items before creating
- * snapshots, or we will create a snapthot with inconsistent
- * information.
- */
- ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
- BUG_ON(ret);
-
ret = create_pending_snapshot(trans, fs_info, pending);
BUG_ON(ret);
}
@@ -1118,8 +1176,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
wait_current_trans_commit_start_and_unblock(root, cur_trans);
else
wait_current_trans_commit_start(root, cur_trans);
- put_transaction(cur_trans);
+ if (current->journal_info == trans)
+ current->journal_info = NULL;
+
+ put_transaction(cur_trans);
return 0;
}
@@ -1238,21 +1299,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
schedule_timeout(1);
finish_wait(&cur_trans->writer_wait, &wait);
- spin_lock(&root->fs_info->trans_lock);
- root->fs_info->trans_no_join = 1;
- spin_unlock(&root->fs_info->trans_lock);
} while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
- ret = create_pending_snapshots(trans, root->fs_info);
- BUG_ON(ret);
+ /*
+ * Ok now we need to make sure to block out any other joins while we
+ * commit the transaction. We could have started a join before setting
+ * no_join so make sure to wait for num_writers to == 1 again.
+ */
+ spin_lock(&root->fs_info->trans_lock);
+ root->fs_info->trans_no_join = 1;
+ spin_unlock(&root->fs_info->trans_lock);
+ wait_event(cur_trans->writer_wait,
+ atomic_read(&cur_trans->num_writers) == 1);
+
+ /*
+ * the reloc mutex makes sure that we stop
+ * the balancing code from coming in and moving
+ * extents around in the middle of the commit
+ */
+ mutex_lock(&root->fs_info->reloc_mutex);
ret = btrfs_run_delayed_items(trans, root);
BUG_ON(ret);
+ ret = create_pending_snapshots(trans, root->fs_info);
+ BUG_ON(ret);
+
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
BUG_ON(ret);
+ /*
+ * make sure none of the code above managed to slip in a
+ * delayed item
+ */
+ btrfs_assert_delayed_root_empty(root);
+
WARN_ON(cur_trans != trans->transaction);
btrfs_scrub_pause(root);
@@ -1309,6 +1391,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
root->fs_info->running_transaction = NULL;
root->fs_info->trans_no_join = 0;
spin_unlock(&root->fs_info->trans_lock);
+ mutex_unlock(&root->fs_info->reloc_mutex);
wake_up(&root->fs_info->transaction_wait);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 592396c..4ce8a9f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3177,7 +3177,7 @@ again:
tmp_key.offset = (u64)-1;
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
- BUG_ON(!wc.replay_dest);
+ BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
wc.replay_dest->log_root = log;
btrfs_record_root_in_trans(trans, wc.replay_dest);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da541df..1efa56e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
transid = btrfs_super_generation(disk_super);
if (disk_super->label[0])
printk(KERN_INFO "device label %s ", disk_super->label);
- else {
- /* FIXME, make a readl uuid parser */
- printk(KERN_INFO "device fsid %llx-%llx ",
- *(unsigned long long *)disk_super->fsid,
- *(unsigned long long *)(disk_super->fsid + 8));
- }
+ else
+ printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
diff --git a/fs/buffer.c b/fs/buffer.c
index 49c9aad..1a80b04 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
- if (unlikely(err)) {
+ if (unlikely(err))
page_zero_new_buffers(page, from, to);
- ClearPageUptodate(page);
- }
return err;
}
EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 33da49d..5a3953d 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
int err;
struct inode *inode = page->mapping->host;
BUG_ON(!inode);
- igrab(inode);
+ ihold(inode);
err = writepage_nounlock(page, wbc);
unlock_page(page);
iput(inode);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 1f72b00..f605753 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
while (!list_empty(&mdsc->cap_dirty)) {
ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
i_dirty_item);
- inode = igrab(&ci->vfs_inode);
+ inode = &ci->vfs_inode;
+ ihold(inode);
dout("flush_dirty_caps %p\n", inode);
spin_unlock(&mdsc->cap_dirty_lock);
- if (inode) {
- ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
- NULL);
- iput(inode);
- }
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
+ iput(inode);
spin_lock(&mdsc->cap_dirty_lock);
}
spin_unlock(&mdsc->cap_dirty_lock);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 33729e8..ef8f08c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -308,7 +308,8 @@ more:
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_dentry = dget(filp->f_dentry);
/* hints to request -> mds selection code */
req->r_direct_mode = USE_AUTH_MDS;
@@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
- if (err)
+ if (err) {
d_drop(dentry);
- else if (!req->r_reply_info.head->is_dentry)
- d_instantiate(dentry, igrab(old_dentry->d_inode));
+ } else if (!req->r_reply_info.head->is_dentry) {
+ ihold(old_dentry->d_inode);
+ d_instantiate(dentry, old_dentry->d_inode);
+ }
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index a610d3d..f67b687 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
err = ceph_mdsc_do_request(mdsc, NULL, req);
inode = req->r_target_inode;
if (inode)
- igrab(inode);
+ ihold(inode);
ceph_mdsc_put_request(req);
if (!inode)
return ERR_PTR(-ESTALE);
@@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
err = ceph_mdsc_do_request(mdsc, NULL, req);
inode = req->r_target_inode;
if (inode)
- igrab(inode);
+ ihold(inode);
ceph_mdsc_put_request(req);
if (!inode)
return ERR_PTR(err ? err : -ESTALE);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 203252d..9542f07 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file)
err = PTR_ERR(req);
goto out;
}
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_num_caps = 1;
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
if (!err)
@@ -282,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file)
static int striped_read(struct inode *inode,
u64 off, u64 len,
struct page **pages, int num_pages,
- int *checkeof, bool align_to_pages,
+ int *checkeof, bool o_direct,
unsigned long buf_align)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -307,7 +308,7 @@ static int striped_read(struct inode *inode,
io_align = off & ~PAGE_MASK;
more:
- if (align_to_pages)
+ if (o_direct)
page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
else
page_align = pos & ~PAGE_MASK;
@@ -317,10 +318,10 @@ more:
ci->i_truncate_seq,
ci->i_truncate_size,
page_pos, pages_left, page_align);
- hit_stripe = this_len < left;
- was_short = ret >= 0 && ret < this_len;
if (ret == -ENOENT)
ret = 0;
+ hit_stripe = this_len < left;
+ was_short = ret >= 0 && ret < this_len;
dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
@@ -345,20 +346,22 @@ more:
}
if (was_short) {
- /* was original extent fully inside i_size? */
- if (pos + left <= inode->i_size) {
- dout("zero tail\n");
- ceph_zero_page_vector_range(page_off + read, len - read,
+ /* did we bounce off eof? */
+ if (pos + left > inode->i_size)
+ *checkeof = 1;
+
+ /* zero trailing bytes (inside i_size) */
+ if (left > 0 && pos < inode->i_size) {
+ if (pos + left > inode->i_size)
+ left = inode->i_size - pos;
+
+ dout("zero tail %d\n", left);
+ ceph_zero_page_vector_range(page_off + read, left,
pages);
- read = len;
- goto out;
+ read += left;
}
-
- /* check i_size */
- *checkeof = 1;
}
-out:
if (ret >= 0)
ret = read;
dout("striped_read returns %d\n", ret);
@@ -658,7 +661,7 @@ out:
/* hit EOF or hole? */
if (statret == 0 && *ppos < inode->i_size) {
- dout("aio_read sync_read hit hole, reading more\n");
+ dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
read += ret;
base += ret;
len -= ret;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 70b6a48..d8858e9 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
goto done;
}
req->r_dentry = dn; /* may have spliced */
- igrab(in);
+ ihold(in);
} else if (ceph_ino(in) == vino.ino &&
ceph_snap(in) == vino.snap) {
- igrab(in);
+ ihold(in);
} else {
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
dn, in, ceph_ino(in), ceph_snap(in),
@@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
goto done;
}
req->r_dentry = dn; /* may have spliced */
- igrab(in);
+ ihold(in);
rinfo->head->is_dentry = 1; /* fool notrace handlers */
}
@@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode)
if (queue_work(ceph_inode_to_client(inode)->wb_wq,
&ceph_inode(inode)->i_wb_work)) {
dout("ceph_queue_writeback %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_writeback %p failed\n", inode);
}
@@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode)
if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
&ceph_inode(inode)->i_pg_inv_work)) {
dout("ceph_queue_invalidate %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_invalidate %p failed\n", inode);
}
@@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
&ci->i_vmtruncate_work)) {
dout("ceph_queue_vmtruncate %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
inode, ci->i_truncate_pending);
@@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
__mark_inode_dirty(inode, inode_dirty_flags);
if (mask) {
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = release;
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
@@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask)
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_num_caps = 1;
req->r_args.getattr.mask = cpu_to_le32(mask);
err = ceph_mdsc_do_request(mdsc, NULL, req);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8888c9ba..ef0b5f4 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
req->r_args.setlayout.layout.fl_stripe_unit =
@@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_args.setlayout.layout.fl_stripe_unit =
cpu_to_le32(l.stripe_unit);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 476b329..80576d05 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
/* mds requires start and length rather than start and end */
if (LLONG_MAX == fl->fl_end)
@@ -32,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
length = fl->fl_end - fl->fl_start + 1;
dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type`: %d", (int)lock_type,
+ "length: %llu, wait: %d, type: %d", (int)lock_type,
(int)operation, (u64)fl->fl_pid, fl->fl_start,
length, wait, fl->fl_type);
-
req->r_args.filelock_change.rule = lock_type;
req->r_args.filelock_change.type = cmd;
req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
@@ -70,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
}
ceph_mdsc_put_request(req);
dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type,
+ "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
(int)operation, (u64)fl->fl_pid, fl->fl_start,
length, wait, fl->fl_type, err);
return err;
@@ -109,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
dout("mds locked, locking locally");
err = posix_lock_file(file, fl, NULL);
if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
- /* undo! This should only happen if the kernel detects
- * local deadlock. */
+ /* undo! This should only happen if
+ * the kernel detects local
+ * deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on posix_lock_file, undid lock", err);
+ dout("got %d on posix_lock_file, undid lock",
+ err);
}
}
- } else {
- dout("mds returned error code %d", err);
+ } else if (err == -ERESTARTSYS) {
+ dout("undoing lock\n");
+ ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+ CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
@@ -155,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
file, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on flock_lock_file_wait, undid lock", err);
}
- } else {
- dout("mds error code %d", err);
+ } else if (err == -ERESTARTSYS) {
+ dout("undoing lock\n");
+ ceph_lock_message(CEPH_LOCK_FLOCK,
+ CEPH_MDS_OP_SETFILELOCK,
+ file, CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 24067d6..54b14de 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
ci = list_first_entry(&mdsc->snap_flush_list,
struct ceph_inode_info, i_snap_flush_item);
inode = &ci->vfs_inode;
- igrab(inode);
+ ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
spin_lock(&inode->i_lock);
__ceph_flush_snaps(ci, &session, 0);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index f2b6286..f42d730 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
err = PTR_ERR(req);
goto out;
}
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req->r_num_caps = 1;
req->r_args.setxattr.flags = cpu_to_le32(flags);
@@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req->r_num_caps = 1;
req->r_path2 = kstrdup(name, GFP_NOFS);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 53ed1ad..f66cc16 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -156,6 +156,6 @@ config CIFS_ACL
config CIFS_NFSD_EXPORT
bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
- depends on CIFS && EXPERIMENTAL
+ depends on CIFS && EXPERIMENTAL && BROKEN
help
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index dd8584d..545509c 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -92,7 +92,7 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
break;
default:
- cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family);
+ cERROR(1, "Unknown network family '%d'", sa->sa_family);
key_len = 0;
break;
}
@@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
- cFYI(1, "CIFS: couldn't extract sharename\n");
+ cFYI(1, "%s: couldn't extract sharename\n", __func__);
sharename = NULL;
return 0;
}
@@ -302,7 +302,7 @@ static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
pagevec_init(&pvec, 0);
first = 0;
- cFYI(1, "cifs inode 0x%p now uncached", cifsi);
+ cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi);
for (;;) {
nr_pages = pagevec_lookup(&pvec,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index ffb1459..7260e11 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -42,6 +42,7 @@
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
+#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 989442d..35f9154 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb)
}
static int
-cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
- const char *devname, int silent)
+cifs_read_super(struct super_block *sb)
{
struct inode *inode;
struct cifs_sb_info *cifs_sb;
@@ -113,22 +112,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
cifs_sb = CIFS_SB(sb);
- spin_lock_init(&cifs_sb->tlink_tree_lock);
- cifs_sb->tlink_tree = RB_ROOT;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
+ sb->s_flags |= MS_POSIXACL;
- rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
- if (rc)
- return rc;
-
- cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+ if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ else
+ sb->s_maxbytes = MAX_NON_LFS;
- rc = cifs_mount(sb, cifs_sb, volume_info, devname);
-
- if (rc) {
- if (!silent)
- cERROR(1, "cifs_mount failed w/return code = %d", rc);
- goto out_mount_failed;
- }
+ /* BB FIXME fix time_gran to be larger for LANMAN sessions */
+ sb->s_time_gran = 100;
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
@@ -170,37 +163,14 @@ out_no_root:
if (inode)
iput(inode);
- cifs_umount(sb, cifs_sb);
-
-out_mount_failed:
- bdi_destroy(&cifs_sb->bdi);
return rc;
}
-static void
-cifs_put_super(struct super_block *sb)
+static void cifs_kill_sb(struct super_block *sb)
{
- int rc = 0;
- struct cifs_sb_info *cifs_sb;
-
- cFYI(1, "In cifs_put_super");
- cifs_sb = CIFS_SB(sb);
- if (cifs_sb == NULL) {
- cFYI(1, "Empty cifs superblock info passed to unmount");
- return;
- }
-
- rc = cifs_umount(sb, cifs_sb);
- if (rc)
- cERROR(1, "cifs_umount failed with return code %d", rc);
- if (cifs_sb->mountdata) {
- kfree(cifs_sb->mountdata);
- cifs_sb->mountdata = NULL;
- }
-
- unload_nls(cifs_sb->local_nls);
- bdi_destroy(&cifs_sb->bdi);
- kfree(cifs_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ kill_anon_super(sb);
+ cifs_umount(cifs_sb);
}
static int
@@ -257,9 +227,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
{
struct cifs_sb_info *cifs_sb;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
cifs_sb = CIFS_SB(inode->i_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -352,6 +319,37 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
}
}
+static void
+cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
+{
+ seq_printf(s, ",sec=");
+
+ switch (server->secType) {
+ case LANMAN:
+ seq_printf(s, "lanman");
+ break;
+ case NTLMv2:
+ seq_printf(s, "ntlmv2");
+ break;
+ case NTLM:
+ seq_printf(s, "ntlm");
+ break;
+ case Kerberos:
+ seq_printf(s, "krb5");
+ break;
+ case RawNTLMSSP:
+ seq_printf(s, "ntlmssp");
+ break;
+ default:
+ /* shouldn't ever happen */
+ seq_printf(s, "unknown");
+ break;
+ }
+
+ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ seq_printf(s, "i");
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -365,6 +363,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
+ cifs_show_security(s, tcon->ses->server);
+
seq_printf(s, ",unc=%s", tcon->treeName);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
@@ -518,7 +518,6 @@ static int cifs_drop_inode(struct inode *inode)
}
static const struct super_operations cifs_super_ops = {
- .put_super = cifs_put_super,
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
.destroy_inode = cifs_destroy_inode,
@@ -555,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
full_path = cifs_build_path_to_root(vol, cifs_sb,
cifs_sb_master_tcon(cifs_sb));
if (full_path == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
cFYI(1, "Get root dentry for %s", full_path);
@@ -584,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
dchild = d_alloc(dparent, &name);
if (dchild == NULL) {
dput(dparent);
- dparent = NULL;
+ dparent = ERR_PTR(-ENOMEM);
goto out;
}
}
@@ -602,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
if (rc) {
dput(dchild);
dput(dparent);
- dparent = NULL;
+ dparent = ERR_PTR(rc);
goto out;
}
alias = d_materialise_unique(dchild, inode);
@@ -610,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
dput(dchild);
if (IS_ERR(alias)) {
dput(dparent);
- dparent = NULL;
+ dparent = ERR_PTR(-EINVAL); /* XXX */
goto out;
}
dchild = alias;
@@ -630,6 +629,13 @@ out:
return dparent;
}
+static int cifs_set_super(struct super_block *sb, void *data)
+{
+ struct cifs_mnt_data *mnt_data = data;
+ sb->s_fs_info = mnt_data->cifs_sb;
+ return set_anon_super(sb, NULL);
+}
+
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
@@ -650,75 +656,73 @@ cifs_do_mount(struct file_system_type *fs_type,
cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
if (cifs_sb == NULL) {
root = ERR_PTR(-ENOMEM);
- goto out;
+ goto out_nls;
+ }
+
+ cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+ if (cifs_sb->mountdata == NULL) {
+ root = ERR_PTR(-ENOMEM);
+ goto out_cifs_sb;
}
cifs_setup_cifs_sb(volume_info, cifs_sb);
+ rc = cifs_mount(cifs_sb, volume_info);
+ if (rc) {
+ if (!(flags & MS_SILENT))
+ cERROR(1, "cifs_mount failed w/return code = %d", rc);
+ root = ERR_PTR(rc);
+ goto out_mountdata;
+ }
+
mnt_data.vol = volume_info;
mnt_data.cifs_sb = cifs_sb;
mnt_data.flags = flags;
- sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
+ sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
if (IS_ERR(sb)) {
root = ERR_CAST(sb);
- goto out_cifs_sb;
+ cifs_umount(cifs_sb);
+ goto out;
}
- if (sb->s_fs_info) {
+ if (sb->s_root) {
cFYI(1, "Use existing superblock");
- goto out_shared;
- }
-
- /*
- * Copy mount params for use in submounts. Better to do
- * the copy here and deal with the error before cleanup gets
- * complicated post-mount.
- */
- cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
- if (cifs_sb->mountdata == NULL) {
- root = ERR_PTR(-ENOMEM);
- goto out_super;
- }
-
- sb->s_flags = flags;
- /* BB should we make this contingent on mount parm? */
- sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
- sb->s_fs_info = cifs_sb;
+ cifs_umount(cifs_sb);
+ } else {
+ sb->s_flags = flags;
+ /* BB should we make this contingent on mount parm? */
+ sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
+
+ rc = cifs_read_super(sb);
+ if (rc) {
+ root = ERR_PTR(rc);
+ goto out_super;
+ }
- rc = cifs_read_super(sb, volume_info, dev_name,
- flags & MS_SILENT ? 1 : 0);
- if (rc) {
- root = ERR_PTR(rc);
- goto out_super;
+ sb->s_flags |= MS_ACTIVE;
}
- sb->s_flags |= MS_ACTIVE;
-
root = cifs_get_root(volume_info, sb);
- if (root == NULL)
+ if (IS_ERR(root))
goto out_super;
cFYI(1, "dentry root is: %p", root);
goto out;
-out_shared:
- root = cifs_get_root(volume_info, sb);
- if (root)
- cFYI(1, "dentry root is: %p", root);
- goto out;
-
out_super:
- kfree(cifs_sb->mountdata);
deactivate_locked_super(sb);
-
-out_cifs_sb:
- unload_nls(cifs_sb->local_nls);
- kfree(cifs_sb);
-
out:
cifs_cleanup_volume_info(&volume_info);
return root;
+
+out_mountdata:
+ kfree(cifs_sb->mountdata);
+out_cifs_sb:
+ kfree(cifs_sb);
+out_nls:
+ unload_nls(volume_info->local_nls);
+ goto out;
}
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -807,7 +811,7 @@ struct file_system_type cifs_fs_type = {
.owner = THIS_MODULE,
.name = "cifs",
.mount = cifs_do_mount,
- .kill_sb = kill_anon_super,
+ .kill_sb = cifs_kill_sb,
/* .fs_flags */
};
const struct inode_operations cifs_dir_inode_ops = {
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 64313f7..0900e16 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "1.72"
+#define CIFS_VERSION "1.73"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 953f844..257f312 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -157,9 +157,8 @@ extern int cifs_match_super(struct super_block *, void *);
extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
char *mount_data, const char *devname);
-extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
- struct smb_vol *, const char *);
-extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
+extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
+extern void cifs_umount(struct cifs_sb_info *);
extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
struct dfs_info3_param **preferrals,
int remap);
extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
- struct super_block *sb, struct smb_vol *vol);
+ struct cifs_sb_info *cifs_sb,
+ struct smb_vol *vol);
extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index bb659eb..7f540df 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -152,7 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
mid_entry->callback(mid_entry);
}
- while (server->tcpStatus == CifsNeedReconnect) {
+ do {
try_to_freeze();
/* we should try only the port we connected to before */
@@ -167,7 +167,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
- }
+ } while (server->tcpStatus == CifsNeedReconnect);
return rc;
}
@@ -2149,7 +2149,10 @@ cifs_put_tlink(struct tcon_link *tlink)
}
static inline struct tcon_link *
-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb);
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
+{
+ return cifs_sb->master_tlink;
+}
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
@@ -2543,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server)
}
void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
- struct super_block *sb, struct smb_vol *vol_info)
+ struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
* any requested capabilities changed locally e.g. via
@@ -2597,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
cFYI(1, "negotiated posix acl support");
- if (sb)
- sb->s_flags |= MS_POSIXACL;
+ if (cifs_sb)
+ cifs_sb->mnt_cifs_flags |=
+ CIFS_MOUNT_POSIXACL;
}
if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
cFYI(1, "negotiate posix pathnames");
- if (sb)
- CIFS_SB(sb)->mnt_cifs_flags |=
+ if (cifs_sb)
+ cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
}
- if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
+ if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
- CIFS_SB(sb)->rsize = 127 * 1024;
+ cifs_sb->rsize = 127 * 1024;
cFYI(DBG2, "larger reads not supported by srv");
}
}
@@ -2659,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+ spin_lock_init(&cifs_sb->tlink_tree_lock);
+ cifs_sb->tlink_tree = RB_ROOT;
+
if (pvolume_info->rsize > CIFSMaxBufSize) {
cERROR(1, "rsize %d too large, using MaxBufSize",
pvolume_info->rsize);
@@ -2747,21 +2754,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
/*
* When the server supports very large writes via POSIX extensions, we can
- * allow up to 2^24 - PAGE_CACHE_SIZE.
+ * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
+ * the RFC1001 length.
*
* Note that this might make for "interesting" allocation problems during
- * writeback however (as we have to allocate an array of pointers for the
- * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * writeback however as we have to allocate an array of pointers for the
+ * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
*/
-#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
+#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
/*
- * When the server doesn't allow large posix writes, default to a wsize of
- * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
- * described in RFC1001. This allows space for the header without going over
- * that by default.
+ * When the server doesn't allow large posix writes, only allow a wsize of
+ * 128k minus the size of the WRITE_AND_X header. That allows for a write up
+ * to the maximum size described by RFC1002.
*/
-#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
+#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
/*
* The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -2780,11 +2787,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
/* can server support 24-bit write sizes? (via UNIX extensions) */
if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
- wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
- /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
- if (!(server->capabilities & CAP_LARGE_WRITE_X))
- wsize = min_t(unsigned int, wsize, USHRT_MAX);
+ /*
+ * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
+ * Limit it to max buffer offered by the server, minus the size of the
+ * WRITEX header, not including the 4 byte RFC1001 length.
+ */
+ if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
+ (!(server->capabilities & CAP_UNIX) &&
+ (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
+ wsize = min_t(unsigned int, wsize,
+ server->maxBuf - sizeof(WRITE_REQ) + 4);
/* hard limit of CIFS_MAX_WSIZE */
wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
@@ -2934,7 +2948,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
if (volume_info->nullauth) {
cFYI(1, "null user");
- volume_info->username = "";
+ volume_info->username = kzalloc(1, GFP_KERNEL);
+ if (volume_info->username == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
cFYI(1, "Username: %s", volume_info->username);
@@ -2968,8 +2986,7 @@ out:
}
int
-cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
- struct smb_vol *volume_info, const char *devname)
+cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
int rc = 0;
int xid;
@@ -2980,6 +2997,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
int referral_walks_count = 0;
+
+ rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+ if (rc)
+ return rc;
+
+ cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+
try_mount_again:
/* cleanup activities if we're chasing a referral */
if (referral_walks_count) {
@@ -3004,6 +3028,7 @@ try_mount_again:
srvTcp = cifs_get_tcp_session(volume_info);
if (IS_ERR(srvTcp)) {
rc = PTR_ERR(srvTcp);
+ bdi_destroy(&cifs_sb->bdi);
goto out;
}
@@ -3015,14 +3040,6 @@ try_mount_again:
goto mount_fail_check;
}
- if (pSesInfo->capabilities & CAP_LARGE_FILES)
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- else
- sb->s_maxbytes = MAX_NON_LFS;
-
- /* BB FIXME fix time_gran to be larger for LANMAN sessions */
- sb->s_time_gran = 100;
-
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(pSesInfo, volume_info);
if (IS_ERR(tcon)) {
@@ -3035,7 +3052,7 @@ try_mount_again:
if (tcon->ses->capabilities & CAP_UNIX) {
/* reset of caps checks mount to see if unix extensions
disabled for just this mount */
- reset_cifs_unix_caps(xid, tcon, sb, volume_info);
+ reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
@@ -3158,6 +3175,7 @@ mount_fail_check:
cifs_put_smb_ses(pSesInfo);
else
cifs_put_tcp_session(srvTcp);
+ bdi_destroy(&cifs_sb->bdi);
goto out;
}
@@ -3171,6 +3189,10 @@ out:
return rc;
}
+/*
+ * Issue a TREE_CONNECT request. Note that for IPC$ shares, the tcon
+ * pointer may be NULL.
+ */
int
CIFSTCon(unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon,
@@ -3205,7 +3227,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if ((ses->server->sec_mode) & SECMODE_USER) {
+ if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -3328,8 +3350,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
return rc;
}
-int
-cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
+void
+cifs_umount(struct cifs_sb_info *cifs_sb)
{
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
@@ -3350,7 +3372,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
}
spin_unlock(&cifs_sb->tlink_tree_lock);
- return 0;
+ bdi_destroy(&cifs_sb->bdi);
+ kfree(cifs_sb->mountdata);
+ unload_nls(cifs_sb->local_nls);
+ kfree(cifs_sb);
}
int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
@@ -3371,7 +3396,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
}
if (rc == 0) {
spin_lock(&GlobalMid_Lock);
- if (server->tcpStatus != CifsExiting)
+ if (server->tcpStatus == CifsNeedNegotiate)
server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
@@ -3484,12 +3509,6 @@ out:
return tcon;
}
-static inline struct tcon_link *
-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
-{
- return cifs_sb->master_tlink;
-}
-
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index d368a47..8166966 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -28,14 +28,14 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
server->fscache =
fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
&cifs_fscache_server_index_def, server);
- cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server,
- server->fscache);
+ cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
+ server->fscache);
}
void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
{
- cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server,
- server->fscache);
+ cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
+ server->fscache);
fscache_relinquish_cookie(server->fscache, 0);
server->fscache = NULL;
}
@@ -47,13 +47,13 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
tcon->fscache =
fscache_acquire_cookie(server->fscache,
&cifs_fscache_super_index_def, tcon);
- cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)",
- server->fscache, tcon->fscache);
+ cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache,
+ tcon->fscache);
}
void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
{
- cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
+ cFYI(1, "%s: (0x%p)", __func__, tcon->fscache);
fscache_relinquish_cookie(tcon->fscache, 0);
tcon->fscache = NULL;
}
@@ -70,8 +70,8 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
&cifs_fscache_inode_object_def, cifsi);
- cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
- cifsi->fscache);
+ cFYI(1, "%s: got FH cookie (0x%p/0x%p)", __func__,
+ tcon->fscache, cifsi->fscache);
}
}
@@ -80,8 +80,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
struct cifsInodeInfo *cifsi = CIFS_I(inode);
if (cifsi->fscache) {
- cFYI(1, "CIFS releasing inode cookie (0x%p)",
- cifsi->fscache);
+ cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
fscache_relinquish_cookie(cifsi->fscache, 0);
cifsi->fscache = NULL;
}
@@ -92,8 +91,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
struct cifsInodeInfo *cifsi = CIFS_I(inode);
if (cifsi->fscache) {
- cFYI(1, "CIFS disabling inode cookie (0x%p)",
- cifsi->fscache);
+ cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
fscache_relinquish_cookie(cifsi->fscache, 1);
cifsi->fscache = NULL;
}
@@ -121,8 +119,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
cifs_sb_master_tcon(cifs_sb)->fscache,
&cifs_fscache_inode_object_def,
cifsi);
- cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p",
- cifsi->fscache, old);
+ cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p",
+ __func__, cifsi->fscache, old);
}
}
@@ -132,8 +130,8 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
struct inode *inode = page->mapping->host;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
- cFYI(1, "CIFS: fscache release page (0x%p/0x%p)",
- page, cifsi->fscache);
+ cFYI(1, "%s: (0x%p/0x%p)", __func__, page,
+ cifsi->fscache);
if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
return 0;
}
@@ -144,8 +142,7 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
int error)
{
- cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)",
- page, error);
+ cFYI(1, "%s: (0x%p/%d)", __func__, page, error);
if (!error)
SetPageUptodate(page);
unlock_page(page);
@@ -158,7 +155,7 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
int ret;
- cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p",
+ cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__,
CIFS_I(inode)->fscache, page, inode);
ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
cifs_readpage_from_fscache_complete,
@@ -167,11 +164,11 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
switch (ret) {
case 0: /* page found in fscache, read submitted */
- cFYI(1, "CIFS: readpage_from_fscache: submitted");
+ cFYI(1, "%s: submitted", __func__);
return ret;
case -ENOBUFS: /* page won't be cached */
case -ENODATA: /* page not in cache */
- cFYI(1, "CIFS: readpage_from_fscache %d", ret);
+ cFYI(1, "%s: %d", __func__, ret);
return 1;
default:
@@ -190,7 +187,7 @@ int __cifs_readpages_from_fscache(struct inode *inode,
{
int ret;
- cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)",
+ cFYI(1, "%s: (0x%p/%u/0x%p)", __func__,
CIFS_I(inode)->fscache, *nr_pages, inode);
ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
pages, nr_pages,
@@ -199,12 +196,12 @@ int __cifs_readpages_from_fscache(struct inode *inode,
mapping_gfp_mask(mapping));
switch (ret) {
case 0: /* read submitted to the cache for all pages */
- cFYI(1, "CIFS: readpages_from_fscache: submitted");
+ cFYI(1, "%s: submitted", __func__);
return ret;
case -ENOBUFS: /* some pages are not cached and can't be */
case -ENODATA: /* some pages are not cached */
- cFYI(1, "CIFS: readpages_from_fscache: no page");
+ cFYI(1, "%s: no page", __func__);
return 1;
default:
@@ -218,7 +215,7 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
{
int ret;
- cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p",
+ cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__,
CIFS_I(inode)->fscache, page, inode);
ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL);
if (ret != 0)
@@ -230,7 +227,7 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct fscache_cookie *cookie = cifsi->fscache;
- cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie);
+ cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie);
fscache_wait_on_page_write(cookie, page);
fscache_uncache_page(cookie, page);
}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 1525d5e..1c5b770 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
sg_init_one(&sgout, out, 8);
rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
- if (rc) {
+ if (rc)
cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
- crypto_free_blkcipher(tfm_des);
- goto smbhash_err;
- }
+ crypto_free_blkcipher(tfm_des);
smbhash_err:
return rc;
}
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 6cbb3af..cb140ef 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
/* the coda pioctl inode ops */
static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
{
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
return (mask & MAY_EXEC) ? -EACCES : 0;
}
diff --git a/fs/dcookies.c b/fs/dcookies.c
index a21cabd..dda0dc7 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
/* FIXME: (deleted) ? */
path = d_path(&dcs->path, kbuf, PAGE_SIZE);
+ mutex_unlock(&dcookie_mutex);
+
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_free;
@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
out_free:
kfree(kbuf);
+ return err;
out:
mutex_unlock(&dcookie_mutex);
return err;
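
The lookup_dcookie() change narrows the dcookie_mutex critical section: the lock is dropped right after d_path() fills the buffer, and the out_free path now returns directly instead of falling through to the out label, which also unlocks. A small userspace sketch of the corrected lock scope, assuming a pthread mutex in place of dcookie_mutex (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t cookie_lock = PTHREAD_MUTEX_INITIALIZER;
static char table_path[64] = "/tmp/example";	/* stands in for the locked lookup */

static int lookup_like(char *buf, size_t len)
{
	pthread_mutex_lock(&cookie_lock);
	/* copy out while holding the lock ... */
	strncpy(buf, table_path, len - 1);
	buf[len - 1] = '\0';
	/* ... then drop it before doing any further work on the copy */
	pthread_mutex_unlock(&cookie_lock);

	printf("looked up: %s\n", buf);
	return 0;			/* no path falls through to a second unlock */
}

int main(void)
{
	char buf[64];
	return lookup_like(buf, sizeof(buf));
}
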
diff --git a/fs/exec.c b/fs/exec.c
index ea5f748..6075a1e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
+ set_fs(USER_DS);
current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
flush_thread();
current->personality &= ~bprm->per_clear;
@@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
if (retval)
return retval;
- /* kernel module loader fixup */
- /* so we don't try to load run modprobe in kernel space. */
- set_fs(USER_DS);
-
retval = audit_bprm(bprm);
if (retval)
return retval;
@@ -1999,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
* is a special value that we use to trap recursive
* core dumps
*/
-static int umh_pipe_setup(struct subprocess_info *info)
+static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
struct file *rp, *wp;
struct fdtable *fdt;
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 2e29abb..095c36f 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -125,7 +125,7 @@ struct ext4_ext_path {
* positive retcode - signal for ext4_ext_walk_space(), see below
* callback must return valid extent (passed or newly created)
*/
-typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
+typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
struct ext4_ext_cache *,
struct ext4_extent *, void *);
@@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
#define EXT_BREAK 1
#define EXT_REPEAT 2
-/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */
-#define EXT_MAX_BLOCK 0xffffffff
+/*
+ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
+ * __le32.
+ */
+#define EXT_MAX_BLOCKS 0xffffffff
/*
* EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
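
The rename from EXT_MAX_BLOCK to EXT_MAX_BLOCKS reflects that 0xffffffff is the number of addressable logical blocks, not the largest valid block index; callers that mean "last valid block" now use EXT_MAX_BLOCKS - 1, as the truncate and rm_leaf hunks below show. A tiny arithmetic check of that reading, with the constant copied from the hunk above:

#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffff	/* number of logical blocks; ee_block is __le32 */

int main(void)
{
	unsigned int last_valid = EXT_MAX_BLOCKS - 1;	/* largest usable block index */

	printf("block count: %u\n", (unsigned int)EXT_MAX_BLOCKS);
	printf("last index : %u\n", last_valid);
	return 0;
}
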
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 5199bac..f815cc8 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1408,7 +1408,7 @@ got_index:
/*
* ext4_ext_next_allocated_block:
- * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
+ * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
* NOTE: it considers block number from index entry as
* allocated block. Thus, index entries have to be consistent
* with leaves.
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
- return EXT_MAX_BLOCK;
+ return EXT_MAX_BLOCKS;
while (depth >= 0) {
if (depth == path->p_depth) {
@@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
depth--;
}
- return EXT_MAX_BLOCK;
+ return EXT_MAX_BLOCKS;
}
/*
* ext4_ext_next_leaf_block:
- * returns first allocated block from next leaf or EXT_MAX_BLOCK
+ * returns first allocated block from next leaf or EXT_MAX_BLOCKS
*/
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
struct ext4_ext_path *path)
@@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
/* zero-tree has no leaf blocks at all */
if (depth == 0)
- return EXT_MAX_BLOCK;
+ return EXT_MAX_BLOCKS;
/* go to index block */
depth--;
@@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
depth--;
}
- return EXT_MAX_BLOCK;
+ return EXT_MAX_BLOCKS;
}
/*
@@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
*/
if (b2 < b1) {
b2 = ext4_ext_next_allocated_block(path);
- if (b2 == EXT_MAX_BLOCK)
+ if (b2 == EXT_MAX_BLOCKS)
goto out;
}
/* check for wrap through zero on extent logical start block*/
if (b1 + len1 < b1) {
- len1 = EXT_MAX_BLOCK - b1;
+ len1 = EXT_MAX_BLOCKS - b1;
newext->ee_len = cpu_to_le16(len1);
ret = 1;
}
@@ -1767,7 +1767,7 @@ repeat:
fex = EXT_LAST_EXTENT(eh);
next = ext4_ext_next_leaf_block(inode, path);
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
- && next != EXT_MAX_BLOCK) {
+ && next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %d\n", next);
BUG_ON(npath != NULL);
npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
BUG_ON(func == NULL);
BUG_ON(inode == NULL);
- while (block < last && block != EXT_MAX_BLOCK) {
+ while (block < last && block != EXT_MAX_BLOCKS) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
@@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
err = -EIO;
break;
}
- err = func(inode, path, &cbex, ex, cbdata);
+ err = func(inode, next, &cbex, ex, cbdata);
ext4_ext_drop_refs(path);
if (err < 0)
@@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
if (ex == NULL) {
/* there is no extent yet, so gap is [0;-] */
lblock = 0;
- len = EXT_MAX_BLOCK;
+ len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
@@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* never happen because at least one of the end points
* needs to be on the edge of the extent.
*/
- if (end == EXT_MAX_BLOCK) {
+ if (end == EXT_MAX_BLOCKS - 1) {
ext_debug(" bad truncate %u:%u\n",
start, end);
block = 0;
@@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* If this is a truncate, this condition
* should never happen
*/
- if (end == EXT_MAX_BLOCK) {
+ if (end == EXT_MAX_BLOCKS - 1) {
ext_debug(" bad truncate %u:%u\n",
start, end);
err = -EIO;
@@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* we need to remove it from the leaf
*/
if (num == 0) {
- if (end != EXT_MAX_BLOCK) {
+ if (end != EXT_MAX_BLOCKS - 1) {
/*
* For hole punching, we need to scoot all the
* extents up when an extent is removed so that
@@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
- err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK);
+ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
/* In a multi-transaction truncate, we only make the final
* transaction synchronous.
@@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
/*
* Callback function called for each extent to gather FIEMAP information.
*/
-static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
+static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
struct ext4_ext_cache *newex, struct ext4_extent *ex,
void *data)
{
__u64 logical;
__u64 physical;
__u64 length;
- loff_t size;
__u32 flags = 0;
int ret = 0;
struct fiemap_extent_info *fieinfo = data;
@@ -4103,8 +4102,7 @@ found_delayed_extent:
if (ex && ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
- size = i_size_read(inode);
- if (logical + length >= size)
+ if (next == EXT_MAX_BLOCKS)
flags |= FIEMAP_EXTENT_LAST;
ret = fiemap_fill_next_extent(fieinfo, logical, physical,
@@ -4347,8 +4345,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
start_blk = start >> inode->i_sb->s_blocksize_bits;
last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
- if (last_blk >= EXT_MAX_BLOCK)
- last_blk = EXT_MAX_BLOCK-1;
+ if (last_blk >= EXT_MAX_BLOCKS)
+ last_blk = EXT_MAX_BLOCKS-1;
len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a5763e3..e3126c0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page,
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
- trace_ext4_writepage(inode, page);
+ trace_ext4_writepage(page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 859f2ae..6ed859d 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
free += next - bit;
trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
- trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
- grp_blk_start + bit, next - bit);
+ trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
+ next - bit);
mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
bit = next + 1;
}
@@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ext4_group_t group;
ext4_grpblk_t bit;
- trace_ext4_mb_release_group_pa(sb, pa);
+ trace_ext4_mb_release_group_pa(pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
* @inode: inode
* @block: start physical block to free
* @count: number of blocks to count
- * @metadata: Are these metadata blocks
+ * @flags: flags used by ext4_free_blocks
*/
void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block,
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2b8304b..f57455a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}
- if ((orig_start > EXT_MAX_BLOCK) ||
- (donor_start > EXT_MAX_BLOCK) ||
- (*len > EXT_MAX_BLOCK) ||
- (orig_start + *len > EXT_MAX_BLOCK)) {
+ if ((orig_start >= EXT_MAX_BLOCKS) ||
+ (donor_start >= EXT_MAX_BLOCKS) ||
+ (*len > EXT_MAX_BLOCKS) ||
+ (orig_start + *len >= EXT_MAX_BLOCKS)) {
ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
- "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK,
+ "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
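
With EXT_MAX_BLOCKS being a block count rather than a last index, the start-block checks above switch from '>' to '>=': a start block equal to EXT_MAX_BLOCKS is already out of range. A one-line predicate capturing the new bound (illustrative only):

#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffffU

/* a logical start block is usable only if it is strictly below the count */
static int start_block_ok(unsigned int start)
{
	return start < EXT_MAX_BLOCKS;
}

int main(void)
{
	printf("0xfffffffe ok? %d\n", start_block_ok(0xfffffffeU));	/* 1 */
	printf("0xffffffff ok? %d\n", start_block_ok(0xffffffffU));	/* 0 */
	return 0;
}
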
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cc5c157..9ea71aa 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
* in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
* so that won't be a limiting factor.
*
+ * However, there is another limiting factor: we store extents as a starting
+ * block plus a length, so the length of the extent that covers the maximum
+ * file size must also fit into the on-disk format containers. Because the
+ * length is always one unit larger than the largest block index (we count
+ * block 0 as well), we have to lower s_maxbytes by one fs block.
+ *
* Note, this does *not* consider any metadata overhead for vfs i_blocks.
*/
static loff_t ext4_max_size(int blkbits, int has_huge_files)
@@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
upper_limit <<= blkbits;
}
- /* 32-bit extent-start container, ee_block */
- res = 1LL << 32;
+ /*
+ * 32-bit extent-start container, ee_block. We lower the maxbytes
+ * by one fs block, so ee_len can cover the extent of the maximum
+ * file size
+ */
+ res = (1LL << 32) - 1;
res <<= blkbits;
- res -= 1;
/* Sanity check against vm- & vfs- imposed limits */
if (res > upper_limit)
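
The ext4_max_size() hunk replaces "res = 1LL << 32; ...; res -= 1" with "res = (1LL << 32) - 1" blocks: the limit is now a whole number of blocks, so an extent length field can still describe the extent covering the largest possible file. For a 4 KiB block size the old and new byte limits differ by one block minus one byte, as this sketch shows (the block size is assumed, not taken from a running system):

#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* 4 KiB blocks, for illustration */
	long long old_max, new_max;

	old_max = (1LL << 32);			/* old: 2^32 blocks ... */
	old_max <<= blkbits;
	old_max -= 1;				/* ... minus one byte */

	new_max = (1LL << 32) - 1;		/* new: 2^32 - 1 whole blocks */
	new_max <<= blkbits;

	printf("old s_maxbytes: %lld\n", old_max);		/* 17592186044415 */
	printf("new s_maxbytes: %lld\n", new_max);		/* 17592186040320 */
	printf("difference    : %lld\n", old_max - new_max);	/* 4095 = one block - 1 */
	return 0;
}
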
diff --git a/fs/inode.c b/fs/inode.c
index 0f7e88a..43566d1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash);
void end_writeback(struct inode *inode)
{
might_sleep();
+ /*
+ * We have to cycle tree_lock here because reclaim can be still in the
+ * process of removing the last page (in __delete_from_page_cache())
+ * and we must not free mapping under it.
+ */
+ spin_lock_irq(&inode->i_data.tree_lock);
BUG_ON(inode->i_data.nrpages);
+ spin_unlock_irq(&inode->i_data.tree_lock);
BUG_ON(!list_empty(&inode->i_data.private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
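
end_writeback() now takes and immediately releases i_data.tree_lock before checking nrpages: reclaim may still be inside __delete_from_page_cache() under that lock, and cycling it guarantees the last page removal has fully finished before the mapping is torn down. The "cycle the lock to synchronize with a concurrent critical section" idiom, reduced to a userspace sketch with a pthread mutex standing in for the kernel spinlock (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int nrpages;	/* updated only while tree_lock is held */

static void delete_last_page(void)
{
	pthread_mutex_lock(&tree_lock);
	nrpages--;		/* the remover finishes entirely under the lock */
	pthread_mutex_unlock(&tree_lock);
}

static void end_writeback_like(void)
{
	/*
	 * Cycling the lock means any remover that already entered its
	 * critical section has left it by the time we proceed.
	 */
	pthread_mutex_lock(&tree_lock);
	if (nrpages != 0)
		fprintf(stderr, "pages still mapped!\n");
	pthread_mutex_unlock(&tree_lock);
	/* only now is it safe to free the mapping */
}

int main(void)
{
	nrpages = 1;
	delete_last_page();
	end_writeback_like();
	return 0;
}
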
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 3db5ba4..b3cc858 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -974,7 +974,7 @@ out_no_inode:
out_no_read:
printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
__func__, s->s_id, iso_blknum, block);
- goto out_freesbi;
+ goto out_freebh;
out_bad_zone_size:
printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
out_freebh:
brelse(bh);
+ brelse(pri_bh);
out_freesbi:
kfree(opt.iocharset);
kfree(sbi);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 6a79fd0..2c62c5a 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
!buffer_dirty(bh) && !buffer_write_io_error(bh)) {
+ /*
+ * Get our reference so that bh cannot be freed before
+ * we unlock it
+ */
+ get_bh(bh);
JBUFFER_TRACE(jh, "remove from checkpoint list");
ret = __jbd2_journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
- jbd2_journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
} else {
@@ -223,8 +227,8 @@ restart:
spin_lock(&journal->j_list_lock);
goto restart;
}
+ get_bh(bh);
if (buffer_locked(bh)) {
- atomic_inc(&bh->b_count);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
@@ -243,7 +247,6 @@ restart:
*/
released = __jbd2_journal_remove_checkpoint(jh);
jbd_unlock_bh_state(bh);
- jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
@@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
int ret = 0;
if (buffer_locked(bh)) {
- atomic_inc(&bh->b_count);
+ get_bh(bh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
@@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
ret = 1;
if (unlikely(buffer_write_io_error(bh)))
ret = -EIO;
+ get_bh(bh);
J_ASSERT_JH(jh, !buffer_jbddirty(bh));
BUFFER_TRACE(bh, "remove from checkpoint");
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- jbd2_journal_remove_journal_head(bh);
__brelse(bh);
} else {
/*
@@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
/*
* journal_clean_one_cp_list
*
- * Find all the written-back checkpoint buffers in the given list and release them.
+ * Find all the written-back checkpoint buffers in the given list and
+ * release them.
*
* Called with the journal locked.
* Called with j_list_lock held.
@@ -663,8 +667,8 @@ out:
* checkpoint lists.
*
* The function returns 1 if it frees the transaction, 0 otherwise.
+ * The function can free jh and bh.
*
- * This function is called with the journal locked.
* This function is called with j_list_lock held.
* This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
@@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
}
journal = transaction->t_journal;
+ JBUFFER_TRACE(jh, "removing from transaction");
__buffer_unlink(jh);
jh->b_cp_transaction = NULL;
+ jbd2_journal_put_journal_head(jh);
if (transaction->t_checkpoint_list != NULL ||
transaction->t_checkpoint_io_list != NULL)
goto out;
- JBUFFER_TRACE(jh, "transaction has no more buffers");
/*
* There is one special case to worry about: if we have just pulled the
@@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
* The locking here around t_state is a bit sleazy.
* See the comment at the end of jbd2_journal_commit_transaction().
*/
- if (transaction->t_state != T_FINISHED) {
- JBUFFER_TRACE(jh, "belongs to running/committing transaction");
+ if (transaction->t_state != T_FINISHED)
goto out;
- }
/* OK, that was the last buffer for the transaction: we can now
safely remove this transaction from the log */
@@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
wake_up(&journal->j_wait_logspace);
ret = 1;
out:
- JBUFFER_TRACE(jh, "exit");
return ret;
}
@@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+ /* Get reference for checkpointing transaction */
+ jbd2_journal_grab_journal_head(jh2bh(jh));
jh->b_cp_transaction = transaction;
if (!transaction->t_checkpoint_list) {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7f21cf3..eef6979 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -848,10 +848,16 @@ restart_loop:
while (commit_transaction->t_forget) {
transaction_t *cp_transaction;
struct buffer_head *bh;
+ int try_to_free = 0;
jh = commit_transaction->t_forget;
spin_unlock(&journal->j_list_lock);
bh = jh2bh(jh);
+ /*
+ * Get a reference so that bh cannot be freed before we are
+ * done with it.
+ */
+ get_bh(bh);
jbd_lock_bh_state(bh);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
@@ -914,28 +920,27 @@ restart_loop:
__jbd2_journal_insert_checkpoint(jh, commit_transaction);
if (is_journal_aborted(journal))
clear_buffer_jbddirty(bh);
- JBUFFER_TRACE(jh, "refile for checkpoint writeback");
- __jbd2_journal_refile_buffer(jh);
- jbd_unlock_bh_state(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
- /* The buffer on BJ_Forget list and not jbddirty means
+ /*
+ * The buffer on BJ_Forget list and not jbddirty means
* it has been freed by this transaction and hence it
* could not have been reallocated until this
* transaction has committed. *BUT* it could be
* reallocated once we have written all the data to
* disk and before we process the buffer on BJ_Forget
- * list. */
- JBUFFER_TRACE(jh, "refile or unfile freed buffer");
- __jbd2_journal_refile_buffer(jh);
- if (!jh->b_transaction) {
- jbd_unlock_bh_state(bh);
- /* needs a brelse */
- jbd2_journal_remove_journal_head(bh);
- release_buffer_page(bh);
- } else
- jbd_unlock_bh_state(bh);
+ * list.
+ */
+ if (!jh->b_next_transaction)
+ try_to_free = 1;
}
+ JBUFFER_TRACE(jh, "refile or unfile buffer");
+ __jbd2_journal_refile_buffer(jh);
+ jbd_unlock_bh_state(bh);
+ if (try_to_free)
+ release_buffer_page(bh); /* Drops bh reference */
+ else
+ __brelse(bh);
cond_resched_lock(&journal->j_list_lock);
}
spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 9a78269..0dfa5b59 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh)
* When a buffer has its BH_JBD bit set it is immune from being released by
* core kernel code, mainly via ->b_count.
*
- * A journal_head may be detached from its buffer_head when the journal_head's
- * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
- * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
- * journal_head can be dropped if needed.
+ * A journal_head is detached from its buffer_head when the journal_head's
+ * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
+ * transaction (b_cp_transaction) hold their references to b_jcount.
*
* Various places in the kernel want to attach a journal_head to a buffer_head
* _before_ attaching the journal_head to a transaction. To protect the
@@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh)
* (Attach a journal_head if needed. Increments b_jcount)
* struct journal_head *jh = jbd2_journal_add_journal_head(bh);
* ...
+ * (Get another reference for transaction)
+ * jbd2_journal_grab_journal_head(bh);
* jh->b_transaction = xxx;
+ * (Put original reference)
* jbd2_journal_put_journal_head(jh);
- *
- * Now, the journal_head's b_jcount is zero, but it is safe from being released
- * because it has a non-zero b_transaction.
*/
/*
* Give a buffer_head a journal_head.
*
- * Doesn't need the journal lock.
* May sleep.
*/
struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
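
Under the new scheme a journal_head stays attached to its buffer_head for as long as b_jcount is non-zero, and the running and checkpoint transactions each hold one counted reference instead of relying on "non-NULL b_transaction keeps it alive". A userspace refcount sketch of the grab/put pairing described in the comment above (add_jh/grab_jh/put_jh are illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct jh {
	int jcount;			/* plays the role of b_jcount */
};

static struct jh *add_jh(void)		/* like jbd2_journal_add_journal_head() */
{
	struct jh *jh = calloc(1, sizeof(*jh));
	jh->jcount = 1;			/* caller's initial reference */
	return jh;
}

static void grab_jh(struct jh *jh)	/* extra reference for a transaction */
{
	jh->jcount++;
}

static void put_jh(struct jh *jh)	/* drop a reference; free on last put */
{
	if (--jh->jcount == 0) {
		printf("last reference dropped, freeing journal_head\n");
		free(jh);
	}
}

int main(void)
{
	struct jh *jh = add_jh();

	grab_jh(jh);	/* reference held by the transaction */
	put_jh(jh);	/* caller's original reference dropped */
	put_jh(jh);	/* transaction done: last reference, jh is freed */
	return 0;
}
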
@@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
struct journal_head *jh = bh2jh(bh);
J_ASSERT_JH(jh, jh->b_jcount >= 0);
-
- get_bh(bh);
- if (jh->b_jcount == 0) {
- if (jh->b_transaction == NULL &&
- jh->b_next_transaction == NULL &&
- jh->b_cp_transaction == NULL) {
- J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
- J_ASSERT_BH(bh, buffer_jbd(bh));
- J_ASSERT_BH(bh, jh2bh(jh) == bh);
- BUFFER_TRACE(bh, "remove journal_head");
- if (jh->b_frozen_data) {
- printk(KERN_WARNING "%s: freeing "
- "b_frozen_data\n",
- __func__);
- jbd2_free(jh->b_frozen_data, bh->b_size);
- }
- if (jh->b_committed_data) {
- printk(KERN_WARNING "%s: freeing "
- "b_committed_data\n",
- __func__);
- jbd2_free(jh->b_committed_data, bh->b_size);
- }
- bh->b_private = NULL;
- jh->b_bh = NULL; /* debug, really */
- clear_buffer_jbd(bh);
- __brelse(bh);
- journal_free_journal_head(jh);
- } else {
- BUFFER_TRACE(bh, "journal_head was locked");
- }
+ J_ASSERT_JH(jh, jh->b_transaction == NULL);
+ J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+ J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+ J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
+ J_ASSERT_BH(bh, buffer_jbd(bh));
+ J_ASSERT_BH(bh, jh2bh(jh) == bh);
+ BUFFER_TRACE(bh, "remove journal_head");
+ if (jh->b_frozen_data) {
+ printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
+ jbd2_free(jh->b_frozen_data, bh->b_size);
}
+ if (jh->b_committed_data) {
+ printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
+ jbd2_free(jh->b_committed_data, bh->b_size);
+ }
+ bh->b_private = NULL;
+ jh->b_bh = NULL; /* debug, really */
+ clear_buffer_jbd(bh);
+ journal_free_journal_head(jh);
}
/*
- * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
- * and has a zero b_jcount then remove and release its journal_head. If we did
- * see that the buffer is not used by any transaction we also "logically"
- * decrement ->b_count.
- *
- * We in fact take an additional increment on ->b_count as a convenience,
- * because the caller usually wants to do additional things with the bh
- * after calling here.
- * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
- * time. Once the caller has run __brelse(), the buffer is eligible for
- * reaping by try_to_free_buffers().
- */
-void jbd2_journal_remove_journal_head(struct buffer_head *bh)
-{
- jbd_lock_bh_journal_head(bh);
- __journal_remove_journal_head(bh);
- jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * Drop a reference on the passed journal_head. If it fell to zero then try to
+ * Drop a reference on the passed journal_head. If it fell to zero then
* release the journal_head from the buffer_head.
*/
void jbd2_journal_put_journal_head(struct journal_head *jh)
@@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
jbd_lock_bh_journal_head(bh);
J_ASSERT_JH(jh, jh->b_jcount > 0);
--jh->b_jcount;
- if (!jh->b_jcount && !jh->b_transaction) {
+ if (!jh->b_jcount) {
__journal_remove_journal_head(bh);
+ jbd_unlock_bh_journal_head(bh);
__brelse(bh);
- }
- jbd_unlock_bh_journal_head(bh);
+ } else
+ jbd_unlock_bh_journal_head(bh);
}
/*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3eec82d..2d71094 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
/*
* jbd2_get_transaction: obtain a new transaction_t object.
@@ -764,7 +765,6 @@ repeat:
if (!jh->b_transaction) {
JBUFFER_TRACE(jh, "no transaction");
J_ASSERT_JH(jh, !jh->b_next_transaction);
- jh->b_transaction = transaction;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
@@ -814,7 +814,6 @@ out:
* int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
* @handle: transaction to add buffer modifications to
* @bh: bh to be used for metadata writes
- * @credits: variable that will receive credits for the buffer
*
* Returns an error code or 0 on success.
*
@@ -896,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* committed and so it's safe to clear the dirty bit.
*/
clear_buffer_dirty(jh2bh(jh));
- jh->b_transaction = transaction;
-
/* first access by this transaction */
jh->b_modified = 0;
@@ -932,7 +929,6 @@ out:
* non-rewindable consequences
* @handle: transaction
* @bh: buffer to undo
- * @credits: store the number of taken credits here (if not NULL)
*
* Sometimes there is a need to distinguish between metadata which has
* been committed to disk and that which has not. The ext3fs code uses
@@ -1232,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
__jbd2_journal_unfile_buffer(jh);
- jbd2_journal_remove_journal_head(bh);
- __brelse(bh);
if (!buffer_jbd(bh)) {
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@@ -1556,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
mark_buffer_dirty(bh); /* Expose it to the VM */
}
-void __jbd2_journal_unfile_buffer(struct journal_head *jh)
+/*
+ * Remove buffer from all transactions.
+ *
+ * Called with bh_state lock and j_list_lock
+ *
+ * jh and bh may be already freed when this function returns.
+ */
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
+ jbd2_journal_put_journal_head(jh);
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
- jbd_lock_bh_state(jh2bh(jh));
+ struct buffer_head *bh = jh2bh(jh);
+
+ /* Get reference so that buffer cannot be freed before we unlock it */
+ get_bh(bh);
+ jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
__jbd2_journal_unfile_buffer(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(jh2bh(jh));
+ jbd_unlock_bh_state(bh);
+ __brelse(bh);
}
/*
@@ -1595,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
__jbd2_journal_remove_checkpoint(jh);
- jbd2_journal_remove_journal_head(bh);
- __brelse(bh);
}
}
spin_unlock(&journal->j_list_lock);
@@ -1659,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
/*
* We take our own ref against the journal_head here to avoid
* having to add tons of locking around each instance of
- * jbd2_journal_remove_journal_head() and
* jbd2_journal_put_journal_head().
*/
jh = jbd2_journal_grab_journal_head(bh);
@@ -1697,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
int may_free = 1;
struct buffer_head *bh = jh2bh(jh);
- __jbd2_journal_unfile_buffer(jh);
-
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
+ __jbd2_journal_temp_unlink_buffer(jh);
/*
* We don't want to write the buffer anymore, clear the
* bit so that we don't confuse checks in
@@ -1711,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
- jbd2_journal_remove_journal_head(bh);
- __brelse(bh);
+ __jbd2_journal_unfile_buffer(jh);
}
return may_free;
}
@@ -1990,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
if (jh->b_transaction)
__jbd2_journal_temp_unlink_buffer(jh);
+ else
+ jbd2_journal_grab_journal_head(bh);
jh->b_transaction = transaction;
switch (jlist) {
@@ -2041,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
* already started to be used by a subsequent transaction, refile the
* buffer on that transaction's metadata list.
*
- * Called under journal->j_list_lock
- *
+ * Called under j_list_lock
* Called under jbd_lock_bh_state(jh2bh(jh))
+ *
+ * jh and bh may already be freed when this function returns
*/
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
@@ -2067,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
was_dirty = test_clear_buffer_jbddirty(bh);
__jbd2_journal_temp_unlink_buffer(jh);
+ /*
+ * We set b_transaction here because b_next_transaction will inherit
+ * our jh reference and thus __jbd2_journal_file_buffer() must not
+ * take a new one.
+ */
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
if (buffer_freed(bh))
@@ -2083,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
}
/*
- * For the unlocked version of this call, also make sure that any
- * hanging journal_head is cleaned up if necessary.
- *
- * __jbd2_journal_refile_buffer is usually called as part of a single locked
- * operation on a buffer_head, in which the caller is probably going to
- * be hooking the journal_head onto other lists. In that case it is up
- * to the caller to remove the journal_head if necessary. For the
- * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
- * doing anything else to the buffer so we need to do the cleanup
- * ourselves to avoid a jh leak.
- *
- * *** The journal_head may be freed by this call! ***
+ * __jbd2_journal_refile_buffer() with necessary locking added. We take our
+ * bh reference so that we can safely unlock bh.
+ *
+ * The jh and bh may be freed by this call.
*/
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);
+ /* Get reference so that buffer cannot be freed before we unlock it */
+ get_bh(bh);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
-
__jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
- jbd2_journal_remove_journal_head(bh);
-
spin_unlock(&journal->j_list_lock);
__brelse(bh);
}
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index c5ce6c1..2f3f531 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file)
struct jfs_inode_info *ji = JFS_IP(inode);
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag == -1) {
- ji->active_ag = ji->agno;
- atomic_inc(
- &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]);
+ struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
+ ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
+ atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
}
spin_unlock_irq(&ji->ag_lock);
}
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ed53a47..b78b2f9 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -397,7 +397,7 @@ int diRead(struct inode *ip)
release_metapage(mp);
/* set the ag for the inode */
- JFS_IP(ip)->agno = BLKTOAG(agstart, sbi);
+ JFS_IP(ip)->agstart = agstart;
JFS_IP(ip)->active_ag = -1;
return (rc);
@@ -901,7 +901,7 @@ int diFree(struct inode *ip)
/* get the allocation group for this ino.
*/
- agno = JFS_IP(ip)->agno;
+ agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
/* Lock the AG specific inode map information
*/
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip)
static inline void
diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
{
- struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
ip->i_ino = (iagno << L2INOSPERIAG) + ino;
jfs_ip->ixpxd = iagp->inoext[extno];
- jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+ jfs_ip->agstart = le64_to_cpu(iagp->agstart);
jfs_ip->active_ag = -1;
}
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
*/
/* get the ag number of this iag */
- agno = JFS_IP(pip)->agno;
+ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
/*
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
continue;
}
- /* agstart that computes to the same ag is treated as same; */
agstart = le64_to_cpu(iagp->agstart);
- /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
n = agstart >> mp->db_agl2size;
+ iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
/* compute backed inodes */
numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 1439f11..584a4a1 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -50,8 +50,9 @@ struct jfs_inode_info {
short btindex; /* btpage entry index*/
struct inode *ipimap; /* inode map */
unsigned long cflag; /* commit flags */
+ u64 agstart; /* agstart of the containing IAG */
u16 bxflag; /* xflag of pseudo buffer? */
- unchar agno; /* ag number */
+ unchar pad;
signed char active_ag; /* ag currently allocating from */
lid_t blid; /* lid of pseudo buffer? */
lid_t atlhead; /* anonymous tlock list head */
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 8ea5efb..8d0c1c7 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
int log_formatted = 0;
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
- uint old_agsize;
+ s64 old_agsize;
int agsizechanged = 0;
struct buffer_head *bh, *bh2;
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index adb45ec..e374050 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
- goto retry_rebind;
+ switch (task->tk_status) {
+ case -EACCES:
+ case -EIO:
+ goto die;
+ default:
+ goto retry_rebind;
+ }
}
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
rpc_delay(task, NLMCLNT_GRACE_WAIT);
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1..1afae26 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
return __logfs_create(dir, dentry, inode, target, destlen);
}
-static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
-{
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
- return generic_permission(inode, mask, flags, NULL);
-}
-
static int logfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
.mknod = logfs_mknod,
.rename = logfs_rename,
.rmdir = logfs_rmdir,
- .permission = logfs_permission,
.symlink = logfs_symlink,
.unlink = logfs_unlink,
};
diff --git a/fs/namei.c b/fs/namei.c
index 9802345..0223c41 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
/*
* Read/write DACs are always overridable.
- * Executable DACs are overridable if at least one exec bit is set.
+ * Executable DACs are overridable for all directories and
+ * for non-directories that have at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || execute_ok(inode))
if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
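
The reworded comment matches the check below it: MAY_EXEC may be overridden by CAP_DAC_OVERRIDE on any directory, but on a non-directory only when at least one execute bit is set. A compact restatement of that rule (execute_ok() is approximated here, not the kernel helper):

#include <stdio.h>

#define MAY_EXEC 0x01

/* rough stand-in for execute_ok(): directories always pass, files need an x bit */
static int execute_ok_like(int is_dir, unsigned int mode)
{
	return is_dir || (mode & 0111);
}

static int dac_override_applies(int mask, int is_dir, unsigned int mode)
{
	return !(mask & MAY_EXEC) || execute_ok_like(is_dir, mode);
}

int main(void)
{
	printf("exec on dir 0644 : %d\n", dac_override_applies(MAY_EXEC, 1, 0644)); /* 1 */
	printf("exec on file 0644: %d\n", dac_override_applies(MAY_EXEC, 0, 0644)); /* 0 */
	printf("exec on file 0700: %d\n", dac_override_applies(MAY_EXEC, 0, 0700)); /* 1 */
	return 0;
}
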
@@ -812,6 +813,11 @@ static int follow_automount(struct path *path, unsigned flags,
if (!mnt) /* mount collision */
return 0;
+ if (!*need_mntput) {
+ /* lock_mount() may release path->mnt on error */
+ mntget(path->mnt);
+ *need_mntput = true;
+ }
err = finish_automount(mnt, path);
switch (err) {
@@ -819,12 +825,9 @@ static int follow_automount(struct path *path, unsigned flags,
/* Someone else made a mount here whilst we were busy */
return 0;
case 0:
- dput(path->dentry);
- if (*need_mntput)
- mntput(path->mnt);
+ path_put(path);
path->mnt = mnt;
path->dentry = dget(mnt->mnt_root);
- *need_mntput = true;
return 0;
default:
return err;
@@ -844,9 +847,10 @@ static int follow_automount(struct path *path, unsigned flags,
*/
static int follow_managed(struct path *path, unsigned flags)
{
+ struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
unsigned managed;
bool need_mntput = false;
- int ret;
+ int ret = 0;
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
@@ -861,7 +865,7 @@ static int follow_managed(struct path *path, unsigned flags)
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path->dentry, false);
if (ret < 0)
- return ret == -EISDIR ? 0 : ret;
+ break;
}
/* Transit to a mounted filesystem. */
@@ -887,14 +891,19 @@ static int follow_managed(struct path *path, unsigned flags)
if (managed & DCACHE_NEED_AUTOMOUNT) {
ret = follow_automount(path, flags, &need_mntput);
if (ret < 0)
- return ret == -EISDIR ? 0 : ret;
+ break;
continue;
}
/* We didn't change the current path point */
break;
}
- return 0;
+
+ if (need_mntput && path->mnt == mnt)
+ mntput(path->mnt);
+ if (ret == -EISDIR)
+ ret = 0;
+ return ret;
}
int follow_down_one(struct path *path)
@@ -1003,9 +1012,6 @@ failed:
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
- *
- * Care must be taken as namespace_sem may be held (indicated by mounting_here
- * being true).
*/
int follow_down(struct path *path)
{
@@ -2713,8 +2719,10 @@ static long do_unlinkat(int dfd, const char __user *pathname)
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
+ if (nd.last.name[nd.last.len])
+ goto slashes;
inode = dentry->d_inode;
- if (nd.last.name[nd.last.len] || !inode)
+ if (!inode)
goto slashes;
ihold(inode);
error = mnt_want_write(nd.path.mnt);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 144f2a3..6f4850d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfs_attr_check_mountpoint(sb, fattr);
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0)
+ if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
+ !nfs_attr_use_mounted_on_fileid(fattr))
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (new_isize != cur_isize) {
/* Do we perhaps have any outstanding writes, or has
* the file grown beyond our last write? */
- if (nfsi->npages == 0 || new_isize > cur_isize) {
+ if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
+ new_isize > cur_isize) {
i_size_write(inode, new_isize);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b9056cb..2a55347 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct
fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
}
+static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
+{
+ if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
+ (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
+ ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
+ return 0;
+
+ fattr->fileid = fattr->mounted_on_fileid;
+ return 1;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
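
nfs_attr_use_mounted_on_fileid() lets nfs_fhget() fall back to the mounted_on_fileid when the real fileid is absent, but only for mountpoints and referrals. The same predicate spelled out as a standalone check (the flag values here are illustrative, not the kernel's NFS_ATTR_FATTR_* definitions):

#include <stdio.h>

/* illustrative bit values only */
#define FATTR_MOUNTED_ON_FILEID 0x1
#define FATTR_MOUNTPOINT        0x2
#define FATTR_V4_REFERRAL       0x4

static int use_mounted_on_fileid(unsigned int valid)
{
	if (!(valid & FATTR_MOUNTED_ON_FILEID))
		return 0;				/* nothing to substitute */
	if (!(valid & FATTR_MOUNTPOINT) && !(valid & FATTR_V4_REFERRAL))
		return 0;				/* plain files keep the real fileid */
	return 1;
}

int main(void)
{
	printf("%d\n", use_mounted_on_fileid(FATTR_MOUNTED_ON_FILEID | FATTR_MOUNTPOINT)); /* 1 */
	printf("%d\n", use_mounted_on_fileid(FATTR_MOUNTED_ON_FILEID));                     /* 0 */
	return 0;
}
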
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 4269088..0bafcc9 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -30,6 +30,7 @@
*/
#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
#include "internal.h"
#include "nfs4filelayout.h"
@@ -552,13 +553,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
__func__, nfl_util, fl->num_fh, fl->first_stripe_index,
fl->pattern_offset);
- if (!fl->num_fh)
+ /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
+ * Further checking is done in filelayout_check_layout */
+ if (fl->num_fh < 0 || fl->num_fh >
+ max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
goto out_err;
- fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
- gfp_flags);
- if (!fl->fh_array)
- goto out_err;
+ if (fl->num_fh > 0) {
+ fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ gfp_flags);
+ if (!fl->fh_array)
+ goto out_err;
+ }
for (i = 0; i < fl->num_fh; i++) {
/* Do we want to use a mempool here? */
@@ -661,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
u64 p_stripe, r_stripe;
u32 stripe_unit;
- if (!pnfs_generic_pg_test(pgio, prev, req))
- return 0;
+ if (!pnfs_generic_pg_test(pgio, prev, req) ||
+ !nfs_generic_pg_test(pgio, prev, req))
+ return false;
if (!pgio->pg_lseg)
return 1;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d2c4b59..5879b23 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
return nfs4_map_errors(status);
}
+static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
* we detect fsid mismatch in inode revalidation
*/
-static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
+static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
+ struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
- dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
+ dprintk("%s: server did not return a different fsid for"
+ " a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
+ /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
+ nfs_fixup_referral_attributes(&locations->fattr);
+ /* replace the lookup nfs_fattr with the locations nfs_fattr */
memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
- fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
- if (!fattr->mode)
- fattr->mode = S_IFDIR;
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
return len;
}
+/*
+ * nfs_fhget will use either the mounted_on_fileid or the fileid
+ */
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
- if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
- (fattr->valid & NFS_ATTR_FATTR_FSID) &&
- (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
+ if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
+ (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
+ (fattr->valid & NFS_ATTR_FATTR_FSID) &&
+ (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
return;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs_server *server = NFS_SERVER(dir);
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
- [1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
};
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
int status;
dprintk("%s: start\n", __func__);
+
+ /* Ask for the fileid of the absent filesystem if mounted_on_fileid
+ * is not supported */
+ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+ bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+ else
+ bitmask[0] |= FATTR4_WORD0_FILEID;
+
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
- nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
if (mxresp_sz == 0)
mxresp_sz = NFS_MAX_FILE_IO_SIZE;
/* Fore channel attributes */
- args->fc_attrs.headerpadsz = 0;
args->fc_attrs.max_rqst_sz = mxrqst_sz;
args->fc_attrs.max_resp_sz = mxresp_sz;
args->fc_attrs.max_ops = NFS4_MAX_OPS;
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
/* Back channel attributes */
- args->bc_attrs.headerpadsz = 0;
args->bc_attrs.max_rqst_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz_cached = 0;
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
struct nfs4_channel_attrs *sent = &args->fc_attrs;
struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
- if (rcvd->headerpadsz > sent->headerpadsz)
- return -EINVAL;
if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
/*
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct nfs_server *server;
+ struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
dprintk("--> %s\n", __func__);
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
nfs_restart_rpc(task, lrp->clp);
return;
}
+ spin_lock(&lo->plh_inode->i_lock);
if (task->tk_status == 0) {
- struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
-
if (lrp->res.lrs_present) {
- spin_lock(&lo->plh_inode->i_lock);
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
- spin_unlock(&lo->plh_inode->i_lock);
} else
BUG_ON(!list_empty(&lo->plh_segs));
}
+ lo->plh_block_lgets--;
+ spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index d869a5e..6870bc6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int);
#define decode_fs_locations_maxsz \
(0)
#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
-#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)))
+#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
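
The *_maxsz constants in nfs4xdr.c count 32-bit XDR words, not bytes, so the secinfo reply bound becomes one word for the flavor count plus the per-flavor byte estimate divided by four. A quick comparison of the two expressions under assumed values (NFS_MAX_SECFLAVORS, GSS_OID_MAX_LEN and op_decode_hdr_maxsz are taken as 8, 32 and 2 purely for illustration):

#include <stdio.h>

/* values assumed for illustration only */
#define NFS_MAX_SECFLAVORS  8
#define GSS_OID_MAX_LEN     32
#define op_decode_hdr_maxsz 2

int main(void)
{
	int old_maxsz = op_decode_hdr_maxsz + 4 +
			(NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN));
	int new_maxsz = op_decode_hdr_maxsz + 1 +
			((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4);

	/* both bounds are in 4-byte XDR words; the old one treated bytes as words */
	printf("old bound: %d words\n", old_maxsz);
	printf("new bound: %d words\n", new_maxsz);
	return 0;
}
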
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr,
*p++ = cpu_to_be32(args->flags); /*flags */
/* Fore Channel */
- *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(0); /* header padding size */
*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
*p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr,
*p++ = cpu_to_be32(0); /* rdmachannel_attrs */
/* Back Channel */
- *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(0); /* header padding size */
*p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
@@ -3098,7 +3098,7 @@ out_overflow:
return -EIO;
}
-static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
+static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
{
__be32 *p;
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
if (unlikely(!p))
goto out_overflow;
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
- return -be32_to_cpup(p);
+ *res = -be32_to_cpup(p);
}
return 0;
out_overflow:
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
int status;
umode_t fmode = 0;
uint32_t type;
+ int32_t err;
status = decode_attr_type(xdr, bitmap, &type);
if (status < 0)
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_error(xdr, bitmap);
- if (status == -NFS4ERR_WRONGSEC) {
- nfs_fixup_secinfo_attributes(fattr, fh);
- status = 0;
- }
+ err = 0;
+ status = decode_attr_error(xdr, bitmap, &err);
if (status < 0)
goto xdr_error;
+ if (err == -NFS4ERR_WRONGSEC)
+ nfs_fixup_secinfo_attributes(fattr, fh);
status = decode_attr_filehandle(xdr, bitmap, fh);
if (status < 0)
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
struct nfs4_channel_attrs *attrs)
{
__be32 *p;
- u32 nr_attrs;
+ u32 nr_attrs, val;
p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
goto out_overflow;
- attrs->headerpadsz = be32_to_cpup(p++);
+ val = be32_to_cpup(p++); /* headerpadsz */
+ if (val)
+ return -EINVAL; /* no support for header padding yet */
attrs->max_rqst_sz = be32_to_cpup(p++);
attrs->max_resp_sz = be32_to_cpup(p++);
attrs->max_resp_sz_cached = be32_to_cpup(p++);
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9cf208d..8ff2ea3 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss,
de = n;
}
- atomic_inc(&de->id_node.ref);
return de;
}
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
if (!pnfs_generic_pg_test(pgio, prev, req))
return false;
+ if (pgio->pg_lseg == NULL)
+ return true;
+
return pgio->pg_count + req->wb_bytes <=
OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
}
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index dc3956c..1d06f8e 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
struct nfs_read_data *rdata;
state->status = status;
- dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof);
+ dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
rdata = state->rpcdata;
rdata->task.tk_status = status;
if (status >= 0) {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7913961..0098557 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req)
TASK_UNINTERRUPTIBLE);
}
-static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
/*
* FIXME: ideally we should be able to coalesce all requests
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p
return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
+EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
/**
* nfs_pageio_init - initialise a page io descriptor
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 8c1309d..29c0ca7f 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino)
spin_lock(&ino->i_lock);
lo = nfsi->layout;
- if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) {
+ if (!lo) {
spin_unlock(&ino->i_lock);
- dprintk("%s: no layout segments to return\n", __func__);
- goto out;
+ dprintk("%s: no layout to return\n", __func__);
+ return status;
}
stateid = nfsi->layout->plh_stateid;
/* Reference matched in nfs4_layoutreturn_release */
get_layout_hdr(lo);
+ mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ lo->plh_block_lgets++;
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino)
lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
if (unlikely(lrp == NULL)) {
status = -ENOMEM;
+ set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
+ set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
+ put_layout_hdr(lo);
goto out;
}
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
ret = get_lseg(lseg);
break;
}
- if (cmp_layout(range, &lseg->pls_range) > 0)
+ if (lseg->pls_range.offset > range->offset)
break;
}
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
gfp_flags = GFP_NOFS;
}
- if (pgio->pg_count == prev->wb_bytes) {
+ if (pgio->pg_lseg == NULL) {
+ if (pgio->pg_count != prev->wb_bytes)
+ return true;
/* This is the first coalesce call for a series of nfs_pages */
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
prev->wb_context,
- req_offset(req),
+ req_offset(prev),
pgio->pg_count,
access_type,
gfp_flags);
- return true;
+ if (pgio->pg_lseg == NULL)
+ return true;
}
- if (pgio->pg_lseg &&
- req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
- pgio->pg_lseg->pls_range.length))
- return false;
-
- return true;
+ /*
+ * Test if a nfs_page is fully contained in the pnfs_layout_range.
+ * Note that this test makes several assumptions:
+ * - that the previous nfs_page in the struct nfs_pageio_descriptor
+ * is known to lie within the range.
+ * - that the nfs_page being tested is known to be contiguous with the
+ * previous nfs_page.
+ * - Layout ranges are page aligned, so we only have to test the
+ * start offset of the request.
+ *
+ * Please also note that 'end_offset' is actually the offset of the
+ * first byte that lies outside the pnfs_layout_range. FIXME?
+ *
+ */
+ return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
+ pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
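
The rewritten tail of pnfs_generic_pg_test() reduces the decision to a single containment test: given that the previous request already lies in the layout segment and the new one is contiguous with it, the new request fits iff its start offset is below end_offset(), the first byte outside the segment's range. That test in isolation (req_offset and end_offset are simplified here, not the kernel helpers):

#include <stdio.h>

typedef unsigned long long u64;

static u64 end_offset(u64 start, u64 len)
{
	return start + len;		/* first byte outside the layout range */
}

/* 1 if a request starting at req_off still falls inside the segment */
static int req_fits_lseg(u64 req_off, u64 lseg_off, u64 lseg_len)
{
	return req_off < end_offset(lseg_off, lseg_len);
}

int main(void)
{
	/* segment covers bytes [4096, 4096 + 8192) */
	printf("%d\n", req_fits_lseg(8192, 4096, 8192));	/* 1: inside */
	printf("%d\n", req_fits_lseg(12288, 4096, 8192));	/* 0: first byte outside */
	return 0;
}
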
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 48d0a8e..96bf4e6 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *);
/* pnfs_dev.c */
struct nfs4_deviceid_node {
struct hlist_node node;
+ struct hlist_node tmpnode;
const struct pnfs_layoutdriver_type *ld;
const struct nfs_client *nfs_client;
struct nfs4_deviceid deviceid;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index c65e133..f0f8e1e 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
const struct nfs4_deviceid *id)
{
INIT_HLIST_NODE(&d->node);
+ INIT_HLIST_NODE(&d->tmpnode);
d->ld = ld;
d->nfs_client = nfs_client;
d->deviceid = *id;
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
spin_unlock(&nfs4_deviceid_lock);
+ atomic_inc(&new->ref);
return new;
}
@@ -238,24 +240,29 @@ static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
struct nfs4_deviceid_node *d;
- struct hlist_node *n, *next;
+ struct hlist_node *n;
HLIST_HEAD(tmp);
+ spin_lock(&nfs4_deviceid_lock);
rcu_read_lock();
hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
if (d->nfs_client == clp && atomic_read(&d->ref)) {
hlist_del_init_rcu(&d->node);
- hlist_add_head(&d->node, &tmp);
+ hlist_add_head(&d->tmpnode, &tmp);
}
rcu_read_unlock();
+ spin_unlock(&nfs4_deviceid_lock);
if (hlist_empty(&tmp))
return;
synchronize_rcu();
- hlist_for_each_entry_safe(d, n, next, &tmp, node)
+ while (!hlist_empty(&tmp)) {
+ d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
+ hlist_del(&d->tmpnode);
if (atomic_dec_and_test(&d->ref))
d->ld->free_deviceid_node(d);
+ }
}
void
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
long h;
- spin_lock(&nfs4_deviceid_lock);
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
+ return;
for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
_deviceid_purge_client(clp, h);
- spin_unlock(&nfs4_deviceid_lock);
}
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 18b3e89..fbb2a5e 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,7 @@ config NFSD_V4
select NFSD_V3
select FS_POSIX_ACL
select SUNRPC_GSS
+ select CRYPTO
help
This option enables support in your system's NFS server for
version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1f5eae4..2b1449d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,6 +13,7 @@
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
#include "idmap.h"
#include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
.release = single_release,
};
-#ifdef CONFIG_SUNRPC_GSS
+#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
static int supported_enctypes_show(struct seq_file *m, void *v)
{
- struct gss_api_mech *k5mech;
-
- k5mech = gss_mech_get_by_name("krb5");
- if (k5mech == NULL)
- goto out;
- if (k5mech->gm_upcall_enctypes != NULL)
- seq_printf(m, k5mech->gm_upcall_enctypes);
- gss_mech_put(k5mech);
-out:
+ seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
return 0;
}
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif /* CONFIG_SUNRPC_GSS */
+#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
-#ifdef CONFIG_SUNRPC_GSS
+#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
[NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
-#endif /* CONFIG_SUNRPC_GSS */
+#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d571827..fd0acca 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
}
#endif /* CONFIG_NFSD_V3 */
+static int nfsd_open_break_lease(struct inode *inode, int access)
+{
+ unsigned int mode;
+ if (access & NFSD_MAY_NOT_BREAK_LEASE)
+ return 0;
+ mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
+ return break_lease(inode, mode | O_NONBLOCK);
+}
/*
* Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!inode->i_fop)
goto out;
- /*
- * Check to see if there are any leases on this file.
- * This may block while leases are broken.
- */
- if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
+ host_err = nfsd_open_break_lease(inode, access);
if (host_err) /* NOMEM or WOULDBLOCK */
goto out_nfserr;
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (!dold->d_inode)
goto out_drop_write;
host_err = nfsd_break_lease(dold->d_inode);
- if (host_err)
+ if (host_err) {
+ err = nfserrno(host_err);
goto out_drop_write;
+ }
host_err = vfs_link(dold, dirp, dnew);
if (!host_err) {
err = nfserrno(commit_metadata(ffhp));
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7eafe46..b2e3ff3 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree,
path[level].bp_bh = NULL;
}
+static void nilfs_btree_nop(struct nilfs_bmap *btree,
+ struct nilfs_btree_path *path,
+ int level, __u64 *keyp, __u64 *ptrp)
+{
+}
static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
@@ -1356,20 +1361,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
struct buffer_head *bh;
struct nilfs_btree_node *node, *parent, *sib;
__u64 sibptr;
- int pindex, level, ncmin, ncmax, ncblk, ret;
+ int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
ret = 0;
stats->bs_nblocks = 0;
ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
ncblk = nilfs_btree_nchildren_per_block(btree);
- for (level = NILFS_BTREE_LEVEL_NODE_MIN;
+ for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
level < nilfs_btree_height(btree) - 1;
level++) {
node = nilfs_btree_get_nonroot_node(path, level);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(node, path[level].bp_index,
- ncblk);
+ nilfs_btree_node_get_ptr(node, dindex, ncblk);
ret = nilfs_bmap_prepare_end_ptr(btree,
&path[level].bp_oldreq, dat);
if (ret < 0)
@@ -1383,6 +1387,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
pindex = path[level + 1].bp_index;
+ dindex = pindex;
if (pindex > 0) {
/* left sibling */
@@ -1421,6 +1426,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_concat_right;
stats->bs_nblocks++;
+ /*
+ * When merging the right sibling node
+ * into the current node, the pointer
+ * to the right sibling node must be
+ * terminated instead. The adjustment
+ * below is required for that.
+ */
+ dindex = pindex + 1;
/* continue; */
}
} else {
@@ -1431,29 +1444,31 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
NILFS_BTREE_ROOT_NCHILDREN_MAX) {
path[level].bp_op = nilfs_btree_shrink;
stats->bs_nblocks += 2;
+ level++;
+ path[level].bp_op = nilfs_btree_nop;
+ goto shrink_root_child;
} else {
path[level].bp_op = nilfs_btree_do_delete;
stats->bs_nblocks++;
+ goto out;
}
-
- goto out;
-
}
}
+ /* child of the root node is deleted */
+ path[level].bp_op = nilfs_btree_do_delete;
+ stats->bs_nblocks++;
+
+shrink_root_child:
node = nilfs_btree_get_root(btree);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(node, path[level].bp_index,
+ nilfs_btree_node_get_ptr(node, dindex,
NILFS_BTREE_ROOT_NCHILDREN_MAX);
ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
if (ret < 0)
goto err_out_child_node;
- /* child of the root node is deleted */
- path[level].bp_op = nilfs_btree_do_delete;
- stats->bs_nblocks++;
-
/* success */
out:
*levelp = level;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b954878..b9b45fc 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -801,12 +801,7 @@ out_err:
int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
- struct nilfs_root *root;
-
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
- root = NILFS_I(inode)->i_root;
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
if ((mask & MAY_WRITE) && root &&
root->cno != NILFS_CPTREE_CURRENT_CNO)
return -EROFS; /* snapshot is not writable */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 141646e..bb24ab6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
if (nilfs->ns_interval)
- sci->sc_interval = nilfs->ns_interval;
+ sci->sc_interval = HZ * nilfs->ns_interval;
if (nilfs->ns_watermark)
sci->sc_watermark = nilfs->ns_watermark;
return sci;
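sc_interval is compared against jiffies-based timestamps, so storing the raw ns_interval value (configured in seconds) made the segment-construction interval HZ times shorter than intended; the fix scales it once at setup. The general seconds-to-jiffies conversion, with interval_secs standing in for the configured value, is simply:

	/* interval_secs is configured in seconds; kernel timers and timeouts want jiffies */
	unsigned long timeout = interval_secs * HZ;

(msecs_to_jiffies(interval_secs * 1000) is the equivalent library form.)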
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d738a7e..2c6d952 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -4,7 +4,6 @@
* Released under GPL v2.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 14def99..fc5bc27 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
*/
static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
{
- int rv;
-
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
- rv = generic_permission(inode, mask, flags, NULL);
+ int rv = generic_permission(inode, mask, flags, NULL);
if (rv == 0)
return 0;
if (task_pid(current) == proc_pid(inode))
@@ -2712,6 +2708,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
struct task_io_accounting acct = task->ioac;
unsigned long flags;
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ return -EACCES;
+
if (whole && lock_task_sighand(task, &flags)) {
struct task_struct *t = task;
@@ -2843,7 +2842,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
- INF("io", S_IRUGO, proc_tgid_io_accounting),
+ INF("io", S_IRUSR, proc_tgid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
INF("hardwall", S_IRUGO, proc_pid_hardwall),
@@ -3185,7 +3184,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
- INF("io", S_IRUGO, proc_tid_io_accounting),
+ INF("io", S_IRUSR, proc_tid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
INF("hardwall", S_IRUGO, proc_pid_hardwall),
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 781dec5..be177f7 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -38,18 +38,21 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
+ void *ns;
inode = proc_pid_make_inode(dir->i_sb, task);
if (!inode)
goto out;
+ ns = ns_ops->get(task);
+ if (!ns)
+ goto out_iput;
+
ei = PROC_I(inode);
inode->i_mode = S_IFREG|S_IRUSR;
inode->i_fop = &ns_file_operations;
ei->ns_ops = ns_ops;
- ei->ns = ns_ops->get(task);
- if (!ei->ns)
- goto out_iput;
+ ei->ns = ns;
dentry->d_op = &pid_dentry_operations;
d_add(dentry, inode);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f50133c..d167de3 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
struct ctl_table *table;
int error;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
/* Executable files are not allowed under /proc/sys/ */
if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
return -EACCES;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index a9000e9..d6c3b41 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -28,11 +28,12 @@ static int proc_test_super(struct super_block *sb, void *data)
static int proc_set_super(struct super_block *sb, void *data)
{
- struct pid_namespace *ns;
-
- ns = (struct pid_namespace *)data;
- sb->s_fs_info = get_pid_ns(ns);
- return set_anon_super(sb, NULL);
+ int err = set_anon_super(sb, NULL);
+ if (!err) {
+ struct pid_namespace *ns = (struct pid_namespace *)data;
+ sb->s_fs_info = get_pid_ns(ns);
+ }
+ return err;
}
static struct dentry *proc_mount(struct file_system_type *fs_type,
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e8a62f4..d780896 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
{
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
/*
* We don't do permission checks on the internal objects.
* Permissions are determined by the "owning" object.
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index f0511e8..eed9942 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
{
struct inode *inode = file->f_mapping->host;
struct mtd_info *mtd = inode->i_sb->s_mtd;
- unsigned long isize, offset;
+ unsigned long isize, offset, maxpages, lpages;
if (!mtd)
goto cant_map_directly;
+ /* the mapping mustn't extend beyond the EOF */
+ lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
isize = i_size_read(inode);
offset = pgoff << PAGE_SHIFT;
- if (offset > isize || len > isize || offset > isize - len)
+
+ maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
return (unsigned long) -EINVAL;
/* we need to call down to the MTD layer to do the actual mapping */
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 2668957..e34f0d9 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -95,6 +95,14 @@ static int sysfs_set_super(struct super_block *sb, void *data)
return error;
}
+static void free_sysfs_super_info(struct sysfs_super_info *info)
+{
+ int type;
+ for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
+ kobj_ns_drop(type, info->ns[type]);
+ kfree(info);
+}
+
static struct dentry *sysfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
@@ -108,11 +116,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
return ERR_PTR(-ENOMEM);
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
- info->ns[type] = kobj_ns_current(type);
+ info->ns[type] = kobj_ns_grab_current(type);
sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
if (IS_ERR(sb) || sb->s_fs_info != info)
- kfree(info);
+ free_sysfs_super_info(info);
if (IS_ERR(sb))
return ERR_CAST(sb);
if (!sb->s_root) {
@@ -131,12 +139,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
static void sysfs_kill_sb(struct super_block *sb)
{
struct sysfs_super_info *info = sysfs_info(sb);
-
/* Remove the superblock from fs_supers/s_instances
* so we can't find it, before freeing sysfs_super_info.
*/
kill_anon_super(sb);
- kfree(info);
+ free_sysfs_super_info(info);
}
static struct file_system_type sysfs_fs_type = {
@@ -145,28 +152,6 @@ static struct file_system_type sysfs_fs_type = {
.kill_sb = sysfs_kill_sb,
};
-void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
-{
- struct super_block *sb;
-
- mutex_lock(&sysfs_mutex);
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
- struct sysfs_super_info *info = sysfs_info(sb);
- /*
- * If we see a superblock on the fs_supers/s_instances
- * list the unmount has not completed and sb->s_fs_info
- * points to a valid struct sysfs_super_info.
- */
- /* Ignore superblocks with the wrong ns */
- if (info->ns[type] != ns)
- continue;
- info->ns[type] = NULL;
- }
- spin_unlock(&sb_lock);
- mutex_unlock(&sysfs_mutex);
-}
-
int __init sysfs_init(void)
{
int err = -ENOMEM;
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3d28af31..2ed2404 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -136,7 +136,7 @@ struct sysfs_addrm_cxt {
* instance).
*/
struct sysfs_super_info {
- const void *ns[KOBJ_NS_TYPES];
+ void *ns[KOBJ_NS_TYPES];
};
#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
extern struct sysfs_dirent sysfs_root;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbd..dffeb37 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
/*
* Called when the clock was set to cancel the timers in the cancel
- * list.
+ * list. This will wake up processes waiting on these timers. The
+ * wake-up requires ctx->ticks to be non-zero, therefore we increment
+ * it before calling wake_up_locked().
*/
void timerfd_clock_was_set(void)
{
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ctx->moffs.tv64 != moffs.tv64) {
ctx->moffs.tv64 = KTIME_MAX;
+ ctx->ticks++;
wake_up_locked(&ctx->wqh);
}
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b5aeb5a..529be05 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1848,7 +1848,6 @@ static void ubifs_put_super(struct super_block *sb)
bdi_destroy(&c->bdi);
ubi_close_volume(c->ubi);
mutex_unlock(&c->umount_mutex);
- kfree(c);
}
static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -1971,61 +1970,65 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
return ERR_PTR(-EINVAL);
}
-static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
+static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
{
- struct ubi_volume_desc *ubi = sb->s_fs_info;
struct ubifs_info *c;
- struct inode *root;
- int err;
c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
+ if (c) {
+ spin_lock_init(&c->cnt_lock);
+ spin_lock_init(&c->cs_lock);
+ spin_lock_init(&c->buds_lock);
+ spin_lock_init(&c->space_lock);
+ spin_lock_init(&c->orphan_lock);
+ init_rwsem(&c->commit_sem);
+ mutex_init(&c->lp_mutex);
+ mutex_init(&c->tnc_mutex);
+ mutex_init(&c->log_mutex);
+ mutex_init(&c->mst_mutex);
+ mutex_init(&c->umount_mutex);
+ mutex_init(&c->bu_mutex);
+ mutex_init(&c->write_reserve_mutex);
+ init_waitqueue_head(&c->cmt_wq);
+ c->buds = RB_ROOT;
+ c->old_idx = RB_ROOT;
+ c->size_tree = RB_ROOT;
+ c->orph_tree = RB_ROOT;
+ INIT_LIST_HEAD(&c->infos_list);
+ INIT_LIST_HEAD(&c->idx_gc);
+ INIT_LIST_HEAD(&c->replay_list);
+ INIT_LIST_HEAD(&c->replay_buds);
+ INIT_LIST_HEAD(&c->uncat_list);
+ INIT_LIST_HEAD(&c->empty_list);
+ INIT_LIST_HEAD(&c->freeable_list);
+ INIT_LIST_HEAD(&c->frdi_idx_list);
+ INIT_LIST_HEAD(&c->unclean_leb_list);
+ INIT_LIST_HEAD(&c->old_buds);
+ INIT_LIST_HEAD(&c->orph_list);
+ INIT_LIST_HEAD(&c->orph_new);
+ c->no_chk_data_crc = 1;
+
+ c->highest_inum = UBIFS_FIRST_INO;
+ c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
+
+ ubi_get_volume_info(ubi, &c->vi);
+ ubi_get_device_info(c->vi.ubi_num, &c->di);
+ }
+ return c;
+}
- spin_lock_init(&c->cnt_lock);
- spin_lock_init(&c->cs_lock);
- spin_lock_init(&c->buds_lock);
- spin_lock_init(&c->space_lock);
- spin_lock_init(&c->orphan_lock);
- init_rwsem(&c->commit_sem);
- mutex_init(&c->lp_mutex);
- mutex_init(&c->tnc_mutex);
- mutex_init(&c->log_mutex);
- mutex_init(&c->mst_mutex);
- mutex_init(&c->umount_mutex);
- mutex_init(&c->bu_mutex);
- mutex_init(&c->write_reserve_mutex);
- init_waitqueue_head(&c->cmt_wq);
- c->buds = RB_ROOT;
- c->old_idx = RB_ROOT;
- c->size_tree = RB_ROOT;
- c->orph_tree = RB_ROOT;
- INIT_LIST_HEAD(&c->infos_list);
- INIT_LIST_HEAD(&c->idx_gc);
- INIT_LIST_HEAD(&c->replay_list);
- INIT_LIST_HEAD(&c->replay_buds);
- INIT_LIST_HEAD(&c->uncat_list);
- INIT_LIST_HEAD(&c->empty_list);
- INIT_LIST_HEAD(&c->freeable_list);
- INIT_LIST_HEAD(&c->frdi_idx_list);
- INIT_LIST_HEAD(&c->unclean_leb_list);
- INIT_LIST_HEAD(&c->old_buds);
- INIT_LIST_HEAD(&c->orph_list);
- INIT_LIST_HEAD(&c->orph_new);
- c->no_chk_data_crc = 1;
+static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct ubifs_info *c = sb->s_fs_info;
+ struct inode *root;
+ int err;
c->vfs_sb = sb;
- c->highest_inum = UBIFS_FIRST_INO;
- c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
-
- ubi_get_volume_info(ubi, &c->vi);
- ubi_get_device_info(c->vi.ubi_num, &c->di);
-
/* Re-open the UBI device in read-write mode */
c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
if (IS_ERR(c->ubi)) {
err = PTR_ERR(c->ubi);
- goto out_free;
+ goto out;
}
/*
@@ -2091,24 +2094,29 @@ out_bdi:
bdi_destroy(&c->bdi);
out_close:
ubi_close_volume(c->ubi);
-out_free:
- kfree(c);
+out:
return err;
}
static int sb_test(struct super_block *sb, void *data)
{
- dev_t *dev = data;
+ struct ubifs_info *c1 = data;
struct ubifs_info *c = sb->s_fs_info;
- return c->vi.cdev == *dev;
+ return c->vi.cdev == c1->vi.cdev;
+}
+
+static int sb_set(struct super_block *sb, void *data)
+{
+ sb->s_fs_info = data;
+ return set_anon_super(sb, NULL);
}
static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
const char *name, void *data)
{
struct ubi_volume_desc *ubi;
- struct ubi_volume_info vi;
+ struct ubifs_info *c;
struct super_block *sb;
int err;
@@ -2125,19 +2133,25 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
}
- ubi_get_volume_info(ubi, &vi);
- dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id);
+ c = alloc_ubifs_info(ubi);
+ if (!c) {
+ err = -ENOMEM;
+ goto out_close;
+ }
+
+ dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev);
+ sb = sget(fs_type, sb_test, sb_set, c);
if (IS_ERR(sb)) {
err = PTR_ERR(sb);
+ kfree(c);
goto out_close;
}
if (sb->s_root) {
struct ubifs_info *c1 = sb->s_fs_info;
-
+ kfree(c);
/* A new mount point for already mounted UBIFS */
dbg_gen("this ubi volume is already mounted");
if (!!(flags & MS_RDONLY) != c1->ro_mount) {
@@ -2146,11 +2160,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
}
} else {
sb->s_flags = flags;
- /*
- * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
- * replaced by 'c'.
- */
- sb->s_fs_info = ubi;
err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (err)
goto out_deact;
@@ -2170,11 +2179,18 @@ out_close:
return ERR_PTR(err);
}
+static void kill_ubifs_super(struct super_block *s)
+{
+ struct ubifs_info *c = s->s_fs_info;
+ kill_anon_super(s);
+ kfree(c);
+}
+
static struct file_system_type ubifs_fs_type = {
.name = "ubifs",
.owner = THIS_MODULE,
.mount = ubifs_mount,
- .kill_sb = kill_anon_super,
+ .kill_sb = kill_ubifs_super,
};
/*
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4213ba..7f782af 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -131,19 +131,34 @@ xfs_file_fsync(
{
struct inode *inode = file->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error = 0;
int log_flushed = 0;
trace_xfs_file_fsync(ip);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
xfs_iflags_clear(ip, XFS_ITRUNCATED);
xfs_ioend_wait(ip);
+ if (mp->m_flags & XFS_MOUNT_BARRIER) {
+ /*
+ * If we have an RT and/or log subvolume we need to make sure
+ * to flush the write cache of the device used for file data
+ * first. This is to ensure newly written file data make
+ * it to disk before logging the new inode size in case of
+ * an extending write.
+ */
+ if (XFS_IS_REALTIME_INODE(ip))
+ xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+ else if (mp->m_logdev_targp != mp->m_ddev_targp)
+ xfs_blkdev_issue_flush(mp->m_ddev_targp);
+ }
+
/*
* We always need to make sure that the required inode state is safe on
* disk. The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
* updates. The sync transaction will also force the log.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
error = xfs_trans_reserve(tp, 0,
- XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+ XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
if (error) {
xfs_trans_cancel(tp, 0);
return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
* force the log.
*/
if (xfs_ipincount(ip)) {
- error = _xfs_log_force_lsn(ip->i_mount,
+ error = _xfs_log_force_lsn(mp,
ip->i_itemp->ili_last_lsn,
XFS_LOG_SYNC, &log_flushed);
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
- if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
- /*
- * If the log write didn't issue an ordered tag we need
- * to flush the disk cache for the data device now.
- */
- if (!log_flushed)
- xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);
-
- /*
- * If this inode is on the RT dev we need to flush that
- * cache as well.
- */
- if (XFS_IS_REALTIME_INODE(ip))
- xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
- }
+ /*
+ * If we only have a single device, and the log force above was
+ * a no-op, we might have to flush the data device cache here.
+ * This can only happen for fdatasync/O_DSYNC if we were overwriting
+ * an already allocated file and thus do not have any metadata to
+ * commit.
+ */
+ if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
+ mp->m_logdev_targp == mp->m_ddev_targp &&
+ !XFS_IS_REALTIME_INODE(ip) &&
+ !log_flushed)
+ xfs_blkdev_issue_flush(mp->m_ddev_targp);
return -error;
}
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index dd21784..d44d92c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -182,7 +182,7 @@ xfs_vn_mknod(
if (IS_POSIXACL(dir)) {
default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(default_acl))
- return -PTR_ERR(default_acl);
+ return PTR_ERR(default_acl);
if (!default_acl)
mode &= ~current_umask();
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1e3a7ce..a1a881e 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -627,68 +627,6 @@ xfs_blkdev_put(
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
-/*
- * Try to write out the superblock using barriers.
- */
-STATIC int
-xfs_barrier_test(
- xfs_mount_t *mp)
-{
- xfs_buf_t *sbp = xfs_getsb(mp, 0);
- int error;
-
- XFS_BUF_UNDONE(sbp);
- XFS_BUF_UNREAD(sbp);
- XFS_BUF_UNDELAYWRITE(sbp);
- XFS_BUF_WRITE(sbp);
- XFS_BUF_UNASYNC(sbp);
- XFS_BUF_ORDERED(sbp);
-
- xfsbdstrat(mp, sbp);
- error = xfs_buf_iowait(sbp);
-
- /*
- * Clear all the flags we set and possible error state in the
- * buffer. We only did the write to try out whether barriers
- * worked and shouldn't leave any traces in the superblock
- * buffer.
- */
- XFS_BUF_DONE(sbp);
- XFS_BUF_ERROR(sbp, 0);
- XFS_BUF_UNORDERED(sbp);
-
- xfs_buf_relse(sbp);
- return error;
-}
-
-STATIC void
-xfs_mountfs_check_barriers(xfs_mount_t *mp)
-{
- int error;
-
- if (mp->m_logdev_targp != mp->m_ddev_targp) {
- xfs_notice(mp,
- "Disabling barriers, not supported with external log device");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-
- if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
- xfs_notice(mp,
- "Disabling barriers, underlying device is readonly");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-
- error = xfs_barrier_test(mp);
- if (error) {
- xfs_notice(mp,
- "Disabling barriers, trial barrier write failed");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-}
-
void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
switch (token) {
case Opt_barrier:
mp->m_flags |= XFS_MOUNT_BARRIER;
-
- /*
- * Test if barriers are actually working if we can,
- * else delay this check until the filesystem is
- * marked writeable.
- */
- if (!(mp->m_flags & XFS_MOUNT_RDONLY))
- xfs_mountfs_check_barriers(mp);
break;
case Opt_nobarrier:
mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
/* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
mp->m_flags &= ~XFS_MOUNT_RDONLY;
- if (mp->m_flags & XFS_MOUNT_BARRIER)
- xfs_mountfs_check_barriers(mp);
/*
* If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
if (error)
goto out_free_sb;
- if (mp->m_flags & XFS_MOUNT_BARRIER)
- xfs_mountfs_check_barriers(mp);
-
error = xfs_filestream_mount(mp);
if (error)
goto out_free_sb;
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index c863753..01d2072 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -490,6 +490,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
args.whichfork = XFS_ATTR_FORK;
/*
+ * we have no control over the attribute names that userspace passes us
+ * to remove, so we have to allow the name lookup prior to attribute
+ * removal to fail.
+ */
+ args.op_flags = XFS_DA_OP_OKNOENT;
+
+ /*
* Attach the dquots to the inode.
*/
error = xfs_qm_dqattach(dp, 0);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index cb9b6d1..3631783 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -253,16 +253,21 @@ xfs_iget_cache_hit(
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
- ip->i_flags &= ~XFS_INEW;
- ip->i_flags |= XFS_IRECLAIMABLE;
- __xfs_inode_set_reclaim_tag(pag, ip);
+ ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
}
spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
- ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
+
+ /*
+ * Clear the per-lifetime state in the inode as we are now
+ * effectively a new inode and need to return to the initial
+ * state before reuse occurs.
+ */
+ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
ip->i_flags |= XFS_INEW;
__xfs_inode_clear_reclaim_tag(mp, pag, ip);
inode->i_state = I_NEW;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 3ae6d58..964cfea 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -384,6 +384,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */
/*
+ * Per-lifetime flags need to be reset when re-using a reclaimable inode during
+ * inode lookup. This prevents unintended behaviour on the new inode from
+ * occurring.
+ */
+#define XFS_IRECLAIM_RESET_FLAGS \
+ (XFS_IRECLAIMABLE | XFS_IRECLAIM | \
+ XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
+ XFS_IFILESTREAM)
+
+/*
* Flags for inode locking.
* Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield)
* 1<<16 - 1<<32-1 -- lockdep annotation (integers)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2119302..41d5b8f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t *log,
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_LOG_BUFFER;
- if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+ if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
+ /*
+ * If we have an external log device, flush the data device
+ * before flushing the log to make sure all meta data
+ * written back from the AIL actually made it to disk
+ * before writing out the new log tail LSN in the log buffer.
+ */
+ if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
+ xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
XFS_BUF_ORDERED(bp);
+ }
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b7a5fe7..6197207 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -960,8 +960,11 @@ xfs_release(
* be exposed to that problem.
*/
truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
- if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
- xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+ if (truncated) {
+ xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
+ if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
+ xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+ }
}
if (ip->i_d.di_nlink == 0)
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index fcdcb5d..d494001 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
extern int __gpio_to_irq(unsigned gpio);
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e9b8e59..76bff2b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -88,7 +88,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(mm, address, pmdp);
return pmd;
-})
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 9573e0c..33d12f8 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -520,6 +520,8 @@ struct drm_connector {
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
+
+ int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
};
/**
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 5479fdc..514ed45 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -201,6 +201,9 @@ struct amba_pl011_data {
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
+ void (*init) (void);
+ void (*exit) (void);
+ void (*reset) (void);
};
#endif
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 1ae1271..98999cf1 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
struct bgpio_pdata {
int base;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2a7cea53..6395692 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -167,7 +167,7 @@ enum rq_flag_bits {
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+ REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b22fb0d..8c7c2de 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *);
extern int do_blk_trace_setup(struct request_queue *q, char *name,
dev_t dev, struct block_device *bdev,
struct blk_user_trace_setup *buts);
-extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+extern __attribute__((format(printf, 2, 3)))
+void __trace_note_message(struct blk_trace *, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
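Adding the printf format attribute lets gcc type-check the format string against the variadic arguments at every call site of __trace_note_message(); the two indices name the position of the format parameter and of the first argument it describes. Illustrative only (log_msg and struct ctx are made-up names):

	extern __attribute__((format(printf, 2, 3)))
	void log_msg(struct ctx *ctx, const char *fmt, ...);

	/* gcc can now warn about mismatches such as:
	 *	log_msg(ctx, "queue depth %d", "oops");
	 */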
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b4..18a1baf 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
+ cycle_t cs_last;
cycle_t wd_last;
#endif
} ____cacheline_aligned;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ddcb7db..846bb17 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -467,6 +467,8 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
char __user *optval, unsigned int optlen);
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 7c60d09..f696bcc 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -44,7 +44,7 @@
#define CN_VAL_DRBD 0x1
#define CN_KVP_IDX 0x9 /* HyperV KVP */
-#define CN_NETLINK_USERS 9
+#define CN_NETLINK_USERS 10 /* Highest index + 1 */
/*
* Maximum connector's message size.
diff --git a/include/linux/device.h b/include/linux/device.h
index c66111a..e4f62d8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -530,7 +530,6 @@ struct device_dma_parameters {
* @dma_mem: Internal for coherent mem override.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
- * @of_match: Matching of_device_id from driver.
* @devt: For creating the sysfs "dev".
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
@@ -654,13 +653,13 @@ static inline int device_is_registered(struct device *dev)
static inline void device_enable_async_suspend(struct device *dev)
{
- if (!dev->power.in_suspend)
+ if (!dev->power.is_prepared)
dev->power.async_suspend = true;
}
static inline void device_disable_async_suspend(struct device *dev)
{
- if (!dev->power.in_suspend)
+ if (!dev->power.is_prepared)
dev->power.async_suspend = false;
}
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0b0d9c3..7aad1f4 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -2,8 +2,16 @@
#include <linux/fs.h>
#ifdef CONFIG_CGROUP_DEVICE
-extern int devcgroup_inode_permission(struct inode *inode, int mask);
+extern int __devcgroup_inode_permission(struct inode *inode, int mask);
extern int devcgroup_inode_mknod(int mode, dev_t dev);
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{
+ if (likely(!inode->i_rdev))
+ return 0;
+ if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+ return 0;
+ return __devcgroup_inode_permission(inode, mask);
+}
#else
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c6a850a..439b173 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -268,7 +268,7 @@ struct ethtool_pauseparam {
__u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
- * being true) the user may set 'autonet' here non-zero to have the
+ * being true) the user may set 'autoneg' here non-zero to have the
* pause parameters be auto-negotiated too. In such a case, the
* {rx,tx}_pause values below determine what capabilities are
* advertised.
@@ -811,7 +811,7 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
* @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
* are turned on or off.
* @set_tx_csum: Deprecated in favour of generic netdev features. Turn
- * transmit checksums on or off. Returns a egative error code or zero.
+ * transmit checksums on or off. Returns a negative error code or zero.
* @get_sg: Deprecated as redundant. Report whether scatter-gather is
* enabled.
* @set_sg: Deprecated in favour of generic netdev features. Turn
@@ -1087,7 +1087,7 @@ struct ethtool_ops {
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
* devices settings, these indicate the current mode and whether
- * it was foced up into this mode or autonegotiated.
+ * it was forced up into this mode or autonegotiated.
*/
/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1c77787..b5b9792 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,6 +639,7 @@ struct address_space {
struct prio_tree_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
struct mutex i_mmap_mutex; /* protect tree, count, list */
+ /* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
@@ -744,7 +745,7 @@ struct inode {
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned int i_flags;
- unsigned int i_state;
+ unsigned long i_state;
#ifdef CONFIG_SECURITY
void *i_security;
#endif
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32d47e7..17b5a0d 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -3,6 +3,17 @@
/* see Documentation/gpio.txt */
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_DIR_OUT (0 << 0)
+#define GPIOF_DIR_IN (1 << 0)
+
+#define GPIOF_INIT_LOW (0 << 1)
+#define GPIOF_INIT_HIGH (1 << 1)
+
+#define GPIOF_IN (GPIOF_DIR_IN)
+#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
+#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+
#ifdef CONFIG_GENERIC_GPIO
#include <asm/gpio.h>
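With the GPIOF_* flags now visible from <linux/gpio.h> regardless of CONFIG_GENERIC_GPIO, platform code can describe an initial pin state without pulling in gpiolib headers. A minimal, hedged usage sketch (the GPIO number and label are placeholders; gpio_request_one() itself still needs gpiolib):

	/* request GPIO 42 as an output driven low, labelled for debugfs */
	int err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "status-led");
	if (err)
		return err;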
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 51932e5..fd0dc30 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -135,6 +135,7 @@ struct hrtimer_sleeper {
* @cpu_base: per cpu clock base
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h
new file mode 100644
index 0000000..624dcec
--- /dev/null
+++ b/include/linux/i2c/adp8870.h
@@ -0,0 +1,153 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * Backlight drivers ADP8870
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_I2C_ADP8870_H
+#define __LINUX_I2C_ADP8870_H
+
+#define ID_ADP8870 8870
+
+#define ADP8870_MAX_BRIGHTNESS 0x7F
+#define FLAG_OFFT_SHIFT 8
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
+
+#define ADP8870_LED_ONT_200ms 0
+#define ADP8870_LED_ONT_600ms 1
+#define ADP8870_LED_ONT_800ms 2
+#define ADP8870_LED_ONT_1200ms 3
+
+#define ADP8870_LED_D7 (7)
+#define ADP8870_LED_D6 (6)
+#define ADP8870_LED_D5 (5)
+#define ADP8870_LED_D4 (4)
+#define ADP8870_LED_D3 (3)
+#define ADP8870_LED_D2 (2)
+#define ADP8870_LED_D1 (1)
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP8870_BL_D7 (1 << 6)
+#define ADP8870_BL_D6 (1 << 5)
+#define ADP8870_BL_D5 (1 << 4)
+#define ADP8870_BL_D4 (1 << 3)
+#define ADP8870_BL_D3 (1 << 2)
+#define ADP8870_BL_D2 (1 << 1)
+#define ADP8870_BL_D1 (1 << 0)
+
+#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP8870_FADE_T_600ms 2
+#define ADP8870_FADE_T_900ms 3
+#define ADP8870_FADE_T_1200ms 4
+#define ADP8870_FADE_T_1500ms 5
+#define ADP8870_FADE_T_1800ms 6
+#define ADP8870_FADE_T_2100ms 7
+#define ADP8870_FADE_T_2400ms 8
+#define ADP8870_FADE_T_2700ms 9
+#define ADP8870_FADE_T_3000ms 10
+#define ADP8870_FADE_T_3500ms 11
+#define ADP8870_FADE_T_4000ms 12
+#define ADP8870_FADE_T_4500ms 13
+#define ADP8870_FADE_T_5000ms 14
+#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP8870_FADE_LAW_LINEAR 0
+#define ADP8870_FADE_LAW_SQUARE 1
+#define ADP8870_FADE_LAW_CUBIC1 2
+#define ADP8870_FADE_LAW_CUBIC2 3
+
+#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP8870_BL_AMBL_FILT_160ms 1
+#define ADP8870_BL_AMBL_FILT_320ms 2
+#define ADP8870_BL_AMBL_FILT_640ms 3
+#define ADP8870_BL_AMBL_FILT_1280ms 4
+#define ADP8870_BL_AMBL_FILT_2560ms 5
+#define ADP8870_BL_AMBL_FILT_5120ms 6
+#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+/*
+ * Blacklight current 0..30mA
+ */
+#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30)
+
+/*
+ * L2 comparator current 0..1106uA
+ */
+#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
+
+/*
+ * L3 comparator current 0..551uA
+ */
+#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551)
+
+/*
+ * L4 comparator current 0..275uA
+ */
+#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275)
+
+/*
+ * L5 comparator current 0..138uA
+ */
+#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138)
+
+struct adp8870_backlight_platform_data {
+ u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
+ u8 pwm_assign; /* 1 = Enables PWM mode */
+
+ u8 bl_fade_in; /* Backlight Fade-In Timer */
+ u8 bl_fade_out; /* Backlight Fade-Out Timer */
+ u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+ u8 l5_hyst; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+
+ /**
+ * Independent Current Sinks / LEDS
+ * Sinks not assigned to the Backlight can be exposed to
+ * user space using the LEDS CLASS interface
+ */
+
+ int num_leds;
+ struct led_info *leds;
+ u8 led_fade_in; /* LED Fade-In Timer */
+ u8 led_fade_out; /* LED Fade-Out Timer */
+ u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
+ u8 led_on_time;
+};
+
+#endif /* __LINUX_I2C_ADP8870_H */
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 6d66ce1..7b31863 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -62,6 +62,7 @@ struct tpacket_auxdata {
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
+ __u16 tp_padding;
};
/* Rx ring - header status */
@@ -101,6 +102,7 @@ struct tpacket2_hdr {
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
+ __u16 tp_padding;
};
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index dc01681..affa273 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -225,7 +225,7 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
}
/**
- * __vlan_put_tag - regular VLAN tag inserting
+ * vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
@@ -234,8 +234,10 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
@@ -255,8 +257,25 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
- skb->protocol = htons(ETH_P_8021Q);
+ return skb;
+}
+/**
+ * __vlan_put_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+ skb = vlan_insert_tag(skb, vlan_tci);
+ if (skb)
+ skb->protocol = htons(ETH_P_8021Q);
return skb;
}
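The split above keeps tag insertion separate from transmit-path semantics: vlan_insert_tag() rewrites the payload but leaves skb->protocol untouched, which is what a receive path needs when re-inserting a hardware-stripped tag, while __vlan_put_tag() remains the transmit helper that also switches the protocol to ETH_P_8021Q. A hedged receive-side sketch:

	/* rx path: put the stripped tag back without disturbing skb->protocol */
	skb = vlan_insert_tag(skb, vlan_tci);
	if (!skb)
		return;		/* insertion failed; the original skb was already freed */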
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 649dc7f..5d253cd 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,7 +1,7 @@
#ifndef __SH_KEYSC_H__
#define __SH_KEYSC_H__
-#define SH_KEYSC_MAXKEYS 49
+#define SH_KEYSC_MAXKEYS 64
struct sh_keysc_info {
enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989..f6efed0 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
HRTIMER_SOFTIRQ,
+ RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
NR_SOFTIRQS
};
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4ecb7b1..d087c2e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1024,7 +1024,6 @@ struct journal_s
/* Filing buffers */
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_unfile_buffer(struct journal_head *);
extern void __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1165,7 +1164,6 @@ extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in
*/
struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
-void jbd2_journal_remove_journal_head(struct buffer_head *bh);
void jbd2_journal_put_journal_head(struct journal_head *jh);
/*
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fb0e732..953352a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -671,8 +671,8 @@ struct sysinfo {
#ifdef __CHECKER__
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
-#define BUILD_BUG_ON_ZERO(e)
-#define BUILD_BUG_ON_NULL(e)
+#define BUILD_BUG_ON_ZERO(e) (0)
+#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON(condition)
#else /* __CHECKER__ */
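Expanding BUILD_BUG_ON_ZERO()/BUILD_BUG_ON_NULL() to (0) and ((void *)0) under __CHECKER__ matters because both macros are used inside larger expressions; an empty expansion leaves an operator without an operand and breaks sparse runs. Simplified from kernel.h/compiler-gcc.h for illustration:

	#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
	#define ARRAY_SIZE(arr)		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

	/* with BUILD_BUG_ON_ZERO(e) expanding to nothing, the trailing '+' above
	 * has no right-hand operand; expanding to (0) keeps the expression valid. */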
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index d4a5c84..0da38cf 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
#endif
-struct key;
+struct cred;
struct file;
enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
char **envp;
enum umh_wait wait;
int retval;
- int (*init)(struct subprocess_info *info);
+ int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
};
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
/* Set various pieces of state into the subprocess_info structure */
void call_usermodehelper_setfns(struct subprocess_info *info,
- int (*init)(struct subprocess_info *info),
+ int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *info),
void *data);
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
static inline int
call_usermodehelper_fns(char *path, char **argv, char **envp,
enum umh_wait wait,
- int (*init)(struct subprocess_info *info),
+ int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data)
{
struct subprocess_info *info;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2a0d7d6..ee0c952 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_KMSG_DUMP_H
#define _LINUX_KMSG_DUMP_H
+#include <linux/errno.h>
#include <linux/list.h>
enum kmsg_dump_reason {
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 82cb5bf..f66b065 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -32,15 +32,17 @@ enum kobj_ns_type {
/*
* Callbacks so sysfs can determine namespaces
- * @current_ns: return calling task's namespace
+ * @grab_current_ns: return a new reference to calling task's namespace
* @netlink_ns: return namespace to which a sock belongs (right?)
* @initial_ns: return the initial namespace (i.e. init_net_ns)
+ * @drop_ns: drops a reference to namespace
*/
struct kobj_ns_type_operations {
enum kobj_ns_type type;
- const void *(*current_ns)(void);
+ void *(*grab_current_ns)(void);
const void *(*netlink_ns)(struct sock *sk);
const void *(*initial_ns)(void);
+ void (*drop_ns)(void *);
};
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
@@ -48,9 +50,9 @@ int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-const void *kobj_ns_current(enum kobj_ns_type type);
+void *kobj_ns_grab_current(enum kobj_ns_type type);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+void kobj_ns_drop(enum kobj_ns_type type, void *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9724a38..50940da 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -84,6 +84,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
@@ -246,6 +247,11 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+{
+ return NULL;
+}
+
static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
return 1;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c928dac..9f7c3eb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -647,6 +647,13 @@ typedef struct pglist_data {
#endif
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
+
+#define node_end_pfn(nid) ({\
+ pg_data_t *__pgdat = NODE_DATA(nid);\
+ __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
+})
+
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
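node_end_pfn() is wrapped in a statement expression so NODE_DATA(nid) is evaluated only once even for non-trivial nid arguments, and together with node_start_pfn() it gives the usual half-open pfn range for a node. A small, hedged sketch of a caller (process_page() is a made-up placeholder):

	unsigned long pfn, start = node_start_pfn(nid), end = node_end_pfn(nid);

	for (pfn = start; pfn < end; pfn++)
		if (pfn_valid(pfn))
			process_page(pfn_to_page(pfn));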
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca333e7..54b8b4d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2555,7 +2555,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern struct kobj_ns_type_operations net_ns_type_operations;
-extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
+extern const char *netdev_drivername(const struct net_device *dev);
extern void linkwatch_run_queue(void);
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 50cdc25..0d3dd66 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -18,6 +18,9 @@ enum ip_conntrack_info {
/* >= this indicates reply direction */
IP_CT_IS_REPLY,
+ IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
+ IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
+ IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
/* Number of distinct IP_CT types (no NEW in reply dirn). */
IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
};
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 3a34e80..25311b3 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -92,6 +92,9 @@ extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
+extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *prev,
+ struct nfs_page *req);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_tag_locked(struct nfs_page *req);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5e8444a..00848d8 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -158,7 +158,6 @@ struct nfs_seqid;
/* nfs41 sessions channel attributes */
struct nfs4_channel_attrs {
- u32 headerpadsz;
u32 max_rqst_sz;
u32 max_resp_sz;
u32 max_resp_sz_cached;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a311008..f8910e1 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1537,6 +1537,7 @@
#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
#define PCI_DEVICE_ID_RICOH_R5C832 0x0832
#define PCI_DEVICE_ID_RICOH_R5C843 0x0843
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8b97308..9ca008f 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
* Special handling for cmpxchg_double. cmpxchg_double is passed two
* percpu variables. The first has to be aligned to a double word
* boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
*/
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
({ \
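A minimal sketch (not from the patch) of what the comment above requires: two per-CPU words, adjacent and double-word aligned, updated atomically as a pair. The struct, fields, and try_update() are hypothetical names; this_cpu_cmpxchg_double() takes the two per-CPU lvalues followed by the expected and new values.

struct pcp_pair {
        unsigned long first;    /* must be double-word aligned */
        unsigned long second;   /* must directly follow the first */
} __aligned(2 * sizeof(unsigned long));

static DEFINE_PER_CPU(struct pcp_pair, pcp_pair);

static int try_update(unsigned long of, unsigned long os,
                      unsigned long nf, unsigned long ns)
{
        /* non-zero only if both words still held the expected old values */
        return this_cpu_cmpxchg_double(pcp_pair.first, pcp_pair.second,
                                       of, os, nf, ns);
}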
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3160648..411e4f4 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -425,7 +425,8 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
- unsigned int in_suspend:1; /* Owned by the PM core */
+ bool is_prepared:1; /* Owned by the PM core */
+ bool is_suspended:1; /* Ditto */
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e981189..c6db9fb 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -28,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <asm/processor.h>
typedef struct {
unsigned sequence;
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 2b7fec8..aa08fa8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -3,6 +3,7 @@
#include <linux/swap.h>
#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
/* inode in-kernel data */
@@ -45,7 +46,27 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
return container_of(inode, struct shmem_inode_info, vfs_inode);
}
+/*
+ * Functions in mm/shmem.c called directly from elsewhere:
+ */
extern int init_tmpfs(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern struct file *shmem_file_setup(const char *name,
+ loff_t size, unsigned long flags);
+extern int shmem_zero_setup(struct vm_area_struct *);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+extern int shmem_unuse(swp_entry_t entry, struct page *page);
+extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+ struct page **pagep, swp_entry_t *ent);
+
+static inline struct page *shmem_read_mapping_page(
+ struct address_space *mapping, pgoff_t index)
+{
+ return shmem_read_mapping_page_gfp(mapping, index,
+ mapping_gfp_mask(mapping));
+}
#endif
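A hypothetical caller (not part of the patch) of the shmem_read_mapping_page() wrapper declared above; get_backing_page() is illustrative and assumes the file is shmem/tmpfs backed.

static struct page *get_backing_page(struct file *filp, pgoff_t index)
{
        struct page *page;

        /* uses the mapping's own gfp mask via the inline wrapper above */
        page = shmem_read_mapping_page(filp->f_mapping, index);
        if (IS_ERR(page))
                return page;    /* e.g. -ENOMEM propagated as an ERR_PTR */
        /* caller is responsible for releasing the page reference */
        return page;
}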
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e8b78ce..c0a4f3a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1256,6 +1256,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+static inline void skb_reset_mac_len(struct sk_buff *skb)
+{
+ skb->mac_len = skb->network_header - skb->mac_header;
+}
+
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
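An illustrative receive-path fragment (not from the patch) showing when the new skb_reset_mac_len() helper is meaningful: both the MAC and network header offsets must already be set. example_rx_fixup() is a made-up name.

static void example_rx_fixup(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);      /* MAC header starts at skb->data */
        skb_pull(skb, ETH_HLEN);        /* step over the Ethernet header */
        skb_reset_network_header(skb);  /* network header starts here */
        skb_reset_mac_len(skb);         /* mac_len becomes ETH_HLEN */
}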
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ad824d..8cc38d3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
* Generic and arch helpers
*/
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
+#else
+static inline void call_function_init(void) { }
#endif
/*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
-static inline void init_call_single_data(void) { }
+static inline void call_function_init(void) { }
static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
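The smp.h hunks above follow a common kernel idiom; a generic sketch with made-up names (CONFIG_EXAMPLE_FEATURE, example_feature_init): declare the real function when the feature is built in and supply an empty static inline otherwise, so callers such as start_kernel() need no #ifdef of their own.

#ifdef CONFIG_EXAMPLE_FEATURE
void example_feature_init(void);
#else
static inline void example_feature_init(void) { }
#endif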
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 0000000..ec6234e
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
+/*
+ * Dumb way to share this static piece of information with nfsd
+ */
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f73c482..fe2d8e6 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -84,7 +84,8 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2;
+ tk_cred_retry : 2,
+ tk_rebind_retry : 2;
};
#define tk_xprt tk_client->cl_xprt
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 384eb5f..a273468 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -300,16 +300,6 @@ static inline void scan_unevictable_unregister_node(struct node *node)
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
-#ifdef CONFIG_MMU
-/* linux/mm/shmem.c */
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
- struct page **pagep, swp_entry_t *ent);
-#endif
-
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
@@ -358,6 +348,7 @@ struct backing_dev_info;
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
extern void __put_swap_token(struct mm_struct *);
+extern void disable_swap_token(struct mem_cgroup *memcg);
static inline int has_swap_token(struct mm_struct *mm)
{
@@ -370,11 +361,6 @@ static inline void put_swap_token(struct mm_struct *mm)
__put_swap_token(mm);
}
-static inline void disable_swap_token(void)
-{
- put_swap_token(swap_token_mm);
-}
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -500,7 +486,7 @@ static inline int has_swap_token(struct mm_struct *mm)
return 0;
}
-static inline void disable_swap_token(void)
+static inline void disable_swap_token(struct mem_cgroup *memcg)
{
}
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8c0e349..445702c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -24,6 +24,7 @@ extern int swiotlb_force;
extern void swiotlb_init(int verbose);
extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+extern unsigned long swioltb_nr_tbl(void);
/*
* Enumeration for sync targets
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c3acda6..e2696d7 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -177,9 +177,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
void sysfs_put(struct sysfs_dirent *sd);
-/* Called to clear a ns tag when it is no longer valid */
-void sysfs_exit_ns(enum kobj_ns_type type, const void *tag);
-
int __must_check sysfs_init(void);
#else /* CONFIG_SYSFS */
@@ -338,10 +335,6 @@ static inline void sysfs_put(struct sysfs_dirent *sd)
{
}
-static inline void sysfs_exit_ns(int type, const void *tag)
-{
-}
-
static inline int __must_check sysfs_init(void)
{
return 0;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index b91a40e..fc839bf 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -60,7 +60,7 @@ int arch_update_cpu_topology(void);
* (in whatever arch specific measurement units returned by node_distance())
* then switch on zone reclaim on boot.
*/
-#define RECLAIM_DISTANCE 20
+#define RECLAIM_DISTANCE 30
#endif
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS (1)
diff --git a/include/linux/uts.h b/include/linux/uts.h
index 73eb1ed..6ddbd86 100644
--- a/include/linux/uts.h
+++ b/include/linux/uts.h
@@ -9,7 +9,7 @@
#endif
#ifndef UTS_NODENAME
-#define UTS_NODENAME "(none)" /* set by sethostname() */
+#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */
#endif
#ifndef UTS_DOMAINNAME
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2bf9ed9..aef430d 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -35,8 +35,11 @@ struct netns_ipvs;
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
+ atomic_t passive; /* To decided when the network
(typo in the comment: "To decided" should read "To decide" here and in the count field below)
+ * namespace should be freed.
+ */
atomic_t count; /* To decided when the network
- * namespace should be freed.
+ * namespace should be shut down.
*/
#ifdef NETNS_REFCNT_DEBUG
atomic_t use_count; /* To track references we
@@ -154,6 +157,9 @@ int net_eq(const struct net *net1, const struct net *net2)
{
return net1 == net2;
}
+
+extern void net_drop_ns(void *);
+
#else
static inline struct net *get_net(struct net *net)
@@ -175,6 +181,8 @@ int net_eq(const struct net *net1, const struct net *net2)
{
return 1;
}
+
+#define net_drop_ns NULL
#endif
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7c42e7..5d4f8e5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}
+/* Packet is received from loopback */
+static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+{
+ return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
+}
+
extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
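A hypothetical helper (not part of the patch) combining the additions above: the loopback test from nf_conntrack.h and the combined reply-direction ctinfo values from nf_conntrack_common.h.

static bool example_skip_nat(const struct sk_buff *skb,
                             enum ip_conntrack_info ctinfo)
{
        if (nf_is_loopback_packet(skb))
                return true;            /* locally looped-back traffic */
        /* shorthand for IP_CT_ESTABLISHED + IP_CT_IS_REPLY */
        return ctinfo == IP_CT_ESTABLISHED_REPLY;
}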
diff --git a/include/net/sock.h b/include/net/sock.h
index f2046e4..c0b938c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -178,7 +178,6 @@ struct sock_common {
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
- * @sk_rmem_alloc: receive queue bytes committed
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1de3e0..3a4bd3a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int);
extern struct snd_ac97_bus_ops soc_ac97_ops;
enum snd_soc_control_type {
- SND_SOC_CUSTOM = 1,
- SND_SOC_I2C,
+ SND_SOC_I2C = 1,
SND_SOC_SPI,
};
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index e09592d..5ce2b2f 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode,
__field( umode_t, mode )
__field( uid_t, uid )
__field( gid_t, gid )
- __field( blkcnt_t, blocks )
+ __field( __u64, blocks )
),
TP_fast_assign(
@@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode,
TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->uid, __entry->gid,
- (unsigned long long) __entry->blocks)
+ (unsigned long) __entry->ino, __entry->mode,
+ __entry->uid, __entry->gid, __entry->blocks)
);
TRACE_EVENT(ext4_request_inode,
@@ -178,7 +177,7 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
TP_printk("dev %d,%d ino %lu new_size %lld",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (long long) __entry->new_size)
+ __entry->new_size)
);
DECLARE_EVENT_CLASS(ext4__write_begin,
@@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
+ TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->pos, __entry->len, __entry->flags)
@@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
__entry->copied = copied;
),
- TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
+ TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->pos, __entry->len, __entry->copied)
@@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
TP_ARGS(inode, pos, len, copied)
);
-TRACE_EVENT(ext4_writepage,
- TP_PROTO(struct inode *inode, struct page *page),
-
- TP_ARGS(inode, page),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( pgoff_t, index )
-
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->index = page->index;
- ),
-
- TP_printk("dev %d,%d ino %lu page_index %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->index)
-);
-
TRACE_EVENT(ext4_da_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
@@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages,
),
TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
- "range_start %llu range_end %llu sync_mode %d"
+ "range_start %lld range_end %lld sync_mode %d"
"for_kupdate %d range_cyclic %d writeback_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->nr_to_write,
@@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op,
TP_printk("dev %d,%d ino %lu page_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->index)
+ (unsigned long) __entry->index)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page)
);
DEFINE_EVENT(ext4__page_op, ext4_readpage,
@@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage,
TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->index, __entry->offset)
+ (unsigned long) __entry->index, __entry->offset)
);
TRACE_EVENT(ext4_discard_blocks,
@@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
);
TRACE_EVENT(ext4_mb_release_inode_pa,
- TP_PROTO(struct super_block *sb,
- struct inode *inode,
- struct ext4_prealloc_space *pa,
+ TP_PROTO(struct ext4_prealloc_space *pa,
unsigned long long block, unsigned int count),
- TP_ARGS(sb, inode, pa, block, count),
+ TP_ARGS(pa, block, count),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->ino = inode->i_ino;
+ __entry->dev = pa->pa_inode->i_sb->s_dev;
+ __entry->ino = pa->pa_inode->i_ino;
__entry->block = block;
__entry->count = count;
),
@@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
);
TRACE_EVENT(ext4_mb_release_group_pa,
- TP_PROTO(struct super_block *sb,
- struct ext4_prealloc_space *pa),
+ TP_PROTO(struct ext4_prealloc_space *pa),
- TP_ARGS(sb, pa),
+ TP_ARGS(pa),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = pa->pa_inode->i_sb->s_dev;
__entry->pa_pstart = pa->pa_pstart;
__entry->pa_len = pa->pa_len;
),
@@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks,
__field( ino_t, ino )
__field( unsigned int, flags )
__field( unsigned int, len )
- __field( __u64, logical )
+ __field( __u32, logical )
+ __field( __u32, lleft )
+ __field( __u32, lright )
__field( __u64, goal )
- __field( __u64, lleft )
- __field( __u64, lright )
__field( __u64, pleft )
__field( __u64, pright )
),
@@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks,
__entry->pright = ar->pright;
),
- TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu "
- "lleft %llu lright %llu pleft %llu pright %llu ",
+ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+ "lleft %u lright %u pleft %llu pright %llu ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->flags, __entry->len,
- (unsigned long long) __entry->logical,
- (unsigned long long) __entry->goal,
- (unsigned long long) __entry->lleft,
- (unsigned long long) __entry->lright,
- (unsigned long long) __entry->pleft,
- (unsigned long long) __entry->pright)
+ (unsigned long) __entry->ino, __entry->flags,
+ __entry->len, __entry->logical, __entry->goal,
+ __entry->lleft, __entry->lright, __entry->pleft,
+ __entry->pright)
);
TRACE_EVENT(ext4_allocate_blocks,
@@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks,
__field( __u64, block )
__field( unsigned int, flags )
__field( unsigned int, len )
- __field( __u64, logical )
+ __field( __u32, logical )
+ __field( __u32, lleft )
+ __field( __u32, lright )
__field( __u64, goal )
- __field( __u64, lleft )
- __field( __u64, lright )
__field( __u64, pleft )
__field( __u64, pright )
),
@@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks,
__entry->pright = ar->pright;
),
- TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu "
- "goal %llu lleft %llu lright %llu pleft %llu pright %llu",
+ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+ "goal %llu lleft %u lright %u pleft %llu pright %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->flags, __entry->len, __entry->block,
- (unsigned long long) __entry->logical,
- (unsigned long long) __entry->goal,
- (unsigned long long) __entry->lleft,
- (unsigned long long) __entry->lright,
- (unsigned long long) __entry->pleft,
- (unsigned long long) __entry->pright)
+ (unsigned long) __entry->ino, __entry->flags,
+ __entry->len, __entry->block, __entry->logical,
+ __entry->goal, __entry->lleft, __entry->lright,
+ __entry->pleft, __entry->pright)
);
TRACE_EVENT(ext4_free_blocks,
@@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( umode_t, mode )
__field( __u64, block )
__field( unsigned long, count )
- __field( int, flags )
+ __field( int, flags )
),
TP_fast_assign(
@@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter,
__entry->parent = dentry->d_parent->d_inode->i_ino;
),
- TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
+ TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->parent, __entry->datasync)
@@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit,
__entry->dev = inode->i_sb->s_dev;
),
- TP_printk("dev %d,%d ino %ld ret %d",
+ TP_printk("dev %d,%d ino %lu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->ret)
@@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
__entry->result_len = len;
),
- TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
+ TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->result_group, __entry->result_start,
@@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
"allocated_meta_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->mode, __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
@@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space,
"reserved_data_blocks %d reserved_meta_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->mode, __entry->i_blocks,
__entry->md_needed, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks)
);
@@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space,
"allocated_meta_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->mode, __entry->i_blocks,
__entry->freed_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
@@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter,
__entry->rw = rw;
),
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
+ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len, __entry->rw)
+ __entry->pos, __entry->len, __entry->rw)
);
TRACE_EVENT(ext4_direct_IO_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret),
+ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+ int rw, int ret),
TP_ARGS(inode, offset, len, rw, ret),
@@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
+ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
+ __entry->pos, __entry->len,
__entry->rw, __entry->ret)
);
@@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter,
__entry->mode = mode;
),
- TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d",
+ TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos,
- (unsigned long long) __entry->len, __entry->mode)
+ (unsigned long) __entry->ino, __entry->pos,
+ __entry->len, __entry->mode)
);
TRACE_EVENT(ext4_fallocate_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret),
+ TP_PROTO(struct inode *inode, loff_t offset,
+ unsigned int max_blocks, int ret),
TP_ARGS(inode, offset, max_blocks, ret),
@@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit,
__field( ino_t, ino )
__field( dev_t, dev )
__field( loff_t, pos )
- __field( unsigned, blocks )
+ __field( unsigned int, blocks )
__field( int, ret )
),
@@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d",
+ TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->blocks,
+ __entry->pos, __entry->blocks,
__entry->ret)
);
@@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter,
__entry->dev = dentry->d_inode->i_sb->s_dev;
),
- TP_printk("dev %d,%d ino %ld size %lld parent %ld",
+ TP_printk("dev %d,%d ino %lu size %lld parent %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->size,
(unsigned long) __entry->parent)
@@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %ld ret %d",
+ TP_printk("dev %d,%d ino %lu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->ret)
@@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate,
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
- __field( blkcnt_t, blocks )
+ __field( __u64, blocks )
),
TP_fast_assign(
@@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
__entry->blocks = inode->i_blocks;
),
- TP_printk("dev %d,%d ino %lu blocks %lu",
+ TP_printk("dev %d,%d ino %lu blocks %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
+ (unsigned long) __entry->ino, __entry->blocks)
);
DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
- unsigned len, unsigned flags),
+ unsigned int len, unsigned int flags),
TP_ARGS(inode, lblk, len, flags),
@@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
__field( ino_t, ino )
__field( dev_t, dev )
__field( ext4_lblk_t, lblk )
- __field( unsigned, len )
- __field( unsigned, flags )
+ __field( unsigned int, len )
+ __field( unsigned int, flags )
),
TP_fast_assign(
@@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned) __entry->lblk, __entry->len, __entry->flags)
+ __entry->lblk, __entry->len, __entry->flags)
);
DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, unsigned len, int ret),
+ ext4_fsblk_t pblk, unsigned int len, int ret),
TP_ARGS(inode, lblk, pblk, len, ret),
@@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
__field( dev_t, dev )
__field( ext4_lblk_t, lblk )
__field( ext4_fsblk_t, pblk )
- __field( unsigned, len )
+ __field( unsigned int, len )
__field( int, ret )
),
@@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+ __entry->lblk, __entry->pblk,
__entry->len, __entry->ret)
);
@@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent,
TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned) __entry->lblk, (unsigned long long) __entry->pblk)
+ __entry->lblk, __entry->pblk)
);
TRACE_EVENT(ext4_load_inode,
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca..1c09820 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
- softirq_name(HRTIMER))
+ softirq_name(HRTIMER), \
+ softirq_name(RCU))
/**
* irq_handler_entry - called immediately before the irq action handler
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ea422aa..b2c33bd 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -6,6 +6,8 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
#include "gfpflags.h"
#define RECLAIM_WB_ANON 0x0001u
@@ -310,6 +312,87 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
show_reclaim_flags(__entry->reclaim_flags))
);
+TRACE_EVENT(replace_swap_token,
+ TP_PROTO(struct mm_struct *old_mm,
+ struct mm_struct *new_mm),
+
+ TP_ARGS(old_mm, new_mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, old_mm)
+ __field(unsigned int, old_prio)
+ __field(struct mm_struct*, new_mm)
+ __field(unsigned int, new_prio)
+ ),
+
+ TP_fast_assign(
+ __entry->old_mm = old_mm;
+ __entry->old_prio = old_mm ? old_mm->token_priority : 0;
+ __entry->new_mm = new_mm;
+ __entry->new_prio = new_mm->token_priority;
+ ),
+
+ TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
+ __entry->old_mm, __entry->old_prio,
+ __entry->new_mm, __entry->new_prio)
+);
+
+DECLARE_EVENT_CLASS(put_swap_token_template,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+
+ TP_ARGS(swap_token_mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, swap_token_mm)
+ ),
+
+ TP_fast_assign(
+ __entry->swap_token_mm = swap_token_mm;
+ ),
+
+ TP_printk("token_mm=%p", __entry->swap_token_mm)
+);
+
+DEFINE_EVENT(put_swap_token_template, put_swap_token,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+ TP_ARGS(swap_token_mm)
+);
+
+DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+ TP_ARGS(swap_token_mm),
+ TP_CONDITION(swap_token_mm != NULL)
+);
+
+TRACE_EVENT_CONDITION(update_swap_token_priority,
+ TP_PROTO(struct mm_struct *mm,
+ unsigned int old_prio,
+ struct mm_struct *swap_token_mm),
+
+ TP_ARGS(mm, old_prio, swap_token_mm),
+
+ TP_CONDITION(mm->token_priority != old_prio),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, mm)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(struct mm_struct*, swap_token_mm)
+ __field(unsigned int, swap_token_prio)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = mm->token_priority;
+ __entry->swap_token_mm = swap_token_mm;
+ __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
+ ),
+
+ TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
+ __entry->mm, __entry->old_prio, __entry->new_prio,
+ __entry->swap_token_mm, __entry->swap_token_prio)
+);
#endif /* _TRACE_VMSCAN_H */
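Events declared with TRACE_EVENT()/DEFINE_EVENT() are emitted through generated trace_<name>() stubs; a sketch (not from the patch) of how the swap-token events above would typically be fired, with example_emit_events() as a made-up caller. The _CONDITION variants compile to a no-op when their condition is false.

static void example_emit_events(struct mm_struct *old_mm,
                                struct mm_struct *new_mm)
{
        trace_replace_swap_token(old_mm, new_mm); /* emitted whenever enabled */
        trace_disable_swap_token(old_mm);         /* no-op unless old_mm != NULL */
}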
diff --git a/init/Kconfig b/init/Kconfig
index ebafac4..412c21b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -19,7 +19,6 @@ config DEFCONFIG_LIST
config CONSTRUCTORS
bool
depends on !UML
- default y
config HAVE_IRQ_WORK
bool
@@ -204,6 +203,15 @@ config KERNEL_LZO
endchoice
+config DEFAULT_HOSTNAME
+ string "Default hostname"
+ default "(none)"
+ help
+ This option determines the default system hostname before userspace
+ calls sethostname(2). The kernel traditionally uses "(none)" here,
+ but you may wish to use a different default here to make a minimal
+ system more usable with less configuration.
+
config SWAP
bool "Support for paging of anonymous memory (swap)"
depends on MMU && BLOCK
diff --git a/init/calibrate.c b/init/calibrate.c
index cfd7000..aae2f40 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -93,9 +93,6 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
* If the upper limit and lower limit of the timer_rate is
* >= 12.5% apart, redo calibration.
*/
- printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
- "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
- timer_rate_max, timer_rate_min, pre_start, pre_end);
if (start >= post_end)
printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
"timer_rate as we had a TSC wrap around"
@@ -248,30 +245,32 @@ recalibrate:
void __cpuinit calibrate_delay(void)
{
+ unsigned long lpj;
static bool printed;
if (preset_lpj) {
- loops_per_jiffy = preset_lpj;
+ lpj = preset_lpj;
if (!printed)
pr_info("Calibrating delay loop (skipped) "
"preset value.. ");
} else if ((!printed) && lpj_fine) {
- loops_per_jiffy = lpj_fine;
+ lpj = lpj_fine;
pr_info("Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
- } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
+ } else if ((lpj = calibrate_delay_direct()) != 0) {
if (!printed)
pr_info("Calibrating delay using timer "
"specific routine.. ");
} else {
if (!printed)
pr_info("Calibrating delay loop... ");
- loops_per_jiffy = calibrate_delay_converge();
+ lpj = calibrate_delay_converge();
}
if (!printed)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+ lpj/(500000/HZ),
+ (lpj/(5000/HZ)) % 100, lpj);
+ loops_per_jiffy = lpj;
printed = true;
}
diff --git a/init/main.c b/init/main.c
index cafba67..d7211fa 100644
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
timekeeping_init();
time_init();
profile_init();
+ call_function_init();
if (!irqs_disabled())
printk(KERN_CRIT "start_kernel(): bug: interrupts were "
"enabled early\n");
diff --git a/kernel/exit.c b/kernel/exit.c
index 20a4064..f2b321b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -561,29 +561,28 @@ void exit_files(struct task_struct *tsk)
#ifdef CONFIG_MM_OWNER
/*
- * Task p is exiting and it owned mm, lets find a new owner for it
+ * A task is exiting. If it owned this mm, find a new owner for the mm.
*/
-static inline int
-mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
-{
- /*
- * If there are other users of the mm and the owner (us) is exiting
- * we need to find a new owner to take on the responsibility.
- */
- if (atomic_read(&mm->mm_users) <= 1)
- return 0;
- if (mm->owner != p)
- return 0;
- return 1;
-}
-
void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *c, *g, *p = current;
retry:
- if (!mm_need_new_owner(mm, p))
+ /*
+ * If the exiting or execing task is not the owner, it's
+ * someone else's problem.
+ */
+ if (mm->owner != p)
return;
+ /*
+ * The current owner is exiting/execing and there are no other
+ * candidates. Do not leave the mm pointing to a possibly
+ * freed task structure.
+ */
+ if (atomic_read(&mm->mm_users) <= 1) {
+ mm->owner = NULL;
+ return;
+ }
read_lock(&tasklist_lock);
/*
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index b8cadf7..5bf924d 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
- depends on DEBUG_FS && CONSTRUCTORS
+ depends on DEBUG_FS
+ select CONSTRUCTORS
default n
---help---
This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d64bafb..0a7840ae 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
int ret = 0;
+ if (!desc)
+ return -EINVAL;
+
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ad6a81c..47613df 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
*/
set_user_nice(current, 0);
- if (sub_info->init) {
- retval = sub_info->init(sub_info);
- if (retval)
- goto fail;
- }
-
retval = -ENOMEM;
new = prepare_kernel_cred(current);
if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
new->cap_inheritable);
spin_unlock(&umh_sysctl_lock);
+ if (sub_info->init) {
+ retval = sub_info->init(sub_info, new);
+ if (retval) {
+ abort_creds(new);
+ goto fail;
+ }
+ }
+
commit_creds(new);
retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
* context in which call_usermodehelper_exec is called.
*/
void call_usermodehelper_setfns(struct subprocess_info *info,
- int (*init)(struct subprocess_info *info),
+ int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *info),
void *data)
{
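With the reordering above, an init callback now runs after the helper's credentials are prepared and receives them as a second argument. A hypothetical callback, example_umh_init(), that adjusts the supplementary groups before the helper is exec'd; the names are illustrative, not from the patch.

static int example_umh_init(struct subprocess_info *info, struct cred *new)
{
        struct group_info *gi;
        int err;

        gi = groups_alloc(0);           /* empty supplementary group list */
        if (!gi)
                return -ENOMEM;
        err = set_groups(new, gi);      /* attach to the not-yet-committed creds */
        put_group_info(gi);
        return err;                     /* non-zero aborts the helper */
}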
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 7d02d33..42ddbc6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
- if (error)
+ if (error) {
+ free_basic_memory_bitmaps();
atomic_inc(&snapshot_device_available);
+ }
data->frozen = 0;
data->ready = 0;
data->platform_support = 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff..7e59ffb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+#ifdef CONFIG_RCU_BOOST
+
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
int need_report = 0;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp;
- struct task_struct *t;
- /* Stop the CPU's kthread. */
- t = per_cpu(rcu_cpu_kthread_task, cpu);
- if (t != NULL) {
- per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
- kthread_stop(t);
- }
+ rcu_stop_cpu_kthread(cpu);
/* Exclude any attempts to start a new grace period. */
raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* Re-raise the RCU softirq if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
- invoke_rcu_cpu_kthread();
+ invoke_rcu_core();
}
/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
}
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
- invoke_rcu_cpu_kthread();
+ invoke_rcu_core();
}
#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
}
/* If there are callbacks ready, invoke them. */
- rcu_do_batch(rsp, rdp);
+ if (cpu_has_callbacks_ready_to_invoke(rdp))
+ invoke_rcu_callbacks(rsp, rdp);
}
/*
* Do softirq processing for the current CPU.
*/
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
* the current CPU with interrupts disabled, the rcu_cpu_kthread_task
* cannot disappear out from under us.
*/
-static void invoke_rcu_cpu_kthread(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __this_cpu_write(rcu_cpu_has_work, 1);
- if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
- local_irq_restore(flags);
- return;
- }
- wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
- local_irq_restore(flags);
-}
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
- struct task_struct *t;
-
- t = rnp->node_kthread_task;
- if (t != NULL)
- wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument. The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
- int policy;
- struct sched_param sp;
- struct task_struct *t;
-
- t = per_cpu(rcu_cpu_kthread_task, cpu);
- if (t == NULL)
- return;
- if (to_rt) {
- policy = SCHED_FIFO;
- sp.sched_priority = RCU_KTHREAD_PRIO;
- } else {
- policy = SCHED_NORMAL;
- sp.sched_priority = 0;
- }
- sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
- struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
- struct rcu_node *rnp = rdp->mynode;
-
- atomic_or(rdp->grpmask, &rnp->wakemask);
- invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted. Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
- struct sched_param sp;
- struct timer_list yield_timer;
-
- setup_timer_on_stack(&yield_timer, f, arg);
- mod_timer(&yield_timer, jiffies + 2);
- sp.sched_priority = 0;
- sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
- set_user_nice(current, 19);
- schedule();
- sp.sched_priority = RCU_KTHREAD_PRIO;
- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
- del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline. We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh. This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
- while (cpu_is_offline(cpu) ||
- !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
- smp_processor_id() != cpu) {
- if (kthread_should_stop())
- return 1;
- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
- per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
- local_bh_enable();
- schedule_timeout_uninterruptible(1);
- if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- local_bh_disable();
- }
- per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
- return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
- int cpu = (int)(long)arg;
- unsigned long flags;
- int spincnt = 0;
- unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
- char work;
- char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
- for (;;) {
- *statusp = RCU_KTHREAD_WAITING;
- rcu_wait(*workp != 0 || kthread_should_stop());
- local_bh_disable();
- if (rcu_cpu_kthread_should_stop(cpu)) {
- local_bh_enable();
- break;
- }
- *statusp = RCU_KTHREAD_RUNNING;
- per_cpu(rcu_cpu_kthread_loops, cpu)++;
- local_irq_save(flags);
- work = *workp;
- *workp = 0;
- local_irq_restore(flags);
- if (work)
- rcu_process_callbacks();
- local_bh_enable();
- if (*workp != 0)
- spincnt++;
- else
- spincnt = 0;
- if (spincnt > 10) {
- *statusp = RCU_KTHREAD_YIELDING;
- rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
- spincnt = 0;
- }
- }
- *statusp = RCU_KTHREAD_STOPPED;
- return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task. There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
- struct sched_param sp;
- struct task_struct *t;
-
- if (!rcu_kthreads_spawnable ||
- per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
- return 0;
- t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
- if (IS_ERR(t))
- return PTR_ERR(t);
- kthread_bind(t, cpu);
- per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
- WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
- per_cpu(rcu_cpu_kthread_task, cpu) = t;
- sp.sched_priority = RCU_KTHREAD_PRIO;
- sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
- return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed. We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
- int cpu;
- unsigned long flags;
- unsigned long mask;
- struct rcu_node *rnp = (struct rcu_node *)arg;
- struct sched_param sp;
- struct task_struct *t;
-
- for (;;) {
- rnp->node_kthread_status = RCU_KTHREAD_WAITING;
- rcu_wait(atomic_read(&rnp->wakemask) != 0);
- rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
- raw_spin_lock_irqsave(&rnp->lock, flags);
- mask = atomic_xchg(&rnp->wakemask, 0);
- rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
- for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
- if ((mask & 0x1) == 0)
- continue;
- preempt_disable();
- t = per_cpu(rcu_cpu_kthread_task, cpu);
- if (!cpu_online(cpu) || t == NULL) {
- preempt_enable();
- continue;
- }
- per_cpu(rcu_cpu_has_work, cpu) = 1;
- sp.sched_priority = RCU_KTHREAD_PRIO;
- sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
- preempt_enable();
- }
- }
- /* NOTREACHED */
- rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
- return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
- cpumask_var_t cm;
- int cpu;
- unsigned long mask = rnp->qsmaskinit;
-
- if (rnp->node_kthread_task == NULL)
- return;
- if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+ if (likely(!rsp->boost)) {
+ rcu_do_batch(rsp, rdp);
return;
- cpumask_clear(cm);
- for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
- if ((mask & 0x1) && cpu != outgoingcpu)
- cpumask_set_cpu(cpu, cm);
- if (cpumask_weight(cm) == 0) {
- cpumask_setall(cm);
- for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
- cpumask_clear_cpu(cpu, cm);
- WARN_ON_ONCE(cpumask_weight(cm) == 0);
}
- set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
- rcu_boost_kthread_setaffinity(rnp, cm);
- free_cpumask_var(cm);
+ invoke_rcu_callbacks_kthread();
}
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held. So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp)
+static void invoke_rcu_core(void)
{
- unsigned long flags;
- int rnp_index = rnp - &rsp->node[0];
- struct sched_param sp;
- struct task_struct *t;
-
- if (!rcu_kthreads_spawnable ||
- rnp->qsmaskinit == 0)
- return 0;
- if (rnp->node_kthread_task == NULL) {
- t = kthread_create(rcu_node_kthread, (void *)rnp,
- "rcun%d", rnp_index);
- if (IS_ERR(t))
- return PTR_ERR(t);
- raw_spin_lock_irqsave(&rnp->lock, flags);
- rnp->node_kthread_task = t;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- sp.sched_priority = 99;
- sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
- }
- return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+ raise_softirq(RCU_SOFTIRQ);
}
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
- int cpu;
- struct rcu_node *rnp;
- struct task_struct *t;
-
- rcu_kthreads_spawnable = 1;
- for_each_possible_cpu(cpu) {
- per_cpu(rcu_cpu_has_work, cpu) = 0;
- if (cpu_online(cpu)) {
- (void)rcu_spawn_one_cpu_kthread(cpu);
- t = per_cpu(rcu_cpu_kthread_task, cpu);
- if (t)
- wake_up_process(t);
- }
- }
- rnp = rcu_get_root(rcu_state);
- (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- if (rnp->node_kthread_task)
- wake_up_process(rnp->node_kthread_task);
- if (NUM_RCU_NODES > 1) {
- rcu_for_each_leaf_node(rcu_state, rnp) {
- (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- t = rnp->node_kthread_task;
- if (t)
- wake_up_process(t);
- rcu_wake_one_boost_kthread(rnp);
- }
- }
- return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
rcu_preempt_init_percpu_data(cpu);
}
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
- struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
- struct rcu_node *rnp = rdp->mynode;
-
- /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_kthreads_spawnable) {
- (void)rcu_spawn_one_cpu_kthread(cpu);
- if (rnp->node_kthread_task == NULL)
- (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
- }
-}
-
-/*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
- struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
- struct rcu_node *rnp = rdp->mynode;
- struct task_struct *t;
-
- t = per_cpu(rcu_cpu_kthread_task, cpu);
- if (t)
- wake_up_process(t);
-
- t = rnp->node_kthread_task;
- if (t)
- wake_up_process(t);
-
- rcu_wake_one_boost_kthread(rnp);
-}
-
/*
* Handle CPU online/offline notification events.
*/
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
rcu_prepare_kthreads(cpu);
break;
case CPU_ONLINE:
- rcu_online_kthreads(cpu);
case CPU_DOWN_FAILED:
rcu_node_kthread_setaffinity(rnp, -1);
rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
* We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b..01b2ccd 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
/* period because */
/* force_quiescent_state() */
/* was running. */
+ u8 boost; /* Subject to priority boost. */
unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff30..14dc7dd 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
&__get_cpu_var(rcu_preempt_data));
}
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+ rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+ local_irq_restore(flags);
+ return;
+ }
+ wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+ local_irq_restore(flags);
+}
+
+/*
* Set the affinity of the boost kthread. The CPU-hotplug locks are
* held, so no one should be messing with the existence of the boost
* kthread.
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (&rcu_preempt_state != rsp)
return 0;
+ rsp->boost = 1;
if (rnp->boost_kthread_task != NULL)
return 0;
t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
return 0;
}
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop the RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
{
- if (rnp->boost_kthread_task)
- wake_up_process(rnp->boost_kthread_task);
+ struct task_struct *t;
+
+ /* Stop the CPU's kthread. */
+ t = per_cpu(rcu_cpu_kthread_task, cpu);
+ if (t != NULL) {
+ per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+ kthread_stop(t);
+ }
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
+{
+ rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+ rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+ rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+ struct task_struct *t;
+
+ t = rnp->node_kthread_task;
+ if (t != NULL)
+ wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument. The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+ int policy;
+ struct sched_param sp;
+ struct task_struct *t;
+
+ t = per_cpu(rcu_cpu_kthread_task, cpu);
+ if (t == NULL)
+ return;
+ if (to_rt) {
+ policy = SCHED_FIFO;
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ } else {
+ policy = SCHED_NORMAL;
+ sp.sched_priority = 0;
+ }
+ sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+ struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+ struct rcu_node *rnp = rdp->mynode;
+
+ atomic_or(rdp->grpmask, &rnp->wakemask);
+ invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted. Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+ struct sched_param sp;
+ struct timer_list yield_timer;
+
+ setup_timer_on_stack(&yield_timer, f, arg);
+ mod_timer(&yield_timer, jiffies + 2);
+ sp.sched_priority = 0;
+ sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+ set_user_nice(current, 19);
+ schedule();
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+ del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline. We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh. This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+ while (cpu_is_offline(cpu) ||
+ !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+ smp_processor_id() != cpu) {
+ if (kthread_should_stop())
+ return 1;
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+ local_bh_enable();
+ schedule_timeout_uninterruptible(1);
+ if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ local_bh_disable();
+ }
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+ return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+ int cpu = (int)(long)arg;
+ unsigned long flags;
+ int spincnt = 0;
+ unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+ char work;
+ char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+ for (;;) {
+ *statusp = RCU_KTHREAD_WAITING;
+ rcu_wait(*workp != 0 || kthread_should_stop());
+ local_bh_disable();
+ if (rcu_cpu_kthread_should_stop(cpu)) {
+ local_bh_enable();
+ break;
+ }
+ *statusp = RCU_KTHREAD_RUNNING;
+ per_cpu(rcu_cpu_kthread_loops, cpu)++;
+ local_irq_save(flags);
+ work = *workp;
+ *workp = 0;
+ local_irq_restore(flags);
+ if (work)
+ rcu_kthread_do_work();
+ local_bh_enable();
+ if (*workp != 0)
+ spincnt++;
+ else
+ spincnt = 0;
+ if (spincnt > 10) {
+ *statusp = RCU_KTHREAD_YIELDING;
+ rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+ spincnt = 0;
+ }
+ }
+ *statusp = RCU_KTHREAD_STOPPED;
+ return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task. There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online. We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods. So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online. If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+ struct sched_param sp;
+ struct task_struct *t;
+
+ if (!rcu_kthreads_spawnable ||
+ per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+ return 0;
+ t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ if (cpu_online(cpu))
+ kthread_bind(t, cpu);
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+ WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ per_cpu(rcu_cpu_kthread_task, cpu) = t;
+ wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+ return 0;
+}
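A rough pthread analogue of the spawn/bind/boost sequence, assuming CPU 0 and RT priority 1: pthread_setaffinity_np() plays the role of kthread_bind() and pthread_setschedparam() that of sched_setscheduler_nocheck(). Build with -pthread; the SCHED_FIFO step needs CAP_SYS_NICE, so its failure is only reported:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
        (void)arg;
        printf("worker running on CPU %d\n", sched_getcpu());
        return NULL;
}

int main(void)
{
        pthread_t t;
        cpu_set_t set;
        struct sched_param sp = { .sched_priority = 1 };        /* assumed RT prio */

        if (pthread_create(&t, NULL, worker, NULL))
                return 1;

        CPU_ZERO(&set);                                 /* bind to CPU 0 ... */
        CPU_SET(0, &set);
        pthread_setaffinity_np(t, sizeof(set), &set);   /* ... like kthread_bind() */

        if (pthread_setschedparam(t, SCHED_FIFO, &sp))
                fprintf(stderr, "SCHED_FIFO denied, worker stays SCHED_OTHER\n");

        pthread_join(t, NULL);
        return 0;
}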
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed. We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+ int cpu;
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp = (struct rcu_node *)arg;
+ struct sched_param sp;
+ struct task_struct *t;
+
+ for (;;) {
+ rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+ rcu_wait(atomic_read(&rnp->wakemask) != 0);
+ rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ mask = atomic_xchg(&rnp->wakemask, 0);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+ if ((mask & 0x1) == 0)
+ continue;
+ preempt_disable();
+ t = per_cpu(rcu_cpu_kthread_task, cpu);
+ if (!cpu_online(cpu) || t == NULL) {
+ preempt_enable();
+ continue;
+ }
+ per_cpu(rcu_cpu_has_work, cpu) = 1;
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ preempt_enable();
+ }
+ }
+ /* NOTREACHED */
+ rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+ return 0;
+}
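The mask walk above maps bit 0 of wakemask to CPU grplo, bit 1 to grplo + 1, and so on. A tiny standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long mask = 0x15;      /* sample wakemask snapshot: bits 0, 2, 4 */
        int grplo = 4, grphi = 11;      /* sample per-node CPU range */
        int cpu;

        for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1) {
                if ((mask & 0x1) == 0)
                        continue;
                printf("would wake the kthread for CPU %d\n", cpu);
        }
        return 0;
}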
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set; callers pass -1 when
+ * there is no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+ cpumask_var_t cm;
+ int cpu;
+ unsigned long mask = rnp->qsmaskinit;
+
+ if (rnp->node_kthread_task == NULL)
+ return;
+ if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+ return;
+ cpumask_clear(cm);
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+ if ((mask & 0x1) && cpu != outgoingcpu)
+ cpumask_set_cpu(cpu, cm);
+ if (cpumask_weight(cm) == 0) {
+ cpumask_setall(cm);
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+ cpumask_clear_cpu(cpu, cm);
+ WARN_ON_ONCE(cpumask_weight(cm) == 0);
+ }
+ set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+ rcu_boost_kthread_setaffinity(rnp, cm);
+ free_cpumask_var(cm);
+}
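A userspace sketch of the same mask construction using cpu_set_t; CPU_SETSIZE stands in for the machine's CPU count, and the sample qsmask/grplo/grphi values are made up:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void node_affinity(unsigned long qsmask, int grplo, int grphi,
                          int outgoingcpu, cpu_set_t *cm)
{
        int cpu;

        CPU_ZERO(cm);
        for (cpu = grplo; cpu <= grphi; cpu++, qsmask >>= 1)
                if ((qsmask & 0x1) && cpu != outgoingcpu)
                        CPU_SET(cpu, cm);
        if (CPU_COUNT(cm) == 0) {
                /* Nothing left: allow any CPU outside this node's range. */
                for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
                        CPU_SET(cpu, cm);
                for (cpu = grplo; cpu <= grphi; cpu++)
                        CPU_CLR(cpu, cm);
        }
}

int main(void)
{
        cpu_set_t cm;

        node_affinity(0x3, 0, 1, 1, &cm);       /* CPUs 0-1, CPU 1 going away */
        printf("CPU0 %s, %d CPU(s) total\n",
               CPU_ISSET(0, &cm) ? "allowed" : "excluded", CPU_COUNT(&cm));
        return 0;
}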
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held. So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+ struct rcu_node *rnp)
+{
+ unsigned long flags;
+ int rnp_index = rnp - &rsp->node[0];
+ struct sched_param sp;
+ struct task_struct *t;
+
+ if (!rcu_kthreads_spawnable ||
+ rnp->qsmaskinit == 0)
+ return 0;
+ if (rnp->node_kthread_task == NULL) {
+ t = kthread_create(rcu_node_kthread, (void *)rnp,
+ "rcun%d", rnp_index);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ rnp->node_kthread_task = t;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ sp.sched_priority = 99;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+ }
+ return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+ int cpu;
+ struct rcu_node *rnp;
+
+ rcu_kthreads_spawnable = 1;
+ for_each_possible_cpu(cpu) {
+ per_cpu(rcu_cpu_has_work, cpu) = 0;
+ if (cpu_online(cpu))
+ (void)rcu_spawn_one_cpu_kthread(cpu);
+ }
+ rnp = rcu_get_root(rcu_state);
+ (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+ if (NUM_RCU_NODES > 1) {
+ rcu_for_each_leaf_node(rcu_state, rnp)
+ (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+ }
+ return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+ struct rcu_node *rnp = rdp->mynode;
+
+ /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+ if (rcu_kthreads_spawnable) {
+ (void)rcu_spawn_one_cpu_kthread(cpu);
+ if (rnp->node_kthread_task == NULL)
+ (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+ }
}
#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
- cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
{
+ WARN_ON_ONCE(1);
}
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp,
- int rnp_index)
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
- return 0;
}
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
*
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do a
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
* later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
*/
int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
/* If RCU callbacks are still pending, RCU still needs this CPU. */
if (c)
- invoke_rcu_cpu_kthread();
+ invoke_rcu_core();
return c;
}
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3..4e14487 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
#define RCU_TREE_NONCORE
#include "rcutree.h"
+#ifdef CONFIG_RCU_BOOST
+
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
return "SRWOY"[kthread_status];
}
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
- seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
+ seq_printf(m, " ql=%ld qs=%c%c%c%c",
rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
- ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+ ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+ seq_printf(m, " kt=%d/%c/%d ktl=%x",
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
rdp->cpu)),
per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
- per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
- rdp->blimit);
+ per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+ seq_printf(m, " b=%ld", rdp->blimit);
seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
- seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
+ seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
- ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+ ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+ seq_printf(m, ",%d,\"%c\"",
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
- rdp->cpu)),
- rdp->blimit);
+ rdp->cpu)));
+#endif /* #ifdef CONFIG_RCU_BOOST */
+ seq_printf(m, ",%ld", rdp->blimit);
seq_printf(m, ",%lu,%lu,%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
- seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
+ seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+#ifdef CONFIG_RCU_BOOST
+ seq_puts(m, "\"kt\",\"ktl\"");
+#endif /* #ifdef CONFIG_RCU_BOOST */
+ seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
seq_puts(m, "\"rcu_preempt:\"\n");
PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 88725c9..10d0182 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
* to move current somewhere else, making room for our non-migratable
* task.
*/
- if (p->prio == rq->curr->prio && !need_resched())
+ if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task)
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
+ /* Make sure the mask is initialized first */
+ if (unlikely(!lowest_mask))
+ return -1;
+
if (task->rt.nr_cpus_allowed == 1)
return -1; /* No other targets possible */
diff --git a/kernel/signal.c b/kernel/signal.c
index 86c32b8..ff76786 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
/**
* sys_rt_sigprocmask - change the list of currently blocked signals
* @how: whether to add, remove, or set signals
- * @set: stores pending signals
+ * @nset: stores pending signals
* @oset: previous value of signal mask if non-null
* @sigsetsize: size of sigset_t type
*/
diff --git a/kernel/smp.c b/kernel/smp.c
index 73a1951..fb67dfa 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
.notifier_call = hotplug_cfd,
};
-static int __cpuinit init_call_single_data(void)
+void __init call_function_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&hotplug_cfd_notifier);
-
- return 0;
}
-early_initcall(init_call_single_data);
/*
* csd_lock/csd_unlock used to serialize access to per-cpu csd resources
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1396017..40cf63d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
- "TASKLET", "SCHED", "HRTIMER"
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9ffea36..fc0f220 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -285,16 +285,18 @@ ret:
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
struct listener_list *listeners;
- struct listener *s, *tmp;
+ struct listener *s, *tmp, *s2;
unsigned int cpu;
if (!cpumask_subset(mask, cpu_possible_mask))
return -EINVAL;
+ s = NULL;
if (isadd == REGISTER) {
for_each_cpu(cpu, mask) {
- s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
- cpu_to_node(cpu));
+ if (!s)
+ s = kmalloc_node(sizeof(struct listener),
+ GFP_KERNEL, cpu_to_node(cpu));
if (!s)
goto cleanup;
s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
listeners = &per_cpu(listener_array, cpu);
down_write(&listeners->sem);
+ list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
+ if (s2->pid == pid)
+ goto next_cpu;
+ }
list_add(&s->list, &listeners->list);
+ s = NULL;
+next_cpu:
up_write(&listeners->sem);
}
+ kfree(s);
return 0;
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2d96624..59f369f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -42,15 +42,75 @@ static struct alarm_base {
clockid_t base_clockid;
} alarm_bases[ALARM_NUMTYPE];
+/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
+static ktime_t freezer_delta;
+static DEFINE_SPINLOCK(freezer_delta_lock);
+
#ifdef CONFIG_RTC_CLASS
/* rtc timer and device for setting alarm wakeups at suspend */
static struct rtc_timer rtctimer;
static struct rtc_device *rtcdev;
-#endif
+static DEFINE_SPINLOCK(rtcdev_lock);
-/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
-static ktime_t freezer_delta;
-static DEFINE_SPINLOCK(freezer_delta_lock);
+/**
+ * has_wakealarm - check rtc device has wakealarm ability
+ * @dev: current device
+ * @name_ptr: name to be returned
+ *
+ * This helper function checks to see if the rtc device can wake
+ * from suspend.
+ */
+static int has_wakealarm(struct device *dev, void *name_ptr)
+{
+ struct rtc_device *candidate = to_rtc_device(dev);
+
+ if (!candidate->ops->set_alarm)
+ return 0;
+ if (!device_may_wakeup(candidate->dev.parent))
+ return 0;
+
+ *(const char **)name_ptr = dev_name(dev);
+ return 1;
+}
+
+/**
+ * alarmtimer_get_rtcdev - Return selected rtcdevice
+ *
+ * This function returns the rtc device to use for wakealarms.
+ * If one has not already been chosen, it checks to see if a
+ * functional rtc device is available.
+ */
+static struct rtc_device *alarmtimer_get_rtcdev(void)
+{
+ struct device *dev;
+ char *str;
+ unsigned long flags;
+ struct rtc_device *ret;
+
+ spin_lock_irqsave(&rtcdev_lock, flags);
+ if (!rtcdev) {
+ /* Find an rtc device and init the rtc_timer */
+ dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
+ /* If we have a device then str is valid. See has_wakealarm() */
+ if (dev) {
+ rtcdev = rtc_class_open(str);
+ /*
+ * Drop the reference we got in class_find_device();
+ * rtc_class_open() takes its own.
+ */
+ put_device(dev);
+ rtc_timer_init(&rtctimer, NULL, NULL);
+ }
+ }
+ ret = rtcdev;
+ spin_unlock_irqrestore(&rtcdev_lock, flags);
+
+ return ret;
+}
+#else
+#define alarmtimer_get_rtcdev() (0)
+#define rtcdev (0)
+#endif
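The shape of alarmtimer_get_rtcdev() -- look the device up lazily on first use, under a lock, rather than from an initcall that may run too early -- in a small userspace sketch; the malloc() stands in for probing the RTC class and is purely hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct resource { int id; };            /* hypothetical stand-in for rtc_device */

static struct resource *cached;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct resource *get_resource(void)
{
        struct resource *ret;

        pthread_mutex_lock(&cache_lock);
        if (!cached) {
                cached = malloc(sizeof(*cached));       /* "probe" for the device */
                if (cached)
                        cached->id = 42;
        }
        ret = cached;
        pthread_mutex_unlock(&cache_lock);
        return ret;
}

int main(void)
{
        struct resource *r = get_resource();

        printf("resource %s\n", r ? "found" : "absent");
        return 0;
}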
/**
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
struct rtc_time tm;
ktime_t min, now;
unsigned long flags;
+ struct rtc_device *rtc;
int i;
spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
freezer_delta = ktime_set(0, 0);
spin_unlock_irqrestore(&freezer_delta_lock, flags);
+ rtc = rtcdev;
/* If we have no rtcdev, just return */
- if (!rtcdev)
+ if (!rtc)
return 0;
/* Find the soonest timer to expire */
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
WARN_ON(min.tv64 < NSEC_PER_SEC);
/* Setup an rtc timer to fire that far in the future */
- rtc_timer_cancel(rtcdev, &rtctimer);
- rtc_read_time(rtcdev, &tm);
+ rtc_timer_cancel(rtc, &rtctimer);
+ rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
now = ktime_add(now, min);
- rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0));
+ rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
return 0;
}
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
return hrtimer_get_res(baseid, tp);
}
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
{
struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
*tp = ktime_to_timespec(base->gettime());
return 0;
}
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
enum alarmtimer_type type;
struct alarm_base *base;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
*/
static int alarm_timer_del(struct k_itimer *timr)
{
+ if (!rtcdev)
+ return -ENOTSUPP;
+
alarm_cancel(&timr->it.alarmtimer);
return 0;
}
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ if (!rtcdev)
+ return -ENOTSUPP;
+
/* Save old values */
old_setting->it_interval =
ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
int ret = 0;
struct restart_block *restart;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
}
device_initcall(alarmtimer_init);
-#ifdef CONFIG_RTC_CLASS
-/**
- * has_wakealarm - check rtc device has wakealarm ability
- * @dev: current device
- * @name_ptr: name to be returned
- *
- * This helper function checks to see if the rtc device can wake
- * from suspend.
- */
-static int __init has_wakealarm(struct device *dev, void *name_ptr)
-{
- struct rtc_device *candidate = to_rtc_device(dev);
-
- if (!candidate->ops->set_alarm)
- return 0;
- if (!device_may_wakeup(candidate->dev.parent))
- return 0;
-
- *(const char **)name_ptr = dev_name(dev);
- return 1;
-}
-
-/**
- * alarmtimer_init_late - Late initializing of alarmtimer code
- *
- * This function locates a rtc device to use for wakealarms.
- * Run as late_initcall to make sure rtc devices have been
- * registered.
- */
-static int __init alarmtimer_init_late(void)
-{
- struct device *dev;
- char *str;
-
- /* Find an rtc device and init the rtc_timer */
- dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
- /* If we have a device then str is valid. See has_wakealarm() */
- if (dev) {
- rtcdev = rtc_class_open(str);
- /*
- * Drop the reference we got in class_find_device,
- * rtc_open takes its own.
- */
- put_device(dev);
- }
- if (!rtcdev) {
- printk(KERN_WARNING "No RTC device found, ALARM timers will"
- " not wake from suspend");
- }
- rtc_timer_init(&rtctimer, NULL, NULL);
-
- return 0;
-}
-#else
-static int __init alarmtimer_init_late(void)
-{
- printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
- " will not wake from suspend");
- return 0;
-}
-#endif
-late_initcall(alarmtimer_init_late);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1c95fd6..e0980f0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
static int watchdog_running;
static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
if (!watchdog_running)
goto out;
- wdnow = watchdog->read(watchdog);
- wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
- watchdog->mult, watchdog->shift);
- watchdog_last = wdnow;
-
list_for_each_entry(cs, &watchdog_list, wd_list) {
/* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
continue;
}
+ local_irq_disable();
csnow = cs->read(cs);
+ wdnow = watchdog->read(watchdog);
+ local_irq_enable();
/* Clocksource initialized ? */
if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
cs->flags |= CLOCK_SOURCE_WATCHDOG;
- cs->wd_last = csnow;
+ cs->wd_last = wdnow;
+ cs->cs_last = csnow;
continue;
}
- /* Check the deviation from the watchdog clocksource. */
- cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+ wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+ watchdog->mult, watchdog->shift);
+
+ cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
cs->mask, cs->mult, cs->shift);
- cs->wd_last = csnow;
+ cs->cs_last = csnow;
+ cs->wd_last = wdnow;
+
+ /* Check the deviation from the watchdog clocksource. */
if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
clocksource_unstable(cs, cs_nsec - wd_nsec);
continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
return;
init_timer(&watchdog_timer);
watchdog_timer.function = clocksource_watchdog;
- watchdog_last = watchdog->read(watchdog);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
watchdog_running = 1;
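The point of keeping wd_last/cs_last per clocksource is that each delta is computed from two reads of the same counter, masked so that a wrap does not produce a huge bogus interval. A standalone sketch of that arithmetic, with an assumed 32-bit counter and made-up mult/shift values:

#include <stdint.h>
#include <stdio.h>

/* Same shape as clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;          /* 32-bit counter */
        uint64_t last = 0xfffffff0ULL;          /* read just before the wrap */
        uint64_t now  = 0x00000010ULL;          /* read just after the wrap */
        uint64_t delta = (now - last) & mask;   /* masking handles the wrap */

        printf("delta=%llu cycles, %llu ns\n",
               (unsigned long long)delta,
               (unsigned long long)cyc2ns(delta, 1000, 10));
        return 0;
}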
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1ee417f..908038f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
{
char *func, *command, *next = buff;
struct ftrace_func_command *p;
- int ret;
+ int ret = -EINVAL;
func = strsep(&next, ":");
@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod,
{
unsigned long *p;
unsigned long addr;
+ unsigned long flags;
mutex_lock(&ftrace_lock);
p = start;
@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod,
ftrace_record_ip(addr);
}
+ /*
+ * Disable interrupts to prevent interrupts from executing
+ * code that is being modified.
+ */
+ local_irq_save(flags);
ftrace_update_code(mod);
+ local_irq_restore(flags);
mutex_unlock(&ftrace_lock);
return 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f925c45..27d13b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST
-static int kprobe_trace_selftest_target(int a1, int a2, int a3,
- int a4, int a5, int a6)
+/*
+ * The "__used" keeps gcc from removing the function symbol
+ * from the kallsyms table.
+ */
+static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
+ int a4, int a5, int a6)
{
return a1 + a2 + a3 + a4 + a5 + a6;
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index dff763b..1f06468 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
const char **fmt = v;
int start_index;
- if (!fmt)
- fmt = __start___trace_bprintk_fmt + *pos;
-
start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
if (*pos < start_index)
- return fmt;
+ return __start___trace_bprintk_fmt + *pos;
return find_next_mod_format(start_index, v, fmt, pos);
}
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 41baf02..3f3b681 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
* __bitmap_parselist - convert list format ASCII string to bitmap
- * @bp: read nul-terminated user string from this buffer
+ * @buf: read nul-terminated user string from this buffer
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
diff --git a/lib/kobject.c b/lib/kobject.c
index 82dc34c..640bd98 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -948,14 +948,14 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
}
-const void *kobj_ns_current(enum kobj_ns_type type)
+void *kobj_ns_grab_current(enum kobj_ns_type type)
{
- const void *ns = NULL;
+ void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->current_ns();
+ ns = kobj_ns_ops_tbl[type]->grab_current_ns();
spin_unlock(&kobj_ns_type_lock);
return ns;
@@ -987,23 +987,15 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
return ns;
}
-/*
- * kobj_ns_exit - invalidate a namespace tag
- *
- * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
- * @ns: the actual namespace being invalidated
- *
- * This is called when a tag is no longer valid. For instance,
- * when a network namespace exits, it uses this helper to
- * make sure no sb's sysfs_info points to the now-invalidated
- * netns.
- */
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
+void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
- sysfs_exit_ns(type, ns);
+ spin_lock(&kobj_ns_type_lock);
+ if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
+ kobj_ns_ops_tbl[type]->drop_ns(ns);
+ spin_unlock(&kobj_ns_type_lock);
}
-
EXPORT_SYMBOL(kobject_get);
EXPORT_SYMBOL(kobject_put);
EXPORT_SYMBOL(kobject_del);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 93ca08b..99093b3 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -110,6 +110,11 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
+unsigned long swioltb_nr_tbl(void)
+{
+ return io_tlb_nslabs;
+}
+
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c112056..4365df3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -666,6 +666,8 @@ char *ip6_compressed_string(char *p, const char *addr)
colonpos = i;
}
}
+ if (longest == 1) /* don't compress a single 0 */
+ colonpos = -1;
/* emit address */
for (i = 0; i < range; i++) {
@@ -826,7 +828,7 @@ int kptr_restrict __read_mostly;
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I6c' for IPv6 addresses printed as specified by
- * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
+ * http://tools.ietf.org/html/rfc5952
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
diff --git a/mm/compaction.c b/mm/compaction.c
index 021a296..6cc604b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -144,9 +144,20 @@ static void isolate_freepages(struct zone *zone,
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
+ /*
+ * Initialise the free scanner. The starting point is where we last
+ * scanned from (or the end of the zone if starting). The low point
+ * is the end of the pageblock the migration scanner is using.
+ */
pfn = cc->free_pfn;
low_pfn = cc->migrate_pfn + pageblock_nr_pages;
- high_pfn = low_pfn;
+
+ /*
+ * Take care that if the migration scanner is at the end of the zone
+ * that the free scanner does not accidentally move to the next zone
+ * in the next isolation cycle.
+ */
+ high_pfn = min(low_pfn, pfn);
/*
* Isolate free pages until enough are available to migrate the
@@ -240,11 +251,18 @@ static bool too_many_isolated(struct zone *zone)
return isolated > (inactive + active) / 2;
}
+/* possible outcome of isolate_migratepages */
+typedef enum {
+ ISOLATE_ABORT, /* Abort compaction now */
+ ISOLATE_NONE, /* No pages isolated, continue scanning */
+ ISOLATE_SUCCESS, /* Pages isolated, migrate */
+} isolate_migrate_t;
+
/*
* Isolate all pages that can be migrated from the block pointed to by
* the migrate scanner within compact_control.
*/
-static unsigned long isolate_migratepages(struct zone *zone,
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
struct compact_control *cc)
{
unsigned long low_pfn, end_pfn;
@@ -261,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
/* Do not cross the free scanner or scan within a memory hole */
if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
cc->migrate_pfn = end_pfn;
- return 0;
+ return ISOLATE_NONE;
}
/*
@@ -270,10 +288,14 @@ static unsigned long isolate_migratepages(struct zone *zone,
* delay for some time until fewer pages are isolated
*/
while (unlikely(too_many_isolated(zone))) {
+ /* async migration should just abort */
+ if (!cc->sync)
+ return ISOLATE_ABORT;
+
congestion_wait(BLK_RW_ASYNC, HZ/10);
if (fatal_signal_pending(current))
- return 0;
+ return ISOLATE_ABORT;
}
/* Time to isolate some pages for migration */
@@ -358,7 +380,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
- return cc->nr_migratepages;
+ return ISOLATE_SUCCESS;
}
/*
@@ -420,13 +442,6 @@ static int compact_finished(struct zone *zone,
if (cc->free_pfn <= cc->migrate_pfn)
return COMPACT_COMPLETE;
- /* Compaction run is not finished if the watermark is not met */
- watermark = low_wmark_pages(zone);
- watermark += (1 << cc->order);
-
- if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
- return COMPACT_CONTINUE;
-
/*
* order == -1 is expected when compacting via
* /proc/sys/vm/compact_memory
@@ -434,6 +449,13 @@ static int compact_finished(struct zone *zone,
if (cc->order == -1)
return COMPACT_CONTINUE;
+ /* Compaction run is not finished if the watermark is not met */
+ watermark = low_wmark_pages(zone);
+ watermark += (1 << cc->order);
+
+ if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+ return COMPACT_CONTINUE;
+
/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
/* Job done if page is free of the right migratetype */
@@ -461,6 +483,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
unsigned long watermark;
/*
+ * order == -1 is expected when compacting via
+ * /proc/sys/vm/compact_memory
+ */
+ if (order == -1)
+ return COMPACT_CONTINUE;
+
+ /*
* Watermarks for order-0 must be met for compaction. Note the 2UL.
* This is because during migration, copies of pages need to be
* allocated and for a short time, the footprint is higher
@@ -470,17 +499,11 @@ unsigned long compaction_suitable(struct zone *zone, int order)
return COMPACT_SKIPPED;
/*
- * order == -1 is expected when compacting via
- * /proc/sys/vm/compact_memory
- */
- if (order == -1)
- return COMPACT_CONTINUE;
-
- /*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
*
- * index of -1 implies allocations might succeed dependingon watermarks
+ * index of -1000 implies allocations might succeed depending on
+ * watermarks
* index towards 0 implies failure is due to lack of memory
* index towards 1000 implies failure is due to fragmentation
*
@@ -490,7 +513,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
return COMPACT_SKIPPED;
- if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
+ if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
+ 0, 0))
return COMPACT_PARTIAL;
return COMPACT_CONTINUE;
@@ -522,8 +546,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
unsigned long nr_migrate, nr_remaining;
int err;
- if (!isolate_migratepages(zone, cc))
+ switch (isolate_migratepages(zone, cc)) {
+ case ISOLATE_ABORT:
+ ret = COMPACT_PARTIAL;
+ goto out;
+ case ISOLATE_NONE:
continue;
+ case ISOLATE_SUCCESS:
+ ;
+ }
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
@@ -547,6 +578,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
}
+out:
/* Release free pages and check accounting */
cc->nr_freepages -= release_freepages(&cc->freepages);
VM_BUG_ON(cc->nr_freepages != 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 615d974..81532f29 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2234,11 +2234,8 @@ static void khugepaged_loop(void)
while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
hpage = khugepaged_alloc_hugepage();
- if (unlikely(!hpage)) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ if (unlikely(!hpage))
break;
- }
- count_vm_event(THP_COLLAPSE_ALLOC);
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6402458..bfcf153 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1111,6 +1111,14 @@ static void __init gather_bootmem_prealloc(void)
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
prep_new_huge_page(h, page, page_to_nid(page));
+ /*
+ * If we had gigantic hugepages allocated at boot time, we need
+ * to restore the 'stolen' pages to totalram_pages in order to
+ * fix confusing memory reports from free(1) and another
+ * side-effects, like CommitLimit going negative.
+ */
+ if (h->order > (MAX_ORDER - 1))
+ totalram_pages += 1 << h->order;
}
}
diff --git a/mm/ksm.c b/mm/ksm.c
index d708b3e..9a68b0c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1302,6 +1302,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
ksm_scan.mm_slot = slot;
spin_unlock(&ksm_mmlist_lock);
+ /*
+ * Although we tested list_empty() above, a racing __ksm_exit
+ * of the last mm on the list may have removed it since then.
+ */
+ if (slot == &ksm_mm_head)
+ return NULL;
next_mm:
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a..ddffc74 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,6 +35,7 @@
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -359,7 +360,7 @@ enum charge_type {
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +736,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
struct mem_cgroup, css);
}
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
struct mem_cgroup *mem = NULL;
@@ -1663,15 +1664,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
/* If memsw_is_minimum==1, swap-out is of-no-use. */
- if (root_mem->memsw_is_minimum)
+ if (!check_soft && root_mem->memsw_is_minimum)
noswap = true;
while (1) {
victim = mem_cgroup_select_victim(root_mem);
if (victim == root_mem) {
loop++;
- if (loop >= 1)
- drain_all_stock_async();
+ /*
+ * We are not draining per cpu cached charges during
+ * soft limit reclaim because global reclaim doesn't
+ * care about charges. It tries to free some memory and
+ * charges will not give any.
+ */
+ if (!check_soft && loop >= 1)
+ drain_all_stock_async(root_mem);
if (loop >= 2) {
/*
* If we have not been able to reclaim
@@ -1934,9 +1941,11 @@ struct memcg_stock_pcp {
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
struct work_struct work;
+ unsigned long flags;
+#define FLUSHING_CACHED_CHARGE (0)
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
/*
* Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1993,7 @@ static void drain_local_stock(struct work_struct *dummy)
{
struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
drain_stock(stock);
+ clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}
/*
@@ -2008,26 +2018,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
* expects some charges will be back to res_counter later but cannot wait for
* it.
*/
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
{
- int cpu;
- /* This function is for scheduling "drain" in asynchronous way.
- * The result of "drain" is not directly handled by callers. Then,
- * if someone is calling drain, we don't have to call drain more.
- * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
- * there is a race. We just do loose check here.
+ int cpu, curcpu;
+ /*
+ * If a drain is already in progress, avoid adding more kworker runs.
*/
- if (atomic_read(&memcg_drain_count))
+ if (!mutex_trylock(&percpu_charge_mutex))
return;
/* Notify other cpus that system-wide "drain" is running */
- atomic_inc(&memcg_drain_count);
get_online_cpus();
+ /*
+ * Get a hint for avoiding draining charges on the current cpu,
+ * which must be exhausted by our charging. It is not required that
+ * this be a precise check, so we use raw_smp_processor_id() instead of
+ * get_cpu()/put_cpu().
+ */
+ curcpu = raw_smp_processor_id();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
- schedule_work_on(cpu, &stock->work);
+ struct mem_cgroup *mem;
+
+ if (cpu == curcpu)
+ continue;
+
+ mem = stock->cached;
+ if (!mem)
+ continue;
+ if (mem != root_mem) {
+ if (!root_mem->use_hierarchy)
+ continue;
+ /* check whether "mem" is under tree of "root_mem" */
+ if (!css_is_ancestor(&mem->css, &root_mem->css))
+ continue;
+ }
+ if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+ schedule_work_on(cpu, &stock->work);
}
put_online_cpus();
- atomic_dec(&memcg_drain_count);
+ mutex_unlock(&percpu_charge_mutex);
/* We don't wait for flush_work */
}
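The mutex_trylock() here coalesces concurrent drain requests: whoever loses the race simply returns and relies on the drain already in flight. A minimal userspace sketch of that pattern with a pthread mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_all(void)
{
        if (pthread_mutex_trylock(&drain_lock) != 0)
                return;                 /* someone else is already draining */

        printf("draining per-CPU caches...\n");

        pthread_mutex_unlock(&drain_lock);
}

int main(void)
{
        drain_all();
        drain_all();
        return 0;
}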
@@ -2035,9 +2064,9 @@ static void drain_all_stock_async(void)
static void drain_all_stock_sync(void)
{
/* called when force_empty is called */
- atomic_inc(&memcg_drain_count);
+ mutex_lock(&percpu_charge_mutex);
schedule_on_each_cpu(drain_local_stock);
- atomic_dec(&memcg_drain_count);
+ mutex_unlock(&percpu_charge_mutex);
}
/*
@@ -4640,6 +4669,7 @@ static struct cftype mem_cgroup_files[] = {
{
.name = "numa_stat",
.open = mem_control_numa_stat_open,
+ .mode = S_IRUGO,
},
#endif
};
@@ -5414,18 +5444,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *old_cont,
struct task_struct *p)
{
- struct mm_struct *mm;
+ struct mm_struct *mm = get_task_mm(p);
- if (!mc.to)
- /* no need to move charge */
- return;
-
- mm = get_task_mm(p);
if (mm) {
- mem_cgroup_move_charge(mm);
+ if (mc.to)
+ mem_cgroup_move_charge(mm);
+ put_swap_token(mm);
mmput(mm);
}
- mem_cgroup_clear_mc();
+ if (mc.to)
+ mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5c8f7e0..740c4f5 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
+#include <linux/mm_inline.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -390,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
struct task_struct *tsk;
struct anon_vma *av;
- read_lock(&tasklist_lock);
av = page_lock_anon_vma(page);
if (av == NULL) /* Not actually mapped anymore */
- goto out;
+ return;
+
+ read_lock(&tasklist_lock);
for_each_process (tsk) {
struct anon_vma_chain *vmac;
@@ -407,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
add_to_kill(tsk, page, vma, to_kill, tkc);
}
}
- page_unlock_anon_vma(av);
-out:
read_unlock(&tasklist_lock);
+ page_unlock_anon_vma(av);
}
/*
@@ -423,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
struct prio_tree_iter iter;
struct address_space *mapping = page->mapping;
- /*
- * A note on the locking order between the two locks.
- * We don't rely on this particular order.
- * If you have some other code that needs a different order
- * feel free to switch them around. Or add a reverse link
- * from mm_struct to task_struct, then this could be all
- * done without taking tasklist_lock and looping over all tasks.
- */
-
- read_lock(&tasklist_lock);
mutex_lock(&mapping->i_mmap_mutex);
+ read_lock(&tasklist_lock);
for_each_process(tsk) {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -453,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
add_to_kill(tsk, page, vma, to_kill, tkc);
}
}
- mutex_unlock(&mapping->i_mmap_mutex);
read_unlock(&tasklist_lock);
+ mutex_unlock(&mapping->i_mmap_mutex);
}
/*
@@ -1468,7 +1460,8 @@ int soft_offline_page(struct page *page, int flags)
put_page(page);
if (!ret) {
LIST_HEAD(pagelist);
-
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
0, true);
diff --git a/mm/memory.c b/mm/memory.c
index 6953d39..40b7531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
int force_flush = 0;
int rss[NR_MM_COUNTERS];
spinlock_t *ptl;
+ pte_t *start_pte;
pte_t *pte;
again:
init_rss_vec(rss);
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(pte - 1, ptl);
+ pte_unmap_unlock(start_pte, ptl);
/*
* mmu_gather ran out of room to batch pages, we break out of
@@ -1296,7 +1298,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
+ * @tlb: address of the caller's struct mmu_gather
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
@@ -2796,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
-int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
-{
- struct address_space *mapping = inode->i_mapping;
-
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- down_write(&inode->i_alloc_sem);
- unmap_mapping_range(mapping, offset, (end - offset), 1);
- truncate_inode_pages_range(mapping, offset, end);
- unmap_mapping_range(mapping, offset, (end - offset), 1);
- inode->i_op->truncate_range(inode, offset, end);
- up_write(&inode->i_alloc_sem);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
-}
-
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9f64637..c46887b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -494,6 +494,14 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
/* init node's zones as empty zones, we don't have any present pages.*/
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+ /*
+ * The node we allocated has no zone fallback lists. For avoiding
+ * to access not-initialized zonelist, build here.
+ */
+ mutex_lock(&zonelists_mutex);
+ build_all_zonelists(NULL);
+ mutex_unlock(&zonelists_mutex);
+
return pgdat;
}
@@ -515,7 +523,7 @@ int mem_online_node(int nid)
lock_memory_hotplug();
pgdat = hotadd_new_pgdat(nid, 0);
- if (pgdat) {
+ if (!pgdat) {
ret = -ENOMEM;
goto out;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index e4a5c91..666e4e6 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -288,7 +288,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
*/
__dec_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(newpage, NR_FILE_PAGES);
- if (PageSwapBacked(page)) {
+ if (!PageSwapCache(page) && PageSwapBacked(page)) {
__dec_zone_page_state(page, NR_SHMEM);
__inc_zone_page_state(newpage, NR_SHMEM);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index bbdc9af..d49736f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -906,14 +906,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
if (anon_vma)
return anon_vma;
try_prev:
- /*
- * It is potentially slow to have to call find_vma_prev here.
- * But it's only on the first write fault on the vma, not
- * every time, and we could devise a way to avoid it later
- * (e.g. stash info in next's anon_vma_node when assigning
- * an anon_vma, or when trying vma_merge). Another time.
- */
- BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+ near = vma->vm_prev;
if (!near)
goto none;
@@ -2044,9 +2037,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
return -EINVAL;
/* Find the first overlapping VMA */
- vma = find_vma_prev(mm, start, &prev);
+ vma = find_vma(mm, start);
if (!vma)
return 0;
+ prev = vma->vm_prev;
/* we have start < vma->vm_end */
/* if it doesn't overlap, we have nothing.. */
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 74ccff6..53bffc6 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -162,13 +162,13 @@ static void free_page_cgroup(void *addr)
}
#endif
-static int __meminit init_section_page_cgroup(unsigned long pfn)
+static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
struct page_cgroup *base, *pc;
struct mem_section *section;
unsigned long table_size;
unsigned long nr;
- int nid, index;
+ int index;
nr = pfn_to_section_nr(pfn);
section = __nr_to_section(nr);
@@ -176,7 +176,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
if (section->page_cgroup)
return 0;
- nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
base = alloc_page_cgroup(table_size, nid);
@@ -196,7 +195,11 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
pc = base + index;
init_page_cgroup(pc, nr);
}
-
+ /*
+ * The passed "pfn" may not be aligned to SECTION. For the calculation
+ * we need to apply a mask.
+ */
+ pfn &= PAGE_SECTION_MASK;
section->page_cgroup = base - pfn;
total_usage += table_size;
return 0;
@@ -225,10 +228,20 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
+ if (nid == -1) {
+ /*
+ * In this case, "nid" already exists and contains valid memory.
+ * "start_pfn" passed to us is a pfn which is an arg for
+ * online__pages(), and start_pfn should exist.
+ */
+ nid = pfn_to_nid(start_pfn);
+ VM_BUG_ON(!node_state(nid, N_ONLINE));
+ }
+
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
if (!pfn_present(pfn))
continue;
- fail = init_section_page_cgroup(pfn);
+ fail = init_section_page_cgroup(pfn, nid);
}
if (!fail)
return 0;
@@ -284,25 +297,47 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
void __init page_cgroup_init(void)
{
unsigned long pfn;
- int fail = 0;
+ int nid;
if (mem_cgroup_disabled())
return;
- for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
- if (!pfn_present(pfn))
- continue;
- fail = init_section_page_cgroup(pfn);
- }
- if (fail) {
- printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
- panic("Out of memory");
- } else {
- hotplug_memory_notifier(page_cgroup_callback, 0);
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ unsigned long start_pfn, end_pfn;
+
+ start_pfn = node_start_pfn(nid);
+ end_pfn = node_end_pfn(nid);
+ /*
+ * start_pfn and end_pfn may not be aligned to SECTION and the
+ * page->flags of out of node pages are not initialized. So we
+ * scan [start_pfn, the biggest section's pfn < end_pfn) here.
+ */
+ for (pfn = start_pfn;
+ pfn < end_pfn;
+ pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
+
+ if (!pfn_valid(pfn))
+ continue;
+ /*
+ * Nodes' pfns can overlap.
+ * We know some arch can have a nodes layout such as
+ * -------------pfn-------------->
+ * N0 | N1 | N2 | N0 | N1 | N2|....
+ */
+ if (pfn_to_nid(pfn) != nid)
+ continue;
+ if (init_section_page_cgroup(pfn, nid))
+ goto oom;
+ }
}
+ hotplug_memory_notifier(page_cgroup_callback, 0);
printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
- printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
- " want memory cgroups\n");
+ printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
+ "don't want memory cgroups\n");
+ return;
+oom:
+ printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
+ panic("Out of memory");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0eb463e..23295f65 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -38,9 +38,8 @@
* in arch-dependent flush_dcache_mmap_lock,
* within inode_wb_list_lock in __sync_single_inode)
*
- * (code doesn't rely on that order so it could be switched around)
- * ->tasklist_lock
- * anon_vma->mutex (memory_failure, collect_procs_anon)
+ * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
+ * ->tasklist_lock
* pte map lock
*/
@@ -112,9 +111,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
kmem_cache_free(anon_vma_cachep, anon_vma);
}
-static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
- return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+ return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}
static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +158,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
struct mm_struct *mm = vma->vm_mm;
struct anon_vma *allocated;
- avc = anon_vma_chain_alloc();
+ avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_enomem;
@@ -200,6 +199,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
return -ENOMEM;
}
+/*
+ * This is a useful helper function for locking the anon_vma root as
+ * we traverse the vma->anon_vma_chain, looping over anon_vma's that
+ * have the same vma.
+ *
+ * Such anon_vma's should have the same root, so you'd expect to see
+ * just a single mutex_lock for the whole traversal.
+ */
+static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
+{
+ struct anon_vma *new_root = anon_vma->root;
+ if (new_root != root) {
+ if (WARN_ON_ONCE(root))
+ mutex_unlock(&root->mutex);
+ root = new_root;
+ mutex_lock(&root->mutex);
+ }
+ return root;
+}
+
+static inline void unlock_anon_vma_root(struct anon_vma *root)
+{
+ if (root)
+ mutex_unlock(&root->mutex);
+}
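These two helpers batch lock acquisition: the lock is only dropped and re-taken when the next anon_vma hangs off a different root. A generic userspace sketch of the same idea (the bucket structure and sample data are made up):

#include <pthread.h>
#include <stdio.h>

struct bucket {
        pthread_mutex_t lock;
        int items;
};

/* Re-acquire only when we move to a bucket guarded by a different mutex. */
static struct bucket *lock_root(struct bucket *held, struct bucket *next)
{
        if (next != held) {
                if (held)
                        pthread_mutex_unlock(&held->lock);
                pthread_mutex_lock(&next->lock);
                held = next;
        }
        return held;
}

static void unlock_root(struct bucket *held)
{
        if (held)
                pthread_mutex_unlock(&held->lock);
}

int main(void)
{
        struct bucket a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct bucket *order[] = { &a, &a, &a };        /* all share one lock */
        struct bucket *held = NULL;
        unsigned i;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
                held = lock_root(held, order[i]);
                order[i]->items++;                      /* work under the lock */
        }
        unlock_root(held);                              /* single unlock at the end */
        printf("items=%d\n", a.items);
        return 0;
}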
+
static void anon_vma_chain_link(struct vm_area_struct *vma,
struct anon_vma_chain *avc,
struct anon_vma *anon_vma)
@@ -208,13 +233,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
avc->anon_vma = anon_vma;
list_add(&avc->same_vma, &vma->anon_vma_chain);
- anon_vma_lock(anon_vma);
/*
* It's critical to add new vmas to the tail of the anon_vma,
* see comment in huge_memory.c:__split_huge_page().
*/
list_add_tail(&avc->same_anon_vma, &anon_vma->head);
- anon_vma_unlock(anon_vma);
}
/*
@@ -224,13 +247,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
+ struct anon_vma *root = NULL;
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
- avc = anon_vma_chain_alloc();
- if (!avc)
- goto enomem_failure;
- anon_vma_chain_link(dst, avc, pavc->anon_vma);
+ struct anon_vma *anon_vma;
+
+ avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+ if (unlikely(!avc)) {
+ unlock_anon_vma_root(root);
+ root = NULL;
+ avc = anon_vma_chain_alloc(GFP_KERNEL);
+ if (!avc)
+ goto enomem_failure;
+ }
+ anon_vma = pavc->anon_vma;
+ root = lock_anon_vma_root(root, anon_vma);
+ anon_vma_chain_link(dst, avc, anon_vma);
}
+ unlock_anon_vma_root(root);
return 0;
enomem_failure:
@@ -263,7 +297,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
- avc = anon_vma_chain_alloc();
+ avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
@@ -280,7 +314,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
get_anon_vma(anon_vma->root);
/* Mark this anon_vma as the one where our new (COWed) pages go. */
vma->anon_vma = anon_vma;
+ anon_vma_lock(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
+ anon_vma_unlock(anon_vma);
return 0;
@@ -291,36 +327,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
return -ENOMEM;
}
-static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
-{
- struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
- int empty;
-
- /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
- if (!anon_vma)
- return;
-
- anon_vma_lock(anon_vma);
- list_del(&anon_vma_chain->same_anon_vma);
-
- /* We must garbage collect the anon_vma if it's empty */
- empty = list_empty(&anon_vma->head);
- anon_vma_unlock(anon_vma);
-
- if (empty)
- put_anon_vma(anon_vma);
-}
-
void unlink_anon_vmas(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc, *next;
+ struct anon_vma *root = NULL;
/*
* Unlink each anon_vma chained to the VMA. This list is ordered
* from newest to oldest, ensuring the root anon_vma gets freed last.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
- anon_vma_unlink(avc);
+ struct anon_vma *anon_vma = avc->anon_vma;
+
+ root = lock_anon_vma_root(root, anon_vma);
+ list_del(&avc->same_anon_vma);
+
+ /*
+ * Leave empty anon_vmas on the list - we'll need
+ * to free them outside the lock.
+ */
+ if (list_empty(&anon_vma->head))
+ continue;
+
+ list_del(&avc->same_vma);
+ anon_vma_chain_free(avc);
+ }
+ unlock_anon_vma_root(root);
+
+ /*
+ * Iterate the list once more, it now only contains empty and unlinked
+ * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
+ * needing to acquire the anon_vma->root->mutex.
+ */
+ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+ struct anon_vma *anon_vma = avc->anon_vma;
+
+ put_anon_vma(anon_vma);
+
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
diff --git a/mm/shmem.c b/mm/shmem.c
index d221a1c..fcedf54 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)
} while (next);
}
-static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long idx;
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
spinlock_t *punch_lock;
unsigned long upper_limit;
+ truncate_inode_pages_range(inode->i_mapping, start, end);
+
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (idx >= info->next_index)
@@ -738,16 +740,8 @@ done2:
* lowered next_index. Also, though shmem_getpage checks
* i_size before adding to cache, no recheck after: so fix the
* narrow window there too.
- *
- * Recalling truncate_inode_pages_range and unmap_mapping_range
- * every time for punch_hole (which never got a chance to clear
- * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
- * yet hardly ever necessary: try to optimize them out later.
*/
truncate_inode_pages_range(inode->i_mapping, start, end);
- if (punch_hole)
- unmap_mapping_range(inode->i_mapping, start,
- end - start, 1);
}
spin_lock(&info->lock);
@@ -766,22 +760,23 @@ done2:
shmem_free_pages(pages_to_free.next);
}
}
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
-static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
+static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- loff_t newsize = attr->ia_size;
int error;
error = inode_change_ok(inode, attr);
if (error)
return error;
- if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
- && newsize != inode->i_size) {
+ if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
+ loff_t oldsize = inode->i_size;
+ loff_t newsize = attr->ia_size;
struct page *page = NULL;
- if (newsize < inode->i_size) {
+ if (newsize < oldsize) {
/*
* If truncating down to a partial page, then
* if that page is already allocated, hold it
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
spin_unlock(&info->lock);
}
}
-
- /* XXX(truncate): truncate_setsize should be called last */
- truncate_setsize(inode, newsize);
+ if (newsize != oldsize) {
+ i_size_write(inode, newsize);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ }
+ if (newsize < oldsize) {
+ loff_t holebegin = round_up(newsize, PAGE_SIZE);
+ unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+ shmem_truncate_range(inode, newsize, (loff_t)-1);
+ /* unmap again to remove racily COWed private pages */
+ unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+ }
if (page)
page_cache_release(page);
- shmem_truncate_range(inode, newsize, (loff_t)-1);
}
setattr_copy(inode, attr);
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)
struct shmem_xattr *xattr, *nxattr;
if (inode->i_mapping->a_ops == &shmem_aops) {
- truncate_inode_pages(inode->i_mapping, 0);
shmem_unacct_size(info->flags, inode->i_size);
inode->i_size = 0;
shmem_truncate_range(inode, 0, (loff_t)-1);
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {
};
static const struct inode_operations shmem_inode_operations = {
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.check_acl = generic_check_acl,
#endif
};
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.check_acl = generic_check_acl,
#endif
};
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
return 0;
}
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+{
+ truncate_inode_pages_range(inode->i_mapping, start, end);
+}
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
* mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
+{
+ return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
diff --git a/mm/slab.c b/mm/slab.c
index bcfa498..d96e223 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
*/
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+ void *caller)
{
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+ objp = cache_free_debugcheck(cachep, objp, caller);
kmemcheck_slab_free(cachep, objp, obj_size(cachep));
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
- __cache_free(cachep, objp);
+ __cache_free(cachep, objp, __builtin_return_address(0));
local_irq_restore(flags);
trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
- __cache_free(c, (void *)objp);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
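The slab change threads the caller's address through __cache_free() so debug reports name the external entry point (kmem_cache_free()/kfree()) rather than the internal helper. A minimal userspace analogue of that idiom, assuming a GCC/Clang toolchain for __builtin_return_address(); do_free(), my_free() and my_kfree() are made-up names:

#include <stdio.h>
#include <stdlib.h>

/* Internal free path: records which external entry point freed the object. */
static void do_free(void *obj, void *caller)
{
	fprintf(stderr, "free %p from caller %p\n", obj, caller);
	free(obj);
}

/* Exported entry points capture their own return address and pass it down,
 * so diagnostics point at the user of the API, not at do_free() itself. */
void my_free(void *obj)
{
	do_free(obj, __builtin_return_address(0));
}

void my_kfree(const void *obj)
{
	do_free((void *)obj, __builtin_return_address(0));
}

int main(void)
{
	my_free(malloc(16));
	my_kfree(malloc(32));
	return 0;
}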
diff --git a/mm/slub.c b/mm/slub.c
index 7be0223..35f351f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
-#ifdef CONFIG_CMPXCHG_LOCAL
/*
- * Must align to double word boundary for the double cmpxchg instructions
- * to work.
+ * Must align to double word boundary for the double cmpxchg
+ * instructions to work; see __pcpu_double_call_return_bool().
*/
- s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
- /* Regular alignment is sufficient */
- s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+ s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+ 2 * sizeof(void *));
if (!s->cpu_slab)
return 0;
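The slub hunk drops the CONFIG_CMPXCHG_LOCAL split and always allocates the per-cpu structure with 2 * sizeof(void *) alignment, since the double-word cmpxchg path needs it. A small C11 sketch of requesting that alignment explicitly; struct slot is a hypothetical stand-in for kmem_cache_cpu:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct slot {
	void *freelist;
	unsigned long tid;
};

int main(void)
{
	/* Double-word compare-and-exchange (e.g. cmpxchg16b) requires the
	 * operand to be aligned to twice the pointer size; ask for exactly
	 * that, unconditionally, as the slub change above now does. */
	size_t align = 2 * sizeof(void *);
	struct slot *s = aligned_alloc(align, sizeof(*s));

	if (!s)
		return 1;
	printf("slot at %p, aligned: %d\n", (void *)s,
	       (int)((uintptr_t)s % align == 0));
	free(s);
	return 0;
}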
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d537d29..ff8dc1a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
-#include <linux/shm.h>
+#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
diff --git a/mm/thrash.c b/mm/thrash.c
index 2372d4e..fabf2d0 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -21,14 +21,40 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
+#include <linux/memcontrol.h>
+
+#include <trace/events/vmscan.h>
+
+#define TOKEN_AGING_INTERVAL (0xFF)
static DEFINE_SPINLOCK(swap_token_lock);
struct mm_struct *swap_token_mm;
+struct mem_cgroup *swap_token_memcg;
static unsigned int global_faults;
+static unsigned int last_aging;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = try_get_mem_cgroup_from_mm(mm);
+ if (memcg)
+ css_put(mem_cgroup_css(memcg));
+
+ return memcg;
+}
+#else
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+ return NULL;
+}
+#endif
void grab_swap_token(struct mm_struct *mm)
{
int current_interval;
+ unsigned int old_prio = mm->token_priority;
global_faults++;
@@ -38,40 +64,81 @@ void grab_swap_token(struct mm_struct *mm)
return;
/* First come first served */
- if (swap_token_mm == NULL) {
- mm->token_priority = mm->token_priority + 2;
- swap_token_mm = mm;
- goto out;
+ if (!swap_token_mm)
+ goto replace_token;
+
+ if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
+ swap_token_mm->token_priority /= 2;
+ last_aging = global_faults;
}
- if (mm != swap_token_mm) {
- if (current_interval < mm->last_interval)
- mm->token_priority++;
- else {
- if (likely(mm->token_priority > 0))
- mm->token_priority--;
- }
- /* Check if we deserve the token */
- if (mm->token_priority > swap_token_mm->token_priority) {
- mm->token_priority += 2;
- swap_token_mm = mm;
- }
- } else {
- /* Token holder came in again! */
+ if (mm == swap_token_mm) {
mm->token_priority += 2;
+ goto update_priority;
+ }
+
+ if (current_interval < mm->last_interval)
+ mm->token_priority++;
+ else {
+ if (likely(mm->token_priority > 0))
+ mm->token_priority--;
}
+ /* Check if we deserve the token */
+ if (mm->token_priority > swap_token_mm->token_priority)
+ goto replace_token;
+
+update_priority:
+ trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
+
out:
mm->faultstamp = global_faults;
mm->last_interval = current_interval;
spin_unlock(&swap_token_lock);
+ return;
+
+replace_token:
+ mm->token_priority += 2;
+ trace_replace_swap_token(swap_token_mm, mm);
+ swap_token_mm = mm;
+ swap_token_memcg = swap_token_memcg_from_mm(mm);
+ last_aging = global_faults;
+ goto out;
}
/* Called on process exit. */
void __put_swap_token(struct mm_struct *mm)
{
spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm))
+ if (likely(mm == swap_token_mm)) {
+ trace_put_swap_token(swap_token_mm);
swap_token_mm = NULL;
+ swap_token_memcg = NULL;
+ }
spin_unlock(&swap_token_lock);
}
+
+static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
+{
+ if (!a)
+ return true;
+ if (!b)
+ return true;
+ if (a == b)
+ return true;
+ return false;
+}
+
+void disable_swap_token(struct mem_cgroup *memcg)
+{
+ /* memcg reclaim doesn't disable an unrelated mm's token. */
+ if (match_memcg(memcg, swap_token_memcg)) {
+ spin_lock(&swap_token_lock);
+ if (match_memcg(memcg, swap_token_memcg)) {
+ trace_disable_swap_token(swap_token_mm);
+ swap_token_mm = NULL;
+ swap_token_memcg = NULL;
+ }
+ spin_unlock(&swap_token_lock);
+ }
+}
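The reworked grab_swap_token() adds periodic aging: every TOKEN_AGING_INTERVAL global faults the holder's priority is halved, and the token moves when a contender's priority exceeds it. The sketch below is a loose, single-threaded userspace model of just that aging/handover logic; struct task and fault() are invented names, and the interval and memcg bookkeeping are omitted:

#include <stdio.h>

#define TOKEN_AGING_INTERVAL 0xFF

struct task {
	const char *name;
	unsigned int prio;
};

static struct task *holder;
static unsigned int global_faults, last_aging;

/* Simplified take on the swap-token logic: age the holder's priority
 * periodically and hand the token over when a faulting task beats it. */
static void fault(struct task *t)
{
	global_faults++;

	if (!holder)
		goto replace;

	if (global_faults - last_aging > TOKEN_AGING_INTERVAL) {
		holder->prio /= 2;
		last_aging = global_faults;
	}

	if (t == holder) {
		t->prio += 2;
		return;
	}

	if (++t->prio <= holder->prio)
		return;
replace:
	t->prio += 2;
	holder = t;
	last_aging = global_faults;
	printf("token -> %s (prio %u) at fault %u\n",
	       t->name, t->prio, global_faults);
}

int main(void)
{
	struct task a = { "a", 0 }, b = { "b", 0 };
	unsigned int i;

	fault(&a);
	for (i = 0; i < 1000; i++)
		fault(&b);	/* b eventually out-prioritises the aged holder */
	return 0;
}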
diff --git a/mm/truncate.c b/mm/truncate.c
index 3a29a61..e13f22e 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
* @lstart: offset from which to truncate
*
* Called under (and serialised by) inode->i_mutex.
+ *
+ * Note: When this function returns, there can be a page in the process of
+ * deletion (inside __delete_from_page_cache()) in the specified range. Thus
+ * mapping->nrpages can be non-zero when this function returns even after
+ * truncation of the whole mapping.
*/
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
@@ -603,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
return 0;
}
EXPORT_SYMBOL(vmtruncate);
+
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole) -
+ * we should return failure right now.
+ */
+ if (!inode->i_op->truncate_range)
+ return -ENOSYS;
+
+ mutex_lock(&inode->i_mutex);
+ down_write(&inode->i_alloc_sem);
+ unmap_mapping_range(mapping, offset, (end - offset), 1);
+ inode->i_op->truncate_range(inode, offset, end);
+ /* unmap again to remove racily COWed private pages */
+ unmap_mapping_range(mapping, offset, (end - offset), 1);
+ up_write(&inode->i_alloc_sem);
+ mutex_unlock(&inode->i_mutex);
+
+ return 0;
+}
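vmtruncate_range() is the path that services hole-punch requests such as madvise(MADV_REMOVE) on tmpfs-backed mappings, with the unmap/truncate/unmap-again sequence guarding against racily COWed private pages. A small userspace demonstration of the user-visible effect, assuming 4 KiB pages and a kernel/filesystem combination that supports MADV_REMOVE (a shared anonymous mapping is tmpfs-backed):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	/* A shared anonymous mapping is backed by shmem/tmpfs, so the
	 * hole punch below goes through the truncate_range path. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xaa, len);

	/* Punch a hole over the second page; its backing page is freed
	 * and reads return zeroes afterwards. Other filesystems may
	 * return EOPNOTSUPP here. */
	if (madvise(p + 4096, 4096, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");

	printf("byte after hole punch: %#x\n", p[4096] & 0xff);
	munmap(p, len);
	return 0;
}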
diff --git a/mm/vmscan.c b/mm/vmscan.c
index faa0a08..4f49535 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1124,8 +1124,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_lumpy_dirty++;
scan++;
} else {
- /* the page is freed already. */
- if (!page_count(cursor_page))
+ /*
+ * Check if the page is freed already.
+ *
+ * We can't use page_count() as that
+ * requires compound_head and we don't
+ * have a pin on the page here. If a
+ * page is tail, we may or may not
+ * have isolated the head, so assume
+ * it's not free; it would be tricky
+ * to track the head status without
+ * a page pin.
+ */
+ if (!PageTail(cursor_page) &&
+ !atomic_read(&cursor_page->_count))
continue;
break;
}
@@ -1983,14 +1995,13 @@ restart:
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
- unsigned long total_scanned = 0;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2005,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
continue;
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
+ /*
+ * This steals pages from memory cgroups over softlimit
+ * and returns the number of reclaimed pages and
+ * scanned pages. This works for global memory pressure
+ * and balancing, not for a memcg's limit.
+ */
+ nr_soft_scanned = 0;
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ sc->order, sc->gfp_mask,
+ &nr_soft_scanned);
+ sc->nr_reclaimed += nr_soft_reclaimed;
+ sc->nr_scanned += nr_soft_scanned;
+ /* need some check to avoid calling shrink_zone() again */
}
- nr_soft_scanned = 0;
- nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
- sc->order, sc->gfp_mask,
- &nr_soft_scanned);
- sc->nr_reclaimed += nr_soft_reclaimed;
- total_scanned += nr_soft_scanned;
-
shrink_zone(priority, zone, sc);
}
-
- return total_scanned;
}
static bool zone_reclaimable(struct zone *zone)
@@ -2081,8 +2096,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc->nr_scanned = 0;
if (!priority)
- disable_swap_token();
- total_scanned += shrink_zones(priority, zonelist, sc);
+ disable_swap_token(sc->mem_cgroup);
+ shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -2407,7 +2422,7 @@ loop_again:
/* The swap token gets in the way of swapout... */
if (!priority)
- disable_swap_token();
+ disable_swap_token(NULL);
all_zones_ok = 1;
balanced = 0;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index c7a581a..917ecb9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
grp->nr_vlans++;
if (ngrp) {
- if (ops->ndo_vlan_rx_register)
+ if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
ops->ndo_vlan_rx_register(real_dev, ngrp);
rcu_assign_pointer(real_dev->vlgrp, ngrp);
}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 41495dc2..fcc6846 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -23,6 +23,31 @@ bool vlan_do_receive(struct sk_buff **skbp)
return false;
skb->dev = vlan_dev;
+ if (skb->pkt_type == PACKET_OTHERHOST) {
+ /* Our lower layer thinks this is not local, let's make sure.
+ * This allows the VLAN to have a different MAC than the
+ * underlying device, and still route correctly. */
+ if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+ vlan_dev->dev_addr))
+ skb->pkt_type = PACKET_HOST;
+ }
+
+ if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ unsigned int offset = skb->data - skb_mac_header(skb);
+
+ /*
+ * vlan_insert_tag expects skb->data to point to the mac header,
+ * so move skb->data before calling it and restore the original
+ * position afterwards.
+ */
+ skb_push(skb, offset);
+ skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+ if (!skb)
+ return false;
+ skb_pull(skb, offset + VLAN_HLEN);
+ skb_reset_mac_len(skb);
+ }
+
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
skb->vlan_tci = 0;
@@ -31,22 +56,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
-
- switch (skb->pkt_type) {
- case PACKET_BROADCAST:
- break;
- case PACKET_MULTICAST:
+ if (skb->pkt_type == PACKET_MULTICAST)
rx_stats->rx_multicast++;
- break;
- case PACKET_OTHERHOST:
- /* Our lower layer thinks this is not local, let's make sure.
- * This allows the VLAN to have a different MAC than the
- * underlying device, and still route correctly. */
- if (!compare_ether_addr(eth_hdr(skb)->h_dest,
- vlan_dev->dev_addr))
- skb->pkt_type = PACKET_HOST;
- break;
- }
u64_stats_update_end(&rx_stats->syncp);
return true;
@@ -89,18 +100,13 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
}
EXPORT_SYMBOL(vlan_gro_frags);
-static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
- if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
- if (skb_cow(skb, skb_headroom(skb)) < 0)
- skb = NULL;
- if (skb) {
- /* Lifted from Gleb's VLAN code... */
- memmove(skb->data - ETH_HLEN,
- skb->data - VLAN_ETH_HLEN, 12);
- skb->mac_header += VLAN_HLEN;
- }
- }
+ if (skb_cow(skb, skb_headroom(skb)) < 0)
+ return NULL;
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+ skb_reset_mac_len(skb);
return skb;
}
@@ -161,7 +167,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
skb_pull_rcsum(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr);
- skb = vlan_check_reorder_header(skb);
+ skb = vlan_reorder_header(skb);
if (unlikely(!skb))
goto err_free;
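vlan_reorder_header() strips the 4-byte VLAN tag by sliding the two MAC addresses (2 * ETH_ALEN bytes) up by VLAN_HLEN, leaving an ordinary Ethernet header in front of the payload. A standalone sketch of that memmove on a hypothetical tagged frame:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define ETH_HLEN	14
#define VLAN_HLEN	4
#define VLAN_ETH_HLEN	(ETH_HLEN + VLAN_HLEN)

int main(void)
{
	/* dst MAC | src MAC | 0x8100 + TCI | encapsulated proto | payload */
	unsigned char frame[VLAN_ETH_HLEN + 4] = {
		1, 2, 3, 4, 5, 6,      7, 8, 9, 10, 11, 12,
		0x81, 0x00, 0x00, 0x05, 0x08, 0x00,
		0xde, 0xad, 0xbe, 0xef,
	};
	unsigned char *data = frame + VLAN_ETH_HLEN;	/* past the tagged header */

	/* As in vlan_reorder_header(): slide the two MAC addresses up by
	 * VLAN_HLEN so an untagged Ethernet header precedes the payload. */
	memmove(data - ETH_HLEN, data - VLAN_ETH_HLEN, 2 * ETH_ALEN);

	printf("first byte of rebuilt header: %u\n", frame[VLAN_HLEN]);
	return 0;
}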
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f13ddbf..77930aa 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
* command otherwise */
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
- /* Events for 1.2 and newer controllers */
- if (hdev->lmp_ver > 1) {
- events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
- }
+ /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
+ * any event mask for pre-1.2 devices */
+ if (hdev->lmp_ver <= 1)
+ return;
+
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18dc988..8248303 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
break;
}
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 386cfaf..1b10727 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd..cb4fb78 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
+ if (sco_pi(sk)->conn) {
+ sk->sk_state = BT_DISCONN;
+ sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+ hci_conn_put(sco_pi(sk)->conn->hcon);
+ sco_pi(sk)->conn->hcon = NULL;
+ } else
+ sco_chan_del(sk, ECONNRESET);
+ break;
+
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
- hci_conn_put(conn->hcon);
+
+ if (conn->hcon)
+ hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index a6b2f86..c188c80 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -243,6 +243,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
goto out;
np->dev = p->dev;
+ strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
err = __netpoll_setup(np);
if (err) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2f14eaf..29b9812 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip4_multicast_add_group(br, port, ih->group);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
goto out;
}
mld = (struct mld_msg *)skb_transport_header(skb2);
- BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
break;
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3fa1231..56149ec 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
+static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ return NULL;
+}
+
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
+ .cow_metrics = fake_cow_metrics,
};
/*
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 3a66b8c..c23979e 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
- if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+ if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
layer->id != 0) {
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6ea2b89..9cb627a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1144,6 +1144,13 @@ static void handle_osds_timeout(struct work_struct *work)
round_jiffies_relative(delay));
}
+static void complete_request(struct ceph_osd_request *req)
+{
+ if (req->r_safe_callback)
+ req->r_safe_callback(req, NULL);
+ complete_all(&req->r_safe_completion); /* fsync waiter */
+}
+
/*
* handle osd op reply. either call the callback if it is specified,
* or do the completion to wake up the waiting thread.
@@ -1226,11 +1233,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
else
complete_all(&req->r_completion);
- if (flags & CEPH_OSD_FLAG_ONDISK) {
- if (req->r_safe_callback)
- req->r_safe_callback(req, msg);
- complete_all(&req->r_safe_completion); /* fsync waiter */
- }
+ if (flags & CEPH_OSD_FLAG_ONDISK)
+ complete_request(req);
done:
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
@@ -1732,6 +1736,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
__cancel_request(req);
__unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
+ complete_request(req);
dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
return rc;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 9393078..9c58c1e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3114,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
- skb->mac_len = skb->network_header - skb->mac_header;
+ skb_reset_mac_len(skb);
pt_prev = NULL;
@@ -6178,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
+ /* Append NAPI poll list from offline CPU. */
+ if (!list_empty(&oldsd->poll_list)) {
+ list_splice_init(&oldsd->poll_list, &sd->poll_list);
+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@@ -6264,29 +6269,23 @@ err_name:
/**
* netdev_drivername - network driver for the device
* @dev: network device
- * @buffer: buffer for resulting name
- * @len: size of buffer
*
* Determine network driver for device.
*/
-char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
+const char *netdev_drivername(const struct net_device *dev)
{
const struct device_driver *driver;
const struct device *parent;
-
- if (len <= 0 || !buffer)
- return buffer;
- buffer[0] = 0;
+ const char *empty = "";
parent = dev->dev.parent;
-
if (!parent)
- return buffer;
+ return empty;
driver = parent->driver;
if (driver && driver->name)
- strlcpy(buffer, driver->name, len);
- return buffer;
+ return driver->name;
+ return empty;
}
static int __netdev_printk(const char *level, const struct net_device *dev,
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 11b98bc..33d2a1f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1179,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
#endif
}
-static const void *net_current_ns(void)
+static void *net_grab_current_ns(void)
{
- return current->nsproxy->net_ns;
+ struct net *ns = current->nsproxy->net_ns;
+#ifdef CONFIG_NET_NS
+ if (ns)
+ atomic_inc(&ns->passive);
+#endif
+ return ns;
}
static const void *net_initial_ns(void)
@@ -1196,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
struct kobj_ns_type_operations net_ns_type_operations = {
.type = KOBJ_NS_TYPE_NET,
- .current_ns = net_current_ns,
+ .grab_current_ns = net_grab_current_ns,
.netlink_ns = net_netlink_ns,
.initial_ns = net_initial_ns,
+ .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
-static void net_kobj_ns_exit(struct net *net)
-{
- kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
-}
-
-static struct pernet_operations kobj_net_ops = {
- .exit = net_kobj_ns_exit,
-};
-
-
#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
@@ -1339,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
int netdev_kobject_init(void)
{
kobj_ns_type_register(&net_ns_type_operations);
- register_pernet_subsys(&kobj_net_ops);
return class_register(&net_class);
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6c6b86d..ea489db 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -128,6 +128,7 @@ static __net_init int setup_net(struct net *net)
LIST_HEAD(net_exit_list);
atomic_set(&net->count, 1);
+ atomic_set(&net->passive, 1);
#ifdef NETNS_REFCNT_DEBUG
atomic_set(&net->use_count, 0);
@@ -210,6 +211,13 @@ static void net_free(struct net *net)
kmem_cache_free(net_cachep, net);
}
+void net_drop_ns(void *p)
+{
+ struct net *ns = p;
+ if (ns && atomic_dec_and_test(&ns->passive))
+ net_free(ns);
+}
+
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
struct net *net;
@@ -230,7 +238,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
}
mutex_unlock(&net_mutex);
if (rv < 0) {
- net_free(net);
+ net_drop_ns(net);
return ERR_PTR(rv);
}
return net;
@@ -286,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
- net_free(net);
+ net_drop_ns(net);
}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -310,19 +318,17 @@ struct net *get_net_ns_by_fd(int fd)
struct file *file;
struct net *net;
- net = ERR_PTR(-EINVAL);
file = proc_ns_fget(fd);
- if (!file)
- goto out;
+ if (IS_ERR(file))
+ return ERR_CAST(file);
ei = PROC_I(file->f_dentry->d_inode);
- if (ei->ns_ops != &netns_operations)
- goto out;
+ if (ei->ns_ops == &netns_operations)
+ net = get_net(ei->ns);
+ else
+ net = ERR_PTR(-EINVAL);
- net = get_net(ei->ns);
-out:
- if (file)
- fput(file);
+ fput(file);
return net;
}
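The net namespace now carries a second, "passive" count: the normal count keeps the namespace usable, while the passive count only pins the memory so weak holders (the sysfs kobject namespace tags) can drop their reference safely after the namespace is otherwise dead. A hedged userspace model of that two-counter scheme using C11 atomics; struct ns, ns_grab() and ns_drop() are illustrative names, not the kernel interfaces:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Two counts, as with struct net after this change: 'count' keeps the
 * object usable, 'passive' only keeps the memory allocated. */
struct ns {
	atomic_int count;
	atomic_int passive;
};

static struct ns *ns_alloc(void)
{
	struct ns *ns = malloc(sizeof(*ns));

	if (ns) {
		atomic_init(&ns->count, 1);
		atomic_init(&ns->passive, 1);
	}
	return ns;
}

static void ns_free(struct ns *ns)
{
	printf("freeing %p\n", (void *)ns);
	free(ns);
}

static void ns_drop(struct ns *ns)		/* like net_drop_ns() */
{
	if (ns && atomic_fetch_sub(&ns->passive, 1) == 1)
		ns_free(ns);
}

static struct ns *ns_grab(struct ns *ns)	/* like net_grab_current_ns() */
{
	if (ns)
		atomic_fetch_add(&ns->passive, 1);
	return ns;
}

int main(void)
{
	struct ns *ns = ns_alloc();
	struct ns *tag = ns_grab(ns);	/* weak/passive holder */

	ns_drop(ns);	/* cleanup (or failed setup) path */
	ns_drop(tag);	/* the last passive ref actually frees the memory */
	return 0;
}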
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2d7d6d4..18d9cbd 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -792,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
return -ENODEV;
}
+ if (ndev->master) {
+ printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
+ np->name, np->dev_name);
+ err = -EBUSY;
+ goto put;
+ }
+
if (!netif_running(ndev)) {
unsigned long atmost, atleast;
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab3..02548b2 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
pr_debug("%s\n", __func__);
if (!buf)
- goto out;
+ return -EMSGSIZE;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
pages * sizeof(uint32_t), buf);
mutex_unlock(&phy->pib_lock);
+ kfree(buf);
return genlmsg_end(msg, hdr);
nla_put_failure:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c19260..eae1f67 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -676,6 +676,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk2);
+ sock_rps_record_flow(sk2);
WARN_ON(!((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 6ffe94c..3267d38 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
return 0;
if (cc == len)
return 1;
- if (op->yes < 4)
+ if (op->yes < 4 || op->yes & 3)
return 0;
len -= op->yes;
bc += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
- const unsigned char *bc = bytecode;
+ const void *bc = bytecode;
int len = bytecode_len;
while (len > 0) {
- struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
+ const struct inet_diag_bc_op *op = bc;
//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
case INET_DIAG_BC_S_LE:
case INET_DIAG_BC_D_GE:
case INET_DIAG_BC_D_LE:
- if (op->yes < 4 || op->yes > len + 4)
- return -EINVAL;
case INET_DIAG_BC_JMP:
- if (op->no < 4 || op->no > len + 4)
+ if (op->no < 4 || op->no > len + 4 || op->no & 3)
return -EINVAL;
if (op->no < len &&
!valid_cc(bytecode, bytecode_len, len - op->no))
return -EINVAL;
break;
case INET_DIAG_BC_NOP:
- if (op->yes < 4 || op->yes > len + 4)
- return -EINVAL;
break;
default:
return -EINVAL;
}
+ if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+ return -EINVAL;
bc += op->yes;
len -= op->yes;
}
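The audit fix adds an alignment requirement (offset & 3) on top of the existing bounds checks, so a crafted filter can no longer make the kernel walk onto a misaligned op. A simplified, self-contained version of such an audit over a hypothetical op layout (the real inet_diag structures and checks differ in detail):

#include <stdio.h>

/* Hypothetical bytecode op, loosely modelled on the inet_diag filter ops:
 * 'yes' is the byte offset to the next op when the condition holds. */
struct bc_op {
	unsigned char code;
	unsigned char cc;
	unsigned short yes;
};

/* Audit in the spirit of inet_diag_bc_audit() after this fix: every jump
 * must be at least one op long, stay within the remaining buffer and be
 * 4-byte aligned, so a later walker never reads a misaligned "op". */
static int bc_audit(const void *bytecode, int len)
{
	const unsigned char *bc = bytecode;

	while (len > 0) {
		const struct bc_op *op = (const struct bc_op *)bc;

		if (op->yes < 4 || op->yes > len || op->yes & 3)
			return -1;
		bc += op->yes;
		len -= op->yes;
	}
	return 0;
}

int main(void)
{
	struct bc_op ok[2] = { { 0, 0, 4 }, { 0, 0, 4 } };
	struct bc_op bad = { 0, 0, 6 };	/* unaligned (and oversized) jump */

	printf("ok:  %d\n", bc_audit(ok, sizeof(ok)));
	printf("bad: %d\n", bc_audit(&bad, sizeof(bad)));
	return 0;
}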
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 98af369..a8024ea 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -799,7 +799,9 @@ static int __ip_append_data(struct sock *sk,
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
- exthdrlen = transhdrlen ? rt->dst.header_len : 0;
+ skb = skb_peek_tail(queue);
+
+ exthdrlen = !skb ? rt->dst.header_len : 0;
length += exthdrlen;
transhdrlen += exthdrlen;
mtu = cork->fragsize;
@@ -825,8 +827,6 @@ static int __ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
- skb = skb_peek_tail(queue);
-
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d2c1311..5c9b9d9 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
else
pmsg->outdev_name[0] = '\0';
- if (entry->indev && entry->skb->dev) {
+ if (entry->indev && entry->skb->dev &&
+ entry->skb->mac_header != entry->skb->network_header) {
pmsg->hw_type = entry->skb->dev->type;
pmsg->hw_addrlen = dev_parse_header(entry->skb,
pmsg->hw_addr);
@@ -402,7 +403,8 @@ ipq_dev_drop(int ifindex)
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
- int status, type, pid, flags, nlmsglen, skblen;
+ int status, type, pid, flags;
+ unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
skblen = skb->len;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7647438..24e556e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
const struct xt_entry_target *t;
if (!ip_checkentry(&e->ip)) {
- duprintf("ip check failed %p %s.\n", e, par->match->name);
+ duprintf("ip check failed %p %s.\n", e, name);
return -EINVAL;
}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index d609ac3..5c9e97c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
* error messages (RELATED) and information requests (see below) */
if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
(ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY))
+ ctinfo == IP_CT_RELATED_REPLY))
return XT_CONTINUE;
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
ct->mark = hash;
break;
case IP_CT_RELATED:
- case IP_CT_RELATED+IP_CT_IS_REPLY:
+ case IP_CT_RELATED_REPLY:
/* FIXME: we don't handle expectations at the
* moment. they can arrive on a different node than
* the master connection (e.g. FTP passive mode) */
case IP_CT_ESTABLISHED:
- case IP_CT_ESTABLISHED+IP_CT_IS_REPLY:
+ case IP_CT_ESTABLISHED_REPLY:
break;
default:
break;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d2ed9dc..9931152 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
nat = nfct_nat(ct);
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+ ctinfo == IP_CT_RELATED_REPLY));
/* Source address is 0.0.0.0 - locally generated packet that is
* probably not supposed to be masqueraded.
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c7..2b57e52 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
static inline bool match_ip(const struct sk_buff *skb,
const struct ipt_ecn_info *einfo)
{
- return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
+ return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
+ !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
}
static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
- if (ip_hdr(skb)->protocol != IPPROTO_TCP)
- return false;
if (!match_tcp(skb, info, &par->hotdrop))
return false;
}
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
- ip->proto != IPPROTO_TCP) {
+ (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
pr_info("cannot match TCP bits in rule for non-tcp packets\n");
return -EINVAL;
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5a03c02..de9da21 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
goto out;
help = nfct_help(ct);
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
return ret;
}
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+ /* adjust seqs for loopback traffic only in outgoing direction */
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
typeof(nf_nat_seq_adjust_hook) seq_adjust;
seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7404bde..ab5b27a 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
/* Update skb to refer to this connection */
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
skb->nfctinfo = *ctinfo;
- return -NF_ACCEPT;
+ return NF_ACCEPT;
}
/* Small and modified version of icmp_rcv */
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 9c71b27..3346de5 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
/* Must be RELATED */
NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
- skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
+ skb->nfctinfo == IP_CT_RELATED_REPLY);
/* Redirects on non-null nats must be dropped, else they'll
start talking to each other without our translation, and be
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 99cfa28..ebc5f88 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -160,7 +160,7 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
if (skb->ip_summed != CHECKSUM_PARTIAL) {
if (!(rt->rt_flags & RTCF_LOCAL) &&
- skb->dev->features & NETIF_F_V4_CSUM) {
+ (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb) +
skb_network_offset(skb) +
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 21c3042..733c9ab 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
/* Connection must be valid and new. */
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+ ctinfo == IP_CT_RELATED_REPLY));
NF_CT_ASSERT(par->out != NULL);
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 7317bdf..483b76d 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
switch (ctinfo) {
case IP_CT_RELATED:
- case IP_CT_RELATED+IP_CT_IS_REPLY:
+ case IP_CT_RELATED_REPLY:
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(ct, ctinfo,
hooknum, skb))
@@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
default:
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
- ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
+ ctinfo == IP_CT_ESTABLISHED_REPLY);
}
return nf_nat_packet(ct, ctinfo, hooknum, skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9aaa671..39b403f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -41,7 +41,6 @@
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/ping.h>
-#include <net/icmp.h>
#include <net/udp.h>
#include <net/route.h>
#include <net/inet_common.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 52b0b95..aa13ef1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1316,6 +1316,23 @@ reject_redirect:
;
}
+static bool peer_pmtu_expired(struct inet_peer *peer)
+{
+ unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+
+ return orig &&
+ time_after_eq(jiffies, orig) &&
+ cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+}
+
+static bool peer_pmtu_cleaned(struct inet_peer *peer)
+{
+ unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+
+ return orig &&
+ cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+}
+
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
@@ -1331,14 +1348,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
rt_genid(dev_net(dst->dev)));
rt_del(hash, rt);
ret = NULL;
- } else if (rt->peer &&
- rt->peer->pmtu_expires &&
- time_after_eq(jiffies, rt->peer->pmtu_expires)) {
- unsigned long orig = rt->peer->pmtu_expires;
-
- if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
- dst_metric_set(dst, RTAX_MTU,
- rt->peer->pmtu_orig);
+ } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
+ dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
}
}
return ret;
@@ -1531,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
- unsigned long expires = peer->pmtu_expires;
+ unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
+ if (!expires)
+ return;
if (time_before(jiffies, expires)) {
u32 orig_dst_mtu = dst_mtu(dst);
if (peer->pmtu_learned < orig_dst_mtu) {
@@ -1555,10 +1568,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
rt_bind_peer(rt, rt->rt_dst, 1);
peer = rt->peer;
if (peer) {
+ unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
+
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
- if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
- unsigned long pmtu_expires;
+ if (!pmtu_expires || mtu < peer->pmtu_learned) {
pmtu_expires = jiffies + ip_rt_mtu_expires;
if (!pmtu_expires)
@@ -1612,13 +1626,14 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
rt_bind_peer(rt, rt->rt_dst, 0);
peer = rt->peer;
- if (peer && peer->pmtu_expires)
+ if (peer) {
check_peer_pmtu(dst, peer);
- if (peer && peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- if (check_peer_redir(dst, peer))
- return NULL;
+ if (peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ if (check_peer_redir(dst, peer))
+ return NULL;
+ }
}
rt->rt_peer_genid = rt_peer_genid();
@@ -1649,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt &&
- rt->peer &&
- rt->peer->pmtu_expires) {
- unsigned long orig = rt->peer->pmtu_expires;
-
- if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
- dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
- }
+ if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
+ dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1770,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
sizeof(u32) * RTAX_MAX);
dst_init_metrics(&rt->dst, peer->metrics, false);
- if (peer->pmtu_expires)
- check_peer_pmtu(&rt->dst, peer);
+ check_peer_pmtu(&rt->dst, peer);
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@@ -1894,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
- err = 0;
- if (IS_ERR(rth))
- err = PTR_ERR(rth);
+ return IS_ERR(rth) ? PTR_ERR(rth) : 0;
e_nobufs:
return -ENOBUFS;
@@ -2775,7 +2781,8 @@ static int rt_fill_info(struct net *net,
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
- long expires;
+ long expires = 0;
+ const struct inet_peer *peer = rt->peer;
u32 id = 0, ts = 0, tsage = 0, error;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@@ -2823,15 +2830,16 @@ static int rt_fill_info(struct net *net,
NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
error = rt->dst.error;
- expires = (rt->peer && rt->peer->pmtu_expires) ?
- rt->peer->pmtu_expires - jiffies : 0;
- if (rt->peer) {
+ if (peer) {
inet_peer_refcheck(rt->peer);
- id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
- if (rt->peer->tcp_ts_stamp) {
- ts = rt->peer->tcp_ts;
- tsage = get_seconds() - rt->peer->tcp_ts_stamp;
+ id = atomic_read(&peer->ip_id_count) & 0xffff;
+ if (peer->tcp_ts_stamp) {
+ ts = peer->tcp_ts;
+ tsage = get_seconds() - peer->tcp_ts_stamp;
}
+ expires = ACCESS_ONCE(peer->pmtu_expires);
+ if (expires)
+ expires -= jiffies;
}
if (rt_is_input_route(rt)) {
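peer_pmtu_expired() and peer_pmtu_cleaned() centralise the cmpxchg idiom used throughout this file: read the expiry once, and only the caller that successfully swaps it back to zero restores the original MTU, so the reset happens exactly once even under concurrency. A tiny C11-atomics sketch of that one-shot claim (claim_expiry() is an invented name):

#include <stdatomic.h>
#include <stdio.h>

/* One-shot claim of a deadline, as in peer_pmtu_expired(): whichever
 * caller swaps the non-zero expiry back to 0 "wins" and is the only
 * one allowed to restore the original MTU. */
static _Atomic unsigned long pmtu_expires;

static int claim_expiry(unsigned long now)
{
	unsigned long orig = atomic_load(&pmtu_expires);

	return orig && now >= orig &&
	       atomic_compare_exchange_strong(&pmtu_expires, &orig, 0);
}

int main(void)
{
	atomic_store(&pmtu_expires, 100);

	printf("too early: %d\n", claim_expiry(50));	/* 0: not expired yet */
	printf("first:     %d\n", claim_expiry(150));	/* 1: this caller wins */
	printf("second:    %d\n", claim_expiry(200));	/* 0: already claimed */
	return 0;
}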
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7d6671..708dc20 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1589,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
+ sock_rps_save_rxhash(nsk, skb->rxhash);
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b7919f9..d450a2f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
+
+ if (addr->sin6_family != AF_INET6)
+ return -EINVAL;
+
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
return -EINVAL;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 413ab07..2493948 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
else
pmsg->outdev_name[0] = '\0';
- if (entry->indev && entry->skb->dev) {
+ if (entry->indev && entry->skb->dev &&
+ entry->skb->mac_header != entry->skb->network_header) {
pmsg->hw_type = entry->skb->dev->type;
pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
}
@@ -403,7 +404,8 @@ ipq_dev_drop(int ifindex)
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
- int status, type, pid, flags, nlmsglen, skblen;
+ int status, type, pid, flags;
+ unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
skblen = skb->len;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c8af58b..4111050 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
goto out;
help = nfct_help(ct);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 1df3c8b..7c05e7e 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
/* Update skb to refer to this connection */
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
skb->nfctinfo = *ctinfo;
- return -NF_ACCEPT;
+ return NF_ACCEPT;
}
static int
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fd287..87551ca 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* the new socket..
*/
if(nsk != sk) {
+ sock_rps_save_rxhash(nsk, skb->rxhash);
if (tcp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb)
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 3647753..f876eed 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
iriap_watchdog_timer_expired);
}
+static struct lock_class_key irias_objects_key;
+
/*
* Function iriap_init (void)
*
@@ -114,6 +116,9 @@ int __init iriap_init(void)
return -ENOMEM;
}
+ lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
+ "irias_objects");
+
/*
* Register some default services for IrLMP
*/
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index b8dbae8..7613013 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
*/
pd->net = get_net_ns_by_pid(current->pid);
if (IS_ERR(pd->net)) {
- rc = -PTR_ERR(pd->net);
+ rc = PTR_ERR(pd->net);
goto err_free_pd;
}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 421eaa6..56c24ca 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->u.ibss.mtx);
+ sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+ memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+ sdata->u.ibss.ssid_len = 0;
+
active_ibss = ieee80211_sta_active_ibss(sdata);
if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree_skb(skb);
skb_queue_purge(&sdata->skb_queue);
- memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
- sdata->u.ibss.ssid_len = 0;
del_timer_sync(&sdata->u.ibss.timer);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2025af5..090b0ec 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -775,9 +775,6 @@ struct ieee80211_local {
int tx_headroom; /* required headroom for hardware/radiotap */
- /* count for keys needing tailroom space allocation */
- int crypto_tx_tailroom_needed_cnt;
-
/* Tasklet and skb queue to process calls from IRQ mode. All frames
* added to skb_queue will be processed, but frames in
* skb_queue_unreliable may be dropped if the total length of these
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 49d4f86..dee30ae 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1145,6 +1145,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ IEEE80211_ENCRYPT_HEADROOM;
ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+ ret = dev_alloc_name(ndev, ndev->name);
+ if (ret < 0)
+ goto fail;
+
ieee80211_assign_perm_addr(local, ndev, type);
memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 31afd712..f825e2f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -101,11 +101,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
- key->local->crypto_tx_tailroom_needed_cnt--;
-
return 0;
}
@@ -161,10 +156,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
- key->local->crypto_tx_tailroom_needed_cnt++;
}
void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -403,10 +394,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
ieee80211_aes_key_free(key->u.ccmp.tfm);
if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
- if (key->local) {
+ if (key->local)
ieee80211_debugfs_key_remove(key);
- key->local->crypto_tx_tailroom_needed_cnt--;
- }
kfree(key);
}
@@ -468,8 +457,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_debugfs_key_add(key);
- key->local->crypto_tx_tailroom_needed_cnt++;
-
ret = ieee80211_key_enable_hw_accel(key);
mutex_unlock(&sdata->local->key_mtx);
@@ -511,12 +498,8 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
- sdata->local->crypto_tx_tailroom_needed_cnt = 0;
-
- list_for_each_entry(key, &sdata->key_list, list) {
- sdata->local->crypto_tx_tailroom_needed_cnt++;
+ list_for_each_entry(key, &sdata->key_list, list)
ieee80211_key_enable_hw_accel(key);
- }
mutex_unlock(&sdata->local->key_mtx);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 456cccf..d595265 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CSA);
-
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
}
- ieee80211_wake_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CSA);
-
ht_opmode = le16_to_cpu(hti->operation_mode);
/* if bss configuration changed store the new one */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 64e0f75..3104c84 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1480,7 +1480,12 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
{
int tail_need = 0;
- if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
+ /*
+ * This could be optimised: devices that do full hardware
+ * crypto (including TKIP MMIC) need no tailroom... but we
+ * have no drivers for such devices currently.
+ */
+ if (may_encrypt) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 8041bef..42aa64b 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -767,7 +767,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
- ret = IPSET_ERR_BUSY;
+ ret = -IPSET_ERR_BUSY;
goto out;
}
}
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 4743e54..565a7c5 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -146,8 +146,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet4_elem data =
- { .cidr = h->nets[0].cidr || HOST_MASK };
+ struct hash_ipportnet4_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
@@ -394,8 +395,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet6_elem data =
- { .cidr = h->nets[0].cidr || HOST_MASK };
+ struct hash_ipportnet6_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
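
The two hunks above, and the matching ones in ip_set_hash_net.c and
ip_set_hash_netport.c further down, replace a logical OR with a conditional
expression when seeding the element's cidr. A minimal standalone C sketch of
why the original initialiser was wrong; the variable names and the value 24
are illustrative only:

#include <stdio.h>

#define HOST_MASK 32	/* full-length IPv4 prefix, as in the ipset code */

int main(void)
{
	unsigned char stored_cidr = 24;	/* what h->nets[0].cidr might hold */

	/* Buggy form: "a || b" is a boolean test, so it yields only 0 or 1. */
	unsigned char wrong = stored_cidr || HOST_MASK;

	/* Fixed form: use the stored value, fall back to HOST_MASK when unset. */
	unsigned char right = stored_cidr ? stored_cidr : HOST_MASK;

	printf("buggy: %u, fixed: %u\n", wrong, right);	/* buggy: 1, fixed: 24 */
	return 0;
}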
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c4db202..2aeeabc 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -131,7 +131,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
+ struct hash_net4_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
@@ -296,7 +298,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
+ struct hash_net6_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index d2a4036..e50d9bb 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -144,7 +144,8 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = {
- .cidr = h->nets[0].cidr || HOST_MASK };
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
@@ -357,7 +358,8 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = {
- .cidr = h->nets[0].cidr || HOST_MASK };
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ };
if (data.cidr == 0)
return -EINVAL;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index bf28ac2..782db27 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
if (cp->control)
ip_vs_control_del(cp);
- if (cp->flags & IP_VS_CONN_F_NFCT)
+ if (cp->flags & IP_VS_CONN_F_NFCT) {
ip_vs_conn_drop_conntrack(cp);
+ /* Do not access conntracks during subsys cleanup
+ * because nf_conntrack_find_get can not be used after
+ * conntrack cleanup for the net.
+ */
+ smp_rmb();
+ if (ipvs->enable)
+ ip_vs_conn_drop_conntrack(cp);
+ }
ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index bfa808f..24c28d2 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1772,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
- .priority = 99,
+ .priority = NF_IP_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
@@ -1782,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
- .priority = 101,
+ .priority = NF_IP_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
@@ -1790,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
- .priority = -99,
+ .priority = NF_IP_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
@@ -1798,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
- .priority = -98,
+ .priority = NF_IP_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1824,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_IN,
- .priority = 99,
+ .priority = NF_IP6_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
@@ -1834,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_IN,
- .priority = 101,
+ .priority = NF_IP6_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
@@ -1842,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
- .priority = -99,
+ .priority = NF_IP6_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
@@ -1850,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_OUT,
- .priority = -98,
+ .priority = NF_IP6_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{
EnterFunction(2);
net_ipvs(net)->enable = 0; /* Disable packet reception */
+ smp_wmb();
__ip_vs_sync_cleanup(net);
LeaveFunction(2);
}
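
The cleanup hook above clears the per-net enable flag and then issues
smp_wmb(); the matching smp_rmb() sits in ip_vs_conn_expire() earlier in this
diff, just before the flag is tested. A hedged userspace analogue of that
pairing, using C11 fences to stand in for the kernel barriers; the names and
ordering sketch the intent rather than reproduce the IPVS code:

#include <stdatomic.h>
#include <stdio.h>

static int enable = 1;		/* stands in for ipvs->enable */
static int tables_torn_down;	/* stands in for the conntrack cleanup */

static void dev_cleanup(void)	/* writer side, cf. __ip_vs_dev_cleanup() */
{
	enable = 0;				    /* stop new conntrack accesses */
	atomic_thread_fence(memory_order_release); /* kernel: smp_wmb() */
	tables_torn_down = 1;			    /* teardown may now proceed */
}

static void conn_expire(void)	/* reader side, cf. ip_vs_conn_expire() */
{
	int torn = tables_torn_down;		    /* earlier load */
	atomic_thread_fence(memory_order_acquire); /* kernel: smp_rmb() */
	if (enable)				    /* flag read ordered after the fence */
		printf("conntrack still usable (torn=%d)\n", torn);
	else
		printf("cleanup running, skip conntrack access\n");
}

int main(void)
{
	dev_cleanup();
	conn_expire();
	return 0;
}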
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2e1c11f..f7af8b8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
/* It exists; we have (non-exclusive) reference. */
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
- *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
+ *ctinfo = IP_CT_ESTABLISHED_REPLY;
/* Please set reply bit if this packet OK */
*set_reply = 1;
} else {
@@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
ret = -ret;
goto out;
}
+ /* ICMP[v6] protocol trackers may assign one conntrack. */
+ if (skb->nfct)
+ goto out;
}
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
/* This ICMP is in reverse direction to the packet which caused it */
ct = nf_ct_get(skb, &ctinfo);
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
- ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
+ ctinfo = IP_CT_RELATED_REPLY;
else
ctinfo = IP_CT_RELATED;
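
This file, the conntrack helpers below (ftp, h323, irc, pptp, sane, sip) and
xt_socket all swap arithmetic such as IP_CT_ESTABLISHED + IP_CT_IS_REPLY for
the named value IP_CT_ESTABLISHED_REPLY. A self-contained sketch of the
relationship; the enumerator layout below mirrors the usual ip_conntrack_info
ordering but is an assumption here, not a quote from the header:

#include <assert.h>
#include <stdio.h>

enum ct_info {			/* assumed layout, cf. enum ip_conntrack_info */
	CT_ESTABLISHED = 0,
	CT_RELATED,
	CT_NEW,
	CT_IS_REPLY,
	CT_ESTABLISHED_REPLY = CT_ESTABLISHED + CT_IS_REPLY,
	CT_RELATED_REPLY     = CT_RELATED + CT_IS_REPLY,
};

int main(void)
{
	/* The rename is behaviour-neutral: each alias equals the old sum. */
	assert(CT_ESTABLISHED_REPLY == CT_ESTABLISHED + CT_IS_REPLY);
	assert(CT_RELATED_REPLY == CT_RELATED + CT_IS_REPLY);
	printf("ESTABLISHED_REPLY=%d RELATED_REPLY=%d\n",
	       CT_ESTABLISHED_REPLY, CT_RELATED_REPLY);
	return 0;
}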
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e17cb7c..6f5801e 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+ ctinfo != IP_CT_ESTABLISHED_REPLY) {
pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 18b2ce5..f03c2d4 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
int ret;
/* Until there's been traffic both ways, don't look in packets. */
- if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
- }
+
pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
@@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
int ret;
/* Until there's been traffic both ways, don't look in packets. */
- if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
- }
+
pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index b394aa3..4f9390b 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
return NF_ACCEPT;
/* Until there's been traffic both ways, don't look in packets. */
- if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 0889448..2fd4565 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
u_int16_t msg;
/* don't do any tracking before tcp handshake complete */
- if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
nexthdr_off = protoff;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index d9e2773..8501823 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
+ ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index cb5a285..93faf6a 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1423,7 +1423,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
- ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+ ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* No Data ? */
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0ee010..2e7ccbb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
if (skb->mark)
NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
- if (indev && skb->dev) {
+ if (indev && skb->dev &&
+ skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
int len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f..fdd2faf 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (entskb->mark)
NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
- if (indev && entskb->dev) {
+ if (indev && entskb->dev &&
+ entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
int len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 9cc4635..fe39f7e 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct) &&
((iph->protocol != IPPROTO_ICMP &&
- ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
+ ctinfo == IP_CT_ESTABLISHED_REPLY) ||
(iph->protocol == IPPROTO_ICMP &&
- ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) &&
+ ctinfo == IP_CT_RELATED_REPLY)) &&
(ct->status & IPS_SRC_NAT_DONE)) {
daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba248d9..c0c3cda 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -804,6 +804,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
} else {
h.h2->tp_vlan_tci = 0;
}
+ h.h2->tp_padding = 0;
hdrlen = sizeof(*h.h2);
break;
default:
@@ -1736,6 +1737,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
} else {
aux.tp_vlan_tci = 0;
}
+ aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b1721d7..b4c6809 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
}
if (some_queue_timedout) {
- char drivername[64];
WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
- dev->name, netdev_drivername(dev, drivername, 64), i);
+ dev->name, netdev_drivername(dev), i);
dev->netdev_ops->ndo_tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 339ba64..5daf6cc 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -577,13 +577,13 @@ retry:
}
inode = &gss_msg->inode->vfs_inode;
for (;;) {
- prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
spin_lock(&inode->i_lock);
if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
break;
}
spin_unlock(&inode->i_lock);
- if (signalled()) {
+ if (fatal_signal_pending(current)) {
err = -ERESTARTSYS;
goto out_intr;
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec..c3b7533 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
- .gm_upcall_enctypes = "18,17,16,23,3,1,2",
+ .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
};
static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b84d739..8c91415 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task)
dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
- if (RPC_IS_ASYNC(task) || !signalled()) {
+ if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
task->tk_action = call_allocate;
rpc_delay(task, HZ>>4);
return;
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
status = -EOPNOTSUPP;
break;
}
+ if (task->tk_rebind_retry == 0)
+ break;
+ task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ETIMEDOUT:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6b43ee7..a27406b 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
+ task->tk_rebind_retry = 2;
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 88a565f..98fa8eb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
- request->ssids[i].ssid_len = nla_len(attr);
- if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+ if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
+ request->ssids[i].ssid_len = nla_len(attr);
memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
i++;
}
@@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
- request->ssids[i].ssid_len = nla_len(attr);
- if (request->ssids[i].ssid_len >
- IEEE80211_MAX_SSID_LEN) {
+ if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
+ request->ssids[i].ssid_len = nla_len(attr);
memcpy(request->ssids[i].ssid, nla_data(attr),
nla_len(attr));
i++;
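
Both nl80211 hunks move the length check ahead of the assignment, so an
oversized SSID attribute is rejected before its length is recorded or its
bytes are copied. A generic standalone sketch of the validate-then-commit
pattern; the buffer size and names are illustrative, not the nl80211
structures:

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32		/* stands in for IEEE80211_MAX_SSID_LEN */

struct ssid_entry {
	unsigned char ssid[MAX_SSID_LEN];
	size_t ssid_len;
};

/* Return 0 on success, -1 if the attribute would overflow the buffer. */
static int store_ssid(struct ssid_entry *e, const void *attr, size_t attr_len)
{
	if (attr_len > MAX_SSID_LEN)	/* validate first... */
		return -1;
	e->ssid_len = attr_len;		/* ...then commit length and data */
	memcpy(e->ssid, attr, attr_len);
	return 0;
}

int main(void)
{
	struct ssid_entry e;
	const char *name = "example-net";

	if (store_ssid(&e, name, strlen(name)) == 0)
		printf("stored %zu-byte SSID\n", e.ssid_len);
	return 0;
}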
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 47f1b86..b11ea69 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
} else {
- nr = replay_esn->replay_window >> 5;
+ nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
@@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
} else {
- nr = replay_esn->replay_window >> 5;
+ nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
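
Both replay hunks trim the clear loop by one word. The arithmetic: a window of
N bits needs (N + 31) / 32 bitmap words, so the last valid index is
(N - 1) >> 5, whereas the old bound N >> 5 runs one word past the end whenever
N is a multiple of 32. A quick standalone check with illustrative window
sizes:

#include <stdio.h>

int main(void)
{
	unsigned int window;

	for (window = 32; window <= 128; window += 32) {
		unsigned int words    = (window + 31) / 32;	/* allocated bmp words */
		unsigned int old_last = window >> 5;		/* old inclusive bound */
		unsigned int new_last = (window - 1) >> 5;	/* fixed inclusive bound */

		printf("window=%3u words=%u old clears %u, fixed clears %u\n",
		       window, words, old_last + 1, new_last + 1);
	}
	return 0;
}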
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index 490122c..40caf3c 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -17,6 +17,7 @@ quiet_cmd_wrap = WRAP $@
cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
all: $(patsubst %, $(obj)/%, $(generic-y))
+ @:
$(obj)/%.h:
$(call cmd,wrap)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8657f99..b0aa2c6 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1943,6 +1943,11 @@ sub process {
WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
}
+# check for uses of printk_ratelimit
+ if ($line =~ /\bprintk_ratelimit\s*\(/) {
+ WARN("Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
+ }
+
# printk should use KERN_* levels. Note that follow on printk's on the
# same line do not need a level, so we use the current block context
# to try and find and validate the current printk. In summary the current
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
new file mode 100755
index 0000000..3b029cb
--- /dev/null
+++ b/scripts/depmod.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# A depmod wrapper used by the toplevel Makefile
+
+if test $# -ne 2; then
+ echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+ exit 1
+fi
+DEPMOD=$1
+KERNELRELEASE=$2
+
+if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+ echo "Warning: you may need to install module-init-tools" >&2
+ echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+ sleep 1
+fi
+
+if ! test -r System.map -a -x "$DEPMOD"; then
+ exit 0
+fi
+# older versions of depmod require the version string to start with three
+# numbers, so we cheat with a symlink here
+depmod_hack_needed=true
+mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
+if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
+ if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
+ -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+ depmod_hack_needed=false
+ fi
+fi
+if $depmod_hack_needed; then
+ symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
+ ln -s "$KERNELRELEASE" "$symlink"
+ KERNELRELEASE=99.98.$KERNELRELEASE
+fi
+
+set -- -ae -F System.map
+if test -n "$INSTALL_MOD_PATH"; then
+ set -- "$@" -b "$INSTALL_MOD_PATH"
+fi
+"$DEPMOD" "$@" "$KERNELRELEASE"
+ret=$?
+
+if $depmod_hack_needed; then
+ rm -f "$symlink"
+fi
+
+exit $ret
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ec1bcec..3d2fd14 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -612,7 +612,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
static int apparmor_task_setrlimit(struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
- struct aa_profile *profile = aa_current_profile();
+ struct aa_profile *profile = __aa_current_profile();
int error = 0;
if (!unconfined(profile))
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd1f779..1be6826 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
.subsys_id = devices_subsys_id,
};
-int devcgroup_inode_permission(struct inode *inode, int mask)
+int __devcgroup_inode_permission(struct inode *inode, int mask)
{
struct dev_cgroup *dev_cgroup;
struct dev_whitelist_item *wh;
- dev_t device = inode->i_rdev;
- if (!device)
- return 0;
- if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
- return 0;
-
rcu_read_lock();
dev_cgroup = task_devcgroup(current);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index d31862e..8246532 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
* This is called in context of freshly forked kthread before kernel_execve(),
* so we can simply install the desired session_keyring at this point.
*/
-static int umh_keys_init(struct subprocess_info *info)
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
{
- struct cred *cred = (struct cred*)current_cred();
struct key *keyring = info->data;
return install_session_keyring_to_cred(cred, keyring);
@@ -470,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type,
} else if (ret == -EINPROGRESS) {
ret = 0;
} else {
- key = ERR_PTR(ret);
+ goto couldnt_alloc_key;
}
key_put(dest_keyring);
@@ -480,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type,
construction_failed:
key_negate_and_link(key, key_negative_timeout, NULL, NULL);
key_put(key);
+couldnt_alloc_key:
key_put(dest_keyring);
kleave(" = %d", ret);
return ERR_PTR(ret);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 77d4413..3545934 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -29,6 +29,7 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/kobject.h>
+#include <linux/ctype.h>
/* selinuxfs pseudo filesystem for exporting the security policy API.
Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -751,6 +752,14 @@ out:
return length;
}
+static inline int hexcode_to_int(int code) {
+ if (code == '\0' || !isxdigit(code))
+ return -1;
+ if (isdigit(code))
+ return code - '0';
+ return tolower(code) - 'a' + 10;
+}
+
static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
{
char *scon = NULL, *tcon = NULL;
@@ -785,8 +794,34 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
if (nargs < 3 || nargs > 4)
goto out;
- if (nargs == 4)
+ if (nargs == 4) {
+ /*
+ * If and when the name of new object to be queried contains
+ * either whitespace or multibyte characters, they shall be
+ * encoded based on the percentage-encoding rule.
+ * If not encoded, the sscanf logic picks up only left-half
+	 * of the supplied name; split by a whitespace unexpectedly.
+ */
+ char *r, *w;
+ int c1, c2;
+
+ r = w = namebuf;
+ do {
+ c1 = *r++;
+ if (c1 == '+')
+ c1 = ' ';
+ else if (c1 == '%') {
+ if ((c1 = hexcode_to_int(*r++)) < 0)
+ goto out;
+ if ((c2 = hexcode_to_int(*r++)) < 0)
+ goto out;
+ c1 = (c1 << 4) | c2;
+ }
+ *w++ = c1;
+ } while (c1 != '\0');
+
objname = namebuf;
+ }
length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
if (length)
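
The sel_write_create() hunk above decodes '+' and '%xx' escapes in the
supplied object name so that names containing whitespace or multibyte
characters survive the sscanf split. A hedged userspace re-implementation of
the same in-place decode loop, for illustration only:

#include <ctype.h>
#include <stdio.h>

static int hexcode_to_int(int code)
{
	if (code == '\0' || !isxdigit(code))
		return -1;
	if (isdigit(code))
		return code - '0';
	return tolower(code) - 'a' + 10;
}

/* Decode "+" to space and "%xx" to the byte it encodes; -1 on bad input. */
static int percent_decode(char *namebuf)
{
	char *r = namebuf, *w = namebuf;
	int c1, c2;

	do {
		c1 = *r++;
		if (c1 == '+')
			c1 = ' ';
		else if (c1 == '%') {
			if ((c1 = hexcode_to_int(*r++)) < 0)
				return -1;
			if ((c2 = hexcode_to_int(*r++)) < 0)
				return -1;
			c1 = (c1 << 4) | c2;
		}
		*w++ = c1;
	} while (c1 != '\0');
	return 0;
}

int main(void)
{
	char name[] = "web%20content+dir";

	if (percent_decode(name) == 0)
		printf("decoded: \"%s\"\n", name);	/* "web content dir" */
	return 0;
}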
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 102e9ec..d246aca 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3222,6 +3222,9 @@ static int filename_trans_write(struct policydb *p, void *fp)
__le32 buf[1];
int rc;
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
nel = 0;
rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
if (rc)
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 162a864..9fc2e15 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -138,7 +138,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
}
if (need_dev) {
/* Get mount point or device file. */
- if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+ if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
error = -ENOENT;
goto out;
}
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 2c41825..eb9fe2e 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -58,26 +58,6 @@ static const char *sanity_file_name(const char *path)
else
return path;
}
-
-/* print file and line with a certain printk prefix */
-static int print_snd_pfx(unsigned int level, const char *path, int line,
- const char *format)
-{
- const char *file = sanity_file_name(path);
- char tmp[] = "<0>";
- const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
- int ret = 0;
-
- if (format[0] == '<' && format[2] == '>') {
- tmp[1] = format[1];
- pfx = tmp;
- ret = 1;
- }
- printk("%sALSA %s:%d: ", pfx, file, line);
- return ret;
-}
-#else
-#define print_snd_pfx(level, path, line, format) 0
#endif
#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
@@ -85,15 +65,29 @@ void __snd_printk(unsigned int level, const char *path, int line,
const char *format, ...)
{
va_list args;
-
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+ struct va_format vaf;
+ char verbose_fmt[] = KERN_DEFAULT "ALSA %s:%d %pV";
+#endif
+
#ifdef CONFIG_SND_DEBUG
if (debug < level)
return;
#endif
+
va_start(args, format);
- if (print_snd_pfx(level, path, line, format))
- format += 3; /* skip the printk level-prefix */
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+ vaf.fmt = format;
+ vaf.va = &args;
+ if (format[0] == '<' && format[2] == '>') {
+ memcpy(verbose_fmt, format, 3);
+ vaf.fmt = format + 3;
+ } else if (level)
+ memcpy(verbose_fmt, KERN_DEBUG, 3);
+ printk(verbose_fmt, sanity_file_name(path), line, &vaf);
+#else
vprintk(format, args);
+#endif
va_end(args);
}
EXPORT_SYMBOL_GPL(__snd_printk);
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 86ee16c..4400308 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
isight->packet_index = -1;
return;
}
+ fw_iso_context_queue_flush(isight->context);
if (++index >= QUEUE_LENGTH)
index = 0;
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 2ca6f4f..e3569bd 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -27,7 +27,6 @@
#include "hpioctl.h"
#include <linux/pci.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 5e619a8..15f0161 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
.ca0102_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
+ /* EMU0404 PCIe */
+ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
+ .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+ .id = "EMU0404",
+ .emu10k2_chip = 1,
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
/* Note that all E-mu cards require kernel 2.6 or newer. */
{.vendor = 0x1102, .device = 0x0008,
.driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index f1de1ba..55f0647 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
void snd_hda_detach_beep_device(struct hda_codec *codec);
#else
-#define snd_hda_attach_beep_device(...) 0
-#define snd_hda_detach_beep_device(...)
+static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+{
+ return 0;
+}
+static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
+{
+}
#endif
#endif
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3e6b9a8..694b9daf 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3102,6 +3102,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
+ SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7a4e100..d21191d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1141,6 +1141,13 @@ static void update_speakers(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
int on;
+ /* Control HP pins/amps depending on master_mute state;
+ * in general, HP pins/amps control should be enabled in all cases,
+ * but currently set only for master_mute, just to be safe
+ */
+ do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
+ spec->autocfg.hp_pins, spec->master_mute, true);
+
if (!spec->automute)
on = 0;
else
@@ -4876,7 +4883,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG),
SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST),
SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG),
- SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST),
SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V),
SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG),
SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG),
@@ -6201,11 +6207,6 @@ static const struct snd_kcontrol_new alc260_input_mixer[] = {
/* update HP, line and mono out pins according to the master switch */
static void alc260_hp_master_update(struct hda_codec *codec)
{
- struct alc_spec *spec = codec->spec;
-
- /* change HP pins */
- do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
- spec->autocfg.hp_pins, spec->master_mute, true);
update_speakers(codec);
}
@@ -11924,7 +11925,7 @@ static const struct hda_verb alc262_nec_verbs[] = {
* 0x1b = port replicator headphone out
*/
-#define ALC_HP_EVENT 0x37
+#define ALC_HP_EVENT ALC880_HP_EVENT
static const struct hda_verb alc262_fujitsu_unsol_verbs[] = {
{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC_HP_EVENT},
@@ -12598,6 +12599,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
*/
enum {
PINFIX_FSC_H270,
+ PINFIX_HP_Z200,
};
static const struct alc_fixup alc262_fixups[] = {
@@ -12610,9 +12612,17 @@ static const struct alc_fixup alc262_fixups[] = {
{ }
}
},
+ [PINFIX_HP_Z200] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x16, 0x99130120 }, /* internal speaker */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200),
SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270),
{}
};
@@ -12729,6 +12739,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = {
ALC262_HP_BPC),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
ALC262_HP_BPC),
+ SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200",
+ ALC262_AUTO),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
ALC262_HP_BPC),
SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -13314,9 +13326,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- spec->automute_mixer_nid[0] = 0x0f;
spec->automute = 1;
- spec->automute_mode = ALC_AUTOMUTE_MIXER;
+ spec->automute_mode = ALC_AUTOMUTE_AMP;
spec->ext_mic.pin = 0x18;
spec->ext_mic.mux_idx = 0;
spec->int_mic.pin = 0x12;
@@ -13860,6 +13871,7 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
ALC268_ACER_ASPIRE_ONE),
SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
+ SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
"Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
/* almost compatible with toshiba but with optional digital outs;
@@ -13870,7 +13882,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
- SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
{}
};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 605c99e..f43bb0e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
struct via_spec *spec = codec->spec;
hda_nid_t nid = kcontrol->private_value;
unsigned int pinsel = ucontrol->value.enumerated.item[0];
+ unsigned int parm0, parm1;
/* Get Independent Mode index of headphone pin widget */
spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
? 1 : 0;
- if (spec->codec_type == VT1718S)
+ if (spec->codec_type == VT1718S) {
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+ /* Set correct mute switch for MW3 */
+ parm0 = spec->hp_independent_mode ?
+ AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0);
+ parm1 = spec->hp_independent_mode ?
+ AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1);
+ snd_hda_codec_write(codec, 0x1b, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, parm0);
+ snd_hda_codec_write(codec, 0x1b, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, parm1);
+ }
else
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_CONNECT_SEL, pinsel);
@@ -832,10 +843,13 @@ static int via_hp_build(struct hda_codec *codec)
knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
knew->private_value = nid;
- knew = via_clone_control(spec, &via_hp_mixer[1]);
- if (knew == NULL)
- return -ENOMEM;
- knew->subdevice = side_mute_channel(spec);
+ nid = side_mute_channel(spec);
+ if (nid) {
+ knew = via_clone_control(spec, &via_hp_mixer[1]);
+ if (knew == NULL)
+ return -ENOMEM;
+ knew->subdevice = nid;
+ }
return 0;
}
@@ -4280,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)},
-
- /* Setup default input of Front HP to MW9 */
- {0x28, AC_VERB_SET_CONNECT_SEL, 0x1},
/* PW9 PW10 Output enable */
{0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
{0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
@@ -4291,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
/* Enable Boost Volume backdoor */
{0x1, 0xf88, 0x8},
/* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
@@ -4304,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
/* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */
{0x34, AC_VERB_SET_CONNECT_SEL, 0x2},
{0x35, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Unmute MW4's index 0 */
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{ }
};
@@ -4453,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec,
if (err < 0)
return err;
} else if (i == AUTO_SEQ_FRONT) {
+ /* add control to mixer index 0 */
+ err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
+ "Master Front Playback Volume",
+ HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+ HDA_INPUT));
+ if (err < 0)
+ return err;
+ err = via_add_control(spec, VIA_CTL_WIDGET_MUTE,
+ "Master Front Playback Switch",
+ HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+ HDA_INPUT));
+ if (err < 0)
+ return err;
/* Front */
sprintf(name, "%s Playback Volume", chname[i]);
err = via_add_control(
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 34b2428..2692e5a 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
}
-static int lola_parse_tree(struct lola *chip)
+static int __devinit lola_parse_tree(struct lola *chip)
{
unsigned int val;
int nid, err;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 949691a..3f08afc 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
/* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_OLD_REV 207
#define HDSPM_MADI_REV 210
#define HDSPM_RAYDAT_REV 211
#define HDSPM_AIO_REV 212
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
/* if wordclock has synced freq and wordclock is valid */
if ((status2 & HDSPM_wcLock) != 0 &&
- (status & HDSPM_SelSyncRef0) == 0) {
+ (status2 & HDSPM_SelSyncRef0) == 0) {
rate_bits = status2 & HDSPM_wcFreqMask;
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
}
}
hmidi->pending = 0;
+ spin_unlock_irqrestore(&hmidi->lock, flags);
+ spin_lock_irqsave(&hmidi->hdspm->lock, flags);
hmidi->hdspm->control_register |= hmidi->ie;
hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
hmidi->hdspm->control_register);
+ spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
- spin_unlock_irqrestore (&hmidi->lock, flags);
return snd_hdspm_midi_output_write (hmidi);
}
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
switch (hdspm->firmware_rev) {
case HDSPM_MADI_REV:
+ case HDSPM_MADI_OLD_REV:
hdspm->io_type = MADI;
hdspm->card_name = "RME MADI";
hdspm->midiPorts = 3;
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 7fbfa05..eda955b 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -848,9 +848,10 @@ int atmel_ssc_set_audio(int ssc_id)
if (IS_ERR(ssc))
pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
PTR_ERR(ssc));
- else
+ else {
ssc_pdev->dev.parent = &(ssc->pdev->dev);
- ssc_free(ssc);
+ ssc_free(ssc);
+ }
ret = platform_device_add(ssc_pdev);
if (ret < 0)
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index ea4951c..f79d165 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -75,7 +75,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
.cpu_dai_name = "bfin-tdm.0",
.codec_dai_name = "ad1836-hifi",
.platform_name = "bfin-tdm-pcm-audio",
- .codec_name = "ad1836.0",
+ .codec_name = "spi0.4",
.ops = &bf5xx_ad1836_ops,
},
{
@@ -84,7 +84,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
.cpu_dai_name = "bfin-tdm.1",
.codec_dai_name = "ad1836-hifi",
.platform_name = "bfin-tdm-pcm-audio",
- .codec_name = "ad1836.0",
+ .codec_name = "spi0.4",
.ops = &bf5xx_ad1836_ops,
},
};
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index ab63d52..754c496 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -145,22 +145,22 @@ static int ad1836_hw_params(struct snd_pcm_substream *substream,
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- word_len = 3;
+ word_len = AD1836_WORD_LEN_16;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
- word_len = 1;
+ word_len = AD1836_WORD_LEN_20;
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S32_LE:
- word_len = 0;
+ word_len = AD1836_WORD_LEN_24;
break;
}
- snd_soc_update_bits(codec, AD1836_DAC_CTRL1,
- AD1836_DAC_WORD_LEN_MASK, word_len);
+ snd_soc_update_bits(codec, AD1836_DAC_CTRL1, AD1836_DAC_WORD_LEN_MASK,
+ word_len << AD1836_DAC_WORD_LEN_OFFSET);
- snd_soc_update_bits(codec, AD1836_ADC_CTRL2,
- AD1836_ADC_WORD_LEN_MASK, word_len);
+ snd_soc_update_bits(codec, AD1836_ADC_CTRL2, AD1836_ADC_WORD_LEN_MASK,
+ word_len << AD1836_ADC_WORD_OFFSET);
return 0;
}
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 8455967..9d6a3f8 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -25,6 +25,7 @@
#define AD1836_DAC_SERFMT_PCK256 (0x4 << 5)
#define AD1836_DAC_SERFMT_PCK128 (0x5 << 5)
#define AD1836_DAC_WORD_LEN_MASK 0x18
+#define AD1836_DAC_WORD_LEN_OFFSET 3
#define AD1836_DAC_CTRL2 1
#define AD1836_DACL1_MUTE 0
@@ -51,6 +52,7 @@
#define AD1836_ADCL2_MUTE 2
#define AD1836_ADCR2_MUTE 3
#define AD1836_ADC_WORD_LEN_MASK 0x30
+#define AD1836_ADC_WORD_OFFSET 5
#define AD1836_ADC_SERFMT_MASK (7 << 6)
#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
@@ -60,4 +62,8 @@
#define AD1836_NUM_REGS 16
+#define AD1836_WORD_LEN_24 0x0
+#define AD1836_WORD_LEN_20 0x1
+#define AD1836_WORD_LEN_16 0x2
+
#endif
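
The ad1836 hunks replace raw word-length codes with named constants and shift
them into the register field before handing them to snd_soc_update_bits(),
whose value argument must already line up with the mask. A standalone
read-modify-write sketch of why the shift matters; the helper below is an
illustrative stand-in, and only the mask and offset come from the header
above:

#include <stdio.h>

#define DAC_WORD_LEN_MASK   0x18	/* bits 4:3 of the DAC control register */
#define DAC_WORD_LEN_OFFSET 3
#define WORD_LEN_16         0x2		/* field code for 16-bit samples */

/* Illustrative stand-in for snd_soc_update_bits(): value must sit inside mask. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int value)
{
	return (reg & ~mask) | (value & mask);
}

int main(void)
{
	unsigned int reg = 0x00;

	/* Unshifted code 0x2 misses the 0x18 field entirely and is masked away. */
	printf("unshifted: 0x%02x\n",
	       update_bits(reg, DAC_WORD_LEN_MASK, WORD_LEN_16));

	/* Shifted into place, 0x2 << 3 = 0x10 lands in bits 4:3 as intended. */
	printf("shifted:   0x%02x\n",
	       update_bits(reg, DAC_WORD_LEN_MASK,
			   WORD_LEN_16 << DAC_WORD_LEN_OFFSET));
	return 0;
}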
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 6785688..9a5e67c 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -680,20 +680,25 @@ static struct snd_soc_dai_ops wm8804_dai_ops = {
#define WM8804_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE)
+#define WM8804_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
static struct snd_soc_dai_driver wm8804_dai = {
.name = "wm8804-spdif",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = WM8804_RATES,
.formats = WM8804_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = WM8804_RATES,
.formats = WM8804_FORMATS,
},
.ops = &wm8804_dai_ops,
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index a0b1a72..e2ab4fa 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -1839,7 +1839,7 @@ static int wm8915_set_sysclk(struct snd_soc_dai *dai,
int old;
/* Disable SYSCLK while we reconfigure */
- old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1);
+ old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1) & WM8915_SYSCLK_ENA;
snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
WM8915_SYSCLK_ENA, 0);
@@ -2038,6 +2038,7 @@ static int wm8915_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
break;
case WM8915_FLL_MCLK2:
reg = 1;
+ break;
case WM8915_FLL_DACLRCLK1:
reg = 2;
break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index f90ae42..5e05eed 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1999,12 +1999,12 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
return 0;
/* If the left PGA is enabled hit that VU bit... */
- if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTL_PGA_ENA)
+ if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
reg_cache[WM8962_HPOUTL_VOLUME]);
/* ...otherwise the right. The VU is stereo. */
- if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTR_PGA_ENA)
+ if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
reg_cache[WM8962_HPOUTR_VOLUME]);
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 3c2ee1b..6af23d0 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 15dac0f..6680c0b 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -310,7 +310,7 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
* should allocate a DMA buffer only for the streams that are valid.
*/
- if (dai->driver->playback.channels_min) {
+ if (pcm->streams[0].substream) {
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
fsl_dma_hardware.buffer_bytes_max,
&pcm->streams[0].substream->dma_buffer);
@@ -320,13 +320,13 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
}
}
- if (dai->driver->capture.channels_min) {
+ if (pcm->streams[1].substream) {
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
fsl_dma_hardware.buffer_bytes_max,
&pcm->streams[1].substream->dma_buffer);
if (ret) {
- snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
dev_err(card->dev, "can't alloc capture dma buffer\n");
+ snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
return ret;
}
}
@@ -449,7 +449,8 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
dma_private->ld_buf_phys = ld_buf_phys;
dma_private->dma_buf_phys = substream->dma_buffer.addr;
- ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "DMA", dma_private);
+ ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio",
+ dma_private);
if (ret) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
dma_private->irq, ret);
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index d8f130d..bb699bb 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC
if SND_IMX_SOC
-config SND_MXC_SOC_SSI
- tristate
-
config SND_MXC_SOC_FIQ
tristate
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1
tristate "Audio on the the i.MX31ADS with WM1133-EV1 fitted"
depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
select SND_SOC_WM8350
- select SND_MXC_SOC_SSI
select SND_MXC_SOC_FIQ
help
Enable support for audio on the i.MX31ADS with the WM1133-EV1
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4
tristate "SoC audio support for Visstrim M10 boards"
depends on MACH_IMX27_VISSTRIM_M10
select SND_SOC_TVL320AIC32X4
- select SND_MXC_SOC_SSI
select SND_MXC_SOC_MX2
help
Say Y if you want to add support for SoC audio on Visstrim SM10
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97
tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards"
depends on MACH_PCM043 || MACH_PCA100
select SND_SOC_WM9712
- select SND_MXC_SOC_SSI
select SND_MXC_SOC_FIQ
help
Say Y if you want to add support for SoC audio on Phytec phyCORE
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320
|| MACH_EUKREA_MBIMXSD35_BASEBOARD \
|| MACH_EUKREA_MBIMXSD51_BASEBOARD
select SND_SOC_TLV320AIC23
- select SND_MXC_SOC_SSI
select SND_MXC_SOC_FIQ
help
Enable I2S based access to the TLV320AIC23B codec attached
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index aab7765..4173b3d 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void)
platform_driver_unregister(&imx_pcm_driver);
}
module_exit(snd_imx_pcm_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-pcm-audio");
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 5b13fec..61fceb0 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit);
MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:imx-ssi");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 2ce0b2d..fab20a5 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
- if (dai->driver->playback.channels_min) {
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
- if (dai->driver->capture.channels_min) {
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index ffa09b3..992a732 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -191,7 +191,7 @@ static inline bool tx_active(struct i2s_dai *i2s)
if (!i2s)
return false;
- active = readl(i2s->addr + I2SMOD);
+ active = readl(i2s->addr + I2SCON);
if (is_secondary(i2s))
active &= CON_TXSDMA_ACTIVE;
@@ -223,7 +223,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
if (!i2s)
return false;
- active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE;
+ active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
return active ? true : false;
}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 06b7b81..039b953 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
switch (control) {
- case SND_SOC_CUSTOM:
- break;
-
case SND_SOC_I2C:
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
codec->hw_write = (hw_write_t)i2c_master_send;
@@ -466,6 +463,9 @@ static bool snd_soc_set_cache_val(void *base, unsigned int idx,
static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
unsigned int word_size)
{
+ if (!base)
+ return -1;
+
switch (word_size) {
case 1: {
const u8 *cache = base;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 776e6f4..32ab7fc 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -350,9 +350,9 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
}
/* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dapm_widget *w)
+static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
{
+ struct snd_soc_dapm_context *dapm = w->dapm;
int i, ret = 0;
size_t name_len, prefix_len;
struct snd_soc_dapm_path *path;
@@ -450,9 +450,9 @@ static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
}
/* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dapm_widget *w)
+static int dapm_new_mux(struct snd_soc_dapm_widget *w)
{
+ struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_dapm_path *path = NULL;
struct snd_kcontrol *kcontrol;
struct snd_card *card = dapm->card->snd_card;
@@ -535,8 +535,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
}
/* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dapm_widget *w)
+static int dapm_new_pga(struct snd_soc_dapm_widget *w)
{
if (w->num_kcontrols)
dev_err(w->dapm->dev,
@@ -1826,13 +1825,13 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
case snd_soc_dapm_mixer:
case snd_soc_dapm_mixer_named_ctl:
w->power_check = dapm_generic_check_power;
- dapm_new_mixer(dapm, w);
+ dapm_new_mixer(w);
break;
case snd_soc_dapm_mux:
case snd_soc_dapm_virt_mux:
case snd_soc_dapm_value_mux:
w->power_check = dapm_generic_check_power;
- dapm_new_mux(dapm, w);
+ dapm_new_mux(w);
break;
case snd_soc_dapm_adc:
case snd_soc_dapm_aif_out:
@@ -1845,7 +1844,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
case snd_soc_dapm_pga:
case snd_soc_dapm_out_drv:
w->power_check = dapm_generic_check_power;
- dapm_new_pga(dapm, w);
+ dapm_new_pga(w);
break;
case snd_soc_dapm_input:
case snd_soc_dapm_output:
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index a91719d..1e3ae33 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -270,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
data = 0x00; /* resume ezusb cpu */
ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
if (ret < 0) {
- release_firmware(fw);
snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
"firmware %s: end message.\n", fwname);
return ret;
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index b137b25..d144cdb 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
alsa_rt->hw = pcm_hw;
if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (rt->rate >= 0)
+ if (rt->rate < ARRAY_SIZE(rates))
alsa_rt->hw.rates = rates_alsaid[rt->rate];
alsa_rt->hw.channels_max = OUT_N_CHANNELS;
sub = &rt->playback;
} else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
- if (rt->rate >= 0)
+ if (rt->rate < ARRAY_SIZE(rates))
alsa_rt->hw.rates = rates_alsaid[rt->rate];
alsa_rt->hw.channels_max = IN_N_CHANNELS;
sub = &rt->capture;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 032ba63..940257b 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
ALL_CFLAGS += $(BASIC_CFLAGS)
ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 26d4d3f..ad73300 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -23,12 +23,7 @@ if test -d ../../.git -o -f ../../.git &&
then
VN=$(echo "$VN" | sed -e 's/-/./g');
else
- eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
- eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
- eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
- eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
-
- VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+ VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
fi
VN=$(expr "$VN" : v*'\(.*\)')
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485..0a7ed5b 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
{ "TASKLET_SOFTIRQ", 6 },
{ "SCHED_SOFTIRQ", 7 },
{ "HRTIMER_SOFTIRQ", 8 },
+ { "RCU_SOFTIRQ", 9 },
{ "HRTIMER_NORESTART", 0 },
{ "HRTIMER_RESTART", 1 },