-rw-r--r--Documentation/HOWTO2
-rw-r--r--Documentation/RCU/NMI-RCU.txt39
-rw-r--r--Documentation/RCU/checklist.txt7
-rw-r--r--Documentation/RCU/lockdep.txt28
-rw-r--r--Documentation/RCU/whatisRCU.txt6
-rw-r--r--Documentation/cgroups/cgroups.txt3
-rw-r--r--Documentation/input/multi-touch-protocol.txt23
-rw-r--r--Documentation/kernel-parameters.txt7
-rw-r--r--Documentation/networking/timestamping.txt76
-rw-r--r--Documentation/stable_kernel_rules.txt9
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile4
-rw-r--r--arch/arm/configs/n8x0_defconfig1
-rw-r--r--arch/arm/configs/omap_zoom2_defconfig2
-rw-r--r--arch/arm/configs/omap_zoom3_defconfig2
-rw-r--r--arch/arm/configs/rx51_defconfig3
-rw-r--r--arch/arm/include/asm/highmem.h15
-rw-r--r--arch/arm/include/asm/kmap_types.h1
-rw-r--r--arch/arm/include/asm/ucontext.h23
-rw-r--r--arch/arm/include/asm/user.h12
-rw-r--r--arch/arm/kernel/signal.c93
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/pm_slowclock.S4
-rw-r--r--arch/arm/mach-omap1/timer32k.c15
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c4
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c39
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c8
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c18
-rw-r--r--arch/arm/mach-omap2/board-sdp-flash.c8
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c1
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c8
-rw-r--r--arch/arm/mach-omap2/clockdomain.c6
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c3
-rw-r--r--arch/arm/mach-omap2/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S6
-rw-r--r--arch/arm/mach-omap2/omap44xx-smc.S2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c3
-rw-r--r--arch/arm/mach-omap2/powerdomain.c2
-rw-r--r--arch/arm/mach-omap2/prcm.c4
-rw-r--r--arch/arm/mach-omap2/serial.c35
-rw-r--r--arch/arm/mm/copypage-v6.c9
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/flush.c25
-rw-r--r--arch/arm/mm/highmem.c87
-rw-r--r--arch/arm/mm/mmu.c10
-rw-r--r--arch/arm/plat-omap/common.c22
-rw-r--r--arch/arm/plat-omap/dma.c9
-rw-r--r--arch/arm/plat-omap/gpio.c6
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h2
-rw-r--r--arch/arm/plat-omap/include/plat/mcbsp.h2
-rw-r--r--arch/arm/plat-omap/include/plat/nand.h7
-rw-r--r--arch/arm/plat-omap/include/plat/omap44xx.h2
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h2
-rw-r--r--arch/arm/vfp/vfpmodule.c31
-rw-r--r--arch/avr32/kernel/ptrace.c2
-rw-r--r--arch/ia64/kvm/kvm-ia64.c9
-rw-r--r--arch/m68k/include/asm/atomic_mm.h8
-rw-r--r--arch/m68k/include/asm/mcfuart.h5
-rw-r--r--arch/m68k/include/asm/sigcontext.h4
-rw-r--r--arch/m68knommu/Makefile2
-rw-r--r--arch/m68knommu/kernel/entry.S2
-rw-r--r--arch/m68knommu/platform/68360/ints.c1
-rw-r--r--arch/powerpc/kvm/book3s.c5
-rw-r--r--arch/s390/include/asm/vdso.h1
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/swsusp_asm64.S3
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S12
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S6
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S4
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/Kconfig.debug5
-rw-r--r--arch/sparc/include/asm/cpudata_64.h2
-rw-r--r--arch/sparc/include/asm/irqflags_64.h23
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/kernel/Makefile10
-rw-r--r--arch/sparc/kernel/ftrace.c60
-rw-r--r--arch/sparc/kernel/irq_64.c31
-rw-r--r--arch/sparc/kernel/kgdb_64.c3
-rw-r--r--arch/sparc/kernel/kstack.h19
-rw-r--r--arch/sparc/kernel/nmi.c10
-rw-r--r--arch/sparc/kernel/pci_common.c11
-rw-r--r--arch/sparc/kernel/pcr.c3
-rw-r--r--arch/sparc/kernel/rtrap_64.S12
-rw-r--r--arch/sparc/kernel/smp_64.c11
-rw-r--r--arch/sparc/kernel/time_64.c4
-rw-r--r--arch/sparc/kernel/traps_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/lib/mcount.S159
-rw-r--r--arch/um/drivers/line.c1
-rw-r--r--arch/um/os-Linux/helper.c1
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h3
-rw-r--r--arch/x86/include/asm/lguest_hcall.h29
-rw-r--r--arch/x86/kernel/amd_iommu.c20
-rw-r--r--arch/x86/kernel/amd_iommu_init.c48
-rw-r--r--arch/x86/kernel/aperture_64.c15
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/crash.c6
-rw-r--r--arch/x86/kernel/dumpstack.h8
-rw-r--r--arch/x86/kernel/pci-gart_64.c3
-rw-r--r--arch/x86/kvm/mmu.c11
-rw-r--r--arch/x86/kvm/svm.c25
-rw-r--r--arch/x86/kvm/vmx.c24
-rw-r--r--arch/x86/kvm/x86.c48
-rw-r--r--arch/x86/lguest/boot.c61
-rw-r--r--arch/x86/lguest/i386_head.S2
-rw-r--r--arch/x86/pci/acpi.c84
-rw-r--r--arch/x86/pci/i386.c3
-rw-r--r--crypto/authenc.c16
-rw-r--r--drivers/acpi/acpica/exprep.c17
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/pata_pcmcia.c4
-rw-r--r--drivers/block/drbd/drbd_main.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/char/agp/intel-agp.c3
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c8
-rw-r--r--drivers/cpufreq/cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c8
-rw-r--r--drivers/firewire/core-cdev.c23
-rw-r--r--drivers/firewire/core-iso.c14
-rw-r--r--drivers/firewire/ohci.c23
-rw-r--r--drivers/gpio/pca953x.c14
-rw-r--r--drivers/gpu/drm/drm_irq.c1
-rw-r--r--drivers/gpu/drm/drm_stub.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h14
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c283
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c32
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c16
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c54
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h13
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c68
-rw-r--r--drivers/gpu/drm/i915/intel_display.c107
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c256
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h18
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c92
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c86
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c81
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c22
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c731
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c96
-rw-r--r--drivers/gpu/drm/radeon/atom.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c21
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/r300.c20
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r3002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r4202
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs6002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5153
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/hwmon/applesmc.c18
-rw-r--r--drivers/hwmon/asus_atk0110.c4
-rw-r--r--drivers/hwmon/hp_accel.c4
-rw-r--r--drivers/hwmon/it87.c32
-rw-r--r--drivers/hwmon/sht15.c13
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c10
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/ide/ide-cs.c4
-rw-r--r--drivers/input/input.c9
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/mouse/alps.c1
-rw-r--r--drivers/input/mouse/bcm5974.c1
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/sparse-keymap.c52
-rw-r--r--drivers/input/tablet/wacom_sys.c12
-rw-r--r--drivers/input/tablet/wacom_wac.c163
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c5
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c1
-rw-r--r--drivers/isdn/gigaset/interface.c1
-rw-r--r--drivers/isdn/gigaset/proc.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c3
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c4
-rw-r--r--drivers/lguest/lguest_device.c4
-rw-r--r--drivers/lguest/x86/core.c12
-rw-r--r--drivers/md/raid5.c52
-rw-r--r--drivers/misc/Kconfig16
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/vmware_balloon.c832
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/internal.h17
-rw-r--r--drivers/mtd/mtdbdi.c43
-rw-r--r--drivers/mtd/mtdcore.c79
-rw-r--r--drivers/mtd/mtdsuper.c2
-rw-r--r--drivers/mtd/nand/orion_nand.c8
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bnx2.c48
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/cnic.c10
-rw-r--r--drivers/net/cxgb3/ael1002.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/e100.c10
-rw-r--r--drivers/net/e1000e/82571.c20
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/netdev.c72
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/fsl_pq_mdio.c20
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/igb/igb_ethtool.c1
-rw-r--r--drivers/net/igb/igb_main.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c62
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ks8851.c12
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c7
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c44
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c3
-rw-r--r--drivers/net/r6040.c11
-rw-r--r--drivers/net/r8169.c32
-rw-r--r--drivers/net/sfc/efx.c4
-rw-r--r--drivers/net/sfc/falcon.c4
-rw-r--r--drivers/net/sfc/falcon_boards.c13
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/siena.c13
-rw-r--r--drivers/net/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/tg3.c1
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/Kconfig22
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/cdc_ether.c1
-rw-r--r--drivers/net/usb/ipheth.c569
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/sierra_net.c1001
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c107
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c10
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pcmcia/cistpl.c9
-rw-r--r--drivers/pcmcia/db1xxx_ss.c4
-rw-r--r--drivers/pcmcia/ds.c76
-rw-r--r--drivers/pcmcia/pcmcia_resource.c10
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c16
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/asus-laptop.c8
-rw-r--r--drivers/platform/x86/dell-wmi.c3
-rw-r--r--drivers/platform/x86/eeepc-laptop.c3
-rw-r--r--drivers/platform/x86/eeepc-wmi.c333
-rw-r--r--drivers/regulator/max8925-regulator.c2
-rw-r--r--drivers/regulator/mc13783-regulator.c3
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_3990_erp.c7
-rw-r--r--drivers/s390/char/zcore.c6
-rw-r--r--drivers/s390/cio/chsc.c29
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c18
-rw-r--r--drivers/s390/cio/css.c16
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c6
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c1
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c13
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/dpt_i2o.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c29
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c2
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/serial/mcf.c6
-rw-r--r--drivers/serial/serial_cs.c9
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c14
-rw-r--r--drivers/usb/core/driver.c49
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/ehci-mem.c2
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-sched.c40
-rw-r--r--drivers/usb/host/ehci.h5
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c15
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcaux.c10
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c26
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h3
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/efifb.c3
-rw-r--r--drivers/virtio/virtio_balloon.c3
-rw-r--r--drivers/w1/masters/omap_hdq.c1
-rw-r--r--drivers/w1/slaves/w1_therm.c5
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/booke_wdt.c4
-rw-r--r--drivers/watchdog/max63xx_wdt.c7
-rw-r--r--drivers/watchdog/sb_wdog.c4
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c14
-rw-r--r--fs/9p/v9fs.c10
-rw-r--r--fs/9p/v9fs.h2
-rw-r--r--fs/9p/vfs_super.c1
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/mntpt.c24
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/volume.c7
-rw-r--r--fs/binfmt_elf_fdpic.c7
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/block_dev.c20
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/ceph/addr.c62
-rw-r--r--fs/ceph/caps.c42
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/inode.c10
-rw-r--r--fs/ceph/messenger.c9
-rw-r--r--fs/ceph/osdmap.c180
-rw-r--r--fs/ceph/osdmap.h1
-rw-r--r--fs/ceph/rados.h6
-rw-r--r--fs/ceph/snap.c26
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/cifs_fs_sb.h3
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/coda/inode.c8
-rw-r--r--fs/ecryptfs/crypto.c37
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h15
-rw-r--r--fs/ecryptfs/inode.c129
-rw-r--r--fs/ecryptfs/main.c10
-rw-r--r--fs/ecryptfs/mmap.c38
-rw-r--r--fs/ecryptfs/super.c2
-rw-r--r--fs/exofs/exofs.h2
-rw-r--r--fs/exofs/super.c8
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/inode.c3
-rw-r--r--fs/ext4/mballoc.c21
-rw-r--r--fs/ioctl.c92
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_dmap.c16
-rw-r--r--fs/jfs/jfs_dmap.h6
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/namei.c4
-rw-r--r--fs/jfs/resize.c6
-rw-r--r--fs/jfs/symlink.c14
-rw-r--r--fs/logfs/gc.c8
-rw-r--r--fs/logfs/journal.c29
-rw-r--r--fs/logfs/logfs.h15
-rw-r--r--fs/logfs/readwrite.c75
-rw-r--r--fs/logfs/segment.c8
-rw-r--r--fs/logfs/super.c11
-rw-r--r--fs/ncpfs/inode.c8
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/write.c44
-rw-r--r--fs/nfsd/nfs4xdr.c8
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/quota/Kconfig8
-rw-r--r--fs/quota/dquot.c16
-rw-r--r--fs/reiserfs/dir.c2
-rw-r--r--fs/reiserfs/xattr.c19
-rw-r--r--fs/smbfs/inode.c8
-rw-r--r--fs/squashfs/block.c5
-rw-r--r--fs/squashfs/super.c4
-rw-r--r--fs/squashfs/zlib_wrapper.c3
-rw-r--r--fs/super.c8
-rw-r--r--fs/sync.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c4
-rw-r--r--fs/xfs/xfs_dfrag.c22
-rw-r--r--fs/xfs/xfs_log.c38
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/coda_psdev.h3
-rw-r--r--include/linux/firewire-cdev.h78
-rw-r--r--include/linux/firewire-constants.h29
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/input/matrix_keypad.h2
-rw-r--r--include/linux/kvm_host.h7
-rw-r--r--include/linux/ncp_fs_sb.h2
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/poison.h9
-rw-r--r--include/linux/rcupdate.h65
-rw-r--r--include/linux/regulator/consumer.h8
-rw-r--r--include/linux/smb_fs_sb.h3
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/sctp.h1
-rw-r--r--include/net/x25.h4
-rw-r--r--include/pcmcia/ds.h7
-rw-r--r--include/pcmcia/ss.h8
-rw-r--r--init/initramfs.c3
-rw-r--r--kernel/cred.c4
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/rcupdate.c7
-rw-r--r--kernel/sys.c2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/decompress_unlzo.c22
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/flex_array.c2
-rw-r--r--lib/vsprintf.c11
-rw-r--r--mm/backing-dev.c34
-rw-r--r--mm/hugetlb.c5
-rw-r--r--mm/ksm.c12
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/rmap.c36
-rw-r--r--net/bluetooth/l2cap.c5
-rw-r--r--net/bridge/br_multicast.c10
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/rtnetlink.c5
-rw-r--r--net/ieee802154/af_ieee802154.c3
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/mac80211/agg-tx.c1
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/sta_info.c20
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/rds/rdma_transport.c2
-rw-r--r--net/sctp/associola.c6
-rw-r--r--net/sctp/endpointola.c1
-rw-r--r--net/sctp/sm_make_chunk.c32
-rw-r--r--net/sctp/sm_sideeffect.c26
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/socket.c14
-rw-r--r--net/x25/af_x25.c68
-rw-r--r--net/x25/x25_facilities.c27
-rw-r--r--net/x25/x25_in.c15
-rw-r--r--security/inode.c4
-rw-r--r--security/keys/keyring.c2
-rw-r--r--security/keys/request_key.c22
-rw-r--r--security/selinux/ss/avtab.h2
-rw-r--r--sound/arm/aaci.c7
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_analog.c9
-rw-r--r--sound/pci/hda/patch_realtek.c172
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/pci/hda/patch_via.c41
-rw-r--r--sound/pci/maestro3.c9
-rw-r--r--sound/soc/codecs/wm2000.c1
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c15
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c55
-rw-r--r--sound/soc/imx/imx-ssi.c3
-rw-r--r--sound/usb/usbmidi.c24
-rw-r--r--virt/kvm/kvm_main.c17
486 files changed, 7888 insertions, 3098 deletions
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af..40ada93 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -234,7 +234,7 @@ process is as follows:
Linus, usually the patches that have already been included in the
-next kernel for a few weeks. The preferred way to submit big changes
is using git (the kernel's source management tool, more information
- can be found at http://git.or.cz/) but plain patches are also just
+ can be found at http://git-scm.com/) but plain patches are also just
fine.
- After two weeks a -rc1 kernel is released it is now possible to push
only patches that do not include new features that could affect the
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e6..a8536cb 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler.
cpu = smp_processor_id();
++nmi_count(cpu);
- if (!rcu_dereference(nmi_callback)(regs, cpu))
+ if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
default_do_nmi(regs);
nmi_exit();
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the
default_do_nmi() function to handle a machine-specific NMI. Finally,
preemption is restored.
-Strictly speaking, rcu_dereference() is not needed, since this code runs
-only on i386, which does not need rcu_dereference() anyway. However,
-it is a good documentation aid, particularly for anyone attempting to
-do something similar on Alpha.
+In theory, rcu_dereference_sched() is not needed, since this code runs
+only on i386, which in theory does not need rcu_dereference_sched()
+anyway. However, in practice it is a good documentation aid, particularly
+for anyone attempting to do something similar on Alpha or on systems
+with aggressive optimizing compilers.
-Quick Quiz: Why might the rcu_dereference() be necessary on Alpha,
+Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha,
given that the code referenced by the pointer is read-only?
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
Answer to Quick Quiz
- Why might the rcu_dereference() be necessary on Alpha, given
+ Why might the rcu_dereference_sched() be necessary on Alpha, given
that the code referenced by the pointer is read-only?
Answer: The caller to set_nmi_callback() might well have
- initialized some data that is to be used by the
- new NMI handler. In this case, the rcu_dereference()
- would be needed, because otherwise a CPU that received
- an NMI just after the new handler was set might see
- the pointer to the new NMI handler, but the old
- pre-initialized version of the handler's data.
-
- More important, the rcu_dereference() makes it clear
- to someone reading the code that the pointer is being
- protected by RCU.
+ initialized some data that is to be used by the new NMI
+ handler. In this case, the rcu_dereference_sched() would
+ be needed, because otherwise a CPU that received an NMI
+ just after the new handler was set might see the pointer
+ to the new NMI handler, but the old pre-initialized
+ version of the handler's data.
+
+ This same sad story can happen on other CPUs when using
+ a compiler with aggressive pointer-value speculation
+ optimizations.
+
+ More important, the rcu_dereference_sched() makes it
+ clear to someone reading the code that the pointer is
+ being protected by RCU-sched.
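
As a companion to the quiz answer above, here is a minimal sketch of the update side that the answer assumes. The set_nmi_callback()/nmi_callback names come from the NMI-RCU.txt example itself; the body shown here is an illustration only, not part of the patch:

	#include <linux/ptrace.h>
	#include <linux/rcupdate.h>

	static int (*nmi_callback)(struct pt_regs *regs, int cpu);

	void set_nmi_callback(int (*callback)(struct pt_regs *regs, int cpu))
	{
		/*
		 * Any data the new handler depends on must be initialized
		 * before this point.  rcu_assign_pointer() orders those
		 * stores before the pointer store, so a CPU that takes an
		 * NMI and picks up the pointer with rcu_dereference_sched()
		 * also sees the initialized data.
		 */
		rcu_assign_pointer(nmi_callback, callback);
	}
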
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f..790d1a8 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
The reason that it is permissible to use RCU list-traversal
primitives when the update-side lock is held is that doing so
can be quite helpful in reducing code bloat when common code is
- shared between readers and updaters.
+ shared between readers and updaters. Additional primitives
+ are provided for this case, as discussed in lockdep.txt.
10. Conversely, if you are in an RCU read-side critical section,
and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
requiring SRCU's read-side deadlock immunity or low read-side
realtime latency.
- Note that, rcu_assign_pointer() and rcu_dereference() relate to
- SRCU just as they do to other forms of RCU.
+ Note that rcu_assign_pointer() relates to SRCU just as it does
+ to other forms of RCU.
15. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58..d7a49b2 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
srcu_dereference(p, sp):
Check for SRCU read-side critical section.
rcu_dereference_check(p, c):
- Use explicit check expression "c".
+ Use explicit check expression "c". This is useful in
+ code that is invoked by both readers and updaters.
rcu_dereference_raw(p)
Don't check. (Use sparingly, if at all.)
+ rcu_dereference_protected(p, c):
+ Use explicit check expression "c", and omit all barriers
+ and compiler constraints. This is useful when the data
+ structure cannot change, for example, in code that is
+ invoked only by updaters.
+ rcu_access_pointer(p):
+ Return the value of the pointer and omit all barriers,
+ but retain the compiler constraints that prevent duplicating
+ or coalescing. This is useful when testing the
+ value of the pointer itself, for example, against NULL.
The rcu_dereference_check() check expression can be any boolean
expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
RCU read-side critical sections, in case (2) the ->file_lock prevents
any change from taking place, and finally, in case (3) the current task
is the only task accessing the file_struct, again preventing any change
-from taking place.
+from taking place. If the above statement was invoked only from updater
+code, it could instead be written as follows:
+
+ file = rcu_dereference_protected(fdt->fd[fd],
+ lockdep_is_held(&files->file_lock) ||
+ atomic_read(&files->count) == 1);
+
+This would verify cases #2 and #3 above, and furthermore lockdep would
+complain if this was used in an RCU read-side critical section unless one
+of these two cases held. Because rcu_dereference_protected() omits all
+barriers and compiler constraints, it generates better code than do the
+other flavors of rcu_dereference(). On the other hand, it is illegal
+to use rcu_dereference_protected() if either the RCU-protected pointer
+or the RCU-protected data that it points to can change concurrently.
There are currently only "universal" versions of the rcu_assign_pointer()
and RCU list-/tree-traversal primitives, which do not (yet) check for
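
To make the distinction concrete, here is a short sketch (not part of the patch) reusing the fdt->fd[fd] example from this file. rcu_dereference_check(), rcu_dereference_protected() and rcu_access_pointer() are the primitives documented above; the surrounding locking is as described in the text:

	/* Reader or updater: the check expression covers all three legal cases. */
	file = rcu_dereference_check(fdt->fd[fd],
				     rcu_read_lock_held() ||
				     lockdep_is_held(&files->file_lock) ||
				     atomic_read(&files->count) == 1);

	/* Updater only: no barriers emitted, lockdep still verifies the locks. */
	file = rcu_dereference_protected(fdt->fd[fd],
					 lockdep_is_held(&files->file_lock) ||
					 atomic_read(&files->count) == 1);

	/* Testing the pointer value only, without dereferencing it. */
	if (rcu_access_pointer(fdt->fd[fd]) == NULL)
		return -EBADF;
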
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee..cfaac34 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup
init_srcu_struct
cleanup_srcu_struct
+All: lockdep-checked RCU-protected pointer access
+
+ rcu_dereference_check
+ rcu_dereference_protected
+ rcu_access_pointer
+
See the comment headers in the source code (or the docbook generated
from them) for more information.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index fd588ff0..a1ca592 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -235,8 +235,7 @@ containing the following files describing that cgroup:
- cgroup.procs: list of tgids in the cgroup. This list is not
guaranteed to be sorted or free of duplicate tgids, and userspace
should sort/uniquify the list if this property is required.
- Writing a tgid into this file moves all threads with that tgid into
- this cgroup.
+ This is a read-only file, for now.
- notify_on_release flag: run the release agent on exit?
- release_agent: the path to use for release notifications (this file
exists in the top cgroup only)
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480..c0fc1c7 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like:
SYN_MT_REPORT
SYN_REPORT
+Here is the sequence after lifting one of the fingers:
+
+ ABS_MT_POSITION_X
+ ABS_MT_POSITION_Y
+ SYN_MT_REPORT
+ SYN_REPORT
+
+And here is the sequence after lifting the remaining finger:
+
+ SYN_MT_REPORT
+ SYN_REPORT
+
+If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
+ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
+last SYN_REPORT will be dropped by the input core, resulting in no
+zero-finger event reaching userland.
Event Semantics
---------------
@@ -217,11 +233,6 @@ where examples can be found.
difference between the contact position and the approaching tool position
could be used to derive tilt.
[2] The list can of course be extended.
-[3] The multi-touch X driver is currently in the prototyping stage. At the
-time of writing (April 2009), the MT protocol is not yet merged, and the
-prototype implements finger matching, basic mouse support and two-finger
-scrolling. The project aims at improving the quality of current multi-touch
-functionality available in the Synaptics X driver, and in addition
-implement more advanced gestures.
+[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
[4] See the section on event computation.
[5] See the section on finger tracking.
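
For driver writers following the sequences above, here is a minimal sketch of the reporting side (an illustration, not part of the patch). input_report_abs(), input_mt_sync() and input_sync() are the standard input-core calls; the coordinate variables are made up:

	/* One frame with two anonymous contacts. */
	input_report_abs(dev, ABS_MT_POSITION_X, x1);
	input_report_abs(dev, ABS_MT_POSITION_Y, y1);
	input_mt_sync(dev);			/* SYN_MT_REPORT */
	input_report_abs(dev, ABS_MT_POSITION_X, x2);
	input_report_abs(dev, ABS_MT_POSITION_Y, y2);
	input_mt_sync(dev);			/* SYN_MT_REPORT */
	input_sync(dev);			/* SYN_REPORT */

	/* All contacts lifted: an empty frame, as described above. */
	input_mt_sync(dev);			/* SYN_MT_REPORT */
	input_sync(dev);			/* SYN_REPORT */
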
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca5..839b21b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
- isolate - enable device isolation (each device, as far
- as possible, will get its own protection
- domain) [default]
- share - put every device behind one IOMMU into the
- same protection domain
fullflush - enable flushing of IO/TLB entries when
they are unmapped. Otherwise they are
flushed before they will be reused, which
@@ -1199,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file
libata.force= [LIBATA] Force configurations. The format is comma
separated list of "[ID:]VAL" where ID is
- PORT[:DEVICE]. PORT and DEVICE are decimal numbers
+ PORT[.DEVICE]. PORT and DEVICE are decimal numbers
matching port, link or device. Basically, it matches
the ATA ID string printed on console by libata. If
the whole ID part is omitted, the last PORT and DEVICE
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b45..e8c8f4f 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
following control message:
- struct scm_timestamping {
- struct timespec systime;
- struct timespec hwtimetrans;
- struct timespec hwtimeraw;
- };
+
+struct scm_timestamping {
+ struct timespec systime;
+ struct timespec hwtimetrans;
+ struct timespec hwtimeraw;
+};
recvmsg() can be used to get this control message for regular incoming
packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
SIOCSHWTSTAMP:
Hardware time stamping must also be initialized for each device driver
-that is expected to do hardware time stamping. The parameter is:
+that is expected to do hardware time stamping. The parameter is defined in
+/include/linux/net_tstamp.h as:
struct hwtstamp_config {
- int flags; /* no flags defined right now, must be zero */
- int tx_type; /* HWTSTAMP_TX_* */
- int rx_filter; /* HWTSTAMP_FILTER_* */
+ int flags; /* no flags defined right now, must be zero */
+ int tx_type; /* HWTSTAMP_TX_* */
+ int rx_filter; /* HWTSTAMP_FILTER_* */
};
Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
/* time stamp any incoming packet */
HWTSTAMP_FILTER_ALL,
- /* return value: time stamp all packets requested plus some others */
- HWTSTAMP_FILTER_SOME,
+ /* return value: time stamp all packets requested plus some others */
+ HWTSTAMP_FILTER_SOME,
/* PTP v1, UDP, any kind of event packet */
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
- ...
+ /* for the complete list of values, please check
+ * the include file /include/linux/net_tstamp.h
+ */
};
DEVICE IMPLEMENTATION
A driver which supports hardware time stamping must support the
-SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored
-in the skb with skb_hwtstamp_set().
+SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
+the actual values as described in the section on SIOCSHWTSTAMP.
+
+Time stamps for received packets must be stored in the skb. To get a pointer
+to the shared time stamp structure of the skb call skb_hwtstamps(). Then
+set the time stamps in the structure:
+
+struct skb_shared_hwtstamps {
+ /* hardware time stamp transformed into duration
+ * since arbitrary point in time
+ */
+ ktime_t hwtstamp;
+ ktime_t syststamp; /* hwtstamp transformed to system time base */
+};
Time stamps for outgoing packets are to be generated as follows:
-- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware()
- returns non-zero. If yes, then the driver is expected
- to do hardware time stamping.
+- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero.
+ If yes, then the driver is expected to do hardware time stamping.
- If this is possible for the skb and requested, then declare
- that the driver is doing the time stamping by calling
- skb_hwtstamp_tx_in_progress(). A driver not supporting
- hardware time stamping doesn't do that. A driver must never
- touch sk_buff::tstamp! It is used to store how time stamping
- for an outgoing packets is to be done.
+ that the driver is doing the time stamping by setting the field
+ skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
+ to the associated skb for the next step and not free the skb. A driver
+ not supporting hardware time stamping doesn't do that. A driver must
+ never touch sk_buff::tstamp! It is used to store software generated
+ time stamps by the network subsystem.
- As soon as the driver has sent the packet and/or obtained a
hardware time stamp for it, it passes the time stamp back by
calling skb_hwtstamp_tx() with the original skb, the raw
- hardware time stamp and a handle to the device (necessary
- to convert the hardware time stamp to system time). If obtaining
- the hardware time stamp somehow fails, then the driver should
- not fall back to software time stamping. The rationale is that
- this would occur at a later time in the processing pipeline
- than other software time stamping and therefore could lead
- to unexpected deltas between time stamps.
-- If the driver did not call skb_hwtstamp_tx_in_progress(), then
+ hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+ adds the timestamps; therefore the original skb has to be freed now.
+ If obtaining the hardware time stamp somehow fails, then the driver
+ should not fall back to software time stamping. The rationale is that
+ this would occur at a later time in the processing pipeline than other
+ software time stamping and therefore could lead to unexpected deltas
+ between time stamps.
+- If the driver did not set skb_tx(skb)->in_progress, then
dev_hard_start_xmit() checks whether software time stamping
is wanted as fallback and potentially generates the time stamp.
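
To tie the transmit-side steps above together, here is a rough sketch of a driver TX path. It uses the field and function names exactly as they appear in this document (skb_tx(), skb_hwtstamp_tx(), struct skb_shared_hwtstamps); the foo_* helpers, the priv->tstamp_skb pointer, and the exact second argument of skb_hwtstamp_tx() are assumptions of this sketch, so treat it as an outline rather than a reference implementation:

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (skb_tx(skb)->hardware) {
			/* Declare that this driver will time stamp the packet. */
			skb_tx(skb)->in_progress = 1;
			/* Keep the skb for the completion path; do not free it yet. */
			priv->tstamp_skb = skb;
		}
		/* ... hand the packet to the hardware as usual ... */
		return NETDEV_TX_OK;
	}

	/* Called once the hardware time stamp for the kept skb is available. */
	static void foo_tx_hwtstamp_done(struct foo_priv *priv, u64 raw_cycles)
	{
		struct skb_shared_hwtstamps hwts = { };

		/* Converting the raw stamp into skb_shared_hwtstamps is an
		 * assumption here; the document only says to pass back the
		 * original skb and the raw hardware time stamp. */
		hwts.hwtstamp = ns_to_ktime(foo_cycles_to_ns(raw_cycles));
		skb_hwtstamp_tx(priv->tstamp_skb, &hwts);	/* clones the skb */
		dev_kfree_skb_any(priv->tstamp_skb);		/* free the original */
		priv->tstamp_skb = NULL;
	}
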
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5b..e213f45 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
- It cannot contain any "trivial" fixes in it (spelling changes,
whitespace cleanups, etc).
- It must follow the Documentation/SubmittingPatches rules.
- - It or an equivalent fix must already exist in Linus' tree. Quote the
- respective commit ID in Linus' tree in your patch submission to -stable.
+ - It or an equivalent fix must already exist in Linus' tree (upstream).
Procedure for submitting patches to the -stable tree:
- Send the patch, after verifying that it follows the above rules, to
- stable@kernel.org.
- - To have the patch automatically included in the stable tree, add the
- the tag
+ stable@kernel.org. You must note the upstream commit ID in the changelog
+ of your submission.
+ - To have the patch automatically included in the stable tree, add the tag
Cc: stable@kernel.org
in the sign-off area. Once the patch is merged it will be applied to
the stable tree without anything else needing to be done by the author
diff --git a/MAINTAINERS b/MAINTAINERS
index 5b42290..1838875 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -485,8 +485,8 @@ S: Maintained
F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
-M: Nicolas Boichat <nicolas@boichat.ch>
-L: mactel-linux-devel@lists.sourceforge.net
+M: Henrik Rydberg <rydberg@euromail.se>
+L: lm-sensors@lm-sensors.org
S: Maintained
F: drivers/hwmon/applesmc.c
@@ -1960,7 +1960,7 @@ F: lib/kobj*
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
-L: dri-devel@lists.sourceforge.net
+L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
S: Maintained
F: drivers/gpu/drm/
@@ -4791,12 +4791,11 @@ F: drivers/s390/crypto/
S390 ZFCP DRIVER
M: Christof Schmitt <christof.schmitt@de.ibm.com>
-M: Martin Peschke <mp3@de.ibm.com>
+M: Swen Schillig <swen@vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
-F: Documentation/s390/zfcpdump.txt
F: drivers/s390/scsi/zfcp_*
S390 IUCV NETWORK LAYER
diff --git a/Makefile b/Makefile
index 9754615..fa1db90 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 34
-EXTRAVERSION = -rc4
-NAME = Man-Eating Seals of Antiquity
+EXTRAVERSION = -rc5
+NAME = Sheep on Meth
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig
index 216ad00..9405e32 100644
--- a/arch/arm/configs/n8x0_defconfig
+++ b/arch/arm/configs/n8x0_defconfig
@@ -1058,7 +1058,6 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig
index f5c6e11..881faea 100644
--- a/arch/arm/configs/omap_zoom2_defconfig
+++ b/arch/arm/configs/omap_zoom2_defconfig
@@ -661,7 +661,7 @@ CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig
index ea9a501..5e55b55 100644
--- a/arch/arm/configs/omap_zoom3_defconfig
+++ b/arch/arm/configs/omap_zoom3_defconfig
@@ -680,7 +680,7 @@ CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 45135ff..473f9e1 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -59,8 +59,6 @@ CONFIG_FAIR_GROUP_SCHED=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
@@ -480,7 +478,6 @@ CONFIG_BT_HIDP=m
# CONFIG_BT_HCIBFUSB is not set
# CONFIG_BT_HCIVHCI is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_REG_DEBUG is not set
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00..feb988a 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
#define kmap_prot PAGE_KERNEL
-#define flush_cache_kmaps() flush_cache_all()
+#define flush_cache_kmaps() \
+ do { \
+ if (cache_is_vivt()) \
+ flush_cache_all(); \
+ } while (0)
extern pte_t *pkmap_page_table;
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
#endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949..c4b2ea3 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
+ KM_L1_CACHE,
KM_L2_CACHE,
KM_TYPE_NR
};
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f..47f023a 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
#endif /* CONFIG_IWMMXT */
#ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
#define VFP_MAGIC 0x56465001
-#define VFP_STORAGE_SIZE 152
-#else
-#define VFP_MAGIC 0x56465002
-#define VFP_STORAGE_SIZE 144
-#endif
struct vfp_sigframe
{
unsigned long magic;
unsigned long size;
- union vfp_state storage;
-};
+ struct user_vfp ufp;
+ struct user_vfp_exc ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ * 4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
+
#endif /* CONFIG_VFP */
/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
struct vfp_sigframe vfp;
#endif
/* Something that isn't a valid magic number for any coprocessor. */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e05..05ac4b0 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
*/
struct user_vfp {
unsigned long long fpregs[32];
unsigned long fpscr;
};
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+ unsigned long fpexc;
+ unsigned long fpinst;
+ unsigned long fpinst2;
+};
+
#endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f3..907d5a6 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
+#include <asm/vfp.h>
#include "ptrace.h"
#include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
#endif
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *h = &thread->vfpstate.hard;
+ const unsigned long magic = VFP_MAGIC;
+ const unsigned long size = VFP_STORAGE_SIZE;
+ int err = 0;
+
+ vfp_sync_hwstate(thread);
+ __put_user_error(magic, &frame->magic, err);
+ __put_user_error(size, &frame->size, err);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+ sizeof(h->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * Copy the exception registers.
+ */
+ __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+ __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+ __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *h = &thread->vfpstate.hard;
+ unsigned long magic;
+ unsigned long size;
+ unsigned long fpexc;
+ int err = 0;
+
+ __get_user_error(magic, &frame->magic, err);
+ __get_user_error(size, &frame->size, err);
+
+ if (err)
+ return -EFAULT;
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+ sizeof(h->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+ __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+ /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ h->fpexc = fpexc;
+
+ __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+ __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+ if (!err)
+ vfp_flush_hwstate(thread);
+
+ return err ? -EFAULT : 0;
+}
+
+#endif
+
/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_restore_state(&sf->aux.vfp);
+ if (err == 0)
+ err |= restore_vfp_context(&aux->vfp);
#endif
return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_save_state(&sf->aux.vfp);
+ if (err == 0)
+ err |= preserve_vfp_context(&aux->vfp);
#endif
__put_user_error(0, &aux->end_magic, err);
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd57..d400455 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
- obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 9fcbd6c..9c5b48e 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
orr r3, r3, #(1 << 29) /* bit 29 always set */
str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
- wait_pllalock
-
/* Save PLLB setting and disable it */
ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
str r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
mov r3, #AT91_PMC_PLLCOUNT
str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
- wait_pllblock
-
/* Turn off the main oscillator */
ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
bic r3, r3, #AT91_PMC_MOSCEN
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 9ad1185..20cfbcc 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -68,12 +68,6 @@ struct sys_timer omap_timer;
* ---------------------------------------------------------------------------
*/
-#if defined(CONFIG_ARCH_OMAP16XX)
-#define TIMER_32K_SYNCHRONIZED 0xfffbc410
-#else
-#error OMAP 32KHz timer does not currently work on 15XX!
-#endif
-
/* 16xx specific defines */
#define OMAP1_32K_TIMER_BASE 0xfffb9000
#define OMAP1_32K_TIMER_CR 0x08
@@ -150,15 +144,6 @@ static struct clock_event_device clockevent_32k_timer = {
.set_mode = omap_32k_timer_set_mode,
};
-/*
- * The 32KHz synchronized timer is an additional timer on 16xx.
- * It is always running.
- */
-static inline unsigned long omap_32k_sync_timer_read(void)
-{
- return omap_readl(TIMER_32K_SYNCHRONIZED);
-}
-
static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_32k_timer;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a8a3d1e..2455dcc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -59,8 +59,10 @@ config MACH_OMAP3_BEAGLE
select OMAP_PACKAGE_CBB
config MACH_DEVKIT8000
- bool "DEVKIT8000 board"
- depends on ARCH_OMAP3
+ bool "DEVKIT8000 board"
+ depends on ARCH_OMAP3
+ select OMAP_PACKAGE_CUS
+ select OMAP_MUX
config MACH_OMAP_LDP
bool "OMAP3 LDP board"
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index a0a2a11..504d2bd 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -96,6 +96,7 @@ static struct omap_board_mux board_mux[] __initdata = {
static void __init omap_sdp_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
+ omap_serial_init();
zoom_peripherals_init();
board_smc91x_init();
enable_board_wakeup_source();
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 6ae8805..c1c4389 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -294,9 +294,9 @@ static struct omap_board_mux board_mux[] __initdata = {
static void __init am3517_evm_init(void)
{
- am3517_evm_i2c_init();
-
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+
+ am3517_evm_i2c_init();
platform_add_devices(am3517_evm_devices,
ARRAY_SIZE(am3517_evm_devices));
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 5bfc13b..47e3af2 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -50,7 +50,6 @@
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/usb/otg.h>
#include <linux/dm9000.h>
#include <linux/interrupt.h>
@@ -269,20 +268,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
devkit8000_vmmc1_supply.dev = mmc[0].dev;
devkit8000_vsim_supply.dev = mmc[0].dev;
- /* REVISIT: need ehci-omap hooks for external VBUS
- * power switch and overcurrent detect
- */
-
- gpio_request(gpio + 1, "EHCI_nOC");
- gpio_direction_input(gpio + 1);
-
- /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
- gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
- gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
-
- /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
- gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
return 0;
}
@@ -303,7 +288,7 @@ static struct regulator_consumer_supply devkit8000_vpll2_supplies[] = {
.dev = &devkit8000_lcd_device.dev,
},
{
- .supply = "vdss_dsi",
+ .supply = "vdds_dsi",
.dev = &devkit8000_dss_device.dev,
}
};
@@ -639,17 +624,21 @@ static struct omap_musb_board_data musb_board_data = {
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
- .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.phy_reset = true,
.reset_gpio_port[0] = -EINVAL,
- .reset_gpio_port[1] = 147,
+ .reset_gpio_port[1] = -EINVAL,
.reset_gpio_port[2] = -EINVAL
};
static void __init devkit8000_init(void)
{
+ omap_serial_init();
+
+ omap_dm9000_init();
+
devkit8000_i2c_init();
platform_add_devices(devkit8000_devices,
ARRAY_SIZE(devkit8000_devices));
@@ -659,25 +648,15 @@ static void __init devkit8000_init(void)
spi_register_board_info(devkit8000_spi_board_info,
ARRAY_SIZE(devkit8000_spi_board_info));
- omap_serial_init();
-
- omap_dm9000_init();
-
devkit8000_ads7846_init();
- omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-
- gpio_request(170, "DVI_nPD");
- /* REVISIT leave DVI powered down until it's needed ... */
- gpio_direction_output(170, true);
-
usb_musb_init(&musb_board_data);
usb_ehci_init(&ehci_pdata);
devkit8000_flash_init();
/* Ensure SDRC pins are mux'd for self-refresh */
- omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
static void __init devkit8000_map_io(void)
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3c7789d..d55c57b 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -458,13 +458,13 @@ static struct omap_musb_board_data musb_board_data = {
};
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
- .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
- .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.phy_reset = true,
- .reset_gpio_port[0] = -EINVAL,
- .reset_gpio_port[1] = IGEP2_GPIO_USBH_NRESET,
+ .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET,
+ .reset_gpio_port[1] = -EINVAL,
.reset_gpio_port[2] = -EINVAL,
};
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index da9bcb8..3ccc34e 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -216,7 +216,7 @@ static void __init n8x0_onenand_init(void) {}
*/
#define N8X0_SLOT_SWITCH_GPIO 96
#define N810_EMMC_VSD_GPIO 23
-#define NN810_EMMC_VIO_GPIO 9
+#define N810_EMMC_VIO_GPIO 9
static int n8x0_mmc_switch_slot(struct device *dev, int slot)
{
@@ -304,10 +304,10 @@ static void n810_set_power_emmc(struct device *dev,
if (power_on) {
gpio_set_value(N810_EMMC_VSD_GPIO, 1);
msleep(1);
- gpio_set_value(NN810_EMMC_VIO_GPIO, 1);
+ gpio_set_value(N810_EMMC_VIO_GPIO, 1);
msleep(1);
} else {
- gpio_set_value(NN810_EMMC_VIO_GPIO, 0);
+ gpio_set_value(N810_EMMC_VIO_GPIO, 0);
msleep(50);
gpio_set_value(N810_EMMC_VSD_GPIO, 0);
msleep(50);
@@ -468,7 +468,7 @@ static void n8x0_mmc_cleanup(struct device *dev)
if (machine_is_nokia_n810()) {
gpio_free(N810_EMMC_VSD_GPIO);
- gpio_free(NN810_EMMC_VIO_GPIO);
+ gpio_free(N810_EMMC_VIO_GPIO);
}
}
@@ -529,7 +529,7 @@ void __init n8x0_mmc_init(void)
err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch");
if (err)
- return err;
+ return;
gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0);
@@ -537,17 +537,17 @@ void __init n8x0_mmc_init(void)
err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf");
if (err) {
gpio_free(N8X0_SLOT_SWITCH_GPIO);
- return err;
+ return;
}
gpio_direction_output(N810_EMMC_VSD_GPIO, 0);
- err = gpio_request(NN810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
+ err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
if (err) {
gpio_free(N8X0_SLOT_SWITCH_GPIO);
gpio_free(N810_EMMC_VSD_GPIO);
- return err;
+ return;
}
- gpio_direction_output(NN810_EMMC_VIO_GPIO, 0);
+ gpio_direction_output(N810_EMMC_VIO_GPIO, 0);
}
mmc_data[0] = &mmc1_data;
diff --git a/arch/arm/mach-omap2/board-sdp-flash.c b/arch/arm/mach-omap2/board-sdp-flash.c
index b1b88de..2d02632 100644
--- a/arch/arm/mach-omap2/board-sdp-flash.c
+++ b/arch/arm/mach-omap2/board-sdp-flash.c
@@ -253,20 +253,20 @@ void __init sdp_flash_init(struct flash_partitions sdp_partition_info[])
}
if (norcs > GPMC_CS_NUM)
- printk(KERN_INFO "OneNAND: Unable to find configuration "
- " in GPMC\n ");
+ printk(KERN_INFO "NOR: Unable to find configuration "
+ "in GPMC\n");
else
board_nor_init(sdp_partition_info[0], norcs);
if (onenandcs > GPMC_CS_NUM)
printk(KERN_INFO "OneNAND: Unable to find configuration "
- " in GPMC\n ");
+ "in GPMC\n");
else
board_onenand_init(sdp_partition_info[1], onenandcs);
if (nandcs > GPMC_CS_NUM)
printk(KERN_INFO "NAND: Unable to find configuration "
- " in GPMC\n ");
+ "in GPMC\n");
else
board_nand_init(sdp_partition_info[2], nandcs);
}
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index bb4018b..e15d2e8 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -96,7 +96,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
static struct platform_device zoom_debugboard_serial_device = {
.name = "serial8250",
- .id = 3,
+ .id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
},
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index ca95d8d..6b39849 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -280,7 +280,6 @@ static void enable_board_wakeup_source(void)
void __init zoom_peripherals_init(void)
{
omap_i2c_init();
- omap_serial_init();
usb_musb_init(&musb_board_data);
enable_board_wakeup_source();
}
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index d5153b6..9cba556 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -895,7 +895,7 @@ static struct clk dpll4_m4x2_ck = {
.ops = &clkops_omap2_dflt_wait,
.parent = &dpll4_m4_ck,
.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
+ .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT,
.flags = INVERT_ENABLE,
.clkdm_name = "dpll4_clkdm",
.recalc = &omap3_clkoutx2_recalc,
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 28b1079..a5c0c9c 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -2671,10 +2671,10 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcspi.1", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcspi.2", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcspi.3", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcspi.4", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index b87ad66..6e568ec 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -240,7 +240,7 @@ static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
bits = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
else
bits = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
- } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+ } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
if (enable)
bits = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
else
@@ -812,7 +812,7 @@ int omap2_clkdm_sleep(struct clockdomain *clkdm)
cm_set_mod_reg_bits(OMAP24XX_FORCESTATE,
clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
- } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+ } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_SLEEP <<
__ffs(clkdm->clktrctrl_mask));
@@ -856,7 +856,7 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm)
cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE,
clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
- } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+ } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_WAKEUP <<
__ffs(clkdm->clktrctrl_mask));
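
This file, together with powerdomain.c and prcm.c further down, replaces a bitwise OR of two CPU-class predicates with a logical OR. A generic C illustration of the difference, with made-up predicate names standing in for cpu_is_omap34xx()/cpu_is_omap44xx(): '|' unconditionally evaluates both operands and combines their numeric values, while '||' is the short-circuiting boolean test the condition actually means.

        #include <stdbool.h>
        #include <stdio.h>

        static bool is_family_a(void) { puts("  probing family A"); return true; }
        static bool is_family_b(void) { puts("  probing family B"); return false; }

        int main(void)
        {
                puts("with '|':");
                if (is_family_a() | is_family_b())      /* both probes always run */
                        puts("  matched");

                puts("with '||':");
                if (is_family_a() || is_family_b())     /* stops after the first hit */
                        puts("  matched");

                return 0;
        }
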
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 23e4d77..2271b9b 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -726,7 +726,7 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
if (!cpu_is_omap44xx())
return;
base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET;
- irq = OMAP44XX_IRQ_MMC4;
+ irq = OMAP44XX_IRQ_MMC5;
break;
default:
continue;
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 64d74f0..e57fb29 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -39,6 +39,9 @@ static int omap2_nand_gpmc_retime(void)
struct gpmc_timings t;
int err;
+ if (!gpmc_nand_data->gpmc_t)
+ return 0;
+
memset(&t, 0, sizeof(t));
t.sync_clk = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->sync_clk);
t.cs_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->cs_on);
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index ff25c7e..50fd749 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -52,7 +52,7 @@ omap_irq_base: .word 0
mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision
and \tmp, \tmp, #0x000f0000 @ only check architecture
- cmp \tmp, #0x00060000 @ is v6?
+ cmp \tmp, #0x00070000 @ is v6?
beq 2400f @ found v6 so it's omap24xx
mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision
and \tmp, \tmp, #0x000000f0 @ check cortex 8 or 9
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index aa3f65c..ef0e7a0 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -33,7 +33,7 @@
ENTRY(omap_secondary_startup)
hold: ldr r12,=0x103
dsb
- smc @ read from AuxCoreBoot0
+ smc #0 @ read from AuxCoreBoot0
mov r0, r0, lsr #9
mrc p15, 0, r4, c0, c0, 5
and r4, r4, #0x0f
@@ -52,7 +52,7 @@ ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr}
ldr r12, =0x104
dsb
- smc
+ smc #0
ldmfd sp!, {r1-r12, pc}
END(omap_modify_auxcoreboot0)
@@ -60,6 +60,6 @@ ENTRY(omap_auxcoreboot_addr)
stmfd sp!, {r2-r12, lr}
ldr r12, =0x105
dsb
- smc
+ smc #0
ldmfd sp!, {r2-r12, pc}
END(omap_auxcoreboot_addr)
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
index 89bb2b1..f61c777 100644
--- a/arch/arm/mach-omap2/omap44xx-smc.S
+++ b/arch/arm/mach-omap2/omap44xx-smc.S
@@ -27,6 +27,6 @@ ENTRY(omap_smc1)
mov r12, r0
mov r0, r1
dsb
- smc
+ smc #0
ldmfd sp!, {r2-r12, pc}
END(omap_smc1)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index c664947..e436dcb 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1511,6 +1511,9 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
c = oh->slaves[oh->_mpu_port_index]->_clk;
}
+ if (!c->clkdm)
+ return NULL;
+
return c->clkdm->pwrdm.ptr;
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 9a0fb38..ebfce7d 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -222,7 +222,7 @@ void pwrdm_init(struct powerdomain **pwrdm_list)
{
struct powerdomain **p = NULL;
- if (cpu_is_omap24xx() | cpu_is_omap34xx()) {
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
pwrstctrl_reg_offs = OMAP2_PM_PWSTCTRL;
pwrstst_reg_offs = OMAP2_PM_PWSTST;
} else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 9537f6f..07a60f1 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -123,7 +123,7 @@ struct omap3_prcm_regs prcm_context;
u32 omap_prcm_get_reset_sources(void)
{
/* XXX This presumably needs modification for 34XX */
- if (cpu_is_omap24xx() | cpu_is_omap34xx())
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
return prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
if (cpu_is_omap44xx())
return prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f;
@@ -157,7 +157,7 @@ void omap_prcm_arch_reset(char mode, const char *cmd)
else
WARN_ON(1);
- if (cpu_is_omap24xx() | cpu_is_omap34xx())
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
OMAP2_RM_RSTCTRL);
if (cpu_is_omap44xx())
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index da77930..3771254 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -115,7 +115,6 @@ static struct plat_serial8250_port serial_platform_data2[] = {
}
};
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static struct plat_serial8250_port serial_platform_data3[] = {
{
.irq = 70,
@@ -128,23 +127,12 @@ static struct plat_serial8250_port serial_platform_data3[] = {
}
};
-static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals)
-{
- serial_platform_data3[0].mapbase = omap2_globals->uart4_phys;
-}
-#else
-static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals)
-{
-}
-#endif
-
void __init omap2_set_globals_uart(struct omap_globals *omap2_globals)
{
serial_platform_data0[0].mapbase = omap2_globals->uart1_phys;
serial_platform_data1[0].mapbase = omap2_globals->uart2_phys;
serial_platform_data2[0].mapbase = omap2_globals->uart3_phys;
- if (cpu_is_omap3630() || cpu_is_omap44xx())
- omap2_set_globals_uart4(omap2_globals);
+ serial_platform_data3[0].mapbase = omap2_globals->uart4_phys;
}
static inline unsigned int __serial_read_reg(struct uart_port *up,
@@ -550,7 +538,7 @@ static ssize_t sleep_timeout_store(struct device *dev,
unsigned int value;
if (sscanf(buf, "%u", &value) != 1) {
- printk(KERN_ERR "sleep_timeout_store: Invalid value\n");
+ dev_err(dev, "sleep_timeout_store: Invalid value\n");
return -EINVAL;
}
@@ -664,27 +652,33 @@ void __init omap_serial_early_init(void)
struct device *dev = &pdev->dev;
struct plat_serial8250_port *p = dev->platform_data;
+ /* Don't map zero-based physical address */
+ if (p->mapbase == 0) {
+ dev_warn(dev, "no physical address for uart#%d,"
+ " so skipping early_init...\n", i);
+ continue;
+ }
/*
* Module 4KB + L4 interconnect 4KB
* Static mapping, never released
*/
p->membase = ioremap(p->mapbase, SZ_8K);
if (!p->membase) {
- printk(KERN_ERR "ioremap failed for uart%i\n", i + 1);
+ dev_err(dev, "ioremap failed for uart%i\n", i + 1);
continue;
}
sprintf(name, "uart%d_ick", i + 1);
uart->ick = clk_get(NULL, name);
if (IS_ERR(uart->ick)) {
- printk(KERN_ERR "Could not get uart%d_ick\n", i + 1);
+ dev_err(dev, "Could not get uart%d_ick\n", i + 1);
uart->ick = NULL;
}
sprintf(name, "uart%d_fck", i+1);
uart->fck = clk_get(NULL, name);
if (IS_ERR(uart->fck)) {
- printk(KERN_ERR "Could not get uart%d_fck\n", i + 1);
+ dev_err(dev, "Could not get uart%d_fck\n", i + 1);
uart->fck = NULL;
}
@@ -727,6 +721,13 @@ void __init omap_serial_init_port(int port)
pdev = &uart->pdev;
dev = &pdev->dev;
+ /* Don't proceed if there's no clocks available */
+ if (unlikely(!uart->ick || !uart->fck)) {
+ WARN(1, "%s: can't init uart%d, no clocks available\n",
+ kobject_name(&dev->kobj), port);
+ return;
+ }
+
omap_uart_enable_clocks(uart);
omap_uart_reset(uart);
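
Among other things the serial.c hunks above replace bare printk(KERN_ERR ...) calls with dev_err()/dev_warn(). A small sketch of what that buys (the device and message here are invented): the dev_* helpers prefix the text with the driver and device name, so the log line says which uart instance is complaining.

        #include <linux/device.h>
        #include <linux/kernel.h>

        static void report_missing_clock(struct device *dev, int id)
        {
                /* printk() emits only the text passed in. */
                printk(KERN_ERR "Could not get uart%d_fck\n", id);

                /* dev_err() also names the driver and device, producing
                 * something like "serial8250 serial8250.0: Could not get
                 * uart1_fck" (exact prefix depends on the device).
                 */
                dev_err(dev, "Could not get uart%d_fck\n", id);
        }
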
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4de..f55fa10 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (page_address(to) != NULL)
-#endif
- __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+ __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc..13fa536 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
vaddr += offset;
op(vaddr, len, dir);
kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ vaddr = kmap_high_l1_vipt(page, &saved_pte);
+ op(vaddr + offset, len, dir);
+ kunmap_high_l1_vipt(page, saved_pte);
}
} else {
vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095..c6844cb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
+#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
- void *addr = page_address(page);
-
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernels mapping.
*/
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (addr)
-#endif
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ if (!PageHighMem(page)) {
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+ } else {
+ void *addr = kmap_high_get(page);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ addr = kmap_high_l1_vipt(page, &saved_pte);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high_l1_vipt(page, saved_pte);
+ }
+ }
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7..77b030f 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupt disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ if (!in_interrupt())
+ preempt_disable();
+
+ raw_local_irq_save(flags);
+ (*depth)++;
+ if (pte_val(*ptep) == pte_val(pte)) {
+ *saved_pte = pte;
+ } else {
+ *saved_pte = *ptep;
+ set_pte_ext(ptep, pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ BUG_ON(pte_val(*ptep) != pte_val(pte));
+ BUG_ON(*depth <= 0);
+
+ raw_local_irq_save(flags);
+ (*depth)--;
+ if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+ set_pte_ext(ptep, saved_pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ if (!in_interrupt())
+ preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
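
The dma-mapping.c and flush.c hunks earlier in this patch are the consumers of the two helpers added above. A condensed sketch of the calling pattern, modelled on those callers rather than written from scratch: map the highmem page through this CPU's KM_L1_CACHE fixmap slot, perform the cache maintenance on that virtual alias, then hand back whatever PTE a possibly interrupted user had installed.

        #include <linux/highmem.h>
        #include <asm/cacheflush.h>

        /* Condensed from the flush.c/dma-mapping.c callers; error paths omitted. */
        static void flush_highmem_page_vipt(struct page *page)
        {
                pte_t saved_pte;
                void *vaddr;

                /* Installs (or reuses) this CPU's KM_L1_CACHE fixmap entry. */
                vaddr = kmap_high_l1_vipt(page, &saved_pte);

                __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);

                /* Restores the interrupted user's PTE only when one is pending. */
                kunmap_high_l1_vipt(page, saved_pte);
        }
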
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d08..241c24a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
pgd_t *pgd;
int i;
- if (current->mm && current->mm->pgd)
- pgd = current->mm->pgd;
- else
- pgd = init_mm.pgd;
+ /*
+ * We need to access to user-mode page tables here. For kernel threads
+ * we don't have any user-mode mappings so we use the context that we
+ * "borrowed".
+ */
+ pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
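
A toy model of the distinction the new comment above relies on (the structures below are invented stand-ins, not the kernel's task_struct/mm_struct): a kernel thread has no mm of its own, so code that needs page tables during reboot must follow active_mm, the context borrowed from the last user task, instead of falling back to init_mm.

        #include <stdio.h>

        struct mm { const char *name; };

        struct task {
                struct mm *mm;          /* NULL for kernel threads */
                struct mm *active_mm;   /* always valid: own mm or a borrowed one */
        };

        int main(void)
        {
                struct mm user_mm = { "user process" };
                struct task kthread = { .mm = NULL, .active_mm = &user_mm };

                /* The removed code fell back to init_mm here; active_mm is the
                 * context the CPU is actually running with. */
                printf("using page tables of: %s\n", kthread.active_mm->name);
                return 0;
        }
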
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 088c1a0..f12f0e3 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -44,9 +44,6 @@
#define NO_LENGTH_CHECK 0xffffffff
-unsigned char omap_bootloader_tag[512];
-int omap_bootloader_tag_len;
-
struct omap_board_config_kernel *omap_board_config;
int omap_board_config_size;
@@ -100,10 +97,17 @@ EXPORT_SYMBOL(omap_get_var_config);
#include <linux/clocksource.h>
+/*
+ * offset_32k holds the init time counter value. It is then subtracted
+ * from every counter read to achieve a counter that counts time from the
+ * kernel boot (needed for sched_clock()).
+ */
+static u32 offset_32k __read_mostly;
+
#ifdef CONFIG_ARCH_OMAP16XX
static cycle_t omap16xx_32k_read(struct clocksource *cs)
{
- return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED);
+ return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
}
#else
#define omap16xx_32k_read NULL
@@ -112,7 +116,7 @@ static cycle_t omap16xx_32k_read(struct clocksource *cs)
#ifdef CONFIG_ARCH_OMAP2420
static cycle_t omap2420_32k_read(struct clocksource *cs)
{
- return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10);
+ return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap2420_32k_read NULL
@@ -121,7 +125,7 @@ static cycle_t omap2420_32k_read(struct clocksource *cs)
#ifdef CONFIG_ARCH_OMAP2430
static cycle_t omap2430_32k_read(struct clocksource *cs)
{
- return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10);
+ return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap2430_32k_read NULL
@@ -130,7 +134,7 @@ static cycle_t omap2430_32k_read(struct clocksource *cs)
#ifdef CONFIG_ARCH_OMAP3
static cycle_t omap34xx_32k_read(struct clocksource *cs)
{
- return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10);
+ return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap34xx_32k_read NULL
@@ -139,7 +143,7 @@ static cycle_t omap34xx_32k_read(struct clocksource *cs)
#ifdef CONFIG_ARCH_OMAP4
static cycle_t omap44xx_32k_read(struct clocksource *cs)
{
- return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10);
+ return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap44xx_32k_read NULL
@@ -227,6 +231,8 @@ static int __init omap_init_clocksource_32k(void)
clocksource_32k.mult = clocksource_hz2mult(32768,
clocksource_32k.shift);
+ offset_32k = clocksource_32k.read(&clocksource_32k);
+
if (clocksource_register(&clocksource_32k))
printk(err, clocksource_32k.name);
}
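
A standalone sketch of the offset trick added above, with the hardware register replaced by a plain variable: the 32 kHz sync counter free-runs from power-on, so its value is latched once at clocksource registration and subtracted from every later read, making the reported count start near zero at kernel boot as sched_clock() expects.

        #include <stdint.h>
        #include <stdio.h>

        static volatile uint32_t hw_counter;    /* stand-in for the 32K sync register */
        static uint32_t offset_32k;             /* latched once at init time */

        static uint32_t clocksource_32k_read(void)
        {
                /* Unsigned subtraction also behaves when the counter wraps. */
                return hw_counter - offset_32k;
        }

        int main(void)
        {
                hw_counter = 123456;            /* counter has run since power-on */
                offset_32k = hw_counter;        /* init latches "time zero" */

                hw_counter += 32768;            /* one second later (32768 ticks) */
                printf("ticks since boot: %u\n", clocksource_32k_read());
                return 0;
        }
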
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5c6c342..1d95996 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -937,6 +937,15 @@ void omap_start_dma(int lch)
{
u32 l;
+ /*
+ * The CPC/CDAC register needs to be initialized to zero
+ * before starting dma transfer.
+ */
+ if (cpu_is_omap15xx())
+ dma_write(0, CPC(lch));
+ else
+ dma_write(0, CDAC(lch));
+
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch;
char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 76a347b..45a225d 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -798,7 +798,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_MPUIO:
reg += OMAP_MPUIO_GPIO_INT_EDGE;
l = __raw_readl(reg);
- if (trigger & IRQ_TYPE_EDGE_BOTH)
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
@@ -812,7 +812,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_INT_CONTROL;
l = __raw_readl(reg);
- if (trigger & IRQ_TYPE_EDGE_BOTH)
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
@@ -846,7 +846,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_7XX:
reg += OMAP7XX_GPIO_INT_CONTROL;
l = __raw_readl(reg);
- if (trigger & IRQ_TYPE_EDGE_BOTH)
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
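
A tiny standalone check of the condition change repeated in the three hunks above (the flag values follow the kernel's IRQ_TYPE_* encoding): because IRQ_TYPE_EDGE_BOTH is just RISING | FALLING, testing it with '&' also fires for a single-edge request, whereas masking with IRQ_TYPE_SENSE_MASK and comparing for equality only fires when both edges were asked for.

        #include <stdio.h>

        #define IRQ_TYPE_EDGE_RISING    0x1
        #define IRQ_TYPE_EDGE_FALLING   0x2
        #define IRQ_TYPE_EDGE_BOTH      (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)
        #define IRQ_TYPE_SENSE_MASK     0xf

        int main(void)
        {
                unsigned int trigger = IRQ_TYPE_EDGE_RISING;    /* single edge only */

                if (trigger & IRQ_TYPE_EDGE_BOTH)
                        puts("old test: wrongly treated as both-edge");

                if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
                        puts("new test: both-edge");
                else
                        puts("new test: not both-edge");

                return 0;
        }
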
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index b65088a..4017019 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -345,8 +345,6 @@
#define INT_34XX_MMC3_IRQ 94
#define INT_34XX_GPT12_IRQ 95
-#define INT_34XX_BENCH_MPU_EMUL 3
-
#define INT_35XX_HECC0_IRQ 24
#define INT_35XX_HECC1_IRQ 28
#define INT_35XX_EMAC_C0_RXTHRESH_IRQ 67
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index 3974835..7de903d 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -59,7 +59,7 @@
#define OMAP44XX_MCBSP1_BASE 0x49022000
#define OMAP44XX_MCBSP2_BASE 0x49024000
#define OMAP44XX_MCBSP3_BASE 0x49026000
-#define OMAP44XX_MCBSP4_BASE 0x48074000
+#define OMAP44XX_MCBSP4_BASE 0x48096000
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index 6ba88d2..f8efd546 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -29,4 +29,11 @@ struct omap_nand_platform_data {
/* size (4 KiB) for IO mapping */
#define NAND_IO_SIZE SZ_4K
+#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
extern int gpmc_nand_init(struct omap_nand_platform_data *d);
+#else
+static inline int gpmc_nand_init(struct omap_nand_platform_data *d)
+{
+ return 0;
+}
+#endif
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index 2302474..b3ef1a7 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -32,7 +32,7 @@
#define OMAP4430_PRM_BASE 0x4a306000
#define OMAP44XX_GPMC_BASE 0x50000000
#define OMAP443X_SCM_BASE 0x4a002000
-#define OMAP443X_CTRL_BASE OMAP443X_SCM_BASE
+#define OMAP443X_CTRL_BASE 0x4a100000
#define OMAP44XX_IC_BASE 0x48200000
#define OMAP44XX_IVA_INTC_BASE 0x40000000
#define IRQ_SIR_IRQ 0x0040
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 440b416..36d6ea5 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -294,8 +294,8 @@ struct omap_hwmod_class_sysconfig {
u16 rev_offs;
u16 sysc_offs;
u16 syss_offs;
+ u16 sysc_flags;
u8 idlemodes;
- u8 sysc_flags;
u8 clockact;
struct omap_hwmod_sysc_fields *sysc_fields;
};
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb9..315a540 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
- /*
- * On SMP systems, the VFP state is automatically saved at every
- * context switch. We mark the thread VFP state as belonging to a
- * non-existent CPU so that the saved one will be reloaded when
- * needed.
- */
- thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
last_VFP_context[cpu] = NULL;
}
+#ifdef CONFIG_SMP
+ /*
+ * For SMP we still have to take care of the case where the thread
+ * migrates to another CPU and then back to the original CPU on which
+ * the last VFP user is still the same thread. Mark the thread VFP
+ * state as belonging to a non-existent CPU so that the saved one will
+ * be reloaded in the above case.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
put_cpu();
}
-#endif
#include <linux/smp.h>
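
A toy model of the lazy-ownership trick the new CONFIG_SMP branch relies on (the structure and helper below are invented for the illustration): the per-thread record of which CPU last held the VFP registers is set to an impossible CPU number, so the next ownership check fails and the saved state gets reloaded after a migrate-away-and-back.

        #include <stdbool.h>
        #include <stdio.h>

        #define NR_CPUS 4

        struct vfp_state {
                unsigned int cpu;       /* CPU whose registers match this state */
        };

        static bool state_is_live(const struct vfp_state *st, unsigned int this_cpu)
        {
                return st->cpu == this_cpu;
        }

        int main(void)
        {
                struct vfp_state st = { .cpu = 2 };

                printf("live on cpu2? %d\n", state_is_live(&st, 2));

                st.cpu = NR_CPUS;       /* impossible id: force a reload next time */
                printf("live on cpu2? %d\n", state_is_live(&st, 2));
                return 0;
        }
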
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index dd5b882..5e73c25 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -28,7 +28,7 @@ static struct pt_regs *get_user_regs(struct task_struct *tsk)
THREAD_SIZE - sizeof(struct pt_regs));
}
-static void user_enable_single_step(struct task_struct *tsk)
+void user_enable_single_step(struct task_struct *tsk)
{
pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n",
tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr);
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 73c5c2b..7f3c0a2 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
{
struct kvm_memory_slot *memslot;
int r, i;
- long n, base;
+ long base;
+ unsigned long n;
unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
memslot = &kvm->memslots->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
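
This hunk and the book3s.c hunk below both switch the dirty-bitmap size from an open-coded int expression to kvm_dirty_bitmap_bytes() held in an unsigned long. A standalone sketch of the same computation (BITS_PER_LONG pinned to 64 for the example): one bit per guest page, rounded up to whole longs, kept in a type wide enough that a very large memslot cannot overflow it.

        #include <stdio.h>

        #define BITS_PER_LONG   64
        #define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

        /* One dirty bit per guest page, rounded up to whole longs, in bytes. */
        static unsigned long dirty_bitmap_bytes(unsigned long npages)
        {
                return ALIGN(npages, BITS_PER_LONG) / 8;
        }

        int main(void)
        {
                /* An exaggerated slot size: the byte count no longer fits in an int. */
                unsigned long npages = 1UL << 36;

                printf("dirty bitmap needs %lu bytes\n", dirty_bitmap_bytes(npages));
                return 0;
        }
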
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index 88b7af2..d9d2ed6 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
char c;
- __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+ __asm__ __volatile__("subl %2,%1; seq %0"
+ : "=d" (c), "+m" (*v)
+ : "id" (i));
return c != 0;
}
static inline int atomic_add_negative(int i, atomic_t *v)
{
char c;
- __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+ __asm__ __volatile__("addl %2,%1; smi %0"
+ : "=d" (c), "+m" (*v)
+ : "id" (i));
return c != 0;
}
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index ef22938..01a8716 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -212,5 +212,10 @@ struct mcf_platform_uart {
#define MCFUART_URF_RXS 0xc0 /* Receiver status */
#endif
+#if defined(CONFIG_M5272)
+#define MCFUART_TXFIFOSIZE 25
+#else
+#define MCFUART_TXFIFOSIZE 1
+#endif
/****************************************************************************/
#endif /* mcfuart_h */
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index 1320eaa..a29dd74 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -17,13 +17,11 @@ struct sigcontext {
#ifndef __uClinux__
# ifdef __mcoldfire__
unsigned long sc_fpregs[2][2]; /* room for two fp registers */
- unsigned long sc_fpcntl[3];
- unsigned char sc_fpstate[16+6*8];
# else
unsigned long sc_fpregs[2*3]; /* room for two fp registers */
+# endif
unsigned long sc_fpcntl[3];
unsigned char sc_fpstate[216];
-# endif
#endif
};
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index ce404bc..1404257 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200)
+cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index 56043ad..aff6f57 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal)
trap #0
ENTRY(ret_from_user_rt_signal)
- move #__NR_rt_sigreturn,%d0
+ movel #__NR_rt_sigreturn,%d0
trap #0
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 1143f77..6f22970 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -107,7 +107,6 @@ void init_IRQ(void)
_ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */
_ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */
_ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */
- _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* reserved */
_ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */
_ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */
_ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 25da07f..604af29 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
int is_dirty = 0;
- int r, n;
+ int r;
+ unsigned long n;
mutex_lock(&kvm->slots_lock);
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm_for_each_vcpu(n, vcpu, kvm)
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 4a76d94..533f357 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -29,6 +29,7 @@ struct vdso_data {
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
__u32 ectg_available;
+ __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 08db736..a094089 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -61,6 +61,7 @@ int main(void)
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+ DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index b354427..c56d3f5 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -256,6 +256,9 @@ restore_registers:
lghi %r2,0
brasl %r14,arch_set_page_states
+ /* Reinitialize the channel subsystem */
+ brasl %r14,channel_subsystem_reinit
+
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fba6dec..d906bf1 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->ntp_mult = mult;
smp_wmb();
++vdso_data->tb_update_count;
}
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 4a98909..9696439 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
ahi %r0,-1
-2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
lr %r2,%r0
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 3f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
3: alr %r0,%r2
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
-12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
lr %r2,%r0
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 13f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
13: alr %r0,%r2
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index ad8acfc..2d36331 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
-3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
st %r0,24(%r15)
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 4f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
4: al %r0,24(%r15)
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 49106c6..f404678 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index f873e75..36ee674 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
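
All of the s390 vdso hunks replace the hard-coded factor 1000 with a multiplier the kernel now publishes from update_vsyscall(). A compact C sketch of the conversion the assembly performs (the delta value is invented): one TOD unit is 1/4096 microsecond, so nanoseconds = delta * mult >> 12, where mult hovers around the nominal 1000 and is nudged as NTP steers the clock.

        #include <stdint.h>
        #include <stdio.h>

        /* Refreshed by the kernel in update_vsyscall(); 1000 is the nominal value. */
        static uint32_t ntp_mult = 1000;

        static uint64_t tod_delta_to_ns(uint64_t tod_delta)
        {
                /* One TOD unit is 1/4096 us, so ns = delta * mult >> 12. */
                return (tod_delta * ntp_mult) >> 12;
        }

        int main(void)
        {
                uint64_t delta = 4096000;       /* roughly one millisecond of TOD ticks */

                printf("%llu ns\n", (unsigned long long)tod_delta_to_ns(delta));
                return 0;
        }
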
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db5136..9908d47 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,9 @@ config SPARC64
def_bool 64BIT
select ARCH_SUPPORTS_MSI
select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FP_TEST
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KRETPROBES
select HAVE_KPROBES
select HAVE_LMB
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889..1b4a831 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
bool "D-cache flush debugging"
depends on SPARC64 && DEBUG_KERNEL
-config STACK_DEBUG
- bool "Stack Overflow Detection Support"
-
config MCOUNT
bool
depends on SPARC64
- depends on STACK_DEBUG || FUNCTION_TRACER
+ depends on FUNCTION_TRACER
default y
config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d..050ef35 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@ typedef struct {
unsigned int __nmi_count;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
- unsigned int __pad1;
+ unsigned int irq0_irqs;
unsigned int __pad2;
/* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf9..bfa1ea4 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
*/
static inline unsigned long __raw_local_irq_save(void)
{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_disable();
+ unsigned long flags, tmp;
+
+ /* Disable interrupts to PIL_NORMAL_MAX unless we already
+ * are using PIL_NMI, in which case PIL_NMI is retained.
+ *
+ * The only values we ever program into the %pil are 0,
+ * PIL_NORMAL_MAX and PIL_NMI.
+ *
+ * Since PIL_NMI is the largest %pil value and all bits are
+ * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+ * actually is.
+ */
+ __asm__ __volatile__(
+ "rdpr %%pil, %0\n\t"
+ "or %0, %2, %1\n\t"
+ "wrpr %1, 0x0, %%pil"
+ : "=r" (flags), "=r" (tmp)
+ : "i" (PIL_NORMAL_MAX)
+ : "memory"
+ );
return flags;
}
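
A plain-C sketch of the policy the new inline assembly implements, using the kernel's PIL values but an ordinary variable in place of the %pil register: the saved flags are the current level, and the new level is that value OR-ed with PIL_NORMAL_MAX, so normal code is raised to PIL_NORMAL_MAX while a context already at PIL_NMI (0xf, all bits set) keeps PIL_NMI.

        #include <stdio.h>

        #define PIL_NORMAL_MAX  0xe
        #define PIL_NMI         0xf

        /* Models the rdpr/or/wrpr sequence on a plain variable. */
        static unsigned long local_irq_save_pil(unsigned long *pil)
        {
                unsigned long flags = *pil;             /* rdpr %pil */

                *pil = flags | PIL_NORMAL_MAX;          /* or + wrpr */
                return flags;
        }

        int main(void)
        {
                unsigned long pil, saved;

                pil = 0;                                /* interrupts fully enabled */
                saved = local_irq_save_pil(&pil);
                printf("saved 0x%lx, now running at 0x%lx\n", saved, pil);

                pil = PIL_NMI;                          /* already inside the NMI handler */
                saved = local_irq_save_pil(&pil);
                printf("saved 0x%lx, now running at 0x%lx\n", saved, pil);
                return 0;
        }
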
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d944..4827a3a 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */
-#define PREEMPT_ACTIVE 0x4000000
+#define PREEMPT_ACTIVE 0x10000000
/*
* macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c631614..0c2dc1f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -13,6 +13,14 @@ extra-y += init_task.o
CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
extra-y += vmlinux.lds
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_EARLYFB) += btext.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56..03ab022 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
- static u32 call;
+ u32 call;
s32 off;
off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ u32 old, new;
+
+ old = *(u32 *) &ftrace_graph_call;
+ new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ u32 old, new;
+
+ old = *(u32 *) &ftrace_graph_call;
+ new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+ unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long) &return_to_handler;
+ struct ftrace_graph_ent trace;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return parent + 8UL;
+
+ if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY)
+ return parent + 8UL;
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ return parent + 8UL;
+ }
+
+ return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb9..830d70a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,9 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/ftrace.h>
#include <linux/irq.h>
+#include <linux/kmemleak.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -45,6 +47,7 @@
#include "entry.h"
#include "cpumap.h"
+#include "kstack.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
return 0;
+
+ /* The only reference we store to the IRQ bucket is
+ * by physical address which kmemleak can't see, tell
+ * it that this object explicitly is not a leak and
+ * should be scanned.
+ */
+ kmemleak_not_leak(bucket);
+
__flush_dcache_range((unsigned long) bucket,
((unsigned long) bucket +
sizeof(struct ino_bucket)));
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
- void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
- __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
- if (orig_sp < sp ||
- orig_sp > (sp + THREAD_SIZE)) {
- sp += THREAD_SIZE - 192 - STACK_BIAS;
- __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
- }
-
- return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
- __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd4..0a2bd0f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/ftrace.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
}
#ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
{
unsigned long flags;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283..53dfb92 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
}
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
#endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62..a4bd7ba 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
#include <asm/ptrace.h>
#include <asm/pcr.h>
+#include "kstack.h"
+
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
- int cpu = smp_processor_id();
+ void *orig_sp;
clear_softint(1 << irq);
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
nmi_enter();
+ orig_sp = set_hardirq_stack();
+
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
else
pcr_ops->write(PCR_PIC_PRIV);
- sum = kstat_irqs_cpu(0, cpu);
+ sum = local_cpu_data().irq0_irqs;
if (__get_cpu_var(nmi_touch)) {
__get_cpu_var(nmi_touch) = 0;
touched = 1;
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
pcr_ops->write(pcr_enable);
}
+ restore_hardirq_stack(orig_sp);
+
nmi_exit();
}
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658..8a00058 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
if (!rp) {
- prom_printf("Cannot allocate IOMMU resource.\n");
- prom_halt();
+ pr_info("%s: Cannot allocate IOMMU resource.\n",
+ pbm->name);
+ return;
}
rp->name = "IOMMU";
rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
rp->flags = IORESOURCE_BUSY;
- request_resource(&pbm->mem_space, rp);
+ if (request_resource(&pbm->mem_space, rp)) {
+ pr_info("%s: Unable to request IOMMU resource.\n",
+ pbm->name);
+ kfree(rp);
+ }
}
}
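
A minimal sketch of the error-handling shape the hunk above moves to (names and the message text are shortened): a failed kzalloc() or a rejected request_resource() is reported and survived instead of halting in the PROM, and the resource struct is freed again when the insertion is refused.

        #include <linux/ioport.h>
        #include <linux/kernel.h>
        #include <linux/slab.h>

        static void register_region(struct resource *parent,
                                    resource_size_t start, resource_size_t size)
        {
                struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);

                if (!rp) {
                        pr_info("cannot allocate resource\n");
                        return;                         /* keep booting */
                }

                rp->name  = "IOMMU";
                rp->start = start;
                rp->end   = start + size - 1;
                rp->flags = IORESOURCE_BUSY;

                if (request_resource(parent, rp)) {
                        pr_info("unable to request resource\n");
                        kfree(rp);                      /* nothing holds it now */
                }
        }
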
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a..c4a6a50 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <linux/ftrace.h>
#include <asm/pil.h>
#include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
* Therefore in such situations we defer the work by signalling
* a lower level cpu IRQ.
*/
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873..090b9e9 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
nop
call trace_hardirqs_on
nop
- wrpr %l4, %pil
+ /* Do not actually set the %pil here. We will do that
+ * below after we clear PSTATE_IE in the %pstate register.
+ * If we re-enable interrupts here, we can recurse down
+ * the hardirq stack potentially endlessly, causing a
+ * stack overflow.
+ *
+ * It is tempting to put this test and trace_hardirqs_on
+ * call at the 'rt_continue' label, but that will not work
+ * as that path hits unconditionally and we do not want to
+ * execute this in NMI return paths, for example.
+ */
#endif
rtrap_no_irq_enable:
andcc %l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c53345..b6a2b8f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
+#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
&cpumask_of_cpu(cpu));
}
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
generic_smp_call_function_interrupt();
}
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
put_cpu();
}
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
struct mm_struct *mm;
unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
*/
extern void prom_world(int);
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
&cpumask_of_cpu(cpu));
}
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
}
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e1651..c7bbe6cf 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/ftrace.h>
#include <asm/oplib.h>
#include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
irq_enter();
+ local_cpu_data().irq0_irqs++;
kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2..9da57f0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2203,27 +2203,6 @@ void dump_stack(void)
EXPORT_SYMBOL(dump_stack);
-static inline int is_kernel_stack(struct task_struct *task,
- struct reg_window *rw)
-{
- unsigned long rw_addr = (unsigned long) rw;
- unsigned long thread_base, thread_end;
-
- if (rw_addr < PAGE_OFFSET) {
- if (task != &init_task)
- return 0;
- }
-
- thread_base = (unsigned long) task_stack_page(task);
- thread_end = thread_base + sizeof(union thread_union);
- if (rw_addr >= thread_base &&
- rw_addr < thread_end &&
- !(rw_addr & 0x7UL))
- return 1;
-
- return 0;
-}
-
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
show_regs(regs);
add_taint(TAINT_DIE);
if (regs->tstate & TSTATE_PRIV) {
+ struct thread_info *tp = current_thread_info();
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
* find some badly aligned kernel stack.
*/
while (rw &&
- count++ < 30&&
- is_kernel_stack(current, rw)) {
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
printk("Caller[%016lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce430..c752c4c 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
- die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+ die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
- int size = decode_access_size(insn);
+ int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e59925..0c1e678 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,11 +46,16 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
*(.gnu.warning)
} = 0
_etext = .;
RO_DATA(PAGE_SIZE)
+
+ /* Start of data section */
+ _sdata = .;
+
.data1 : {
*(.data1)
}
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12..3ad6cbd 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -7,26 +7,11 @@
#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
/*
* This is the main variant and is called by C code. GCC's -pg option
* automatically instruments every C function with a call to this.
*/
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE 4096 /* lets hope this is enough */
-
- .data
- .align 8
-panicstring:
- .asciz "Stack overflow\n"
- .align 8
-ovstack:
- .skip OVSTACKSIZE
-#endif
.text
.align 32
.globl _mcount
@@ -35,84 +20,48 @@ ovstack:
.type mcount,#function
_mcount:
mcount:
-#ifdef CONFIG_STACK_DEBUG
- /*
- * Check whether %sp is dangerously low.
- */
- ldub [%g6 + TI_FPDEPTH], %g1
- srl %g1, 1, %g3
- add %g3, 1, %g3
- sllx %g3, 8, %g3 ! each fpregs frame is 256b
- add %g3, 192, %g3
- add %g6, %g3, %g3 ! where does task_struct+frame end?
- sub %g3, STACK_BIAS, %g3
- cmp %sp, %g3
- bg,pt %xcc, 1f
- nop
- lduh [%g6 + TI_CPU], %g1
- sethi %hi(hardirq_stack), %g3
- or %g3, %lo(hardirq_stack), %g3
- sllx %g1, 3, %g1
- ldx [%g3 + %g1], %g7
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- bleu,pt %xcc, 2f
- sethi %hi(THREAD_SIZE), %g3
- add %g7, %g3, %g7
- cmp %sp, %g7
- blu,pn %xcc, 1f
-2: sethi %hi(softirq_stack), %g3
- or %g3, %lo(softirq_stack), %g3
- ldx [%g3 + %g1], %g7
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- bleu,pt %xcc, 3f
- sethi %hi(THREAD_SIZE), %g3
- add %g7, %g3, %g7
- cmp %sp, %g7
- blu,pn %xcc, 1f
- nop
- /* If we are already on ovstack, don't hop onto it
- * again, we are already trying to output the stack overflow
- * message.
- */
-3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
- or %g7, %lo(ovstack), %g7
- add %g7, OVSTACKSIZE, %g3
- sub %g3, STACK_BIAS + 192, %g3
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- blu,pn %xcc, 2f
- cmp %sp, %g3
- bleu,pn %xcc, 1f
- nop
-2: mov %g3, %sp
- sethi %hi(panicstring), %g3
- call prom_printf
- or %g3, %lo(panicstring), %o0
- call prom_halt
- nop
-1:
-#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
- mov %o7, %o0
- .globl mcount_call
-mcount_call:
- call ftrace_stub
- mov %o0, %o7
+ /* Do nothing, the retl/nop below is all we need. */
#else
- sethi %hi(ftrace_trace_function), %g1
+ sethi %hi(function_trace_stop), %g1
+ lduw [%g1 + %lo(function_trace_stop)], %g2
+ brnz,pn %g2, 2f
+ sethi %hi(ftrace_trace_function), %g1
sethi %hi(ftrace_stub), %g2
ldx [%g1 + %lo(ftrace_trace_function)], %g1
or %g2, %lo(ftrace_stub), %g2
cmp %g1, %g2
be,pn %icc, 1f
- mov %i7, %o1
- jmpl %g1, %g0
- mov %o7, %o0
+ mov %i7, %g3
+ save %sp, -176, %sp
+ mov %g3, %o1
+ jmpl %g1, %o7
+ mov %i7, %o0
+ ret
+ restore
/* not reached */
1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ sethi %hi(ftrace_graph_return), %g1
+ ldx [%g1 + %lo(ftrace_graph_return)], %g3
+ cmp %g2, %g3
+ bne,pn %xcc, 5f
+ sethi %hi(ftrace_graph_entry_stub), %g2
+ sethi %hi(ftrace_graph_entry), %g1
+ or %g2, %lo(ftrace_graph_entry_stub), %g2
+ ldx [%g1 + %lo(ftrace_graph_entry)], %g1
+ cmp %g1, %g2
+ be,pt %xcc, 2f
+ nop
+5: mov %i7, %g2
+ mov %fp, %g3
+ save %sp, -176, %sp
+ mov %g2, %l0
+ ba,pt %xcc, ftrace_graph_caller
+ mov %g3, %l1
+#endif
+2:
#endif
#endif
retl
@@ -131,14 +80,50 @@ ftrace_stub:
.globl ftrace_caller
.type ftrace_caller,#function
ftrace_caller:
- mov %i7, %o1
- mov %o7, %o0
+ sethi %hi(function_trace_stop), %g1
+ mov %i7, %g2
+ lduw [%g1 + %lo(function_trace_stop)], %g1
+ brnz,pn %g1, ftrace_stub
+ mov %fp, %g3
+ save %sp, -176, %sp
+ mov %g2, %o1
+ mov %g2, %l0
+ mov %g3, %l1
.globl ftrace_call
ftrace_call:
call ftrace_stub
- mov %o0, %o7
- retl
+ mov %i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ call ftrace_stub
nop
+#endif
+ ret
+ restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .size ftrace_graph_call,.-ftrace_graph_call
+#endif
+ .size ftrace_call,.-ftrace_call
.size ftrace_caller,.-ftrace_caller
#endif
#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ mov %l0, %o0
+ mov %i7, %o1
+ call prepare_ftrace_return
+ mov %l1, %o2
+ ret
+ restore %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+ save %sp, -176, %sp
+ call ftrace_return_to_handler
+ mov %fp, %o0
+ jmpl %o0 + 8, %g0
+ restore
+END(return_to_handler)
+#endif
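
The comment retained at the top of mcount.S spells out the mechanism: gcc's -pg option plants a call to _mcount in every C function, and the rewritten stub above only does real work once something other than ftrace_stub is registered as the tracer. A rough user-space sketch of that control flow, using invented names (my_mcount, trace_hook and print_tracer are illustrations, not kernel interfaces):

	#include <stdio.h>

	/* NULL plays the role of ftrace_stub: "no tracer registered". */
	static void (*trace_hook)(const char *fn);

	static void my_mcount(const char *fn)
	{
		if (trace_hook)			/* like the cmp against ftrace_stub above */
			trace_hook(fn);		/* like jumping through ftrace_trace_function */
	}

	static void print_tracer(const char *fn)
	{
		printf("entered %s\n", fn);
	}

	static void do_work(void)
	{
		my_mcount(__func__);		/* the call -pg would insert automatically */
		/* ... real function body ... */
	}

	int main(void)
	{
		do_work();			/* no tracer: the hook falls straight through */
		trace_hook = print_tracer;
		do_work();			/* prints "entered do_work" */
		return 0;
	}

With dynamic ftrace the patch goes one step further: the call site itself is patched out when tracing is off, so not even the stub check runs.
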
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 64cda95..7a656bd 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -6,6 +6,7 @@
#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
+#include "linux/slab.h"
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 06d6ccf..b6b1096 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,7 +8,6 @@
#include <errno.h>
#include <sched.h>
#include <linux/limits.h>
-#include <linux/slab.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include "kern_constants.h"
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 59b4556..e790bc1 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -626,7 +626,7 @@ ia32_sys_call_table:
.quad stub32_sigreturn
.quad stub32_clone /* 120 */
.quad sys_setdomainname
- .quad sys_uname
+ .quad sys_newuname
.quad sys_modify_ldt
.quad compat_sys_adjtimex
.quad sys32_mprotect /* 125 */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4..86a0ff0 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
#define _ASM_X86_AMD_IOMMU_TYPES_H
#include <linux/types.h>
+#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -140,6 +141,7 @@
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
spinlock_t lock; /* mostly used to lock the page table*/
+ struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8..b60f292 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -28,22 +28,39 @@
#ifndef __ASSEMBLY__
#include <asm/hw_irq.h>
-#include <asm/kvm_para.h>
/*G:030
* But first, how does our Guest contact the Host to ask for privileged
* operations? There are two ways: the direct way is to make a "hypercall",
* to make requests of the Host Itself.
*
- * We use the KVM hypercall mechanism, though completely different hypercall
- * numbers. Seventeen hypercalls are available: the hypercall number is put in
- * the %eax register, and the arguments (when required) are placed in %ebx,
- * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax.
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts). Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
*
* Grossly invalid calls result in Sudden Death at the hands of the vengeful
* Host, rather than returning failure. This reflects Winston Churchill's
* definition of a gentleman: "someone who is only rude intentionally".
-:*/
+ */
+static inline unsigned long
+hcall(unsigned long call,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4)
+{
+ /* "int" is the Intel instruction to trigger a trap. */
+ asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+ /* The call in %eax (aka "a") might be overwritten */
+ : "=a"(call)
+ /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+ : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+ /* "memory" means this might write somewhere in memory.
+ * This isn't true for all calls, but it's safe to tell
+ * gcc that it might happen so it doesn't get clever. */
+ : "memory");
+ return call;
+}
/* Can't use our min() macro here: needs to be a constant */
#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
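
The new hcall() above encodes the convention described in the comment: the call number travels in %eax, up to four arguments in %ebx, %ecx, %edx and %esi, and the result comes back in %eax via a software trap. As a user-space illustration of that calling convention only (mock_hcall and DEMO_CALL are made up for this sketch; the real helper issues int $LGUEST_TRAP_ENTRY and never leaves the guest kernel):

	#include <stdio.h>

	#define DEMO_CALL 1	/* placeholder call number, not a real LHCALL_* value */

	/* Stands in for hcall(): the Host would read these values out of
	 * %eax, %ebx, %ecx, %edx and %esi after the trap. */
	static unsigned long mock_hcall(unsigned long call, unsigned long arg1,
					unsigned long arg2, unsigned long arg3,
					unsigned long arg4)
	{
		printf("hypercall %lu(%lu, %lu, %lu, %lu)\n",
		       call, arg1, arg2, arg3, arg4);
		return 0;	/* the real return value arrives back in %eax */
	}

	int main(void)
	{
		/* Unused argument slots are simply passed as zero, exactly as
		 * the lazy_hcall* wrappers in lguest/boot.c do further down. */
		mock_hcall(DEMO_CALL, 0x1000, 0, 0, 0);
		return 0;
	}
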
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb5..f854d89b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
return false;
/* No device or no PCI device */
- if (!dev || dev->bus != &pci_bus_type)
+ if (dev->bus != &pci_bus_type)
return false;
devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
u32 tail, head;
u8 *target;
+ WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
target = iommu->cmd_buf + tail;
memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
struct dma_ops_domain *dma_dom;
u16 devid;
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
/* Do we handle this device? */
if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
struct device *dev = dev_data->dev;
- do_detach(dev);
+ __detach_device(dev);
atomic_set(&dev_data->bind, 0);
}
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
return NULL;
spin_lock_init(&domain->lock);
+ mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_pagetable(domain);
- domain_id_free(domain->id);
-
- kfree(domain);
+ protection_domain_free(domain);
dom->priv = NULL;
}
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
iova &= PAGE_MASK;
paddr &= PAGE_MASK;
+ mutex_lock(&domain->api_lock);
+
for (i = 0; i < npages; ++i) {
ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
paddr += PAGE_SIZE;
}
+ mutex_unlock(&domain->api_lock);
+
return 0;
}
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
iova &= PAGE_MASK;
+ mutex_lock(&domain->api_lock);
+
for (i = 0; i < npages; ++i) {
iommu_unmap_page(domain, iova, PM_MAP_4k);
iova += PAGE_SIZE;
}
iommu_flush_tlb_pde(domain);
+
+ mutex_unlock(&domain->api_lock);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42f5350..6360abf 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,9 +138,9 @@ int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
/*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
*/
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
/*
* List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
*/
for (i = 0; i < table->length; ++i)
checksum += p[i];
- if (checksum != 0)
+ if (checksum != 0) {
/* ACPI table corrupt */
- return -ENODEV;
+ amd_iommu_init_err = -ENODEV;
+ return 0;
+ }
p += IVRS_HEADER_LENGTH;
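
The replacement comment above describes the pattern used throughout this patch: the acpi_table_parse() callbacks no longer return errors directly, they record them in amd_iommu_init_err and return 0, and amd_iommu_init() checks the variable after each parse. A minimal user-space sketch of that pattern, under the assumption that the callback's return value cannot usefully carry the error (demo_init_err and parse_table are invented names):

	#include <stdio.h>
	#include <errno.h>

	static int demo_init_err;	/* plays the role of amd_iommu_init_err */

	/* The callback must return 0 ("handled"); a real error is stashed
	 * in the file-scope variable instead. */
	static int parse_table(const unsigned char *p, int len)
	{
		unsigned char checksum = 0;
		int i;

		for (i = 0; i < len; i++)
			checksum += p[i];
		if (checksum) {			/* corrupt table */
			demo_init_err = -ENODEV;
			return 0;
		}
		return 0;
	}

	int main(void)
	{
		const unsigned char bad[] = { 1, 2, 3 };

		parse_table(bad, sizeof(bad));
		if (demo_init_err)
			printf("init failed: %d\n", demo_init_err);
		return 0;
	}
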
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
if (cmd_buf == NULL)
return NULL;
- iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+ iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
return cmd_buf;
}
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
&entry, sizeof(entry));
amd_iommu_reset_cmd_buffer(iommu);
+ iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
free_pages((unsigned long)iommu->cmd_buf,
- get_order(iommu->cmd_buf_size));
+ get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}
/* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
h->mmio_phys);
iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
- if (iommu == NULL)
- return -ENOMEM;
+ if (iommu == NULL) {
+ amd_iommu_init_err = -ENOMEM;
+ return 0;
+ }
+
ret = init_iommu_one(iommu, h);
- if (ret)
- return ret;
+ if (ret) {
+ amd_iommu_init_err = ret;
+ return 0;
+ }
break;
default:
break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
}
WARN_ON(p != end);
- amd_iommu_initialized = true;
-
return 0;
}
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
return -ENODEV;
+ ret = amd_iommu_init_err;
+ if (ret)
+ goto out;
+
dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
if (acpi_table_parse("IVRS", init_iommu_all) != 0)
goto free;
- if (!amd_iommu_initialized)
+ if (amd_iommu_init_err) {
+ ret = amd_iommu_init_err;
goto free;
+ }
if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
goto free;
+ if (amd_iommu_init_err) {
+ ret = amd_iommu_init_err;
+ goto free;
+ }
+
ret = sysdev_class_register(&amd_iommu_sysdev_class);
if (ret)
goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
if (ret)
goto free;
+ enable_iommus();
+
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
amd_iommu_init_notifier();
- enable_iommus();
-
if (iommu_pass_through)
goto out;
@@ -1315,6 +1332,7 @@ out:
return ret;
free:
+ disable_iommus();
amd_iommu_uninit_devices();
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997..b5d8b0b 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
+ u32 ctl;
bus = bus_dev_ranges[i].bus;
dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
gart_iommu_aperture = 1;
x86_init.iommu.iommu_init = gart_iommu_init;
- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+ ctl = read_pci_config(bus, slot, 3,
+ AMD64_GARTAPERTURECTL);
+
+ /*
+ * Before we do anything else, disable the GART. It may
+ * still be enabled if we boot into a crash-kernel here.
+ * Reconfiguring the GART while it is enabled could have
+ * unknown side-effects.
+ */
+ ctl &= ~GARTEN;
+ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+ aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
aper_base <<= 25;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 1cbed97..dfdb4db 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -22,6 +22,7 @@
*/
#include <linux/dmi.h>
+#include <linux/module.h>
#include <asm/div64.h>
#include <asm/vmware.h>
#include <asm/x86_init.h>
@@ -101,6 +102,7 @@ int vmware_platform(void)
return 0;
}
+EXPORT_SYMBOL(vmware_platform);
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c1..ebd4c51 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
-#include <asm/x86_init.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
-
-#ifdef CONFIG_X86_64
- x86_platform.iommu_shutdown();
-#endif
-
crash_save_cpu(regs, safe_smp_processor_id());
}
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index e39e771..e1a93be 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,6 +14,8 @@
#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
#endif
+#include <linux/uaccess.h>
+
extern void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl);
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n)
get_bp(frame);
#ifdef CONFIG_FRAME_POINTER
- while (n--)
- frame = frame->next_frame;
+ while (n--) {
+ if (probe_kernel_address(&frame->next_frame, frame))
+ break;
+ }
#endif
return (unsigned long)frame;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 68cd24f..0f7f130 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
enable_gart_translation(dev, __pa(agp_gatt_table));
}
+
+ /* Flush the GART-TLB to remove stale entries */
+ k8_flush_garts();
}
/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 48aeee8..19a8906 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
for_each_sp(pages, sp, parents, i) {
kvm_mmu_zap_page(kvm, sp);
mmu_pages_clear_parents(&parents);
+ zapped++;
}
- zapped += pages.nr;
kvm_mmu_pages_init(parent, &parents, &pages);
}
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
*/
if (used_pages > kvm_nr_mmu_pages) {
- while (used_pages > kvm_nr_mmu_pages) {
+ while (used_pages > kvm_nr_mmu_pages &&
+ !list_empty(&kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ used_pages -= kvm_mmu_zap_page(kvm, page);
used_pages--;
}
+ kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
}
else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- kvm_mmu_zap_page(kvm, sp);
+ if (kvm_mmu_zap_page(kvm, sp))
+ nn = bucket->first;
}
}
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 445c594..2ba5820 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_svm;
+ err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- err = -ENOMEM;
+ if (!page)
goto uninit;
- }
- err = -ENOMEM;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
- goto uninit;
+ goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
- goto uninit;
-
- svm->msrpm = page_address(msrpm_pages);
- svm_vcpu_init_msrpm(svm->msrpm);
+ goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
- goto uninit;
+ goto free_page3;
+
svm->nested.hsave = page_address(hsave_page);
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
return &svm->vcpu;
+free_page3:
+ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+ __free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 686492ed..bc933cf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
} host_state;
struct {
int vm86_active;
- u8 save_iopl;
+ ulong save_rflags;
struct kvm_save_segment {
u16 selector;
unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
- unsigned long rflags;
+ unsigned long rflags, save_rflags;
rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active)
- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ }
return rflags;
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- if (to_vmx(vcpu)->rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ to_vmx(vcpu)->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ }
vmcs_writel(GUEST_RFLAGS, rflags);
}
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vmx->rmode.save_iopl
- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ vmx->rmode.save_rflags = flags;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 24cd0ee..3c4ca98 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL) {
- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
- cr0, kvm_read_cr0(vcpu));
kvm_inject_gp(vcpu, 0);
return;
}
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
- "and a clear PE flag\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
int cs_db, cs_l;
if (!is_pae(vcpu)) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while PAE is disabled\n");
kvm_inject_gp(vcpu, 0);
return;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while CS.L == 1\n");
kvm_inject_gp(vcpu, 0);
return;
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE)) {
- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
- "in long mode\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (cr4 & X86_CR4_VMXE) {
- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (is_long_mode(vcpu)) {
if (cr3 & CR3_L_MODE_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS) {
- printk(KERN_DEBUG
- "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
kvm_inject_gp(vcpu, 0);
return;
}
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
if (efer & efer_reserved_bits) {
- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
- efer);
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
- /* only 0 or all 1s can be written to IA32_MCi_CTL */
+ /* only 0 or all 1s can be written to IA32_MCi_CTL
+ * some Linux kernels, though, clear bit 10 in bank 4 to
+ * work around a BIOS/GART TBL issue on AMD K8s; ignore
+ * this to avoid an uncaught #GP in the guest
+ */
if ((offset & 0x3) == 0 &&
- data != 0 && data != ~(u64)0)
+ data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
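
The reworked check above accepts a write of 0, of all ones, or of all ones with bit 10 cleared (the AMD K8 GART quirk the comment mentions); anything else is rejected. A small stand-alone sketch of that acceptance rule (mci_ctl_write_ok is an invented helper for illustration, not a KVM function):

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors: data != 0 && (data | (1 << 10)) != ~(u64)0  =>  reject */
	static int mci_ctl_write_ok(uint64_t data)
	{
		return data == 0 || (data | (1ULL << 10)) == ~(uint64_t)0;
	}

	int main(void)
	{
		printf("%d\n", mci_ctl_write_ok(0));				/* 1 */
		printf("%d\n", mci_ctl_write_ok(~(uint64_t)0));			/* 1 */
		printf("%d\n", mci_ctl_write_ok(~(uint64_t)0 ^ (1ULL << 10)));	/* 1: the K8 case */
		printf("%d\n", mci_ctl_write_ok(0x1234));			/* 0 */
		return 0;
	}
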
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r, n, i;
+ int r, i;
struct kvm_memory_slot *memslot;
+ unsigned long n;
unsigned long is_dirty = 0;
unsigned long *dirty_bitmap = NULL;
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
r = -ENOMEM;
dirty_bitmap = vmalloc(n);
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_set_cr8(vcpu, kvm_run->cr8);
if (vcpu->arch.pio.cur_count) {
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = complete_pio(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r)
goto out;
}
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
int ret = 0;
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+ u32 desc_limit;
old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
}
- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+ desc_limit = get_desc_limit(&nseg_desc);
+ if (!nseg_desc.p ||
+ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
return 1;
}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1..2bdf628 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
local_irq_save(flags);
if (lguest_data.hcall_status[next_call] != 0xFF) {
/* Table full, so do normal hcall which will flush table. */
- kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+ hcall(call, arg1, arg2, arg3, arg4);
} else {
lguest_data.hcalls[next_call].arg0 = call;
lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
* So, when we're in lazy mode, we call async_hcall() to store the call for
* future processing:
*/
-static void lazy_hcall1(unsigned long call,
- unsigned long arg1)
+static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- kvm_hypercall1(call, arg1);
+ hcall(call, arg1, 0, 0, 0);
else
async_hcall(call, arg1, 0, 0, 0);
}
/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
static void lazy_hcall2(unsigned long call,
- unsigned long arg1,
- unsigned long arg2)
+ unsigned long arg1,
+ unsigned long arg2)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- kvm_hypercall2(call, arg1, arg2);
+ hcall(call, arg1, arg2, 0, 0);
else
async_hcall(call, arg1, arg2, 0, 0);
}
static void lazy_hcall3(unsigned long call,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3)
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- kvm_hypercall3(call, arg1, arg2, arg3);
+ hcall(call, arg1, arg2, arg3, 0);
else
async_hcall(call, arg1, arg2, arg3, 0);
}
#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4)
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+ hcall(call, arg1, arg2, arg3, arg4);
else
async_hcall(call, arg1, arg2, arg3, arg4);
}
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
:*/
static void lguest_leave_lazy_mmu_mode(void)
{
- kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+ hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
paravirt_leave_lazy_mmu();
}
static void lguest_end_context_switch(struct task_struct *next)
{
- kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+ hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
paravirt_end_context_switch(next);
}
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
/* Keep the local copy up to date. */
native_write_idt_entry(dt, entrynum, g);
/* Tell Host about this new entry. */
- kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
+ hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}
/*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
struct desc_struct *idt = (void *)desc->address;
for (i = 0; i < (desc->size+1)/8; i++)
- kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+ hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
}
/*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
struct desc_struct *gdt = (void *)desc->address;
for (i = 0; i < (desc->size+1)/8; i++)
- kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
+ hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
}
/*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
{
native_write_gdt_entry(dt, entrynum, desc, type);
/* Tell Host about this new entry. */
- kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
- dt[entrynum].a, dt[entrynum].b);
+ hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
+ dt[entrynum].a, dt[entrynum].b, 0);
}
/*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
}
/* Please wake us this far in the future. */
- kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta);
+ hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
return 0;
}
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
/* A 0 argument shuts the clock down. */
- kvm_hypercall0(LHCALL_SET_CLOCKEVENT);
+ hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
/* STOP! Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
- kvm_hypercall0(LHCALL_HALT);
+ hcall(LHCALL_HALT, 0, 0, 0, 0);
}
/*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
*/
static void lguest_power_off(void)
{
- kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"),
- LGUEST_SHUTDOWN_POWEROFF);
+ hcall(LHCALL_SHUTDOWN, __pa("Power down"),
+ LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}
/*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
*/
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
- kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF);
+ hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
/* The hcall won't return, but to keep gcc happy, we're "done". */
return NOTIFY_DONE;
}
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
len = sizeof(scratch) - 1;
scratch[len] = '\0';
memcpy(scratch, buf, len);
- kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));
+ hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
/* This routine returns the number of bytes actually written. */
return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
*/
static void lguest_restart(char *reason)
{
- kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
+ hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}
/*G:050
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 27eac0f..4f420c2f 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
*/
movl $LHCALL_LGUEST_INIT, %eax
movl $lguest_data - __PAGE_OFFSET, %ebx
- .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+ int $LGUEST_TRAP_ENTRY
/* Set up the initial stack so we can run C code. */
movl $(init_thread_union+THREAD_SIZE),%esp
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c7b1ebf..31930fd 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -66,14 +66,44 @@ resource_to_addr(struct acpi_resource *resource,
struct acpi_resource_address64 *addr)
{
acpi_status status;
-
- status = acpi_resource_to_address64(resource, addr);
- if (ACPI_SUCCESS(status) &&
- (addr->resource_type == ACPI_MEMORY_RANGE ||
- addr->resource_type == ACPI_IO_RANGE) &&
- addr->address_length > 0 &&
- addr->producer_consumer == ACPI_PRODUCER) {
+ struct acpi_resource_memory24 *memory24;
+ struct acpi_resource_memory32 *memory32;
+ struct acpi_resource_fixed_memory32 *fixed_memory32;
+
+ memset(addr, 0, sizeof(*addr));
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &resource->data.memory24;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = memory24->minimum;
+ addr->address_length = memory24->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &resource->data.memory32;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = memory32->minimum;
+ addr->address_length = memory32->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
return AE_OK;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &resource->data.fixed_memory32;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = fixed_memory32->address;
+ addr->address_length = fixed_memory32->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ status = acpi_resource_to_address64(resource, addr);
+ if (ACPI_SUCCESS(status) &&
+ (addr->resource_type == ACPI_MEMORY_RANGE ||
+ addr->resource_type == ACPI_IO_RANGE) &&
+ addr->address_length > 0) {
+ return AE_OK;
+ }
+ break;
}
return AE_ERROR;
}
@@ -91,30 +121,6 @@ count_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
-static void
-align_resource(struct acpi_device *bridge, struct resource *res)
-{
- int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
-
- /*
- * Host bridge windows are not BARs, but the decoders on the PCI side
- * that claim this address space have starting alignment and length
- * constraints, so fix any obvious BIOS goofs.
- */
- if (!IS_ALIGNED(res->start, align)) {
- dev_printk(KERN_DEBUG, &bridge->dev,
- "host bridge window %pR invalid; "
- "aligning start to %d-byte boundary\n", res, align);
- res->start &= ~(align - 1);
- }
- if (!IS_ALIGNED(res->end + 1, align)) {
- dev_printk(KERN_DEBUG, &bridge->dev,
- "host bridge window %pR invalid; "
- "aligning end to %d-byte boundary\n", res, align);
- res->end = ALIGN(res->end, align) - 1;
- }
-}
-
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -124,7 +130,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
acpi_status status;
unsigned long flags;
struct resource *root, *conflict;
- u64 start, end, max_len;
+ u64 start, end;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -141,19 +147,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
} else
return AE_OK;
- max_len = addr.maximum - addr.minimum + 1;
- if (addr.address_length > max_len) {
- dev_printk(KERN_DEBUG, &info->bridge->dev,
- "host bridge window length %#llx doesn't fit in "
- "%#llx-%#llx, trimming\n",
- (unsigned long long) addr.address_length,
- (unsigned long long) addr.minimum,
- (unsigned long long) addr.maximum);
- addr.address_length = max_len;
- }
-
start = addr.minimum + addr.translation_offset;
- end = start + addr.address_length - 1;
+ end = addr.maximum + addr.translation_offset;
res = &info->res[info->res_num];
res->name = info->name;
@@ -161,7 +156,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
res->start = start;
res->end = end;
res->child = NULL;
- align_resource(info->bridge, res);
if (!pci_use_crs) {
dev_printk(KERN_DEBUG, &info->bridge->dev,
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 46fd43f..97da2ba 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -72,6 +72,9 @@ pcibios_align_resource(void *data, const struct resource *res,
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
+ } else if (res->flags & IORESOURCE_MEM) {
+ if (start < BIOS_END)
+ start = BIOS_END;
}
return start;
}
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 2bb7348..05eb32e 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -46,6 +46,12 @@ struct authenc_request_ctx {
char tail[];
};
+static void authenc_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
@@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
crypto_aead_authsize(authenc), 1);
out:
- aead_request_complete(req, err);
+ authenc_request_complete(req, err);
}
static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
@@ -208,7 +214,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
err = crypto_ablkcipher_decrypt(abreq);
out:
- aead_request_complete(req, err);
+ authenc_request_complete(req, err);
}
static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -245,7 +251,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
err = crypto_ablkcipher_decrypt(abreq);
out:
- aead_request_complete(req, err);
+ authenc_request_complete(req, err);
}
static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
@@ -379,7 +385,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
err = crypto_authenc_genicv(areq, iv, 0);
}
- aead_request_complete(areq, err);
+ authenc_request_complete(areq, err);
}
static int crypto_authenc_encrypt(struct aead_request *req)
@@ -420,7 +426,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
err = crypto_authenc_genicv(areq, greq->giv, 0);
}
- aead_request_complete(areq, err);
+ authenc_request_complete(areq, err);
}
static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index a610ebe..2fbfe51 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
/* allow full data read from EC address space */
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_EC) {
- if (obj_desc->common_field.bit_length > 8)
- obj_desc->common_field.access_bit_width =
- ACPI_ROUND_UP(obj_desc->common_field.
- bit_length, 8);
+ if (obj_desc->common_field.bit_length > 8) {
+ unsigned width =
+ ACPI_ROUND_BITS_UP_TO_BYTES(
+ obj_desc->common_field.bit_length);
+ /* access_bit_width is u8, don't overflow it */
+ if (width > 8)
+ width = 8;
obj_desc->common_field.access_byte_width =
- ACPI_DIV_8(obj_desc->common_field.
- access_bit_width);
+ width;
+ obj_desc->common_field.access_bit_width =
+ 8 * width;
+ }
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac..228740f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
WARN_ON(!ap->ops->error_handler);
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
+ spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
}
/* okay, this error is ours */
+ memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d..4164dd2 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 67e0fc5..93d1f9b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1695,6 +1695,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
cf |= CF_DRY_RUN;
else {
dev_err(DEV, "--dry-run is not supported by peer");
+ kfree(p);
return 0;
}
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 54f56ea..c786023 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -899,7 +899,8 @@ retry:
drbd_thread_start(&mdev->asender);
- drbd_send_protocol(mdev);
+ if (!drbd_send_protocol(mdev))
+ return -1;
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0);
drbd_send_uuids(mdev);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d41331b..aa4248e 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
/* clear any possible error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
-
- intel_i830_setup_flush();
return 0;
}
@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = {
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_850_driver = {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c9bc896..90b199f 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
/* last check before exit */
- if (!io_detect_cm4000(iobase, dev))
- count = -ENODEV;
+ if (!io_detect_cm4000(iobase, dev)) {
+ rc = -ENODEV;
+ goto release_io;
+ }
if (test_bit(IS_INVREV, &dev->flags) && count > 0)
str_invert_revert(dev->rbuf, count);
if (copy_to_user(buf, dev->rbuf, count))
- return -EFAULT;
+ rc = -EFAULT;
release_io:
clear_bit(LOCK_IO, &dev->flags);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d5d575..75d293e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1113,6 +1113,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long flags;
struct cpufreq_policy *data;
+ struct kobject *kobj;
+ struct completion *cmp;
#ifdef CONFIG_SMP
struct sys_device *cpu_sys_dev;
unsigned int j;
@@ -1141,10 +1143,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
dprintk("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+ kobj = &sys_dev->kobj;
cpufreq_cpu_put(data);
cpufreq_debug_enable_ratelimit();
unlock_policy_rwsem_write(cpu);
+ sysfs_remove_link(kobj, "cpufreq");
return 0;
}
#endif
@@ -1181,7 +1184,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
data->governor->name, CPUFREQ_NAME_LEN);
#endif
cpu_sys_dev = get_cpu_sysdev(j);
- sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+ kobj = &cpu_sys_dev->kobj;
+ unlock_policy_rwsem_write(cpu);
+ sysfs_remove_link(kobj, "cpufreq");
+ lock_policy_rwsem_write(cpu);
cpufreq_cpu_put(data);
}
}
@@ -1192,19 +1198,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
- kobject_put(&data->kobj);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_write(cpu);
+ kobject_put(kobj);
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
*/
dprintk("waiting for dropping of refcount\n");
- wait_for_completion(&data->kobj_unregister);
+ wait_for_completion(cmp);
dprintk("wait complete\n");
+ lock_policy_rwsem_write(cpu);
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
-
unlock_policy_rwsem_write(cpu);
free_cpumask_var(data->related_cpus);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 599a40b..3a14787 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -444,6 +444,7 @@ static struct attribute_group dbs_attr_group_old = {
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int load = 0;
+ unsigned int max_load = 0;
unsigned int freq_target;
struct cpufreq_policy *policy;
@@ -501,6 +502,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
continue;
load = 100 * (wall_time - idle_time) / wall_time;
+
+ if (load > max_load)
+ max_load = load;
}
/*
@@ -511,7 +515,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
return;
/* Check for frequency increase */
- if (load > dbs_tuners_ins.up_threshold) {
+ if (max_load > dbs_tuners_ins.up_threshold) {
this_dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -538,7 +542,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold.
*/
- if (load < (dbs_tuners_ins.down_threshold - 10)) {
+ if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
this_dbs_info->requested_freq -= freq_target;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 702dcc9..14a34d9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
u.packet.header_length = GET_HEADER_LENGTH(control);
if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+ if (u.packet.header_length % 4 != 0)
+ return -EINVAL;
header_length = u.packet.header_length;
} else {
/*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
if (ctx->header_size == 0) {
if (u.packet.header_length > 0)
return -EINVAL;
- } else if (u.packet.header_length % ctx->header_size != 0) {
+ } else if (u.packet.header_length == 0 ||
+ u.packet.header_length % ctx->header_size != 0) {
return -EINVAL;
}
header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
return -ENODEV;
if (_IOC_TYPE(cmd) != '#' ||
- _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+ _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+ _IOC_SIZE(cmd) > sizeof(buffer))
return -EINVAL;
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
+ if (_IOC_DIR(cmd) == _IOC_READ)
+ memset(&buffer, 0, _IOC_SIZE(cmd));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
- }
ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
- }
return ret;
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47..8f5aebf 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
for (try = 0; try < 5; try++) {
new = allocate ? old - bandwidth : old + bandwidth;
if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
- break;
+ return -EBUSY;
data[0] = cpu_to_be32(old);
data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
__be32 c, all, old;
- int i, retry = 5;
+ int i, ret = -EIO, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
if (!(channels_mask & 1 << i))
continue;
+ ret = -EBUSY;
+
c = cpu_to_be32(1 << (31 - i));
if ((old & c) != (all & c))
continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
/* 1394-1995 IRM, fall through to retry. */
default:
- if (retry--)
+ if (retry) {
+ retry--;
i--;
+ } else {
+ ret = -EIO;
+ }
}
}
- return -EIO;
+ return ret;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 0cf4d7f..94b16e0 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
- int tcode, length, ext_tcode, sel;
+ int tcode, length, ext_tcode, sel, try;
__be32 *payload, lock_old;
u32 lock_arg, lock_data;
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
reg_write(ohci, OHCI1394_CSRControl, sel);
- if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
- lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
- else
- fw_notify("swap not done yet\n");
+ for (try = 0; try < 20; try++)
+ if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+ lock_old = cpu_to_be32(reg_read(ohci,
+ OHCI1394_CSRData));
+ fw_fill_response(&response, packet->header,
+ RCODE_COMPLETE,
+ &lock_old, sizeof(lock_old));
+ goto out;
+ }
+
+ fw_error("swap not done (CSR lock timeout)\n");
+ fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
- fw_fill_response(&response, packet->header,
- RCODE_COMPLETE, &lock_old, sizeof(lock_old));
out:
fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
- u64 offset;
- u32 csr;
+ u64 offset, csr;
if (ctx == &ctx->ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 7d521e1..b827c97 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -252,6 +252,18 @@ static void pca953x_irq_bus_lock(unsigned int irq)
static void pca953x_irq_bus_sync_unlock(unsigned int irq)
{
struct pca953x_chip *chip = get_irq_chip_data(irq);
+ uint16_t new_irqs;
+ uint16_t level;
+
+ /* Look for any newly set up interrupt */
+ new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
+ new_irqs &= ~chip->reg_direction;
+
+ while (new_irqs) {
+ level = __ffs(new_irqs);
+ pca953x_gpio_direction_input(&chip->gpio_chip, level);
+ new_irqs &= ~(1 << level);
+ }
mutex_unlock(&chip->irq_lock);
}
@@ -278,7 +290,7 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
else
chip->irq_trig_raise &= ~mask;
- return pca953x_gpio_direction_input(&chip->gpio_chip, level);
+ return 0;
}
static struct irq_chip pca953x_irq_chip = {
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3bd8727..a263b70 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->driver->disable_vblank(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
dev->vblank_enabled[crtc] = 0;
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b743411..a0c365f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
}
driver = dev->driver;
- drm_vblank_cleanup(dev);
-
drm_lastclose(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
dev->agp = NULL;
}
+ drm_vblank_cleanup(dev);
+
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b574503..a0b8447 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
} else {
struct drm_i915_gem_object *obj_priv;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
seq_printf(m, "Fenced object[%2d] = %p: %s "
"%08x %08zx %08x %s %08x %08x %d",
i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc9393..c3cfafc 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ dev_priv->compressed_fb = compressed_fb;
+
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (!IS_GM45(dev))
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919..cc03537 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
};
const static struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i865g_info = {
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = {
.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i945g_info = {
.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
!dev_priv->mm.suspended) {
drm_i915_ring_buffer_t *ring = &dev_priv->ring;
struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.suspended = 0;
/* Stop the ring if it's running. */
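
The i915_drv.c changes make i85x identification data-driven: the per-device intel_device_info gains an is_i85x bit and the PCI ID table points both 0x3582 and the corrected 0x358e at it, so IS_I85X() can test a flag instead of comparing raw IDs. A standalone sketch of that pattern follows; device_info, idlist and lookup() are illustrative names, not the driver's own.

/* Feature flags in a per-device info struct: the ID table maps PCI device
 * IDs to a static info record, and later code tests bits instead of
 * comparing raw IDs. */
#include <stdio.h>

struct device_info {
	unsigned is_i8xx : 1;
	unsigned is_i85x : 1;
	unsigned is_mobile : 1;
	unsigned cursor_needs_physical : 1;
};

static const struct device_info i85x_info = {
	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
};

static const struct { unsigned short id; const struct device_info *info; } idlist[] = {
	{ 0x3582, &i85x_info },
	{ 0x358e, &i85x_info },	/* the ID the patch corrects from 0x35e8 */
	{ 0, NULL }
};

static const struct device_info *lookup(unsigned short id)
{
	for (int i = 0; idlist[i].info; i++)
		if (idlist[i].id == id)
			return idlist[i].info;
	return NULL;
}

int main(void)
{
	const struct device_info *info = lookup(0x358e);

	printf("is_i85x = %d\n", info ? (int)info->is_i85x : 0);
	return 0;
}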
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260..6e47900 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
+ u8 is_i85x : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
+ void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
+ unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -611,6 +615,8 @@ typedef struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
+ /* indicate whether the LVDS EDID is OK */
+ bool lvds_edid_good;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
@@ -628,6 +634,9 @@ typedef struct drm_i915_private {
u8 max_delay;
enum no_fbc_reason no_fbc_reason;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -731,6 +740,8 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
+#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+
/**
* Request queue structure.
*
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
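
The header adds to_intel_bo(), a typed accessor over the GEM object's driver_private pointer; most of the churn in this series is the mechanical conversion of every obj->driver_private cast to it. The minimal model below shows the idea, keeping the cast in one macro so call sites stay readable and a later change of representation touches one line (later versions of the driver did exactly that, turning the macro into a container_of() once the GEM object was embedded). gem_object and intel_bo here are stand-ins, not the driver's types.

/* Typed accessor over a void * back-pointer, the idea behind to_intel_bo(). */
#include <stdio.h>

struct gem_object {
	void *driver_private;
};

struct intel_bo {
	struct gem_object *base;
	int pin_count;
};

#define to_intel_bo(obj) ((struct intel_bo *)(obj)->driver_private)

int main(void)
{
	struct gem_object obj;
	struct intel_bo bo = { &obj, 2 };

	obj.driver_private = &bo;
	printf("pin_count = %d\n", to_intel_bo(&obj)->pin_count);
	return 0;
}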
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 368d726..ef3d91d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages,
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj_priv->tiling_mode != I915_TILING_NONE;
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check source.
*
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto fail;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto out_unpin_object;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check destination.
*
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Pinned buffers may be scanout, so flush the cache */
if (obj_priv->pin_count)
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
@@ -1305,7 +1305,7 @@ void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev->dev_mapping)
unmap_mapping_range(dev->dev_mapping,
@@ -1316,7 +1316,7 @@ static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
@@ -1347,7 +1347,7 @@ static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int start, i;
/*
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size / PAGE_SIZE;
int i;
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
inode = obj->filp->f_path.dentry->d_inode;
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
+#define PIPE_CONTROL_FLUSH(addr) \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
DRM_DEBUG_DRIVER("%d\n", seqno);
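
On hardware with HAS_PIPE_CONTROL() the request seqno is now written through PIPE_CONTROL instead of MI_STORE_DWORD_INDEX, and the comment in the hunk explains the qword-write incoherence workaround: pad the stream with depth-stall flushes to scratch offsets 128 bytes apart (separate cachelines) before the final write that sets PIPE_CONTROL_NOTIFY. The sketch below emits the same 32-dword sequence into a plain array so the shape is easy to inspect; emit(), ring[] and the addresses are stand-ins for the LP_RING macros, while the command and flag values are the ones this series adds to i915_reg.h.

/* User-space model of the 32-dword PIPE_CONTROL sequence: one qword write
 * of the seqno, six depth-stall flushes to scratch slots 128 bytes apart,
 * then the same qword write again with the notify bit set. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GFX_OP_PIPE_CONTROL	((0x3u<<29)|(0x3u<<27)|(0x2u<<24)|2)
#define PIPE_CONTROL_QW_WRITE	(1u<<14)
#define PIPE_CONTROL_DEPTH_STALL (1u<<13)
#define PIPE_CONTROL_WC_FLUSH	(1u<<12)
#define PIPE_CONTROL_TC_FLUSH	(1u<<10)
#define PIPE_CONTROL_NOTIFY	(1u<<8)
#define PIPE_CONTROL_GLOBAL_GTT	(1u<<2)

static uint32_t ring[32];
static int tail;

static void emit(uint32_t dw) { ring[tail++] = dw; }

static void pipe_control_flush(uint32_t addr)
{
	emit(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_DEPTH_STALL);
	emit(addr | PIPE_CONTROL_GLOBAL_GTT);
	emit(0);
	emit(0);
}

int main(void)
{
	uint32_t seqno_gfx_addr = 0x10000, seqno = 7;
	uint32_t scratch = seqno_gfx_addr + 128;

	emit(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
	     PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	emit(seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
	emit(seqno);
	emit(0);
	for (int i = 0; i < 6; i++, scratch += 128)	/* separate cachelines */
		pipe_control_flush(scratch);
	emit(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
	     PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | PIPE_CONTROL_NOTIFY);
	emit(seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
	emit(seqno);
	emit(0);

	assert(tail == 32);	/* matches BEGIN_LP_RING(32) in the patch */
	printf("emitted %d dwords\n", tail);
	return 0;
}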
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
@@ -1965,7 +2009,7 @@ static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -1997,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
#if WATCH_BUF
@@ -2173,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
@@ -2244,7 +2288,7 @@ int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count, i;
struct address_space *mapping;
struct inode *inode;
@@ -2297,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2319,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2339,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
int tile_width;
uint32_t fence_reg, val;
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
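
The added WARN_ON() checks that the encoded pitch still fits the fence register field: pitch_val is ffs(stride / tile_width) - 1, i.e. log2 of the stride in tile widths, capped at I830_FENCE_MAX_PITCH_VAL (6) for 128-byte Y tiles and at the corrected I915_FENCE_MAX_PITCH_VAL (4) otherwise. A small standalone restatement of that computation follows; encode_pitch() and the 512-byte tile width are illustrative, and the stride is assumed to be a power-of-two multiple of the tile width, which the driver validates elsewhere.

/* Sketch of the pitch encoding the added WARN_ON() guards. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int encode_pitch(int stride, int tile_width, int max_pitch_val)
{
	int pitch_val = ffs(stride / tile_width) - 1;

	if (pitch_val > max_pitch_val) {
		fprintf(stderr, "pitch %d exceeds fence limit %d\n",
			pitch_val, max_pitch_val);
		return -1;
	}
	return pitch_val;
}

int main(void)
{
	/* 8192-byte stride, 512-byte tiles: stride/tile_width = 16, ffs-1 = 4 */
	printf("encoded pitch: %d\n", encode_pitch(8192, 512, 4));
	return 0;
}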
@@ -2381,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
@@ -2425,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
if (!reg->obj)
return i;
- obj_priv = reg->obj->driver_private;
+ obj_priv = to_intel_bo(reg->obj);
if (!obj_priv->pin_count)
avail++;
}
@@ -2480,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg = NULL;
int ret;
@@ -2547,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2583,7 +2633,7 @@ int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
@@ -2621,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
@@ -2728,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
@@ -2829,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2879,7 +2929,7 @@ int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -3092,7 +3142,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
@@ -3177,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (!obj_priv->page_cpu_valid)
return;
@@ -3217,7 +3267,7 @@ static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
@@ -3286,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int i, ret;
void __iomem *reloc_page;
bool need_fence;
@@ -3337,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EBADF;
}
- target_obj_priv = target_obj->driver_private;
+ target_obj_priv = to_intel_bo(target_obj);
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3689,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
prepare_to_wait(&dev_priv->pending_flip_queue,
&wait, TASK_INTERRUPTIBLE);
for (i = 0; i < count; i++) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (atomic_read(&obj_priv->pending_flip) > 0)
break;
}
@@ -3798,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
@@ -3924,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
@@ -3999,7 +4049,7 @@ err:
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
obj_priv->in_execbuffer = false;
}
drm_gem_object_unreference(object_list[i]);
@@ -4177,7 +4227,7 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4210,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
@@ -4250,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4307,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -4349,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
i915_gem_retire_requests(dev);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
@@ -4395,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
@@ -4456,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
trace_i915_gem_object_destroy(obj);
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret)
+ goto err_unref;
+
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+ if (dev_priv->seqno_page == NULL)
+ goto err_unpin;
+
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
static int
i915_gem_init_hws(struct drm_device *dev)
{
@@ -4563,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
drm_gem_object_unreference(obj);
- return ret;
+ goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unpin;
}
+
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ goto err_unpin;
+ }
+
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->seqno_obj = NULL;
+
+ dev_priv->seqno_page = NULL;
}
static void
@@ -4609,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
return;
obj = dev_priv->hws_obj;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
@@ -4643,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
i915_gem_cleanup_hws(dev);
return -ENOMEM;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
@@ -4936,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int ret;
int page_count;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!obj_priv->phys_obj)
return;
@@ -4975,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->phys_obj) {
if (obj_priv->phys_obj->id == id)
@@ -5026,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
void *obj_addr;
int ret;
char __user *user_data;
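
Both the new i915_gem_init_pipe_control() and the reworked i915_gem_init_hws() use the usual goto-unwind shape: acquire resources in order and, on failure, jump to the label that releases everything acquired so far while propagating the saved error code. The compilable sketch below shows that shape with malloc()/free() and a trivial pin() standing in for object allocation, pinning and kmap(); all names are hypothetical.

/* Goto-based error unwinding: each acquired resource gets a label, and a
 * failure jumps to the label that undoes everything acquired so far. */
#include <stdlib.h>

struct ctx { void *obj; void *mapping; int pinned; };

static int pin(void *obj) { return obj ? 0 : -1; }	/* i915_gem_object_pin() stand-in */

static int init_seqno_page(struct ctx *c)
{
	int ret;

	ret = -1;
	c->obj = malloc(4096);			/* drm_gem_object_alloc() */
	if (!c->obj)
		goto err;

	ret = pin(c->obj);
	if (ret)
		goto err_unref;
	c->pinned = 1;

	ret = -1;
	c->mapping = malloc(4096);		/* kmap() of the first page */
	if (!c->mapping)
		goto err_unpin;

	return 0;

err_unpin:
	c->pinned = 0;				/* i915_gem_object_unpin() */
err_unref:
	free(c->obj);				/* drm_gem_object_unreference() */
	c->obj = NULL;
err:
	return ret;				/* propagate the error code */
}

int main(void)
{
	struct ctx c = { 0 };

	if (init_seqno_page(&c))
		return 1;
	free(c.mapping);
	free(c.obj);
	return 0;
}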
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614..35507cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c01c878..4bdccef 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
* reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (stride > 8192)
return false;
- } else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
- return false;
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
/* 965+ just needs multiples of tile width */
@@ -240,7 +236,7 @@ bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->gtt_space == NULL)
return true;
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(obj);
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int i;
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int i;
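
The rewritten pre-965 check in i915_tiling_ok() splits gen2 from gen3: both cap the fence stride at 8 KiB, gen3 allows fenced objects up to I830_FENCE_MAX_SIZE_VAL << 20 and gen2 up to I830_FENCE_MAX_SIZE_VAL << 19 (256 MiB and 128 MiB with the 1 << 8 value from i915_reg.h). The sketch below restates only the limits this hunk touches; the power-of-two stride and alignment checks the function also performs are left out, and gen is passed explicitly instead of using IS_GEN3()/IS_GEN2().

/* Standalone restatement of the rewritten pre-965 tiling limits. */
#include <stdbool.h>
#include <stdio.h>

#define I830_FENCE_MAX_SIZE_VAL (1 << 8)

static bool pre965_tiling_ok(int gen, unsigned long stride, unsigned long size)
{
	if (stride > 8192)
		return false;
	if (gen == 3)
		return size <= ((unsigned long)I830_FENCE_MAX_SIZE_VAL << 20);
	return size <= ((unsigned long)I830_FENCE_MAX_SIZE_VAL << 19);
}

int main(void)
{
	printf("gen3, 8K stride, 128M: %d\n", pre965_tiling_ok(3, 8192, 128ul << 20));
	printf("gen2, 8K stride, 192M: %d\n", pre965_tiling_ok(2, 8192, 192ul << 20));
	return 0;
}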
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49c458b..2b8b969 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- if (intel_output->hot_plug)
- (*intel_output->hot_plug) (intel_output);
+ if (intel_encoder->hot_plug)
+ (*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_USER_INTERRUPT) {
+ if (gt_iir & GT_PIPE_NOTIFY) {
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
if (src == NULL)
return NULL;
- src_priv = src->driver_private;
+ src_priv = to_intel_bo(src);
if (src_priv->pages == NULL)
return NULL;
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_USER_INTERRUPT;
+ u32 render_mask = GT_PIPE_NOTIFY;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
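
With the seqno write moved to PIPE_CONTROL, the user-interrupt plumbing on PCH-split parts now enables GT_PIPE_NOTIFY rather than GT_USER_INTERRUPT. The get/put pair around it is refcounted, so only the 0 to 1 and 1 to 0 transitions touch the hardware mask; the sketch below models that refcounting with a plain integer mask. irq_state and enable_mask are stand-ins for the real interrupt registers, while GT_PIPE_NOTIFY is the bit value this series defines.

/* Refcounted irq get/put: only the first get and the last put change the
 * (simulated) enable mask. */
#include <stdint.h>
#include <stdio.h>

#define GT_PIPE_NOTIFY (1u << 4)	/* bit value from i915_reg.h in this series */

struct irq_state {
	int refcount;
	uint32_t enable_mask;	/* stands in for the hardware mask register */
};

static void user_irq_get(struct irq_state *s, uint32_t bit)
{
	if (++s->refcount == 1)
		s->enable_mask |= bit;
}

static void user_irq_put(struct irq_state *s, uint32_t bit)
{
	if (--s->refcount == 0)
		s->enable_mask &= ~bit;
}

int main(void)
{
	struct irq_state s = { 0, 0 };

	user_irq_get(&s, GT_PIPE_NOTIFY);
	user_irq_get(&s, GT_PIPE_NOTIFY);	/* nested get: no register write */
	user_irq_put(&s, GT_PIPE_NOTIFY);
	printf("mask after one put: %#x\n", s.enable_mask);
	user_irq_put(&s, GT_PIPE_NOTIFY);
	printf("mask after last put: %#x\n", s.enable_mask);
	return 0;
}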
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410..8fcc75c 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
int i = 0;
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk (KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
opregion->acpi->didl[i] |= (1<<31) | output_type | i;
i++;
}
-
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- opregion->acpi->didl[i] = 0;
+ goto end;
}
int intel_opregion_init(struct drm_device *dev, int resume)
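
The new intel_didl_outputs() asks ACPI for the video bus, records the _ADR of up to eight child devices in the opregion DIDL list, and null-terminates the list when fewer than eight are present; if a child has no usable _ADR it falls back to the old connector-walking blind_set path. The sketch below keeps only the fill-and-terminate rule, with a plain input array in place of the ACPI enumeration; fill_didl() is an illustrative name.

/* Fill at most eight DIDL entries and terminate the list with a zero. */
#include <stdint.h>
#include <stdio.h>

static int fill_didl(uint32_t *didl, const uint32_t *ids, int nids)
{
	int i = 0;

	for (int j = 0; j < nids; j++) {
		if (i >= 8) {
			fprintf(stderr, "More than 8 outputs detected\n");
			return i;
		}
		didl[i++] = ids[j] & 0x0f0f;	/* same masking as the patch */
	}
	if (i < 8)
		didl[i] = 0;	/* list must be null terminated */
	return i;
}

int main(void)
{
	uint32_t didl[8] = { 0 };
	const uint32_t ids[] = { 0x0100, 0x0200, 0x0301 };
	int n = fill_didl(didl, ids, 3);

	printf("recorded %d outputs\n", n);
	if (n < 8)
		printf("terminator entry = %u\n", didl[n]);
	return 0;
}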
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f..4cbc521 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
/*
* Fence registers
@@ -241,7 +251,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
@@ -2285,6 +2295,7 @@
#define DEIER 0x4400c
/* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
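
GFX_OP_PIPE_CONTROL packs the command type, opcode and sub-opcode into the high bits and the length field (a four-dword command encoded as dword count minus two) into the low bits; the flag definitions beneath it select what the command flushes and whether it raises the new GT_PIPE_NOTIFY interrupt. The tiny program below assembles and prints the header dword from the values added here, just to make the packing visible.

/* Print the PIPE_CONTROL header dword assembled from the new definitions. */
#include <stdio.h>

#define GFX_OP_PIPE_CONTROL	((0x3u<<29)|(0x3u<<27)|(0x2u<<24)|2)
#define PIPE_CONTROL_QW_WRITE	(1u<<14)
#define PIPE_CONTROL_WC_FLUSH	(1u<<12)
#define PIPE_CONTROL_TC_FLUSH	(1u<<10)
#define PIPE_CONTROL_NOTIFY	(1u<<8)

int main(void)
{
	unsigned int cmd = GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			   PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			   PIPE_CONTROL_NOTIFY;

	printf("GFX_OP_PIPE_CONTROL   = 0x%08x\n", GFX_OP_PIPE_CONTROL);
	printf("notifying qword write = 0x%08x\n", cmd);
	return 0;
}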
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 38110ce..759c2ef 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
/* CRT should always be at 0, but check anyway */
- if (intel_output->type != INTEL_OUTPUT_ANALOG)
+ if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
return false;
- return intel_ddc_probe(intel_output);
+ return intel_ddc_probe(intel_encoder);
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_output);
+ status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_output,
+ crtc = intel_get_load_detect_pipe(intel_encoder,
NULL, &dpms_mode);
if (crtc) {
- status = intel_crt_load_detect(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
+ status = intel_crt_load_detect(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- intel_i2c_destroy(intel_output->ddc_bus);
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct i2c_adapter *ddcbus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_output);
+ ret = intel_ddc_get_modes(intel_encoder);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_output->ddc_bus;
+ ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_output->ddc_bus =
+ intel_encoder->ddc_bus =
intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_output->ddc_bus) {
- intel_output->ddc_bus = ddcbus;
+ if (!intel_encoder->ddc_bus) {
+ intel_encoder->ddc_bus = ddcbus;
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_output);
+ ret = intel_ddc_get_modes(intel_encoder);
intel_i2c_destroy(ddcbus);
end:
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+ intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
return;
- connector = &intel_output->base;
- drm_connector_init(dev, &intel_output->base,
+ connector = &intel_encoder->base;
+ drm_connector_init(dev, &intel_encoder->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_encoder->base,
+ &intel_encoder->enc);
/* Set up the DDC bus. */
if (HAS_PCH_SPLIT(dev))
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev)
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
- intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_output->ddc_bus) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+ if (!intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
return;
}
- intel_output->type = INTEL_OUTPUT_ANALOG;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7e753b..c7502b6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
list_for_each_entry(l_entry, &mode_config->connector_list, head) {
if (l_entry->encoder &&
l_entry->encoder->crtc == crtc) {
- struct intel_output *intel_output = to_intel_output(l_entry);
- if (intel_output->type == type)
+ struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+ if (intel_encoder->type == type)
return true;
}
}
return false;
}
-struct drm_connector *
-intel_pipe_get_output (struct drm_crtc *crtc)
+static struct drm_connector *
+intel_pipe_get_connector (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
DPFC_CTL_PLANEB);
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = intel_fb->obj->driver_private;
+ obj_priv = to_intel_bo(intel_fb->obj);
/*
* If FBC is already on, we just have to verify that we can
@@ -1243,7 +1243,7 @@ out_disable:
static int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_fb = to_intel_framebuffer(crtc->fb);
obj = intel_fb->obj;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = intel_fb->obj->driver_private;
+ obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
intel_increase_pllclock(crtc, true);
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int refclk, num_outputs = 0;
+ int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
- switch (intel_output->type) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (intel_output->needs_tv_clock)
+ if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
break;
}
- num_outputs++;
+ num_connectors++;
}
- if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+ if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_edp) {
struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_output(crtc);
- intel_edp_link_config(to_intel_output(edp),
+ edp = intel_pipe_get_connector(crtc);
+ intel_edp_link_config(to_intel_encoder(edp),
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
- else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+ else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!bo)
return -ENOENT;
- obj_priv = bo->driver_private;
+ obj_priv = to_intel_bo(bo);
if (bo->size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
* detection.
*
* It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements. The pipe will be connected to no other outputs.
+ * its requirements. The pipe will be connected to no other encoders.
*
- * Currently this code will only succeed if there is a pipe with no outputs
+ * Currently this code will only succeed if there is a pipe with no encoders
* configured for it. In the future, it could choose to temporarily disable
* some outputs to free up a pipe for its use.
*
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode,
int *dpms_mode)
{
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_crtc *supported_crtc =NULL;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
}
encoder->crtc = crtc;
- intel_output->base.encoder = encoder;
- intel_output->load_detect_temp = true;
+ intel_encoder->base.encoder = encoder;
+ intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
*dpms_mode = intel_crtc->dpms_mode;
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (intel_output->load_detect_temp) {
+ if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_output->base.encoder = NULL;
- intel_output->load_detect_temp = false;
+ intel_encoder->base.encoder = NULL;
+ intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
}
- /* Switch crtc and output back off if necessary */
+ /* Switch crtc and encoder back off if necessary */
if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
if (encoder->crtc == crtc)
encoder_funcs->dpms(encoder, dpms_mode);
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
if (work && !work->pending) {
- obj_priv = work->pending_flip_obj->driver_private;
+ obj_priv = to_intel_bo(work->pending_flip_obj);
DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
obj_priv,
atomic_read(&obj_priv->pending_flip));
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->pending_flip_obj->driver_private;
+ obj_priv = to_intel_bo(work->pending_flip_obj);
/* Initial scanout buffer will have a 0 pending flip count */
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- obj->driver_private);
+ to_intel_bo(obj));
kfree(work);
intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
int entry = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- if (type_mask & intel_output->clone_mask)
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = &intel_encoder->enc;
- encoder->possible_crtcs = intel_output->crtc_mask;
+ encoder->possible_crtcs = intel_encoder->crtc_mask;
encoder->possible_clones = intel_connector_clones(dev,
- intel_output->clone_mask);
+ intel_encoder->clone_mask);
}
}
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
- obj_priv = dev_priv->pwrctx->driver_private;
+ obj_priv = to_intel_bo(dev_priv->pwrctx);
} else {
struct drm_gem_object *pwrctx;
pwrctx = intel_alloc_power_context(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
+ obj_priv = to_intel_bo(pwrctx);
}
}
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+ } else if (IS_I965GM(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ else if (IS_I9XX(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
} else {
- if (IS_I85X(dev))
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- else if (IS_845G(dev))
+ dev_priv->display.update_wm = i830_update_wm;
+ if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
- dev_priv->display.update_wm = i830_update_wm;
}
}
@@ -4957,7 +4958,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
- obj_priv = dev_priv->pwrctx->driver_private;
+ obj_priv = to_intel_bo(dev_priv->pwrctx);
I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
I915_READ(PWRCTXA);
i915_gem_object_unpin(dev_priv->pwrctx);
@@ -4978,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- return &intel_output->enc;
+ return &intel_encoder->enc;
}
/*
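The hunks above also convert every open-coded obj->driver_private cast in intel_display.c to a to_intel_bo() helper. A minimal sketch of what such an accessor can look like, assuming the i915 bo is still reached through drm_gem_object's driver_private field (the real definition lives in i915_drv.h and may differ in detail):

	/* single accessor for the driver-private bo behind a GEM object */
	#define to_intel_bo(obj) \
		((struct drm_i915_gem_object *)(obj)->driver_private)

	/* usage, mirroring the converted call sites */
	obj_priv = to_intel_bo(work->pending_flip_obj);

Funnelling every call site through one helper means a later change to how the bo is associated with the GEM object (for example embedding it) only has to touch this definition rather than each cast.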
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e283f7..77e40cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -55,23 +55,23 @@ struct intel_dp_priv {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
};
static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
void
-intel_edp_link_config (struct intel_output *intel_output,
+intel_edp_link_config (struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
*lane_num = dp_priv->lane_count;
if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
}
static int
-intel_dp_max_lane_count(struct intel_output *intel_output)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
if (dp_priv->dpcd[0] >= 0x11) {
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
}
static int
-intel_dp_max_link_bw(struct intel_output *intel_output)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_link_bw = dp_priv->dpcd[1];
switch (max_link_bw) {
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
intel_dp_link_required(struct drm_device *dev,
- struct intel_output *intel_output, int pixel_clock)
+ struct intel_encoder *intel_encoder, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_eDP(intel_output))
+ if (IS_eDP(intel_encoder))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
@@ -141,11 +141,11 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
- int max_lanes = intel_dp_max_lane_count(intel_output);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+ int max_lanes = intel_dp_max_lane_count(intel_encoder);
- if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+ if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
> max_link_clock * max_lanes)
return MODE_CLOCK_HIGH;
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_output *intel_output,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_output))
+ if (IS_eDP(intel_encoder))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_output *intel_output,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_output *intel_output,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_output *intel_output,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
- struct intel_output *intel_output = dp_priv->intel_output;
+ struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_output,
+ ret = intel_dp_aux_ch(intel_encoder,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false;
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -489,18 +489,18 @@ static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_output);
- int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+ int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
- if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
<= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_dp_m_n m_n;
/*
- * Find the lane count in the intel_output private
+ * Find the lane count in the intel_encoder private
*/
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
- if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
}
@@ -626,9 +626,9 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1)
dp_priv->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
/* don't miss out required setting for eDP */
dp_priv->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
- intel_dp_link_down(intel_output, dp_priv->DP);
- if (IS_eDP(intel_output))
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (IS_eDP(intel_encoder))
ironlake_edp_backlight_off(dev);
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_output))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (IS_eDP(intel_encoder))
ironlake_edp_backlight_on(dev);
}
}
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_output *intel_output,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_output,
+ ret = intel_dp_aux_native_read(intel_encoder,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
static void
intel_dp_save(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+ intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
dp_priv->save_link_configuration,
sizeof (dp_priv->save_link_configuration));
}
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_output *intel_output,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_output *intel_output,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
if (first)
intel_wait_for_vblank(dev);
- intel_dp_aux_native_write_1(intel_output,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_output,
+ ret = intel_dp_aux_native_write(intel_encoder,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
}
static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
int tries;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_output, 0x100,
+ intel_dp_aux_native_write(intel_encoder, 0x100,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
}
/* channel equalization */
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
++tries;
}
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
POSTING_READ(dp_priv->output_reg);
- intel_dp_aux_native_write_1(intel_output,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(dp_priv->output_reg, DP);
POSTING_READ(dp_priv->output_reg);
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
udelay(17000);
- if (IS_eDP(intel_output))
+ if (IS_eDP(intel_encoder))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(dp_priv->output_reg);
@@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
static void
intel_dp_restore(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+ intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
else
- intel_dp_link_down(intel_output, dp_priv->save_DP);
+ intel_dp_link_down(intel_encoder, dp_priv->save_DP);
}
/*
@@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector)
*/
static void
-intel_dp_check_link_status(struct intel_output *intel_output)
+intel_dp_check_link_status(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_output->enc.crtc)
+ if (!intel_encoder->enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_output, link_status)) {
- intel_dp_link_down(intel_output, dp_priv->DP);
+ if (!intel_dp_get_link_status(intel_encoder, link_status)) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
return;
}
if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
+ if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
@@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
@@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
+ if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
@@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_output);
+ ret = intel_ddc_get_modes(intel_encoder);
if (ret)
return ret;
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
};
void
-intel_dp_hot_plug(struct intel_output *intel_output)
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_output);
+ intel_dp_check_link_status(intel_encoder);
}
void
@@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
- intel_output = kcalloc(sizeof(struct intel_output) +
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
- if (!intel_output)
+ if (!intel_encoder)
return;
- dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+ dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
- connector = &intel_output->base;
+ connector = &intel_encoder->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
if (output_reg == DP_A)
- intel_output->type = INTEL_OUTPUT_EDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
else
- intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_output))
- intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ if (IS_eDP(intel_encoder))
+ intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- dp_priv->intel_output = intel_output;
+ dp_priv->intel_encoder = intel_encoder;
dp_priv->output_reg = output_reg;
dp_priv->has_audio = false;
dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
- intel_output->dev_priv = dp_priv;
+ intel_encoder->dev_priv = dp_priv;
- drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_encoder->base,
+ &intel_encoder->enc);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
@@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_output, name);
+ intel_dp_i2c_init(intel_encoder, name);
- intel_output->ddc_bus = &dp_priv->adapter;
- intel_output->hot_plug = intel_dp_hot_plug;
+ intel_encoder->ddc_bus = &dp_priv->adapter;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A) {
/* initialize panel mode from VBT if available for eDP */
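intel_dp_init() above allocates the encoder and its DP-specific private data as one block and recovers the second structure with pointer arithmetic. Condensed from the hunk (names as in the patch, error handling trimmed):

	struct intel_encoder *intel_encoder;
	struct intel_dp_priv *dp_priv;

	/* one allocation: struct intel_dp_priv sits immediately after struct intel_encoder */
	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
				sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
	if (!intel_encoder)
		return;

	dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);	/* first byte past the encoder */
	intel_encoder->dev_priv = dp_priv;			/* back-pointer used throughout intel_dp.c */

Because both structures live in the same allocation, the single kfree(intel_encoder) in intel_dp_destroy() releases the private data as well.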
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3a467ca..e302537 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,7 +95,7 @@ struct intel_framebuffer {
};
-struct intel_output {
+struct intel_encoder {
struct drm_connector base;
struct drm_encoder enc;
@@ -105,7 +105,7 @@ struct intel_output {
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
- void (*hot_plug)(struct intel_output *);
+ void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
};
@@ -152,15 +152,15 @@ struct intel_crtc {
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_output(x) container_of(x, struct intel_output, base)
-#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_output *intel_output);
-extern bool intel_ddc_probe(struct intel_output *intel_output);
+int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern int intel_panel_fitter_pipe (struct drm_device *dev);
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode,
int *dpms_mode);
-extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
+extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
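The renamed to_intel_encoder()/enc_to_intel_encoder() macros above both rely on the drm_connector and drm_encoder being embedded in struct intel_encoder, so container_of() can recover the wrapper from a pointer to either member. A small illustration of the same computation (the inline-function names are illustrative, not from the header):

	#include <linux/kernel.h>	/* container_of() */

	/* subtract the member's offset within struct intel_encoder to get the wrapper */
	static inline struct intel_encoder *
	connector_to_intel_encoder(struct drm_connector *connector)
	{
		return container_of(connector, struct intel_encoder, base);
	}

	static inline struct intel_encoder *
	drm_encoder_to_intel_encoder(struct drm_encoder *enc)
	{
		return container_of(enc, struct intel_encoder, enc);
	}

Only the struct and macro names change in this patch; the offset arithmetic is untouched.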
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 0427ca5..ebf213c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
u32 dvo_reg = dvo->dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
static void intel_dvo_save(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* Each output should probably just save the registers it touches,
* but for now, use more overkill.
@@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector)
static void intel_dvo_restore(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
dvo->dev_ops->restore(dvo);
@@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector)
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_output);
+ intel_ddc_get_modes(intel_encoder);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (dvo) {
if (dvo->dev_ops->destroy)
@@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector)
/* no need, in i830_dvoices[] now */
//kfree(dvo);
}
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
}
#ifdef RANDR_GET_CRTC_INTERFACE
@@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
return intel_pipe_to_crtc(pScrn, pipe);
@@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+ intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
return;
/* Set up the DDC bus */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_output->ddc_bus)
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+ if (!intel_encoder->ddc_bus)
goto free_intel;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_output->base;
+ struct drm_connector *connector = &intel_encoder->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev)
if (!ret)
continue;
- intel_output->type = INTEL_OUTPUT_DVO;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_output->clone_mask =
+ intel_encoder->clone_mask =
(1 << INTEL_DVO_TMDS_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
drm_connector_init(dev, connector,
@@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_output->clone_mask =
+ intel_encoder->clone_mask =
(1 << INTEL_DVO_LVDS_CLONE_BIT);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
@@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- intel_output->dev_priv = dvo;
- intel_output->i2c_bus = i2cbus;
+ intel_encoder->dev_priv = dvo;
+ intel_encoder->i2c_bus = i2cbus;
- drm_encoder_init(dev, &intel_output->enc,
+ drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_output->enc,
+ drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_encoder->base,
+ &intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- intel_i2c_destroy(intel_output->ddc_bus);
+ intel_i2c_destroy(intel_encoder->ddc_bus);
/* Didn't find a chip, so tear down. */
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_output);
+ kfree(intel_encoder);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 69bbef9..8a0b3bc 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOMEM;
goto out;
}
- obj_priv = fbo->driver_private;
+ obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ed02f6..48cade0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI |
@@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 temp;
temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
}
@@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
POSTING_READ(hdmi_priv->sdvox_reg);
@@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(&intel_encoder->base,
+ intel_encoder->ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_output->base.display_info.raw_edid = NULL;
+ intel_encoder->base.display_info.raw_edid = NULL;
kfree(edid);
}
@@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_output);
+ return intel_ddc_get_modes(intel_encoder);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_hdmi_priv *hdmi_priv;
- intel_output = kcalloc(sizeof(struct intel_output) +
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
- if (!intel_output)
+ if (!intel_encoder)
return;
- hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1);
+ hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
- connector = &intel_output->base;
+ connector = &intel_encoder->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_output->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
- intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
- intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
- intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
- intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+ intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
- intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+ intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
- if (!intel_output->ddc_bus)
+ if (!intel_encoder->ddc_bus)
goto err_connector;
hdmi_priv->sdvox_reg = sdvox_reg;
- intel_output->dev_priv = hdmi_priv;
+ intel_encoder->dev_priv = hdmi_priv;
- drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_encoder->base,
+ &intel_encoder->enc);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
return;
}
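With the rename noise stripped away, the detect path in the intel_hdmi.c hunk above reduces to the following (condensed from the visible lines, no new behaviour intended):

	static enum drm_connector_status
	intel_hdmi_detect(struct drm_connector *connector)
	{
		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
		struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
		enum drm_connector_status status = connector_status_disconnected;
		struct edid *edid;

		hdmi_priv->has_hdmi_sink = false;
		edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
		if (edid) {
			/* a digital EDID means something is attached to the port */
			if (edid->input & DRM_EDID_INPUT_DIGITAL) {
				status = connector_status_connected;
				/* distinguishes a real HDMI sink from a DVI monitor */
				hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
			}
			intel_encoder->base.display_info.raw_edid = NULL;
			kfree(edid);
		}
		return status;
	}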
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 216e9f5..b66806a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0;
int left_border = 0, right_border = 0, top_border = 0;
int bottom_border = 0;
@@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = intel_ddc_get_modes(intel_output);
+ if (dev_priv->lvds_edid_good) {
+ ret = intel_ddc_get_modes(intel_encoder);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
@@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output =
- to_intel_output(connector);
+ struct intel_encoder *intel_encoder =
+ to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
{ } /* terminating entry */
};
@@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output) +
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_lvds_priv), GFP_KERNEL);
- if (!intel_output) {
+ if (!intel_encoder) {
return;
}
- connector = &intel_output->base;
- encoder = &intel_output->enc;
- drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+ connector = &intel_encoder->base;
+ encoder = &intel_encoder->enc;
+ drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- intel_output->type = INTEL_OUTPUT_LVDS;
+ drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->crtc_mask = (1 << 1);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
- intel_output->dev_priv = lvds_priv;
+ lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
+ intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_output->base,
+ drm_connector_attach_property(&intel_encoder->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_output->ddc_bus) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
+ if (!intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed;
@@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_ddc_get_modes(intel_output);
+ dev_priv->lvds_edid_good = true;
+
+ if (!intel_ddc_get_modes(intel_encoder))
+ dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
mutex_lock(&dev->mode_config.mutex);
@@ -1133,9 +1146,9 @@ out:
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_output);
+ kfree(intel_encoder);
}
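The Clientron U800 entry added to intel_no_lvds[] above follows the usual DMI quirk pattern. Roughly how such a table is consumed (the callback body and the call site sit outside the visible hunks, so treat this as an assumed sketch rather than the exact code):

	#include <linux/dmi.h>

	/* called for each matching entry; a non-zero return counts the match */
	static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
	{
		DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
		return 1;
	}

	/* early in intel_lvds_init(): skip machines that report a phantom LVDS port */
	if (dmi_check_system(intel_no_lvds))
		return;

Each DMI_MATCH() line in an entry must match the machine's DMI strings for that entry to fire, which is why the quirk pairs the vendor with the product name.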
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 89d303d..8e5c83b 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -34,7 +34,7 @@
* intel_ddc_probe
*
*/
-bool intel_ddc_probe(struct intel_output *intel_output)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder)
{
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
}
};
- intel_i2c_quirk_set(intel_output->base.dev, true);
- ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_output->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_encoder->base.dev, false);
if (ret == 2)
return true;
@@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output)
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_output *intel_output)
+int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->base.dev, true);
- edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
- intel_i2c_quirk_set(intel_output->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
+ intel_i2c_quirk_set(intel_encoder->base.dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_output->base,
+ drm_mode_connector_update_edid_property(&intel_encoder->base,
edid);
- ret = drm_add_edid_modes(&intel_output->base, edid);
- intel_output->base.display_info.raw_edid = NULL;
+ ret = drm_add_edid_modes(&intel_encoder->base, edid);
+ intel_encoder->base.display_info.raw_edid = NULL;
kfree(edid);
}
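For context on intel_ddc_probe() above: i2c_transfer() returns the number of messages successfully processed (or a negative errno), so the ret == 2 check means both the offset write and the read-back on the DDC bus were ACKed. A stand-alone sketch of that probe pattern, with the slave address and message layout assumed rather than quoted from the elided msgs[] initializer:
/* Hypothetical probe helper (needs <linux/i2c.h>): write one offset
 * byte to the EDID address, conventionally 0x50, and read one byte
 * back; success means a sink answered on the DDC bus. */
static bool ddc_probe_sketch(struct i2c_adapter *adapter)
{
	u8 out = 0x0, in = 0x0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
	};

	return i2c_transfer(adapter, msgs, 2) == 2;
}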
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 60595fc..6d524a1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+ struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
intel_overlay_continue(overlay, scale_changed);
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = new_bo->driver_private;
+ overlay->vid_bo = to_intel_bo(new_bo);
return 0;
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = reg_bo->driver_private;
+ overlay->reg_bo = to_intel_bo(reg_bo);
if (OVERLAY_NONPHYSICAL(dev)) {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
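The intel_overlay.c hunks above replace open-coded driver_private reads with the to_intel_bo() accessor. At this point in the series the helper is presumed to be a plain cast over that same field (its real definition lives in i915_drv.h, outside this diff); a sketch under that assumption:
/* Assumed shape of the accessor used above; equivalent to the
 * gem_obj->driver_private dereference it replaces. */
#define to_intel_bo(gem_obj) \
	((struct drm_i915_gem_object *)(gem_obj)->driver_private)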
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26e13a0..87d9536 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -54,7 +54,7 @@ struct intel_sdvo_priv {
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
- int output_device;
+ int sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -124,7 +124,7 @@ struct intel_sdvo_priv {
*/
struct intel_sdvo_encode encode;
- /* DDC bus used by this SDVO output */
+ /* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
/* Mac mini hack -- use the same DDC as the analog connector */
@@ -162,22 +162,22 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (sdvo_priv->output_device == SDVOB) {
+ if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
+static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
u8 *ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2];
u8 buf[2];
int ret;
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
return false;
}
-static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
+static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
u8 ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
+ if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
{
return true;
}
@@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
+#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
-static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
@@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
DRM_LOG_KMS("\n");
}
-static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_output, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
((u8*)args)[i]);
}
- intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -404,11 +404,11 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_output *intel_output,
+static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
void *response, int response_len,
u8 status)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
DRM_LOG_KMS("\n");
}
-static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
void *response, int response_len)
{
int i;
@@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- intel_sdvo_read_byte(intel_output,
+ intel_sdvo_read_byte(intel_encoder,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]);
}
/* read the return status */
- intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
+ intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
&status);
- intel_sdvo_debug_response(intel_output, response, response_len,
+ intel_sdvo_debug_response(intel_encoder, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
return status;
@@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
u8 target)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
@@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
},
};
- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
return;
}
-static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
{
struct intel_sdvo_set_target_input_args targets = {0};
u8 status;
@@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
if (target_1)
targets.target_1 = 1;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
sizeof(targets));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
@@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
+static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
u16 *outputs)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
+static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
sizeof(outputs));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
+static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
int mode)
{
u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
break;
}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
sizeof(state));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
NULL, 0);
- status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
+ status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
u16 outputs)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
sizeof(outputs));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part1,
+ intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
sizeof(dtd->part1));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
- intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part2,
+ intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
sizeof(dtd->part2));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
return true;
}
-static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_get_timing(intel_output,
+ return intel_sdvo_get_timing(intel_encoder,
SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_get_timing(intel_output,
+ return intel_sdvo_get_timing(intel_encoder,
SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
- intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
-static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_output *output,
+intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
uint8_t status;
memset(&args, 0, sizeof(args));
@@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
&args, sizeof(args));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
bool status;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
NULL, 0);
- status = intel_sdvo_read_response(output, &dtd->part1,
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
sizeof(dtd->part1));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
NULL, 0);
- status = intel_sdvo_read_response(output, &dtd->part2,
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
sizeof(dtd->part2));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
return false;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
+static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
{
u8 response, status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 1);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
@@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
return response;
}
-static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_output *output,
+static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
struct intel_sdvo_encode *encode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, encode, sizeof(*encode));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
memset(encode, 0, sizeof(*encode));
return false;
@@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
return true;
}
-static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
+static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+ uint8_t mode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_colorimetry(struct intel_output *output,
+static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
uint8_t mode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
+static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
{
int i, j;
uint8_t set_buf_index[2];
@@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
- intel_sdvo_read_response(output, &av_split, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
- intel_sdvo_read_response(output, &buf_size, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &buf_size, 1);
pos = buf;
for (j = 0; j <= buf_size; j += 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_DATA,
NULL, 0);
- intel_sdvo_read_response(output, pos, 8);
+ intel_sdvo_read_response(intel_encoder, pos, 8);
pos += 8;
}
}
}
#endif
-static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
- uint8_t *data, int8_t size, uint8_t tx_rate)
+static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+ int index,
+ uint8_t *data, int8_t size, uint8_t tx_rate)
{
uint8_t set_buf_index[2];
set_buf_index[0] = index;
set_buf_index[1] = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
for (; size > 0; size -= 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
data += 8;
}
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
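intel_sdvo_set_hdmi_buf() above streams the infoframe into the device eight bytes at a time and then programs the transmit rate; the checksum it is handed comes from intel_sdvo_calc_hbuf_csum(), whose body is outside this hunk. Assuming the usual HDMI infoframe convention (two's complement of the byte sum), it would look roughly like:
/* Sketch, assuming the standard infoframe checksum rule; the real
 * function body is not shown in this diff. */
static uint8_t calc_hbuf_csum_sketch(const uint8_t *data, uint8_t size)
{
	uint8_t csum = 0;
	int i;

	for (i = 0; i < size; i++)
		csum += data[i];

	return 0x100 - csum;
}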
@@ -1034,7 +1038,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
+static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
+ intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
SDVO_HBUF_TX_VSYNC);
}
-static void intel_sdvo_set_tv_format(struct intel_output *output)
+static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
{
struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
uint32_t format_map, i;
uint8_t status;
@@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
sizeof(format));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
DRM_DEBUG_KMS("%s: Failed to set TV format\n",
SDVO_NAME(sdvo_priv));
@@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *dev_priv = output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
if (dev_priv->is_tv) {
struct intel_sdvo_dtd output_dtd;
@@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- intel_sdvo_set_target_output(output,
+ intel_sdvo_set_target_output(intel_encoder,
dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- success = intel_sdvo_create_preferred_input_timing(output,
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay);
if (success) {
struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(output,
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
&input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
intel_sdvo_get_dtd_from_mode(&output_dtd,
dev_priv->sdvo_lvds_fixed_mode);
- intel_sdvo_set_target_output(output,
+ intel_sdvo_set_target_output(intel_encoder,
dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
success = intel_sdvo_create_preferred_input_timing(
- output,
+ intel_encoder,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay);
@@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (success) {
struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(output,
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
&input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 sdvox = 0;
int sdvo_pixel_multiply;
struct intel_sdvo_in_out_map in_out;
@@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
in_out.in0 = sdvo_priv->controlled_output;
in_out.in1 = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (sdvo_priv->is_hdmi) {
- intel_sdvo_set_avi_infoframe(output, mode);
+ intel_sdvo_set_avi_infoframe(intel_encoder, mode);
sdvox |= SDVO_AUDIO_ENABLE;
}
@@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
*/
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
- intel_sdvo_set_target_output(output,
+ intel_sdvo_set_target_output(intel_encoder,
sdvo_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &input_dtd);
+ intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
if (sdvo_priv->is_tv)
- intel_sdvo_set_tv_format(output);
+ intel_sdvo_set_tv_format(intel_encoder);
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* output the preferred timing, and we don't support that currently.
*/
#if 0
- success = intel_sdvo_create_preferred_input_timing(output, clock,
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder, clock,
width, height);
if (success) {
struct intel_sdvo_dtd *input_dtd;
- intel_sdvo_get_preferred_input_timing(output, &input_dtd);
- intel_sdvo_set_input_timing(output, &input_dtd);
+ intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd);
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
}
#else
- intel_sdvo_set_input_timing(output, &input_dtd);
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
#endif
switch (intel_sdvo_get_pixel_multiplier(mode)) {
case 1:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_1X);
break;
case 2:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_2X);
break;
case 4:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_4X);
break;
}
@@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
SDVO_VSYNC_ACTIVE_HIGH |
SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(sdvo_priv->output_device);
- switch (sdvo_priv->output_device) {
+ sdvox |= I915_READ(sdvo_priv->sdvo_reg);
+ switch (sdvo_priv->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(output, sdvox);
+ intel_sdvo_write_sdvox(intel_encoder, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_output, 0);
+ intel_sdvo_set_active_outputs(intel_encoder, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(sdvo_priv->output_device);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(sdvo_priv->output_device);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1,
+ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
&input2);
@@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
}
return;
}
@@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int o;
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
- intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
+ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
+ intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_get_input_timing(intel_output,
+ intel_sdvo_set_target_input(intel_encoder, true, false);
+ intel_sdvo_get_input_timing(intel_encoder,
&sdvo_priv->save_input_dtd_1);
}
if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_get_input_timing(intel_output,
+ intel_sdvo_set_target_input(intel_encoder, false, true);
+ intel_sdvo_get_input_timing(intel_encoder,
&sdvo_priv->save_input_dtd_2);
}
@@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector)
u16 this_output = (1 << o);
if (sdvo_priv->caps.output_flags & this_output)
{
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_get_output_timing(intel_output,
+ intel_sdvo_set_target_output(intel_encoder, this_output);
+ intel_sdvo_get_output_timing(intel_encoder,
&sdvo_priv->save_output_dtd[o]);
}
}
@@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector)
/* XXX: Save TV format/enhancements. */
}
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
}
static void intel_sdvo_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int o;
int i;
bool input1, input2;
u8 status;
- intel_sdvo_set_active_outputs(intel_output, 0);
+ intel_sdvo_set_active_outputs(intel_encoder, 0);
for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
{
u16 this_output = (1 << o);
if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
+ intel_sdvo_set_target_output(intel_encoder, this_output);
+ intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
}
}
if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
+ intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
}
if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
+ intel_sdvo_set_target_input(intel_encoder, false, true);
+ intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
}
- intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
+ intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
if (sdvo_priv->is_tv) {
/* XXX: Restore TV format/enhancements. */
}
- intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
+ intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
{
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
+ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
DRM_DEBUG_KMS("First %s output reported failure to "
"sync\n", SDVO_NAME(sdvo_priv));
}
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
}
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_output *iout = NULL;
+ struct intel_encoder *iout = NULL;
struct intel_sdvo_priv *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_output(connector);
+ iout = to_intel_encoder(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
sdvo = iout->dev_priv;
- if (sdvo->output_device == SDVOB && sdvoB)
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
return connector;
- if (sdvo->output_device == SDVOC && !sdvoB)
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
return connector;
}
@@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_output = to_intel_output(connector);
+ intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
if (response[0] !=0)
return 1;
@@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
}
static bool
-intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
+intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
if (sdvo_priv->caps.output_flags &
@@ -1593,11 +1598,11 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_output = to_intel_output(connector);
- if (intel_output->type == INTEL_OUTPUT_ANALOG)
+ intel_encoder = to_intel_encoder(connector);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
return connector;
}
return NULL;
@@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev)
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(&intel_encoder->base,
+ intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
uint8_t saved_ddc, temp_ddc;
saved_ddc = sdvo_priv->ddc_bus;
temp_ddc = sdvo_priv->ddc_bus >> 1;
@@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while(temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(&intel_encoder->base,
+ intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
@@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
if (edid == NULL &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev))
- edid = drm_get_edid(&intel_output->base,
+ !intel_analog_is_connected(intel_encoder->base.dev))
+ edid = drm_get_edid(&intel_encoder->base,
sdvo_priv->analog_ddc_bus);
if (edid != NULL) {
/* Don't report the output as connected if it's a DVI-I
@@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
}
kfree(edid);
- intel_output->base.display_info.raw_edid = NULL;
+ intel_encoder->base.display_info.raw_edid = NULL;
} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
status = connector_status_disconnected;
@@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
if (sdvo_priv->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
@@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_output) &&
+ if (intel_sdvo_multifunc_encoder(intel_encoder) &&
sdvo_priv->attached_output != response) {
if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_output, response) != true)
+ intel_sdvo_output_setup(intel_encoder, response) != true)
return connector_status_unknown;
sdvo_priv->attached_output = response;
}
@@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_output);
+ num_modes = intel_ddc_get_modes(intel_encoder);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev)) {
+ !intel_analog_is_connected(intel_encoder->base.dev)) {
struct i2c_adapter *digital_ddc_bus;
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_output->ddc_bus;
- intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
+ digital_ddc_bus = intel_encoder->ddc_bus;
+ intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
- (void) intel_ddc_get_modes(intel_output);
+ (void) intel_ddc_get_modes(intel_encoder);
- intel_output->ddc_bus = digital_ddc_bus;
+ intel_encoder->ddc_bus = digital_ddc_bus;
}
}
@@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
+ struct intel_encoder *output = to_intel_encoder(connector);
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
@@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_output);
+ intel_ddc_get_modes(intel_encoder);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1897,7 +1902,7 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
+ struct intel_encoder *output = to_intel_encoder(connector);
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
if (sdvo_priv->is_tv)
@@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_device *dev = connector->dev;
if (sdvo_priv->is_tv) {
@@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
if (sdvo_priv->analog_ddc_bus)
intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
@@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
}
static int
@@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
sdvo_priv->cur_brightness = temp_value;
}
if (cmd) {
- intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
- status = intel_sdvo_read_response(intel_output,
+ intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_encoder,
NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
@@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
return true;
}
-static struct intel_output *
-intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
+static struct intel_encoder *
+intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
struct drm_connector *connector;
- struct intel_output *intel_output = NULL;
+ struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
- intel_output = to_intel_output(connector);
+ if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
+ intel_encoder = to_intel_encoder(connector);
break;
}
}
- return intel_output;
+ return intel_encoder;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_output =
- intel_sdvo_chan_to_intel_output(
+ intel_encoder =
+ intel_sdvo_chan_to_intel_encoder(
(struct intel_i2c_chan *)(algo_data->data));
- if (intel_output == NULL)
+ if (intel_encoder == NULL)
return -EINVAL;
- sdvo_priv = intel_output->dev_priv;
- algo = intel_output->i2c_bus->algo;
+ sdvo_priv = intel_encoder->dev_priv;
+ algo = intel_encoder->i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
};
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
+intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (output_device == SDVOB) {
+ if (sdvo_reg == SDVOB) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (output_device == SDVOB)
+ if (sdvo_reg == SDVOB)
return 0x70;
else
return 0x72;
@@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = {
};
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
{
- struct drm_connector *connector = &intel_output->base;
- struct drm_encoder *encoder = &intel_output->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_connector *connector = &intel_encoder->base;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
bool ret = true, registered = false;
sdvo_priv->is_tv = false;
- intel_output->needs_tv_clock = false;
+ intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
if (device_is_registered(&connector->kdev)) {
@@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_output,
+ if (intel_sdvo_get_supp_encode(intel_encoder,
&sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_output) &&
+ intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
sdvo_priv->is_hdmi) {
/* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_output,
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_output->clone_mask =
+ intel_encoder->clone_mask =
(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
}
@@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
sdvo_priv->is_tv = true;
- intel_output->needs_tv_clock = true;
- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_RGB0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_RGB1) {
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_CVBS0) {
@@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
sdvo_priv->is_tv = true;
- intel_output->needs_tv_clock = true;
- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_LVDS1) {
@@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
} else {
@@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
bytes[0], bytes[1]);
ret = false;
}
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
if (ret && registered)
ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
@@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
static void intel_sdvo_tv_create_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_output,
+ intel_sdvo_set_target_output(intel_encoder,
sdvo_priv->controlled_output);
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&format, sizeof(format));
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
uint16_t response, data_value[2];
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
NULL, 0);
- status = intel_sdvo_read_response(intel_output, &sdvo_data,
+ status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
sizeof(sdvo_data));
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
* property
*/
if (sdvo_data.overscan_h) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO max "
"h_overscan\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.overscan_v) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO max "
"v_overscan\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.position_h) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.position_v) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
}
if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.contrast) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.hue) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
}
if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
if (sdvo_data.brightness) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
@@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
return;
}
-bool intel_sdvo_init(struct drm_device *dev, int output_device)
+bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
u8 ch[0x40];
int i;
- intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_output) {
+ intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+ if (!intel_encoder) {
return false;
}
- sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
- sdvo_priv->output_device = output_device;
+ sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
+ sdvo_priv->sdvo_reg = sdvo_reg;
- intel_output->dev_priv = sdvo_priv;
- intel_output->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->dev_priv = sdvo_priv;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
/* setup the DDC bus. */
- if (output_device == SDVOB)
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (sdvo_reg == SDVOB)
+ intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
- if (!intel_output->i2c_bus)
+ if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
+ intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
+ sdvo_reg == SDVOB ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (output_device == SDVOB) {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ if (sdvo_reg == SDVOB) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
- if (intel_output->ddc_bus == NULL)
+ if (intel_encoder->ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
- intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
/* In default case sdvo lvds is false */
- intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+ intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
- if (intel_sdvo_output_setup(intel_output,
+ if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
+ sdvo_reg == SDVOB ? 'B' : 'C');
goto err_i2c;
}
- connector = &intel_output->base;
+ connector = &intel_encoder->base;
drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
connector->connector_type);
@@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
connector->doublescan_allowed = 0;
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- drm_encoder_init(dev, &intel_output->enc,
- &intel_sdvo_enc_funcs, intel_output->enc.encoder_type);
+ drm_encoder_init(dev, &intel_encoder->enc,
+ &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
- drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
if (sdvo_priv->is_tv)
intel_sdvo_tv_create_property(connector);
@@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_sdvo_select_ddc_bus(sdvo_priv);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_pixel_clock_range(intel_output,
+ intel_sdvo_get_input_pixel_clock_range(intel_encoder,
&sdvo_priv->pixel_clock_min,
&sdvo_priv->pixel_clock_max);
@@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
err_i2c:
if (sdvo_priv->analog_ddc_bus != NULL)
intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_output->ddc_bus != NULL)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (intel_output->i2c_bus != NULL)
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_encoder->ddc_bus != NULL)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (intel_encoder->i2c_bus != NULL)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_output);
+ kfree(intel_encoder);
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec11..d7d39b2 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
int i;
tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = connector->encoder->crtc;
struct intel_crtc *intel_crtc;
int i;
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_output *intel_output)
+intel_tv_mode_find (struct intel_encoder *intel_encoder)
{
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
return intel_tv_mode_lookup(tv_priv->tv_format);
}
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector)
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(encoder->crtc, intel_output);
+ type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
+ type = intel_tv_detect_type(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder, dpms_mode);
} else
type = -1;
}
@@ -1525,8 +1525,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,8 +1550,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
}
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_output = kzalloc(sizeof(struct intel_output) +
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_tv_priv), GFP_KERNEL);
- if (!intel_output) {
+ if (!intel_encoder) {
return;
}
- connector = &intel_output->base;
+ connector = &intel_encoder->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- tv_priv = (struct intel_tv_priv *)(intel_output + 1);
- intel_output->type = INTEL_OUTPUT_TVOUT;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
- intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_output->dev_priv = tv_priv;
+ drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_encoder->dev_priv = tv_priv;
tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev)
tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
- drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index bcec2d7..1d56983 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
@@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fd4ef6d..a87990b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ pll->algo = PLL_ALGO_LEGACY;
+ pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c958049..d7388fd 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
{
struct radeon_bo *robj;
unsigned long size;
- unsigned u, i, w, h;
+ unsigned u, i, w, h, d;
int ret;
for (u = 0; u < track->num_texture; u++) {
@@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
+ if (track->textures[u].tex_coord_type == 1) {
+ d = (1 << track->textures[u].txdepth) / (1 << i);
+ if (!d)
+ d = 1;
+ } else {
+ d = 1;
+ }
if (track->textures[u].compress_format) {
- size += r100_track_compress_size(track->textures[u].compress_format, w, h);
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
- size += w * h;
+ size += w * h * d;
}
size *= track->textures[u].cpp;
switch (track->textures[u].tex_coord_type) {
case 0:
- break;
case 1:
- size *= (1 << track->textures[u].txdepth);
break;
case 2:
if (track->separate_cube) {
@@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
}
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
- nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ if (track->vap_vf_cntl & (1 << 14)) {
+ nverts = track->vap_alt_nverts;
+ } else {
+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ }
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index b27a699..fadfe68 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -64,6 +64,7 @@ struct r100_cs_track {
unsigned maxy;
unsigned vtx_size;
unsigned vap_vf_cntl;
+ unsigned vap_alt_nverts;
unsigned immd_dwords;
unsigned num_arrays;
unsigned max_indx;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2b9affe..eaf1f6b 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -324,13 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev)
uint32_t gb_tile_config, tmp;
r100_hdp_reset(rdev);
- /* FIXME: rv380 one pipes ? */
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
- (rdev->family == CHIP_R350)) {
+ (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
- /* rv350,rv370,rv380,r300 AD */
+ /* rv350,rv370,rv380,r300 AD, r350 AH */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
@@ -730,6 +729,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
+ case 0x2088:
+ /* VAP_ALT_NUM_VERTICES - only valid on r500 */
+ if (p->rdev->family < CHIP_RV515)
+ goto fail;
+ track->vap_alt_nverts = idx_value & 0xFFFFFF;
+ break;
case 0x43E4:
/* SC_SCISSOR1 */
track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -767,7 +772,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
-
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -1052,11 +1056,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
/* fallthrough do not move */
default:
- printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
- reg, idx);
- return -EINVAL;
+ goto fail;
}
return 0;
+fail:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
}
static int r300_packet3_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index ea46d55..c5c2742 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
sizeof(stack_ptr_addr), &stack_ptr_addr);
- ref_age_base = (u32 *)(unsigned long)*ptr_addr;
+ ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3dc968c..c2bda4a 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -59,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev)
/* get max number of pipes */
gb_pipe_select = RREG32(0x402C);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+ /* SE chips have 1 pipe */
+ if ((rdev->pdev->device == 0x5e4c) ||
+ (rdev->pdev->device == 0x5e4f))
+ num_pipes = 1;
+
rdev->num_gb_pipes = num_pipes;
tmp = 0;
switch (num_pipes) {
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index dac7042..1d89805 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return rdev->family >= CHIP_R600
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 029fa14..2616b82 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (!offset)
return;
@@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (!radeon_encoder->hdmi_offset) {
r600_hdmi_assign_block(encoder);
if (!radeon_encoder->hdmi_offset) {
@@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (!radeon_encoder->hdmi_offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3fba505..1331351 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
+ struct radeon_connector *radeon_conflict;
int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
if (conflict == connector)
continue;
+ radeon_conflict = to_radeon_connector(conflict);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (conflict->encoder_ids[i] == 0)
break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (conflict->status != connector_status_connected)
continue;
+ if (radeon_conflict->use_digital)
+ continue;
+
if (priority == true) {
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
if (property == rdev->mode_info.coherent_mode_property) {
struct radeon_encoder_atom_dig *dig;
+ bool new_coherent_mode;
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
dig = radeon_encoder->enc_priv;
- dig->coherent_mode = val ? true : false;
- radeon_property_change_mode(&radeon_encoder->base);
+ new_coherent_mode = val ? true : false;
+ if (dig->coherent_mode != new_coherent_mode) {
+ dig->coherent_mode = new_coherent_mode;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
}
if (property == rdev->mode_info.tv_std_property) {
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 419630d..2f042a3 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev)
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+ /* SE cards have 1 pipe */
+ if ((dev->pdev->device == 0x5e4c) ||
+ (dev->pdev->device == 0x5e4f))
+ dev_priv->num_gb_pipes = 1;
} else {
/* R3xx */
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
dev->pdev->device != 0x4144) ||
- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+ dev->pdev->device != 0x4148)) {
dev_priv->num_gb_pipes = 2;
} else {
- /* RV3xx/R300 AD */
+ /* RV3xx/R300 AD/R350 AH */
dev_priv->num_gb_pipes = 1;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bddf17f..7b629e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -36,6 +36,54 @@
#include "radeon.h"
#include "atom.h"
+static const char radeon_family_name[][16] = {
+ "R100",
+ "RV100",
+ "RS100",
+ "RV200",
+ "RS200",
+ "R200",
+ "RV250",
+ "RS300",
+ "RV280",
+ "R300",
+ "R350",
+ "RV350",
+ "RV380",
+ "R420",
+ "R423",
+ "RV410",
+ "RS400",
+ "RS480",
+ "RS600",
+ "RS690",
+ "RS740",
+ "RV515",
+ "R520",
+ "RV530",
+ "RV560",
+ "RV570",
+ "R580",
+ "R600",
+ "RV610",
+ "RV630",
+ "RV670",
+ "RV620",
+ "RV635",
+ "RS780",
+ "RS880",
+ "RV770",
+ "RV730",
+ "RV710",
+ "RV740",
+ "CEDAR",
+ "REDWOOD",
+ "JUNIPER",
+ "CYPRESS",
+ "HEMLOCK",
+ "LAST",
+};
+
/*
* Clear GPU surface registers.
*/
@@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev,
int r;
int dma_bits;
- DRM_INFO("radeon: Initializing kernel modesetting.\n");
rdev->shutdown = false;
rdev->dev = &pdev->dev;
rdev->ddev = ddev;
@@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->gpu_lockup = false;
rdev->accel_working = false;
+
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&rdev->cs_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8d6728..bb1c122 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
- WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
- WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
- WREG32(EVERGREEN_DC_LUT_30_COLOR,
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 055a517..4b05563 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -43,9 +43,10 @@
* - 2.0.0 - initial interface
* - 2.1.0 - add square tiling interface
* - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 2
+#define KMS_DRIVER_MINOR 3
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c52fc30..fed7b808 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -865,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
@@ -888,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.acConfig.fDualLinkConnector = 1;
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
@@ -1322,7 +1326,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
radeon_encoder->pixel_clock = adjusted_mode->clock;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
@@ -1373,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 93c7d5d4..e329066 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -36,7 +36,7 @@
* Radeon chip families
*/
enum radeon_family {
- CHIP_R100,
+ CHIP_R100 = 0,
CHIP_RV100,
CHIP_RS100,
CHIP_RV200,
@@ -99,4 +99,5 @@ enum radeon_chip_flags {
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
};
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d3657dc..c633319 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -165,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -177,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -191,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 19c4663..1e97b2d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -125,6 +125,8 @@ r300 0x4f60
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 989f7a0..e958980 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -125,6 +125,8 @@ r420 0x4f60
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 6801b86..83e8bc0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -125,6 +125,8 @@ rs600 0x6d40
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 38abf63..1e46233 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -35,6 +35,7 @@ rv515 0x6d40
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
@@ -158,6 +159,8 @@ rv515 0x6d40
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index abf824c..a81bc7a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c1605b5..0f28d91 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = {
"TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
"TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
NULL },
+/* Set 17: iMac 9,1 */
+ { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
+ "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
+/* Set 18: MacBook Pro 2,2 */
+ { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
+ "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 1, .light = 1, .temperature_set = 15 },
/* MacPro3,1: temperature set 16 */
{ .accelerometer = 0, .light = 0, .temperature_set = 16 },
+/* iMac 9,1: light sensor only, temperature set 17 */
+ { .accelerometer = 0, .light = 0, .temperature_set = 17 },
+/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 18 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
&applesmc_dmi_data[9]},
+ { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
+ &applesmc_dmi_data[18]},
{ applesmc_dmi_match, "Apple MacBook Pro", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
&applesmc_dmi_data[4]},
+ { applesmc_dmi_match, "Apple iMac 9,1", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
+ &applesmc_dmi_data[17]},
{ applesmc_dmi_match, "Apple iMac 8", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 75f3fa5..16c4202 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data)
int err;
list_for_each_entry(s, &data->sensor_list, list) {
+ sysfs_attr_init(&s->input_attr.attr);
err = device_create_file(data->hwmon_dev, &s->input_attr);
if (err)
return err;
+ sysfs_attr_init(&s->label_attr.attr);
err = device_create_file(data->hwmon_dev, &s->label_attr);
if (err)
return err;
+ sysfs_attr_init(&s->limit1_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit1_attr);
if (err)
return err;
+ sysfs_attr_init(&s->limit2_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit2_attr);
if (err)
return err;
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index be475e8..c8ab505 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
+ AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
+ AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1002bef..5be09c0 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
+ u8 reg;
if (strict_strtol(buf, 10, &val) < 0)
return -EINVAL;
- mutex_lock(&data->update_lock);
-
- data->sensor &= ~(1 << nr);
- data->sensor &= ~(8 << nr);
+ reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ reg &= ~(1 << nr);
+ reg &= ~(8 << nr);
if (val == 2) { /* backwards compatibility */
dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
"instead\n");
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
}
/* 3 = thermal diode; 4 = thermistor; 0 = disabled */
if (val == 3)
- data->sensor |= 1 << nr;
+ reg |= 1 << nr;
else if (val == 4)
- data->sensor |= 8 << nr;
- else if (val != 0) {
- mutex_unlock(&data->update_lock);
+ reg |= 8 << nr;
+ else if (val != 0)
return -EINVAL;
- }
+
+ mutex_lock(&data->update_lock);
+ data->sensor = reg;
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
+ data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev)
it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
}
- /* Check if temperature channels are reset manually or by some reason */
- tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE);
- if ((tmp & 0x3f) == 0) {
- /* Temp1,Temp3=thermistor; Temp2=thermal diode */
- tmp = (tmp & 0xc0) | 0x2a;
- it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
- }
- data->sensor = tmp;
+ /* Temperature channels are not forcibly enabled, as they can be
+ * set to two different sensor types and we can't guess which one
+ * is correct for a given system. These channels can be enabled at
+ * run-time through the temp{1-3}_type sysfs accessors if needed. */
/* Check if voltage monitors are reset manually or by some reason */
tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6b2d8ae..a610e78 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -303,13 +303,13 @@ error_ret:
**/
static inline int sht15_calc_temp(struct sht15_data *data)
{
- int d1 = 0;
+ int d1 = temppoints[0].d1;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+ d1 = (data->supply_uV - temppoints[i - 1].vdd)
* (temppoints[i].d1 - temppoints[i - 1].d1)
/ (temppoints[i].vdd - temppoints[i - 1].vdd)
+ temppoints[i - 1].d1;
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
/* If a regulator is available, query what the supply voltage actually is!*/
data->reg = regulator_get(data->dev, "vcc");
if (!IS_ERR(data->reg)) {
- data->supply_uV = regulator_get_voltage(data->reg);
+ int voltage;
+
+ voltage = regulator_get_voltage(data->reg);
+ if (voltage)
+ data->supply_uV = voltage;
+
regulator_enable(data->reg);
/* setup a notifier block to update this if another device
* causes the voltage to change */
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index f7e27b7..d1ff940 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
"<%s> I2C Interrupted\n", __func__);
return -EINTR;
}
- if (time_after(jiffies, orig_jiffies + HZ / 1000)) {
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> I2C bus is busy\n", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
schedule();
}
@@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
result = i2c_imx_read(i2c_imx, &msgs[i]);
else
result = i2c_imx_write(i2c_imx, &msgs[i]);
+ if (result)
+ goto fail0;
}
fail0:
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6bd0f19..389ac60 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ if (cpu_is_omap7xx())
+ dev->reg_shift = 1;
+ else
+ dev->reg_shift = 2;
+
if ((r = omap_i2c_get_clocks(dev)) != 0)
goto err_iounmap;
@@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev->b_hw = 1; /* Enable hardware fixes */
}
- if (cpu_is_omap7xx())
- dev->reg_shift = 1;
- else
- dev->reg_shift = 2;
-
/* reset ASAP, clearing any IRQs */
omap_i2c_init(dev);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 2471033..a97e3fe 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
/* We still have something to talk about... */
val = *alg_data->mif.buf++;
+ if (alg_data->mif.len == 1)
+ val |= stop_bit;
+
alg_data->mif.len--;
iowrite32(val, I2C_REG_TX(alg_data));
@@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
__func__);
if (alg_data->mif.len == 1) {
+ /* Last byte, do not acknowledge next rcv. */
+ val |= stop_bit;
+
/*
* Enable interrupt RFDAIE (data in Rx fifo),
* and disable DRMIE (need data for Tx)
@@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
*/
tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
+ if (tmp > 0x3FF)
+ tmp = 0x3FF;
iowrite32(tmp, I2C_REG_CKH(alg_data));
iowrite32(tmp, I2C_REG_CKL(alg_data));
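The last i2c-pnx hunk clamps the computed clock divider at 0x3FF so a fast parent clock cannot overflow the CKH/CKL register fields. The arithmetic in isolation, with example clock rates (the 100 kHz bus speed is only illustrative):

#include <stdio.h>

#define I2C_PNX_SPEED_KHZ	100	/* example target bus speed */
#define CK_FIELD_MAX		0x3FF	/* clamp used by the patch */

static unsigned int pnx_divider(unsigned long parent_hz)
{
	unsigned int tmp = ((parent_hz / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;

	if (tmp > CK_FIELD_MAX)		/* clamp to the register width */
		tmp = CK_FIELD_MAX;
	return tmp;
}

int main(void)
{
	printf("13 MHz parent  -> 0x%x\n", pnx_divider(13000000));	/* 0x3f */
	printf("400 MHz parent -> 0x%x\n", pnx_divider(400000000));	/* clamped to 0x3ff */
	return 0;
}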
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 1f5b38b..495be45 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
int i = 0;
/* Locate the appropriate clock setting */
- while (i < ARRAY_SIZE(stu300_clktable) &&
+ while (i < ARRAY_SIZE(stu300_clktable) - 1 &&
stu300_clktable[i].rate < clkrate)
i++;
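The stu300 fix stops the table scan at the final row, so a clock rate above every entry selects the last setting instead of indexing past the end of stu300_clktable. A minimal standalone version with an invented table:

#include <stdio.h>

struct clk_setting { unsigned long rate; unsigned int cr; };

/* Illustrative table only; the real one is stu300_clktable. */
static const struct clk_setting clktable[] = {
	{  2500000, 0x0 },
	{  8000000, 0x1 },
	{ 13000000, 0x2 },
	{ 26000000, 0x3 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int pick_setting(unsigned long clkrate)
{
	unsigned int i = 0;

	/* Stop at the last entry even if clkrate exceeds every rate. */
	while (i < ARRAY_SIZE(clktable) - 1 && clktable[i].rate < clkrate)
		i++;
	return clktable[i].cr;
}

int main(void)
{
	printf("52 MHz -> cr 0x%x\n", pick_setting(52000000));	/* last entry */
	return 0;
}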
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index ab87e4f..defce28 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/input/input.c b/drivers/input/input.c
index afd4e2b..9c79bd5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev,
int input_get_keycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode)
{
- return dev->getkeycode(dev, scancode, keycode);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ retval = dev->getkeycode(dev, scancode, keycode);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return retval;
}
EXPORT_SYMBOL(input_get_keycode);
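input_get_keycode() now takes dev->event_lock around the getkeycode() call; together with the sparse-keymap change further down, this lets the keymap be freed under the same lock without racing a concurrent lookup. The locking shape, as a hedged sketch with a placeholder lookup callback:

#include <linux/spinlock.h>

/* Serialize readers of a structure that another path may tear down,
 * in the style of input_get_keycode() vs. sparse_keymap_free().
 * lookup() and obj are placeholders, not real kernel APIs. */
static int locked_lookup(spinlock_t *lock, int (*lookup)(void *), void *obj)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(lock, flags);	/* also blocks the freeing path */
	retval = lookup(obj);
	spin_unlock_irqrestore(lock, flags);

	return retval;
}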
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index ffc25cf..b443e08 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ if (!pdata->no_autorepeat)
+ input_dev->evbit[0] |= BIT_MASK(EV_REP);
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 99d5876..0d22cb9 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = {
{ { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+ { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */
{ { 0x52, 0x01, 0x14 }, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
};
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 4f8fe08..b89879b 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = {
.disconnect = bcm5974_disconnect,
.suspend = bcm5974_suspend,
.resume = bcm5974_resume,
- .reset_resume = bcm5974_resume,
.id_table = bcm5974_table,
.supports_autosuspend = 1,
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 577688b..6440a8f 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
static bool i8042_nomux;
module_param_named(nomux, i8042_nomux, bool, 0);
-MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present.");
+MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 82ae18d..0142483 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev,
unsigned int scancode,
unsigned int *keycode)
{
- const struct key_entry *key =
- sparse_keymap_entry_from_scancode(dev, scancode);
+ const struct key_entry *key;
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
+ if (dev->keycode) {
+ key = sparse_keymap_entry_from_scancode(dev, scancode);
+ if (key && key->type == KE_KEY) {
+ *keycode = key->keycode;
+ return 0;
+ }
}
return -EINVAL;
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev,
struct key_entry *key;
int old_keycode;
- if (keycode < 0 || keycode > KEY_MAX)
- return -EINVAL;
-
- key = sparse_keymap_entry_from_scancode(dev, scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
+ if (dev->keycode) {
+ key = sparse_keymap_entry_from_scancode(dev, scancode);
+ if (key && key->type == KE_KEY) {
+ old_keycode = key->keycode;
+ key->keycode = keycode;
+ set_bit(keycode, dev->keybit);
+ if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
+ clear_bit(old_keycode, dev->keybit);
+ return 0;
+ }
}
return -EINVAL;
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev,
return 0;
err_out:
- kfree(keymap);
+ kfree(map);
return error;
}
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup);
*
* This function is used to free memory allocated by sparse keymap
* in an input device that was set up by sparse_keymap_setup().
+ * NOTE: It is safe to call this function while the input device is
+ * still registered (however, drivers should take care not to use the
+ * freed keymap and thus have to shut off interrupts/polling
+ * before freeing the keymap).
*/
void sparse_keymap_free(struct input_dev *dev)
{
+ unsigned long flags;
+
+ /*
+ * Take event lock to prevent racing with input_get_keycode()
+ * and input_set_keycode() if we are called while input device
+ * is still registered.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+
kfree(dev->keycode);
dev->keycode = NULL;
dev->keycodemax = 0;
- dev->getkeycode = NULL;
- dev->setkeycode = NULL;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
EXPORT_SYMBOL(sparse_keymap_free);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 8b5d287..f465025 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf)
int rv;
mutex_lock(&wacom->lock);
- if (wacom->open) {
+
+ /* switch to wacom mode first */
+ wacom_query_tablet_data(intf, features);
+
+ if (wacom->open)
rv = usb_submit_urb(wacom->irq, GFP_NOIO);
- /* switch to wacom mode if needed */
- if (!wacom_retrieve_hid_descriptor(intf, features))
- wacom_query_tablet_data(intf, features);
- } else
+ else
rv = 0;
+
mutex_unlock(&wacom->lock);
return rv;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b3ba343..4a852d8 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- int x, y, prox;
- int rw = 0;
- int retval = 0;
+ int x, y, rw;
+ static int penData = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
- goto exit;
+ return 0;
}
- prox = data[1] & 0x80;
- if (prox || wacom->id[0]) {
- if (prox) {
- switch ((data[1] >> 5) & 3) {
+ if (data[1] & 0x80) {
+ /* in prox and not a pad data */
+ penData = 1;
+
+ switch ((data[1] >> 5) & 3) {
case 0: /* Pen */
wacom->tool[0] = BTN_TOOL_PEN;
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
case 2: /* Mouse with wheel */
wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
+ if (features->type == WACOM_G4 || features->type == WACOM_MO) {
+ rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
+ wacom_report_rel(wcombo, REL_WHEEL, -rw);
+ } else
+ wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
/* fall through */
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
wacom->id[0] = CURSOR_DEVICE_ID;
+ wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
+ wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
+ if (features->type == WACOM_G4 || features->type == WACOM_MO)
+ wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
+ else
+ wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
break;
- }
}
x = wacom_le16_to_cpu(&data[2]);
y = wacom_le16_to_cpu(&data[4]);
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
- } else {
- wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
- if (features->type == WACOM_G4 ||
- features->type == WACOM_MO) {
- wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
- rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
- } else {
- wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
- rw = -(signed)data[6];
- }
- wacom_report_rel(wcombo, REL_WHEEL, rw);
}
-
- if (!prox)
- wacom->id[0] = 0;
wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[0], prox);
- wacom_input_sync(wcombo); /* sync last event */
+ wacom_report_key(wcombo, wacom->tool[0], 1);
+ } else if (wacom->id[0]) {
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
+ if (wacom->tool[0] == BTN_TOOL_MOUSE) {
+ wacom_report_key(wcombo, BTN_LEFT, 0);
+ wacom_report_key(wcombo, BTN_RIGHT, 0);
+ wacom_report_abs(wcombo, ABS_DISTANCE, 0);
+ } else {
+ wacom_report_abs(wcombo, ABS_PRESSURE, 0);
+ wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom_report_key(wcombo, BTN_STYLUS, 0);
+ wacom_report_key(wcombo, BTN_STYLUS2, 0);
+ }
+ wacom->id[0] = 0;
+ wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
+ wacom_report_key(wcombo, wacom->tool[0], 0);
}
/* send pad data */
switch (features->type) {
case WACOM_G4:
- prox = data[7] & 0xf8;
- if (prox || wacom->id[1]) {
+ if (data[7] & 0xf8) {
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_rel(wcombo, REL_WHEEL, rw);
wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- if (!prox)
- wacom->id[1] = 0;
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ } else if (wacom->id[1]) {
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
+ wacom->id[1] = 0;
+ wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
+ wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ wacom_report_rel(wcombo, REL_WHEEL, 0);
+ wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
+ wacom_report_abs(wcombo, ABS_MISC, 0);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
}
- retval = 1;
break;
case WACOM_MO:
- prox = (data[7] & 0xf8) || data[8];
- if (prox || wacom->id[1]) {
+ if ((data[7] & 0xf8) || (data[8] & 0xff)) {
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- if (!prox)
- wacom->id[1] = 0;
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ } else if (wacom->id[1]) {
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
+ wacom->id[1] = 0;
+ wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
+ wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
+ wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
+ wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
+ wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
+ wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
+ wacom_report_abs(wcombo, ABS_MISC, 0);
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
}
- retval = 1;
break;
}
-exit:
- return retval;
+ return 1;
}
static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
{
wacom_report_abs(wcombo, ABS_X,
- data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8));
+ (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
wacom_report_abs(wcombo, ABS_Y,
- data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8));
+ (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
wacom_report_key(wcombo, wacom->tool[idx], 1);
if (idx)
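The wacom_tpc_finger_in() change masks each low byte with 0xff before merging in the high bits: data is a plain char pointer, so a byte of 0x80 or above would otherwise sign-extend and smear ones across the upper bits of the coordinate. A standalone demonstration of the difference (assuming the usual signed-char ABI):

#include <stdio.h>

int main(void)
{
	char data[] = { (char)0xd2, 0x31 };	/* low byte has bit 7 set */

	int bad  = data[0] | ((data[1] & 0x7f) << 8);
	int good = (data[0] & 0xff) | ((data[1] & 0x7f) << 8);

	/* With signed char: bad = 0xffffffd2, good = 0x31d2. */
	printf("without mask: 0x%x\n", (unsigned int)bad);
	printf("with mask:    0x%x\n", (unsigned int)good);
	return 0;
}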
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
touchInProx = 0;
- if (!wacom->id[0]) { /* first in prox */
- /* Going into proximity select tool */
- wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[0] == BTN_TOOL_PEN)
- wacom->id[0] = STYLUS_DEVICE_ID;
- else
- wacom->id[0] = ERASER_DEVICE_ID;
- }
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
- if (!prox) { /* out-prox */
+ if (prox) { /* in prox */
+ if (!wacom->id[0]) {
+ /* Going into proximity select tool */
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
+ }
+ wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
+ wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
+ wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
+ wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
+ pressure = ((data[7] & 0x01) << 8) | data[6];
+ if (pressure < 0)
+ pressure = features->pressure_max + pressure + 1;
+ wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
+ wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
+ } else {
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
+ wacom_report_abs(wcombo, ABS_PRESSURE, 0);
+ wacom_report_key(wcombo, BTN_STYLUS, 0);
+ wacom_report_key(wcombo, BTN_STYLUS2, 0);
+ wacom_report_key(wcombo, BTN_TOUCH, 0);
wacom->id[0] = 0;
/* pen is out so touch can be enabled now */
touchInProx = 1;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 0be15c7..47a5ffe 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -14,11 +14,6 @@
*/
#include "gigaset.h"
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index eb7e271..964a55f 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -12,8 +12,6 @@
*/
#include "gigaset.h"
-#include <linux/slab.h>
-#include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 0b39b38..f6f45f2 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -14,10 +14,8 @@
*/
#include "gigaset.h"
-#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/slab.h>
/* Version Information */
#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 9ef5b04..05947f9 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -20,11 +20,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/usb.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index c99fb97..c22e5ac 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -15,7 +15,6 @@
#include "gigaset.h"
#include <linux/isdnif.h>
-#include <linux/slab.h>
#define HW_HDR_LEN 2 /* Header size used to store ack info */
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index f0dc6c9..c9f28dd 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -13,7 +13,6 @@
#include "gigaset.h"
#include <linux/gigaset_dev.h>
-#include <linux/tty.h>
#include <linux/tty_flip.h>
/*** our ioctls ***/
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index b69f73a..b943efb 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -14,7 +14,6 @@
*/
#include "gigaset.h"
-#include <linux/ctype.h>
static ssize_t show_cidmode(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8b0afd2..e96c058 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -11,13 +11,10 @@
*/
#include "gigaset.h"
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
-#include <linux/tty.h>
#include <linux/completion.h>
-#include <linux/slab.h>
/* Version Information */
#define DRIVER_AUTHOR "Tilman Schmidt"
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 9430a2b..76dbb20 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -16,10 +16,6 @@
*/
#include "gigaset.h"
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 07090f3..69c84a1 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status)
/* We set the status. */
to_lgdev(vdev)->desc->status = status;
- kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset);
+ hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
}
static void lg_set_status(struct virtio_device *vdev, u8 status)
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq)
*/
struct lguest_vq_info *lvq = vq->priv;
- kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT);
+ hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
}
/* An extern declaration inside a C file is bad form. Don't do it. */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index fb2b7ef..b4eb675 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu)
insn = lgread(cpu, physaddr, u8);
/*
+ * Around 2.6.33, the kernel started using an emulation for the
+ * cmpxchg8b instruction in early boot on many configurations. This
+ * code isn't paravirtualized, and it tries to disable interrupts.
+ * Ignore it, which will Mostly Work.
+ */
+ if (insn == 0xfa) {
+ /* "cli", or Clear Interrupt Enable instruction. Skip it. */
+ cpu->regs->eip++;
+ return 1;
+ }
+
+ /*
* 0x66 is an "operand prefix". It means it's using the upper 16 bits
* of the eax register.
*/
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e3e9a36..58ea0ec 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
int previous, int *dd_idx,
struct stripe_head *sh)
{
- long stripe;
- unsigned long chunk_number;
+ sector_t stripe, stripe2;
+ sector_t chunk_number;
unsigned int chunk_offset;
int pd_idx, qd_idx;
int ddf_layout = 0;
@@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
*/
chunk_offset = sector_div(r_sector, sectors_per_chunk);
chunk_number = r_sector;
- BUG_ON(r_sector != chunk_number);
/*
* Compute the stripe number
*/
- stripe = chunk_number / data_disks;
-
- /*
- * Compute the data disk and parity disk indexes inside the stripe
- */
- *dd_idx = chunk_number % data_disks;
-
+ stripe = chunk_number;
+ *dd_idx = sector_div(stripe, data_disks);
+ stripe2 = stripe;
/*
* Select the parity disk based on the user selected algorithm.
*/
@@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
case 5:
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_PARITY_0:
@@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
@@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
/* Exactly the same as RIGHT_ASYMMETRIC, but order
* of blocks for computing Q is different.
*/
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
* D D D P Q rather than
* Q D D D P
*/
- pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+ stripe2 += 1;
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
case ALGORITHM_ROTATING_N_CONTINUE:
/* Same as left_symmetric but Q is before P */
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
ddf_layout = 1;
@@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
case ALGORITHM_LEFT_ASYMMETRIC_6:
/* RAID5 left_asymmetric, with Q on last device */
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_ASYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_LEFT_SYMMETRIC_6:
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_SYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
@@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
: conf->algorithm;
sector_t stripe;
int chunk_offset;
- int chunk_number, dummy1, dd_idx = i;
+ sector_t chunk_number;
+ int dummy1, dd_idx = i;
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
- BUG_ON(new_sector != stripe);
if (i == sh->pd_idx)
return 0;
@@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
}
chunk_number = stripe * data_disks + i;
- r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
check = raid5_compute_sector(conf, r_sector,
previous, &dummy1, &sh2);
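The raid5 rework keeps chunk_number and stripe as full 64-bit sector_t values and replaces every "stripe % raid_disks" with sector_div(), which divides its first argument in place and returns the remainder; plain 64-bit '%' is not available on 32-bit kernels, and widening the types also lets the old BUG_ON truncation checks go away. A hedged sketch of the idiom (the header providing sector_div() varies slightly between kernel versions):

#include <linux/types.h>
#include <linux/genhd.h>	/* sector_div() in kernels of this era */

/* Split a chunk number into stripe and data-disk index the way the
 * patched raid5_compute_sector() does. Illustrative only. */
static void split_chunk(sector_t chunk_number, unsigned int data_disks,
			sector_t *stripe, int *dd_idx)
{
	sector_t s = chunk_number;

	/* sector_div() modifies s in place and returns the remainder. */
	*dd_idx = sector_div(s, data_disks);
	*stripe = s;
}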
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2191c8d..0d0d625 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -311,6 +311,22 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config VMWARE_BALLOON
+ tristate "VMware Balloon Driver"
+ depends on X86
+ help
+ This is VMware physical memory management driver which acts
+ like a "balloon" that can be inflated to reclaim physical pages
+ by reserving them in the guest and invalidating them in the
+ monitor, freeing up the underlying machine pages so they can
+ be allocated to other guests. The balloon can also be deflated
+ to allow the guest to use more physical memory.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmware_balloon.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 27c4843..7b6f7ee 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-y += eeprom/
obj-y += cb710/
+obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
new file mode 100644
index 0000000..e7161c4
--- /dev/null
+++ b/drivers/misc/vmware_balloon.c
@@ -0,0 +1,832 @@
+/*
+ * VMware Balloon driver.
+ *
+ * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ */
+
+/*
+ * This is VMware physical memory management driver for Linux. The driver
+ * acts like a "balloon" that can be inflated to reclaim physical pages by
+ * reserving them in the guest and invalidating them in the monitor,
+ * freeing up the underlying machine pages so they can be allocated to
+ * other guests. The balloon can also be deflated to allow the guest to
+ * use more physical memory. Higher level policies can control the sizes
+ * of balloons in VMs in order to manage physical memory resources.
+ */
+
+//#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/vmware.h>
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
+MODULE_VERSION("1.2.1.0-K");
+MODULE_ALIAS("dmi:*:svnVMware*:*");
+MODULE_ALIAS("vmware_vmmemctl");
+MODULE_LICENSE("GPL");
+
+/*
+ * Various constants controlling the rate of inflating/deflating the balloon,
+ * measured in pages.
+ */
+
+/*
+ * Rate of allocating memory when there is no memory pressure
+ * (driver performs non-sleeping allocations).
+ */
+#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
+
+/*
+ * Rates of memory allocation when the guest experiences memory pressure
+ * (driver performs sleeping allocations).
+ */
+#define VMW_BALLOON_RATE_ALLOC_MIN 512U
+#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
+#define VMW_BALLOON_RATE_ALLOC_INC 16U
+
+/*
+ * Rates for releasing pages while deflating balloon.
+ */
+#define VMW_BALLOON_RATE_FREE_MIN 512U
+#define VMW_BALLOON_RATE_FREE_MAX 16384U
+#define VMW_BALLOON_RATE_FREE_INC 16U
+
+/*
+ * When guest is under memory pressure, use a reduced page allocation
+ * rate for next several cycles.
+ */
+#define VMW_BALLOON_SLOW_CYCLES 4
+
+/*
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
+ * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * __GFP_NOWARN, to suppress page allocation failure warnings.
+ */
+#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+
+/*
+ * Use GFP_HIGHUSER when executing in a separate kernel thread
+ * context and allocation can sleep. This is less stressful to
+ * the guest memory system, since it allows the thread to block
+ * while memory is reclaimed, and won't take pages from emergency
+ * low-memory pools.
+ */
+#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+
+/* Maximum number of page allocations without yielding processor */
+#define VMW_BALLOON_YIELD_THRESHOLD 1024
+
+
+/*
+ * Hypervisor communication port definitions.
+ */
+#define VMW_BALLOON_HV_PORT 0x5670
+#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
+#define VMW_BALLOON_PROTOCOL_VERSION 2
+#define VMW_BALLOON_GUEST_ID 1 /* Linux */
+
+#define VMW_BALLOON_CMD_START 0
+#define VMW_BALLOON_CMD_GET_TARGET 1
+#define VMW_BALLOON_CMD_LOCK 2
+#define VMW_BALLOON_CMD_UNLOCK 3
+#define VMW_BALLOON_CMD_GUEST_ID 4
+
+/* error codes */
+#define VMW_BALLOON_SUCCESS 0
+#define VMW_BALLOON_FAILURE -1
+#define VMW_BALLOON_ERROR_CMD_INVALID 1
+#define VMW_BALLOON_ERROR_PPN_INVALID 2
+#define VMW_BALLOON_ERROR_PPN_LOCKED 3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
+#define VMW_BALLOON_ERROR_PPN_PINNED 5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
+#define VMW_BALLOON_ERROR_RESET 7
+#define VMW_BALLOON_ERROR_BUSY 8
+
+#define VMWARE_BALLOON_CMD(cmd, data, result) \
+({ \
+ unsigned long __stat, __dummy1, __dummy2; \
+ __asm__ __volatile__ ("inl (%%dx)" : \
+ "=a"(__stat), \
+ "=c"(__dummy1), \
+ "=d"(__dummy2), \
+ "=b"(result) : \
+ "0"(VMW_BALLOON_HV_MAGIC), \
+ "1"(VMW_BALLOON_CMD_##cmd), \
+ "2"(VMW_BALLOON_HV_PORT), \
+ "3"(data) : \
+ "memory"); \
+ result &= -1UL; \
+ __stat & -1UL; \
+})
+
+#ifdef CONFIG_DEBUG_FS
+struct vmballoon_stats {
+ unsigned int timer;
+
+ /* allocation statistics */
+ unsigned int alloc;
+ unsigned int alloc_fail;
+ unsigned int sleep_alloc;
+ unsigned int sleep_alloc_fail;
+ unsigned int refused_alloc;
+ unsigned int refused_free;
+ unsigned int free;
+
+ /* monitor operations */
+ unsigned int lock;
+ unsigned int lock_fail;
+ unsigned int unlock;
+ unsigned int unlock_fail;
+ unsigned int target;
+ unsigned int target_fail;
+ unsigned int start;
+ unsigned int start_fail;
+ unsigned int guest_type;
+ unsigned int guest_type_fail;
+};
+
+#define STATS_INC(stat) (stat)++
+#else
+#define STATS_INC(stat)
+#endif
+
+struct vmballoon {
+
+ /* list of reserved physical pages */
+ struct list_head pages;
+
+ /* transient list of non-balloonable pages */
+ struct list_head refused_pages;
+
+ /* balloon size in pages */
+ unsigned int size;
+ unsigned int target;
+
+ /* reset flag */
+ bool reset_required;
+
+ /* adjustment rates (pages per second) */
+ unsigned int rate_alloc;
+ unsigned int rate_free;
+
+ /* slowdown page allocations for next few cycles */
+ unsigned int slow_allocation_cycles;
+
+#ifdef CONFIG_DEBUG_FS
+ /* statistics */
+ struct vmballoon_stats stats;
+
+ /* debugfs file exporting statistics */
+ struct dentry *dbg_entry;
+#endif
+
+ struct sysinfo sysinfo;
+
+ struct delayed_work dwork;
+};
+
+static struct vmballoon balloon;
+static struct workqueue_struct *vmballoon_wq;
+
+/*
+ * Send "start" command to the host, communicating supported version
+ * of the protocol.
+ */
+static bool vmballoon_send_start(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ STATS_INC(b->stats.start);
+
+ status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+ if (status == VMW_BALLOON_SUCCESS)
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.start_fail);
+ return false;
+}
+
+static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
+{
+ switch (status) {
+ case VMW_BALLOON_SUCCESS:
+ return true;
+
+ case VMW_BALLOON_ERROR_RESET:
+ b->reset_required = true;
+ /* fall through */
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * Communicate guest type to the host so that it can adjust ballooning
+ * algorithm to the one most appropriate for the guest. This command
+ * is normally issued after sending "start" command and is part of
+ * standard reset sequence.
+ */
+static bool vmballoon_send_guest_id(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
+
+ STATS_INC(b->stats.guest_type);
+
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.guest_type_fail);
+ return false;
+}
+
+/*
+ * Retrieve desired balloon size from the host.
+ */
+static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+{
+ unsigned long status;
+ unsigned long target;
+ unsigned long limit;
+ u32 limit32;
+
+ /*
+ * si_meminfo() is cheap. Moreover, we want to provide dynamic
+ * max balloon size later. So let us call si_meminfo() every
+ * iteration.
+ */
+ si_meminfo(&b->sysinfo);
+ limit = b->sysinfo.totalram;
+
+ /* Ensure limit fits in 32-bits */
+ limit32 = (u32)limit;
+ if (limit != limit32)
+ return false;
+
+ /* update stats */
+ STATS_INC(b->stats.target);
+
+ status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
+ if (vmballoon_check_status(b, status)) {
+ *new_target = target;
+ return true;
+ }
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.target_fail);
+ return false;
+}
+
+/*
+ * Notify the host about allocated page so that host can use it without
+ * fear that guest will need it. Host may reject some pages, we need to
+ * check the return value and maybe submit a different page.
+ */
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return false;
+
+ STATS_INC(b->stats.lock);
+
+ status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.lock_fail);
+ return false;
+}
+
+/*
+ * Notify the host that guest intends to release given page back into
+ * the pool of available (to the guest) pages.
+ */
+static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return false;
+
+ STATS_INC(b->stats.unlock);
+
+ status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.unlock_fail);
+ return false;
+}
+
+/*
+ * Quickly release all pages allocated for the balloon. This function is
+ * called when host decides to "reset" balloon for one reason or another.
+ * Unlike normal "deflate" we do not (shall not) notify host of the pages
+ * being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int count = 0;
+
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.free);
+ b->size--;
+
+ if (++count >= b->rate_free) {
+ count = 0;
+ cond_resched();
+ }
+ }
+}
+
+/*
+ * Perform standard reset sequence by popping the balloon (in case it
+ * is not empty) and then restarting protocol. This operation normally
+ * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
+ */
+static void vmballoon_reset(struct vmballoon *b)
+{
+ /* free all pages, skipping monitor unlock */
+ vmballoon_pop(b);
+
+ if (vmballoon_send_start(b)) {
+ b->reset_required = false;
+ if (!vmballoon_send_guest_id(b))
+ pr_err("failed to send guest ID to the host\n");
+ }
+}
+
+/*
+ * Allocate (or reserve) a page for the balloon and notify the host. If host
+ * refuses the page put it on "refuse" list and allocate another one until host
+ * is satisfied. "Refused" pages are released at the end of inflation cycle
+ * (when we allocate b->rate_alloc pages).
+ */
+static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+{
+ struct page *page;
+ gfp_t flags;
+ bool locked = false;
+
+ do {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc);
+ else
+ STATS_INC(b->stats.sleep_alloc);
+
+ flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
+ page = alloc_page(flags);
+ if (!page) {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc_fail);
+ else
+ STATS_INC(b->stats.sleep_alloc_fail);
+ return -ENOMEM;
+ }
+
+ /* inform monitor */
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+ if (!locked) {
+ if (b->reset_required) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ /* place on list of non-balloonable pages, retry allocation */
+ list_add(&page->lru, &b->refused_pages);
+ STATS_INC(b->stats.refused_alloc);
+ }
+ } while (!locked);
+
+ /* track allocated page */
+ list_add(&page->lru, &b->pages);
+
+ /* update balloon size */
+ b->size++;
+
+ return 0;
+}
+
+/*
+ * Release the page allocated for the balloon. Note that we first notify
+ * the host so it can make sure the page will be available for the guest
+ * to use, if needed.
+ */
+static int vmballoon_release_page(struct vmballoon *b, struct page *page)
+{
+ if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
+ return -EIO;
+
+ list_del(&page->lru);
+
+ /* deallocate page */
+ __free_page(page);
+ STATS_INC(b->stats.free);
+
+ /* update balloon size */
+ b->size--;
+
+ return 0;
+}
+
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.refused_free);
+ }
+}
+
+/*
+ * Inflate the balloon towards its target size. Note that we try to limit
+ * the rate of allocation to make sure we are not choking the rest of the
+ * system.
+ */
+static void vmballoon_inflate(struct vmballoon *b)
+{
+ unsigned int goal;
+ unsigned int rate;
+ unsigned int i;
+ unsigned int allocations = 0;
+ int error = 0;
+ bool alloc_can_sleep = false;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /*
+ * First try NOSLEEP page allocations to inflate balloon.
+ *
+ * If we do not throttle nosleep allocations, we can drain all
+ * free pages in the guest quickly (if the balloon target is high).
+ * As a side-effect, draining free pages helps to inform (force)
+ * the guest to start swapping if balloon target is not met yet,
+ * which is a desired behavior. However, balloon driver can consume
+ * all available CPU cycles if too many pages are allocated in a
+ * second. Therefore, we throttle nosleep allocations even when
+ * the guest is not under memory pressure. OTOH, if we have already
+ * predicted that the guest is under memory pressure, then we
+ * slowdown page allocations considerably.
+ */
+
+ goal = b->target - b->size;
+ /*
+ * Start with the no-sleep allocation rate, which may be higher
+ * than the sleeping allocation rate.
+ */
+ rate = b->slow_allocation_cycles ?
+ b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
+
+ pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
+ __func__, goal, rate, b->rate_alloc);
+
+ for (i = 0; i < goal; i++) {
+
+ error = vmballoon_reserve_page(b, alloc_can_sleep);
+ if (error) {
+ if (error != -ENOMEM) {
+ /*
+ * Not a page allocation failure, stop this
+ * cycle. Maybe we'll get new target from
+ * the host soon.
+ */
+ break;
+ }
+
+ if (alloc_can_sleep) {
+ /*
+ * CANSLEEP page allocation failed, so guest
+ * is under severe memory pressure. Quickly
+ * decrease allocation rate.
+ */
+ b->rate_alloc = max(b->rate_alloc / 2,
+ VMW_BALLOON_RATE_ALLOC_MIN);
+ break;
+ }
+
+ /*
+ * NOSLEEP page allocation failed, so the guest is
+ * under memory pressure. Let us slow down page
+ * allocations for next few cycles so that the guest
+ * gets out of memory pressure. Also, if we already
+ * allocated b->rate_alloc pages, let's pause,
+ * otherwise switch to sleeping allocations.
+ */
+ b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
+
+ if (i >= b->rate_alloc)
+ break;
+
+ alloc_can_sleep = true;
+ /* Lower rate for sleeping allocations. */
+ rate = b->rate_alloc;
+ }
+
+ if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
+ cond_resched();
+ allocations = 0;
+ }
+
+ if (i >= rate) {
+ /* We allocated enough pages, let's take a break. */
+ break;
+ }
+ }
+
+ /*
+ * We reached our goal without failures so try increasing
+ * allocation rate.
+ */
+ if (error == 0 && i >= b->rate_alloc) {
+ unsigned int mult = i / b->rate_alloc;
+
+ b->rate_alloc =
+ min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
+ VMW_BALLOON_RATE_ALLOC_MAX);
+ }
+
+ vmballoon_release_refused_pages(b);
+}
+
+/*
+ * Decrease the size of the balloon allowing guest to use more memory.
+ */
+static void vmballoon_deflate(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int i = 0;
+ unsigned int goal;
+ int error;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /* limit deallocation rate */
+ goal = min(b->size - b->target, b->rate_free);
+
+ pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
+
+ /* free pages to reach target */
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ error = vmballoon_release_page(b, page);
+ if (error) {
+ /* quickly decrease rate in case of error */
+ b->rate_free = max(b->rate_free / 2,
+ VMW_BALLOON_RATE_FREE_MIN);
+ return;
+ }
+
+ if (++i >= goal)
+ break;
+ }
+
+ /* slowly increase rate if there were no errors */
+ b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
+ VMW_BALLOON_RATE_FREE_MAX);
+}
+
+/*
+ * Balloon work function: reset protocol, if needed, get the new size and
+ * adjust balloon as needed. Repeat in 1 sec.
+ */
+static void vmballoon_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
+ unsigned int target;
+
+ STATS_INC(b->stats.timer);
+
+ if (b->reset_required)
+ vmballoon_reset(b);
+
+ if (b->slow_allocation_cycles > 0)
+ b->slow_allocation_cycles--;
+
+ if (vmballoon_send_get_target(b, &target)) {
+ /* update target, adjust size */
+ b->target = target;
+
+ if (b->size < target)
+ vmballoon_inflate(b);
+ else if (b->size > target)
+ vmballoon_deflate(b);
+ }
+
+ queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+static int vmballoon_debug_show(struct seq_file *f, void *offset)
+{
+ struct vmballoon *b = f->private;
+ struct vmballoon_stats *stats = &b->stats;
+
+ /* format size info */
+ seq_printf(f,
+ "target: %8d pages\n"
+ "current: %8d pages\n",
+ b->target, b->size);
+
+ /* format rate info */
+ seq_printf(f,
+ "rateNoSleepAlloc: %8d pages/sec\n"
+ "rateSleepAlloc: %8d pages/sec\n"
+ "rateFree: %8d pages/sec\n",
+ VMW_BALLOON_NOSLEEP_ALLOC_MAX,
+ b->rate_alloc, b->rate_free);
+
+ seq_printf(f,
+ "\n"
+ "timer: %8u\n"
+ "start: %8u (%4u failed)\n"
+ "guestType: %8u (%4u failed)\n"
+ "lock: %8u (%4u failed)\n"
+ "unlock: %8u (%4u failed)\n"
+ "target: %8u (%4u failed)\n"
+ "primNoSleepAlloc: %8u (%4u failed)\n"
+ "primCanSleepAlloc: %8u (%4u failed)\n"
+ "primFree: %8u\n"
+ "errAlloc: %8u\n"
+ "errFree: %8u\n",
+ stats->timer,
+ stats->start, stats->start_fail,
+ stats->guest_type, stats->guest_type_fail,
+ stats->lock, stats->lock_fail,
+ stats->unlock, stats->unlock_fail,
+ stats->target, stats->target_fail,
+ stats->alloc, stats->alloc_fail,
+ stats->sleep_alloc, stats->sleep_alloc_fail,
+ stats->free,
+ stats->refused_alloc, stats->refused_free);
+
+ return 0;
+}
+
+static int vmballoon_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vmballoon_debug_show, inode->i_private);
+}
+
+static const struct file_operations vmballoon_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = vmballoon_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init vmballoon_debugfs_init(struct vmballoon *b)
+{
+ int error;
+
+ b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+ &vmballoon_debug_fops);
+ if (IS_ERR(b->dbg_entry)) {
+ error = PTR_ERR(b->dbg_entry);
+ pr_err("failed to create debugfs entry, error: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+{
+ debugfs_remove(b->dbg_entry);
+}
+
+#else
+
+static inline int vmballoon_debugfs_init(struct vmballoon *b)
+{
+ return 0;
+}
+
+static inline void vmballoon_debugfs_exit(struct vmballoon *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __init vmballoon_init(void)
+{
+ int error;
+
+ /*
+ * Check if we are running on VMware's hypervisor and bail out
+ * if we are not.
+ */
+ if (!vmware_platform())
+ return -ENODEV;
+
+ vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+ if (!vmballoon_wq) {
+ pr_err("failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&balloon.pages);
+ INIT_LIST_HEAD(&balloon.refused_pages);
+
+ /* initialize rates */
+ balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
+ balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
+
+ INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
+
+ /*
+ * Start balloon.
+ */
+ if (!vmballoon_send_start(&balloon)) {
+ pr_err("failed to send start command to the host\n");
+ error = -EIO;
+ goto fail;
+ }
+
+ if (!vmballoon_send_guest_id(&balloon)) {
+ pr_err("failed to send guest ID to the host\n");
+ error = -EIO;
+ goto fail;
+ }
+
+ error = vmballoon_debugfs_init(&balloon);
+ if (error)
+ goto fail;
+
+ queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
+
+ return 0;
+
+fail:
+ destroy_workqueue(vmballoon_wq);
+ return error;
+}
+module_init(vmballoon_init);
+
+static void __exit vmballoon_exit(void)
+{
+ cancel_delayed_work_sync(&balloon.dwork);
+ destroy_workqueue(vmballoon_wq);
+
+ vmballoon_debugfs_exit(&balloon);
+
+ /*
+ * Deallocate all reserved memory, and reset connection with monitor.
+ * Reset connection before deallocating memory to avoid potential for
+ * additional spurious resets from guest touching deallocated pages.
+ */
+ vmballoon_send_start(&balloon);
+ vmballoon_pop(&balloon);
+}
+module_exit(vmballoon_exit);
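The inflate and deflate paths above adapt their per-cycle page rates with a halve-on-failure, creep-up-on-success policy clamped between the _MIN and _MAX constants (the real driver scales the allocation-rate increase by how far past the old rate an inflation got; the sketch below keeps a flat increment). Distilled into a standalone illustration with a made-up failure pattern:

#include <stdio.h>

#define RATE_ALLOC_MIN	512U
#define RATE_ALLOC_MAX	2048U
#define RATE_ALLOC_INC	16U

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int rate = RATE_ALLOC_MAX;
	int cycle;

	for (cycle = 0; cycle < 6; cycle++) {
		int failed = (cycle < 2);	/* pretend the first cycles fail */

		if (failed)			/* halve quickly under pressure */
			rate = max_u(rate / 2, RATE_ALLOC_MIN);
		else				/* creep back up on success */
			rate = min_u(rate + RATE_ALLOC_INC, RATE_ALLOC_MAX);

		printf("cycle %d: rate %u pages\n", cycle, rate);
	}
	return 0;
}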
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 82d1e4d..4521b1e 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,7 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdbdi.o
+mtd-y := mtdcore.o mtdsuper.o
mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
diff --git a/drivers/mtd/internal.h b/drivers/mtd/internal.h
index c658fe7..e69de29 100644
--- a/drivers/mtd/internal.h
+++ b/drivers/mtd/internal.h
@@ -1,17 +0,0 @@
-/* Internal MTD definitions
- *
- * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * mtdbdi.c
- */
-extern struct backing_dev_info mtd_bdi_unmappable;
-extern struct backing_dev_info mtd_bdi_ro_mappable;
-extern struct backing_dev_info mtd_bdi_rw_mappable;
diff --git a/drivers/mtd/mtdbdi.c b/drivers/mtd/mtdbdi.c
index 5ca5aed..e69de29 100644
--- a/drivers/mtd/mtdbdi.c
+++ b/drivers/mtd/mtdbdi.c
@@ -1,43 +0,0 @@
-/* MTD backing device capabilities
- *
- * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/backing-dev.h>
-#include <linux/mtd/mtd.h>
-#include "internal.h"
-
-/*
- * backing device capabilities for non-mappable devices (such as NAND flash)
- * - permits private mappings, copies are taken of the data
- */
-struct backing_dev_info mtd_bdi_unmappable = {
- .capabilities = BDI_CAP_MAP_COPY,
-};
-
-/*
- * backing device capabilities for R/O mappable devices (such as ROM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-struct backing_dev_info mtd_bdi_ro_mappable = {
- .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
- BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
-};
-
-/*
- * backing device capabilities for writable mappable devices (such as RAM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-struct backing_dev_info mtd_bdi_rw_mappable = {
- .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
- BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
- BDI_CAP_WRITE_MAP),
-};
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5b38b17..b177e75 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -2,6 +2,9 @@
* Core registration and callback routines for MTD
* drivers and users.
*
+ * bdi bits are:
+ * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
*/
#include <linux/module.h>
@@ -16,11 +19,39 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
+#include <linux/backing-dev.h>
#include <linux/mtd/mtd.h>
-#include "internal.h"
#include "mtdcore.h"
+/*
+ * backing device capabilities for non-mappable devices (such as NAND flash)
+ * - permits private mappings, copies are taken of the data
+ */
+struct backing_dev_info mtd_bdi_unmappable = {
+ .capabilities = BDI_CAP_MAP_COPY,
+};
+
+/*
+ * backing device capabilities for R/O mappable devices (such as ROM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+struct backing_dev_info mtd_bdi_ro_mappable = {
+ .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+ BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
+};
+
+/*
+ * backing device capabilities for writable mappable devices (such as RAM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+struct backing_dev_info mtd_bdi_rw_mappable = {
+ .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+ BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
+ BDI_CAP_WRITE_MAP),
+};
static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);
@@ -628,20 +659,55 @@ done:
/*====================================================================*/
/* Init code */
+static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+{
+ int ret;
+
+ ret = bdi_init(bdi);
+ if (!ret)
+ ret = bdi_register(bdi, NULL, name);
+
+ if (ret)
+ bdi_destroy(bdi);
+
+ return ret;
+}
+
static int __init init_mtd(void)
{
int ret;
+
ret = class_register(&mtd_class);
+ if (ret)
+ goto err_reg;
+
+ ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
+ if (ret)
+ goto err_bdi1;
+
+ ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
+ if (ret)
+ goto err_bdi2;
+
+ ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
+ if (ret)
+ goto err_bdi3;
- if (ret) {
- pr_err("Error registering mtd class: %d\n", ret);
- return ret;
- }
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
proc_mtd->read_proc = mtd_read_proc;
#endif /* CONFIG_PROC_FS */
return 0;
+
+err_bdi3:
+ bdi_destroy(&mtd_bdi_ro_mappable);
+err_bdi2:
+ bdi_destroy(&mtd_bdi_unmappable);
+err_bdi1:
+ class_unregister(&mtd_class);
+err_reg:
+ pr_err("Error registering mtd class or bdi: %d\n", ret);
+ return ret;
}
static void __exit cleanup_mtd(void)
@@ -651,6 +717,9 @@ static void __exit cleanup_mtd(void)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
class_unregister(&mtd_class);
+ bdi_destroy(&mtd_bdi_unmappable);
+ bdi_destroy(&mtd_bdi_ro_mappable);
+ bdi_destroy(&mtd_bdi_rw_mappable);
}
module_init(init_mtd);
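init_mtd() now uses the usual kernel goto-unwind ladder: each successful step has a matching error label, and a later failure jumps to the label just past its own cleanup so everything already registered is torn down in reverse order before the error is returned. The shape of the pattern as a standalone sketch with placeholder step/undo functions (not real kernel APIs):

#include <stdio.h>

/* Dummy steps standing in for class_register()/mtd_bdi_init(); each
 * returns 0 on success, nonzero on failure. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force a failure to show unwinding */
static void undo_a(void) { printf("undo a\n"); }
static void undo_b(void) { printf("undo b\n"); }

static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;
	ret = step_b();
	if (ret)
		goto err_b;
	ret = step_c();
	if (ret)
		goto err_c;	/* unwind b, then a */
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	printf("init failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return example_init() ? 1 : 0;
}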
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index af8b42e..7c00319 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -13,6 +13,7 @@
#include <linux/mtd/super.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/slab.h>
/*
* compare superblocks to see if they're equivalent
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
sb->s_mtd = mtd;
sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ sb->s_bdi = mtd->backing_dev_info;
return 0;
}
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index f59c074..d60fc57 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
}
buf64 = (uint64_t *)buf;
while (i < len/8) {
- uint64_t x;
+ /*
+ * Since GCC has no proper constraint (PR 43518),
+ * force the x variable into the r2/r3 register pair, as the
+ * ldrd instruction requires the first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
buf64[i++] = x;
}
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a03d291..f0d23de 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
__func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
- print_dump_hex(KERN_DEBUG, "Frame contents: ",
+ print_hex_dump(KERN_DEBUG, "Frame contents: ",
DUMP_PREFIX_OFFSET, 16, 1,
&rx_ring[ring_offset], 70, true);
#endif
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a583b50..12b280a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/
obj-$(CONFIG_USB_HSO) += usb/
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
+obj-$(CONFIG_USB_IPHETH) += usb/
obj-y += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a257bab..ac90a38 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.8"
-#define DRV_MODULE_RELDATE "Feb 15, 2010"
+#define DRV_MODULE_VERSION "2.0.9"
+#define DRV_MODULE_RELDATE "April 27, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
}
static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
- bnx2_cnic_stop(bp);
+ if (stop_cnic)
+ bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
int i;
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
}
static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
- bnx2_cnic_start(bp);
+ if (start_cnic)
+ bnx2_cnic_start(bp);
}
}
}
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
rc = bnx2_alloc_bad_rbuf(bp);
}
- if (bp->flags & BNX2_FLAG_USING_MSIX)
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
+ /* Prevent MSIX table reads and writes from timing out */
+ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+ }
return rc;
}
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work)
return;
}
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
rtnl_unlock();
}
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev))
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, false);
}
#endif
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 0);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
return 0;
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
/* Reset will erase chipset stats; save them */
bnx2_save_stats(bp);
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_setup_cnic_irq_info(bp);
mutex_unlock(&bp->cnic_lock);
#endif
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
return 0;
}
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_shutdown_chip(bp);
else {
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
/* wait for link up */
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
flush_scheduled_work();
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
return 0;
}
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
del_timer_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(dev))
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
netif_device_attach(dev);
rtnl_unlock();
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 3345109..d800b59 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf,
netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
if (!netdev) {
- dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
+ dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
return -ENOMEM;
}
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->intr_urb) {
- dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
+ dev_err(&intf->dev, "Couldn't alloc intr URB\n");
goto cleanup_candev;
}
dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
if (!dev->intr_in_buffer) {
- dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
+ dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
goto cleanup_intr_urb;
}
dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
sizeof(struct ems_cpc_msg), GFP_KERNEL);
if (!dev->tx_msg_buffer) {
- dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
+ dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
goto cleanup_intr_in_buffer;
}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 9781942..4b451a7 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
struct cnic_local *cp = dev->cnic_priv;
u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
- prefetch(cp->status_blk.bnx2x);
- prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+ if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ prefetch(cp->status_blk.bnx2x);
+ prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
tasklet_schedule(&cp->cnic_irq_task);
-
- cnic_chk_pkt_rings(cp);
+ cnic_chk_pkt_rings(cp);
+ }
return 0;
}
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e..35cd367 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
return 0;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aced6c5..e3f1b85 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
unsigned long n)
{
- int attempts = 5;
+ int attempts = 10;
while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
if (!--attempts)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b997e57..7910803 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,6 +166,7 @@
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
#include <asm/unaligned.h>
@@ -2265,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work)
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
ioread8(&nic->csr->scb.status));
- e100_down(netdev_priv(netdev));
- e100_up(netdev_priv(netdev));
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ e100_down(netdev_priv(netdev));
+ e100_up(netdev_priv(netdev));
+ }
+ rtnl_unlock();
}
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 712ccc6..9015555 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
struct pci_dev *pdev = adapter->pdev;
- u16 eeprom_data = 0;
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
adapter->flags &= ~FLAG_HAS_WOL;
break;
-
case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ /* Disable ASPM L0s due to hardware errata */
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
+
if (pdev->device == E1000_DEV_ID_82573L) {
- if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
- &eeprom_data) < 0)
- break;
- if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
- adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
- adapter->max_hw_frame_size = DEFAULT_JUMBO;
- }
+ adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
break;
default:
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
+ .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
+ .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
- | FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
- | FLAG_HAS_ERT
| FLAG_HAS_SWSM_ON_LOAD,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 118bdf4..ee32b9b 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include "hw.h"
@@ -374,7 +375,7 @@ struct e1000_adapter {
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
- unsigned int flags2;
+ unsigned int flags2;
u32 pba;
u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
@@ -421,6 +422,7 @@ struct e1000_info {
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index cfd09ce..fb8fc7d 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
i = 0;
}
+ if (i == tx_ring->next_to_use)
+ break;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
@@ -4281,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* 82573 Errata 17 */
+ if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82574)) &&
+ (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
msleep(1);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4603,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
}
}
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
int pos;
- u16 val;
+ u16 reg16;
/*
- * 82573 workaround - disable L1 ASPM on mobile chipsets
- *
- * L1 ASPM on various mobile (ich7) chipsets do not behave properly
- * resulting in lost data or garbage information on the pci-e link
- * level. This could result in (false) bad EEPROM checksum errors,
- * long ping times (up to 2s) or even a system freeze/hang.
- *
- * Unfortunately this feature saves about 1W power consumption when
- * active.
+ * Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
*/
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
- if (val & 0x2) {
- dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
- val &= ~0x2;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
- }
+ pos = pci_pcie_cap(pdev);
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+ pos = pci_pcie_cap(pdev->bus->self);
+ pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+ (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+ __e1000e_disable_aspm(pdev, state);
}
#ifdef CONFIG_PM
@@ -4651,7 +4671,8 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_save_state(pdev);
- e1000e_disable_l1aspm(pdev);
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4793,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
int err;
pci_ers_result_t result;
- e1000e_disable_l1aspm(pdev);
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
@@ -4887,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
}
-
- ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
- if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
- /* ASPM enable */
- dev_warn(&adapter->pdev->dev,
- "Warning: detected ASPM enabled in EEPROM\n");
- }
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -4942,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- e1000e_disable_l1aspm(pdev);
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err)
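
As a hedged sketch of the call pattern these hunks introduce (the example_resume helper is hypothetical; e1000e_disable_aspm() and FLAG2_DISABLE_ASPM_L1 are the ones added by this patch, and with CONFIG_PCIEASPM set the call reduces to pci_disable_link_state()):

static void example_resume(struct e1000_adapter *adapter)
{
	/* Only touch the PCIe link state when the errata flag asks for it. */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
}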
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 73b260c..5c98f7c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
- if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
+ if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d5160ed..3acac5f 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
struct gfar __iomem *enet_regs;
- u32 __iomem *ioremap_tbipa;
- u64 addr, size;
/*
* This is mildly evil, but so is our hardware for doing this.
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
return &enet_regs->tbipa;
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
- addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
- ioremap_tbipa = ioremap(addr, size);
- return ioremap_tbipa;
+ return of_iomap(np, 1);
} else
return NULL;
}
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
u32 __iomem *tbipa;
struct mii_bus *new_bus;
int tbiaddr = -1;
+ const u32 *addrp;
u64 addr = 0, size = 0;
int err = 0;
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
+ addrp = of_get_address(np, 0, &size, NULL);
+ if (!addrp) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
/* Set the PHY base address */
- addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+ addr = of_translate_address(np, addrp);
+ if (addr == OF_BAD_ADDR) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
map = ioremap(addr, size);
if (!map) {
err = -ENOMEM;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 080d1ce..4e97ca1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -549,12 +549,8 @@ static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
- u64 addr, size;
-
- addr = of_translate_address(np,
- of_get_address(np, 0, &size, NULL));
- priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+ priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
if (!priv->gfargrp[priv->num_grps].regs)
return -ENOMEM;
@@ -1515,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
- while (!(gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)))
- cpu_relax();
+ spin_event_timeout(((gfar_read(&regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
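
Both the fsl_pq_mdio and gianfar hunks above swap an of_get_address()/of_translate_address()/ioremap() sequence for of_iomap(); a minimal sketch of that pattern (hypothetical helper, "reg" index 0 assumed):

static void __iomem *example_map_first_reg(struct device_node *np)
{
	/* of_iomap() translates the given "reg" entry and ioremaps it. */
	void __iomem *regs = of_iomap(np, 0);

	if (!regs)
		return NULL;	/* callers above turn this into -ENOMEM */
	return regs;
}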
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d313fae..7430384 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
retval = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* quad port adapters only support WoL on port A */
if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
wol->supported = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b3c51a..c9baa2a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b405a00..12fc0e7 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,8 @@
#define IXGBE_82599_MC_TBL_SIZE 128
#define IXGBE_82599_VFT_TBL_SIZE 128
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
return status;
}
+ /**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(100);
+}
+
/**
* ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
* @hw: pointer to hardware structure
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
**/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- udelay(100);
-
- /* Enable tx laser; allow 100ms to light up */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- msleep(100);
-
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
hw->mac.autotry_restart = false;
}
}
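
A hedged sketch of how a caller uses the new laser ops (hypothetical helper; per the ixgbe_main.c hunks that follow, callers gate on hw->phy.multispeed_fiber, and the ops are left NULL otherwise):

static void example_power_down_optics(struct ixgbe_hw *hw)
{
	/* The laser ops are only populated for multispeed fiber PHYs. */
	if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);
}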
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8f677cb..6c00ee4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
+ /* enable the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.enable_tx_laser(hw);
+
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* power down the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.disable_tx_laser(hw);
+
/* disable receive for all VFs and wait one second */
if (adapter->num_vfs) {
/* ping all the active vfs to let them know we are going down */
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* power down the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.disable_tx_laser(hw);
+
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &ixgbe_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->sfp_task);
- if (adapter->hw.phy.multispeed_fiber) {
- struct ixgbe_hw *hw = &adapter->hw;
- /*
- * Restart clause 37 autoneg, disable and re-enable
- * the tx laser, to clear & alert the link partner
- * that it needs to restart autotry
- */
- hw->mac.autotry_restart = true;
- hw->mac.ops.flap_tx_laser(hw);
- }
cancel_work_sync(&adapter->multispeed_fiber_task);
cancel_work_sync(&adapter->sfp_config_module_task);
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 4ec6dc1..534affc 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
/* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 13cc1ca..9e9f9b3 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work)
txb = skb_dequeue(&ks->txq);
last = skb_queue_empty(&ks->txq);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- ks8851_wrpkt(ks, txb, last);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ if (txb != NULL) {
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrpkt(ks, txb, last);
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
- ks8851_done_tx(ks, txb);
+ ks8851_done_tx(ks, txb);
+ }
}
mutex_unlock(&ks->lock);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 4718877..ecde087 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev,
if (pause->tx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->tx_pause);
if (pause->rx_pause != mgp->pause)
- return myri10ge_change_pause(mgp, pause->tx_pause);
+ return myri10ge_change_pause(mgp, pause->rx_pause);
if (pause->autoneg != 0)
return -EINVAL;
return 0;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 3d1d3a7..757f87b 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
inw(ioaddr + EL3_STATUS));
spin_lock_irqsave(&lp->window_lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO);
outw(0, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
dev->trans_start = jiffies;
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev)
/* BadSSD */ inb(ioaddr + 12);
up = inb(ioaddr + 13);
- dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
-
EL3WINDOW(1);
}
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index ff7eb91..ccc5537 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
- u_int multicast_table[ 2 ] = { 0, };
+ unsigned char multicast_table[8];
unsigned long flags;
u_short rx_cfg_setting;
+ int i;
+
+ memset(multicast_table, 0, sizeof(multicast_table));
if (dev->flags & IFF_PROMISC) {
rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(mc_addr, dev) {
u_int position = ether_crc(6, mc_addr->dmi_addr);
-#ifndef final_version /* Verify multicast address. */
- if ((mc_addr->dmi_addr[0] & 1) == 0)
- continue;
-#endif
multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
}
}
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev)
/* Load MC table and Rx setting into the chip without interrupts. */
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
- outl(multicast_table[0], ioaddr + MULTICAST0);
- outl(multicast_table[1], ioaddr + MULTICAST4);
+ for (i = 0; i < 8; i++)
+ outb(multicast_table[i], ioaddr + MULTICAST0 + i);
SMC_SELECT_BANK(0);
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
@@ -1805,23 +1804,30 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(1);
media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
/* Check for pending interrupt with watchdog flag set: with
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ local_irq_save(flags);
smc_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
smc->fast_poll = HZ;
}
if (smc->fast_poll) {
smc->fast_poll--;
smc->media.expires = jiffies + HZ/100;
add_timer(&smc->media);
- SMC_SELECT_BANK(saved_bank);
- spin_unlock_irqrestore(&smc->lock, flags);
return;
}
+ spin_lock_irqsave(&smc->lock, flags);
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id < 0)
goto reschedule;
@@ -1979,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_gset(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -1997,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_sset(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2015,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
ret = smc_link_ok(dev);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2057,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int rc = 0;
u16 saved_bank;
unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
SMC_SELECT_BANK(3);
rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return rc;
}
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index a6ef266..e73ba45 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev)
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
qlcnic_nic_add_mac(adapter, adapter->mac_addr);
qlcnic_nic_add_mac(adapter, bcast_addr);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 43afdb6..0298d8c 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -134,7 +134,7 @@
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
-#define MCAST_MAX 4 /* Max number multicast addresses to filter */
+#define MCAST_MAX 3 /* Max number multicast addresses to filter */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev)
crc >>= 26;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
- /* Write the index of the hash table */
- for (i = 0; i < 4; i++)
- iowrite16(hash_table[i] << 14, ioaddr + MCR1);
/* Fill the MAC hash tables with their values */
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
} else {
- iowrite16(0xffff, ioaddr + MID_0L + 8 * i);
- iowrite16(0xffff, ioaddr + MID_0M + 8 * i);
- iowrite16(0xffff, ioaddr + MID_0H + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
}
i++;
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dbb1f5a..4748c21 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
{
iounmap(ioaddr);
pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
+
RTL_W32(MAC4, high);
+ RTL_R32(MAC4);
+
RTL_W32(MAC0, low);
+ RTL_R32(MAC0);
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_dev_1;
}
- rc = pci_set_mwi(pdev);
- if (rc < 0)
- goto err_out_disable_2;
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"region #%d not an MMIO resource, aborting\n",
region);
rc = -ENODEV;
- goto err_out_mwi_3;
+ goto err_out_mwi_2;
}
/* check for weird/broken PCI region reporting */
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(tp, probe, dev,
"Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
- goto err_out_mwi_3;
+ goto err_out_mwi_2;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_3;
+ goto err_out_mwi_2;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_4;
+ goto err_out_free_res_3;
}
}
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ioaddr) {
netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
- goto err_out_free_res_4;
+ goto err_out_free_res_3;
}
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (i == ARRAY_SIZE(rtl_chip_info)) {
dev_err(&pdev->dev,
"driver bug, MAC version not found in rtl_chip_info\n");
- goto err_out_msi_5;
+ goto err_out_msi_4;
}
tp->chipset = i;
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_msi_5;
+ goto err_out_msi_4;
pci_set_drvdata(pdev, dev);
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
-err_out_msi_5:
+err_out_msi_4:
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
-err_out_free_res_4:
+err_out_free_res_3:
pci_release_regions(pdev);
-err_out_mwi_3:
+err_out_mwi_2:
pci_clear_mwi(pdev);
-err_out_disable_2:
pci_disable_device(pdev);
err_out_free_dev_1:
free_netdev(dev);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6486657..649a264 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1861,6 +1861,7 @@ out:
}
if (disabled) {
+ dev_close(efx->net_dev);
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
- if (efx_reset(efx, efx->reset_pending))
- dev_close(efx->net_dev);
+ (void)efx_reset(efx, efx->reset_pending);
rtnl_unlock();
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d294d66..08278e7 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
- falcon_probe_board(efx, board_rev);
+ rc = falcon_probe_board(efx, board_rev);
+ if (rc)
+ goto fail2;
kfree(nvconfig);
return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fdd..c7a933a 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
},
};
-static const struct falcon_board_type falcon_dummy_board = {
- .init = efx_port_dummy_op_int,
- .init_phy = efx_port_dummy_op_void,
- .fini = efx_port_dummy_op_void,
- .set_id_led = efx_port_dummy_op_set_id_led,
- .monitor = efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
+ return 0;
} else {
EFX_ERR(efx, "unknown board type %d\n", type_id);
- board->type = &falcon_dummy_board;
+ return -ENODEV;
}
}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c03..3166baf 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
**************************************************************************
*/
-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 38dcc42..e0c46f5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
static void siena_update_nic_stats(struct efx_nic *efx)
{
- while (siena_try_update_nic_stats(efx) == -EAGAIN)
- cpu_relax();
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ return;
+ udelay(100);
+ }
+
+ /* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index a214a16..4111a85 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
pr_info("done!\n");
- if (!request_mem_region(res->start, (res->end - res->start),
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
pr_err("%s: ERROR: memory allocation failed"
"cannot get the I/O addr 0x%x\n",
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
goto out;
}
- addr = ioremap(res->start, (res->end - res->start));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
- pr_err("%s: ERROR: memory mapping failed \n", __func__);
+ pr_err("%s: ERROR: memory mapping failed\n", __func__);
ret = -ENOMEM;
goto out;
}
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
out:
if (ret < 0) {
platform_set_drvdata(pdev, NULL);
- release_mem_region(res->start, (res->end - res->start));
+ release_mem_region(res->start, resource_size(res));
if (addr != NULL)
iounmap(addr);
}
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
iounmap((void *)ndev->base_addr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, (res->end - res->start));
+ release_mem_region(res->start, resource_size(res));
free_netdev(ndev);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 22cf1c4..ecc41cf 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp)
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ tp->napi[0].irq_vec = tp->pdev->irq;
err = tg3_request_irq(tp, 0);
if (err)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 96c39bd..4326520 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ /* Orphan the skb - required as we might hang on to it
+ * for indefinite time. */
+ skb_orphan(skb);
+
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
dev->trans_start = jiffies;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ba56ce4..5d58abc 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,4 +385,26 @@ config USB_CDC_PHONET
cellular modem, as found on most Nokia handsets with the
"PC suite" USB profile.
+config USB_IPHETH
+ tristate "Apple iPhone USB Ethernet driver"
+ default n
+ ---help---
+ Module used to share Internet connection (tethering) from your
+ iPhone (Original, 3G and 3GS) to your system.
+ Note that you need userspace libraries and programs that pair your
+ device with your system and that understand the iPhone protocol.
+
+ For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+
+config USB_SIERRA_NET
+ tristate "USB-to-WWAN Driver for Sierra Wireless modems"
+ depends on USB_USBNET
+ default y
+ help
+ Choose this option if you have a Sierra Wireless USB-to-WWAN device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sierra_net.
+
endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 82ea629..b13a279 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
+obj-$(CONFIG_USB_IPHETH) += ipheth.o
+obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c8cdb7f..3547cf1 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = {
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
+ .manage_power = cdc_manage_power,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644
index 0000000..418825d
--- /dev/null
+++ b/drivers/net/usb/ipheth.c
@@ -0,0 +1,569 @@
+/*
+ * ipheth.c - Apple iPhone USB Ethernet driver
+ *
+ * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of GIAGIO.COM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ *
+ * Attention: iPhone device must be paired, otherwise it won't respond to our
+ * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+
+#define USB_VENDOR_APPLE 0x05ac
+#define USB_PRODUCT_IPHONE 0x1290
+#define USB_PRODUCT_IPHONE_3G 0x1292
+#define USB_PRODUCT_IPHONE_3GS 0x1294
+
+#define IPHETH_USBINTF_CLASS 255
+#define IPHETH_USBINTF_SUBCLASS 253
+#define IPHETH_USBINTF_PROTO 1
+
+#define IPHETH_BUF_SIZE 1516
+#define IPHETH_TX_TIMEOUT (5 * HZ)
+
+#define IPHETH_INTFNUM 2
+#define IPHETH_ALT_INTFNUM 1
+
+#define IPHETH_CTRL_ENDP 0x00
+#define IPHETH_CTRL_BUF_SIZE 0x40
+#define IPHETH_CTRL_TIMEOUT (5 * HZ)
+
+#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_CARRIER_CHECK 0x45
+
+#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
+#define IPHETH_CARRIER_ON 0x04
+
+static struct usb_device_id ipheth_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ipheth_table);
+
+struct ipheth_device {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct net_device *net;
+ struct sk_buff *tx_skb;
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ unsigned char *ctrl_buf;
+ u8 bulk_in;
+ u8 bulk_out;
+ struct delayed_work carrier_work;
+};
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
+
+static int ipheth_alloc_urbs(struct ipheth_device *iphone)
+{
+ struct urb *tx_urb = NULL;
+ struct urb *rx_urb = NULL;
+ u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+
+ tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (tx_urb == NULL)
+ goto error_nomem;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (rx_urb == NULL)
+ goto free_tx_urb;
+
+ tx_buf = usb_buffer_alloc(iphone->udev,
+ IPHETH_BUF_SIZE,
+ GFP_KERNEL,
+ &tx_urb->transfer_dma);
+ if (tx_buf == NULL)
+ goto free_rx_urb;
+
+ rx_buf = usb_buffer_alloc(iphone->udev,
+ IPHETH_BUF_SIZE,
+ GFP_KERNEL,
+ &rx_urb->transfer_dma);
+ if (rx_buf == NULL)
+ goto free_tx_buf;
+
+
+ iphone->tx_urb = tx_urb;
+ iphone->rx_urb = rx_urb;
+ iphone->tx_buf = tx_buf;
+ iphone->rx_buf = rx_buf;
+ return 0;
+
+free_tx_buf:
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+ tx_urb->transfer_dma);
+free_rx_urb:
+ usb_free_urb(rx_urb);
+free_tx_urb:
+ usb_free_urb(tx_urb);
+error_nomem:
+ return -ENOMEM;
+}
+
+static void ipheth_free_urbs(struct ipheth_device *iphone)
+{
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
+ usb_free_urb(iphone->rx_urb);
+ usb_free_urb(iphone->tx_urb);
+}
+
+static void ipheth_kill_urbs(struct ipheth_device *dev)
+{
+ usb_kill_urb(dev->tx_urb);
+ usb_kill_urb(dev->rx_urb);
+}
+
+static void ipheth_rcvbulk_callback(struct urb *urb)
+{
+ struct ipheth_device *dev;
+ struct sk_buff *skb;
+ int status;
+ char *buf;
+ int len;
+
+ dev = urb->context;
+ if (dev == NULL)
+ return;
+
+ status = urb->status;
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ return;
+ case 0:
+ break;
+ default:
+ err("%s: urb status: %d", __func__, urb->status);
+ return;
+ }
+
+ len = urb->actual_length;
+ buf = urb->transfer_buffer;
+
+ skb = dev_alloc_skb(NET_IP_ALIGN + len);
+ if (!skb) {
+ err("%s: dev_alloc_skb: -ENOMEM", __func__);
+ dev->net->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
+ skb->dev = dev->net;
+ skb->protocol = eth_type_trans(skb, dev->net);
+
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += len;
+
+ netif_rx(skb);
+ ipheth_rx_submit(dev, GFP_ATOMIC);
+}
+
+static void ipheth_sndbulk_callback(struct urb *urb)
+{
+ struct ipheth_device *dev;
+
+ dev = urb->context;
+ if (dev == NULL)
+ return;
+
+ if (urb->status != 0 &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN)
+ err("%s: urb status: %d", __func__, urb->status);
+
+ dev_kfree_skb_irq(dev->tx_skb);
+ netif_wake_queue(dev->net);
+}
+
+static int ipheth_carrier_set(struct ipheth_device *dev)
+{
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ retval = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ IPHETH_CMD_CARRIER_CHECK, /* request */
+ 0xc0, /* request type */
+ 0x00, /* value */
+ 0x02, /* index */
+ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+ if (retval < 0) {
+ err("%s: usb_control_msg: %d", __func__, retval);
+ return retval;
+ }
+
+ if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+ netif_carrier_on(dev->net);
+ else
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static void ipheth_carrier_check_work(struct work_struct *work)
+{
+ struct ipheth_device *dev = container_of(work, struct ipheth_device,
+ carrier_work.work);
+
+ ipheth_carrier_set(dev);
+ schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+}
+
+static int ipheth_get_macaddr(struct ipheth_device *dev)
+{
+ struct usb_device *udev = dev->udev;
+ struct net_device *net = dev->net;
+ int retval;
+
+ retval = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ IPHETH_CMD_GET_MACADDR, /* request */
+ 0xc0, /* request type */
+ 0x00, /* value */
+ 0x02, /* index */
+ dev->ctrl_buf,
+ IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+ if (retval < 0) {
+ err("%s: usb_control_msg: %d", __func__, retval);
+ } else if (retval < ETH_ALEN) {
+ err("%s: usb_control_msg: short packet: %d bytes",
+ __func__, retval);
+ retval = -EINVAL;
+ } else {
+ memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
+{
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ usb_fill_bulk_urb(dev->rx_urb, udev,
+ usb_rcvbulkpipe(udev, dev->bulk_in),
+ dev->rx_buf, IPHETH_BUF_SIZE,
+ ipheth_rcvbulk_callback,
+ dev);
+ dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ retval = usb_submit_urb(dev->rx_urb, mem_flags);
+ if (retval)
+ err("%s: usb_submit_urb: %d", __func__, retval);
+ return retval;
+}
+
+static int ipheth_open(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ struct usb_device *udev = dev->udev;
+ int retval = 0;
+
+ usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
+
+ retval = ipheth_carrier_set(dev);
+ if (retval)
+ return retval;
+
+ retval = ipheth_rx_submit(dev, GFP_KERNEL);
+ if (retval)
+ return retval;
+
+ schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+ netif_start_queue(net);
+ return retval;
+}
+
+static int ipheth_close(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+
+ cancel_delayed_work_sync(&dev->carrier_work);
+ netif_stop_queue(net);
+ return 0;
+}
+
+static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ /* Paranoid */
+ if (skb->len > IPHETH_BUF_SIZE) {
+ WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+ dev->net->stats.tx_dropped++;
+ dev_kfree_skb_irq(skb);
+ return NETDEV_TX_OK;
+ }
+
+ memcpy(dev->tx_buf, skb->data, skb->len);
+ if (skb->len < IPHETH_BUF_SIZE)
+ memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
+
+ usb_fill_bulk_urb(dev->tx_urb, udev,
+ usb_sndbulkpipe(udev, dev->bulk_out),
+ dev->tx_buf, IPHETH_BUF_SIZE,
+ ipheth_sndbulk_callback,
+ dev);
+ dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
+ if (retval) {
+ err("%s: usb_submit_urb: %d", __func__, retval);
+ dev->net->stats.tx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ dev->tx_skb = skb;
+
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += skb->len;
+ netif_stop_queue(net);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void ipheth_tx_timeout(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+
+ err("%s: TX timeout", __func__);
+ dev->net->stats.tx_errors++;
+ usb_unlink_urb(dev->tx_urb);
+}
+
+static struct net_device_stats *ipheth_stats(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ return &dev->net->stats;
+}
+
+static u32 ipheth_ethtool_op_get_link(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ return netif_carrier_ok(dev->net);
+}
+
+static struct ethtool_ops ops = {
+ .get_link = ipheth_ethtool_op_get_link
+};
+
+static const struct net_device_ops ipheth_netdev_ops = {
+ .ndo_open = &ipheth_open,
+ .ndo_stop = &ipheth_close,
+ .ndo_start_xmit = &ipheth_tx,
+ .ndo_tx_timeout = &ipheth_tx_timeout,
+ .ndo_get_stats = &ipheth_stats,
+};
+
+static struct device_type ipheth_type = {
+ .name = "wwan",
+};
+
+static int ipheth_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_host_interface *hintf;
+ struct usb_endpoint_descriptor *endp;
+ struct ipheth_device *dev;
+ struct net_device *netdev;
+ int i;
+ int retval;
+
+ netdev = alloc_etherdev(sizeof(struct ipheth_device));
+ if (!netdev)
+ return -ENOMEM;
+
+ netdev->netdev_ops = &ipheth_netdev_ops;
+ netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
+ strcpy(netdev->name, "wwan%d");
+
+ dev = netdev_priv(netdev);
+ dev->udev = udev;
+ dev->net = netdev;
+ dev->intf = intf;
+
+ /* Set up endpoints */
+ hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
+ if (hintf == NULL) {
+ retval = -ENODEV;
+ err("Unable to find alternate settings interface");
+ goto err_endpoints;
+ }
+
+ for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
+ endp = &hintf->endpoint[i].desc;
+ if (usb_endpoint_is_bulk_in(endp))
+ dev->bulk_in = endp->bEndpointAddress;
+ else if (usb_endpoint_is_bulk_out(endp))
+ dev->bulk_out = endp->bEndpointAddress;
+ }
+ if (!(dev->bulk_in && dev->bulk_out)) {
+ retval = -ENODEV;
+ err("Unable to find endpoints");
+ goto err_endpoints;
+ }
+
+ dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
+ if (dev->ctrl_buf == NULL) {
+ retval = -ENOMEM;
+ goto err_alloc_ctrl_buf;
+ }
+
+ retval = ipheth_get_macaddr(dev);
+ if (retval)
+ goto err_get_macaddr;
+
+ INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
+
+ retval = ipheth_alloc_urbs(dev);
+ if (retval) {
+ err("error allocating urbs: %d", retval);
+ goto err_alloc_urbs;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ SET_ETHTOOL_OPS(netdev, &ops);
+ SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
+
+ retval = register_netdev(netdev);
+ if (retval) {
+ err("error registering netdev: %d", retval);
+ retval = -EIO;
+ goto err_register_netdev;
+ }
+
+ dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
+ return 0;
+
+err_register_netdev:
+ ipheth_free_urbs(dev);
+err_alloc_urbs:
+err_get_macaddr:
+err_alloc_ctrl_buf:
+ kfree(dev->ctrl_buf);
+err_endpoints:
+ free_netdev(netdev);
+ return retval;
+}
+
+static void ipheth_disconnect(struct usb_interface *intf)
+{
+ struct ipheth_device *dev;
+
+ dev = usb_get_intfdata(intf);
+ if (dev != NULL) {
+ unregister_netdev(dev->net);
+ ipheth_kill_urbs(dev);
+ ipheth_free_urbs(dev);
+ kfree(dev->ctrl_buf);
+ free_netdev(dev->net);
+ }
+ usb_set_intfdata(intf, NULL);
+ dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
+}
+
+static struct usb_driver ipheth_driver = {
+ .name = "ipheth",
+ .probe = ipheth_probe,
+ .disconnect = ipheth_disconnect,
+ .id_table = ipheth_table,
+};
+
+static int __init ipheth_init(void)
+{
+ int retval;
+
+ retval = usb_register(&ipheth_driver);
+ if (retval) {
+ err("usb_register failed: %d", retval);
+ return retval;
+ }
+ return 0;
+}
+
+static void __exit ipheth_exit(void)
+{
+ usb_deregister(&ipheth_driver);
+}
+
+module_init(ipheth_init);
+module_exit(ipheth_exit);
+
+MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
+MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea..c4c334d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
+ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 0000000..a44f9e0
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1001 @@
+/*
+ * USB-to-WWAN Driver for Sierra Wireless modems
+ *
+ * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
+ * <linux@sierrawireless.com>
+ *
+ * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
+ * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
+ *
+ * IMPORTANT DISCLAIMER: This driver is not commercially supported by
+ * Sierra Wireless. Use at your own risk.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DRIVER_VERSION "v.2.0"
+#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
+#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
+static const char driver_name[] = "sierra_net";
+
+/* define to enable debug messages */
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <asm/unaligned.h>
+#include <linux/usb/usbnet.h>
+
+#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
+#define SWI_GET_FW_ATTR_MASK 0x08
+
+/* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+static atomic_t iface_counter = ATOMIC_INIT(0);
+
+/*
+ * SYNC Timer Delay definition used to set the expiry time
+ */
+#define SIERRA_NET_SYNCDELAY (2*HZ)
+
+/* Max. MTU supported. The modem buffers are limited to 1500 */
+#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
+
+/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
+ * message reception ... and thus the max. received packet.
+ * (May be the cause for parse_hip returning -EINVAL)
+ */
+#define SIERRA_NET_USBCTL_BUF_LEN 1024
+
+/* list of interface numbers - used for constructing interface lists */
+struct sierra_net_iface_info {
+ const u32 infolen; /* number of interface numbers on list */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+};
+
+struct sierra_net_info_data {
+ u16 rx_urb_size;
+ struct sierra_net_iface_info whitelist;
+};
+
+/* Private data structure */
+struct sierra_net_data {
+
+ u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
+
+ u16 link_up; /* air link up or down */
+ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
+
+ u8 sync_msg[4]; /* SYNC message */
+ u8 shdwn_msg[4]; /* Shutdown message */
+
+ /* Backpointer to the container */
+ struct usbnet *usbnet;
+
+ u8 ifnum; /* interface number */
+
+/* Bit masks, must be a power of 2 */
+#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
+#define SIERRA_NET_TIMER_EXPIRY 0x02
+ unsigned long kevent_flags;
+ struct work_struct sierra_net_kevent;
+ struct timer_list sync_timer; /* For retrying SYNC sequence */
+};
+
+struct param {
+ int is_present;
+ union {
+ void *ptr;
+ u32 dword;
+ u16 word;
+ u8 byte;
+ };
+};
+
+/* HIP message type */
+#define SIERRA_NET_HIP_EXTENDEDID 0x7F
+#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
+#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
+#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
+#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
+
+#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
+#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
+
+/* 3G UMTS Link Sense Indication definitions */
+#define SIERRA_NET_HIP_LSI_UMTSID 0x78
+
+/* Reverse Channel Grant Indication HIP message */
+#define SIERRA_NET_HIP_RCGI 0x64
+
+/* LSI Protocol types */
+#define SIERRA_NET_PROTOCOL_UMTS 0x01
+/* LSI Coverage */
+#define SIERRA_NET_COVERAGE_NONE 0x00
+#define SIERRA_NET_COVERAGE_NOPACKET 0x01
+
+/* LSI Session */
+#define SIERRA_NET_SESSION_IDLE 0x00
+/* LSI Link types */
+#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+
+struct lsi_umts {
+ u8 protocol;
+ u8 unused1;
+ __be16 length;
+ /* eventually use a union for the rest - assume umts for now */
+ u8 coverage;
+ u8 unused2[41];
+ u8 session_state;
+ u8 unused3[33];
+ u8 link_type;
+ u8 pdp_addr_len; /* NW-supplied PDP address len */
+ u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */
+ u8 unused4[23];
+ u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
+ u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
+ u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
+ u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
+ u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
+ u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
+ u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
+ u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
+ u8 unused5[4];
+ u8 gw_addr_len; /* NW-supplied GW address len */
+ u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
+ u8 reserved[8];
+} __attribute__ ((packed));
+
+#define SIERRA_NET_LSI_COMMON_LEN 4
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+
+/* Forward definitions */
+static void sierra_sync_timer(unsigned long syncdata);
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
+
+/* Our own net device operations structure */
+static const struct net_device_ops sierra_net_device_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = sierra_net_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* get private data associated with passed in usbnet device */
+static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
+{
+ return (struct sierra_net_data *)dev->data[0];
+}
+
+/* set private data associated with passed in usbnet device */
+static inline void sierra_net_set_private(struct usbnet *dev,
+ struct sierra_net_data *priv)
+{
+ dev->data[0] = (unsigned long)priv;
+}
+
+/* is packet IPv4 */
+static inline int is_ip(struct sk_buff *skb)
+{
+ return (skb->protocol == cpu_to_be16(ETH_P_IP));
+}
+
+/*
+ * check passed in packet and make sure that:
+ * - it is linear (no scatter/gather)
+ * - it is ethernet (mac_header properly set)
+ */
+static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
+{
+ skb_reset_mac_header(skb); /* ethernet header */
+
+ if (skb_is_nonlinear(skb)) {
+ netdev_err(dev->net, "Non linear buffer-dropping\n");
+ return 0;
+ }
+
+ if (!pskb_may_pull(skb, ETH_HLEN))
+ return 0;
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ return 1;
+}
+
+static const u8 *save16bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->word = get_unaligned_be16(datap);
+ return datap + sizeof(p->word);
+}
+
+static const u8 *save8bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->byte = *datap;
+ return datap + sizeof(p->byte);
+}
+
+/*----------------------------------------------------------------------------*
+ * BEGIN HIP *
+ *----------------------------------------------------------------------------*/
+/* HIP header */
+#define SIERRA_NET_HIP_HDR_LEN 4
+/* Extended HIP header */
+#define SIERRA_NET_HIP_EXT_HDR_LEN 6
+
+struct hip_hdr {
+ int hdrlen;
+ struct param payload_len;
+ struct param msgid;
+ struct param msgspecific;
+ struct param extmsgid;
+};
+
+static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
+{
+ const u8 *curp = buf;
+ int padded;
+
+ if (buflen < SIERRA_NET_HIP_HDR_LEN)
+ return -EPROTO;
+
+ curp = save16bit(&hh->payload_len, curp);
+ curp = save8bit(&hh->msgid, curp);
+ curp = save8bit(&hh->msgspecific, curp);
+
+ padded = hh->msgid.byte & 0x80;
+ hh->msgid.byte &= 0x7F; /* 7 bits */
+
+ hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
+ if (hh->extmsgid.is_present) {
+ if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
+ return -EPROTO;
+
+ hh->payload_len.word &= 0x3FFF; /* 14 bits */
+
+ curp = save16bit(&hh->extmsgid, curp);
+ hh->extmsgid.word &= 0x03FF; /* 10 bits */
+
+ hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
+ } else {
+ hh->payload_len.word &= 0x07FF; /* 11 bits */
+ hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
+ }
+
+ if (padded) {
+ hh->hdrlen++;
+ hh->payload_len.word--;
+ }
+
+ /* if the real packet is shorter than the claimed length */
+ if (buflen < (hh->hdrlen + hh->payload_len.word))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void build_hip(u8 *buf, const u16 payloadlen,
+ struct sierra_net_data *priv)
+{
+ /* the following doesn't have the full functionality. We
+ * currently build only one kind of header, so it is faster this way
+ */
+ put_unaligned_be16(payloadlen, buf);
+ memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
+}
+/*----------------------------------------------------------------------------*
+ * END HIP *
+ *----------------------------------------------------------------------------*/
+
+static int sierra_net_send_cmd(struct usbnet *dev,
+ u8 *cmd, int cmdlen, const char * cmd_name)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int status;
+
+ status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_CDC_SEND_ENCAPSULATED_COMMAND,
+ USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
+ priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+
+ if (status != cmdlen && status != -ENODEV)
+ netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
+
+ return status;
+}
+
+static int sierra_net_send_sync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ status = sierra_net_send_cmd(dev, priv->sync_msg,
+ sizeof(priv->sync_msg), "SYNC");
+
+ return status;
+}
+
+static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
+{
+ dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
+ priv->tx_hdr_template[0] = 0x3F;
+ priv->tx_hdr_template[1] = ctx_ix;
+ *((u16 *)&priv->tx_hdr_template[2]) =
+ cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
+}
+
+static inline int sierra_net_is_valid_addrlen(u8 len)
+{
+ return (len == sizeof(struct in_addr));
+}
+
+static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
+{
+ struct lsi_umts *lsi = (struct lsi_umts *)data;
+
+ if (datalen < sizeof(struct lsi_umts)) {
+ netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
+ __func__, datalen,
+ sizeof(struct lsi_umts));
+ return -1;
+ }
+
+ if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length),
+ (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
+ return -1;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ /* Validate the link type */
+ if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ lsi->link_type);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
+ || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
+ return 0;
+ }
+
+ /* Validate the session state */
+ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+ netdev_err(dev->net, "Session idle, 0x%02x\n",
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Set link_sense true */
+ return 1;
+}
+
+static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
+ struct hip_hdr *hh)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int link_up;
+
+ link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
+ hh->payload_len.word);
+ if (link_up < 0) {
+ netdev_err(dev->net, "Invalid LSI\n");
+ return;
+ }
+ if (link_up) {
+ sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
+ priv->link_up = 1;
+ netif_carrier_on(dev->net);
+ } else {
+ priv->link_up = 0;
+ netif_carrier_off(dev->net);
+ }
+}
+
+static void sierra_net_dosync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* tell modem we are ready */
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+
+ /* Now, start a timer and make sure we get the Restart Indication */
+ priv->sync_timer.function = sierra_sync_timer;
+ priv->sync_timer.data = (unsigned long) dev;
+ priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
+ add_timer(&priv->sync_timer);
+}
+
+static void sierra_net_kevent(struct work_struct *work)
+{
+ struct sierra_net_data *priv =
+ container_of(work, struct sierra_net_data, sierra_net_kevent);
+ struct usbnet *dev = priv->usbnet;
+ int len;
+ int err;
+ u8 *buf;
+ u8 ifnum;
+
+ if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
+
+ /* Query the modem for the LSI message */
+ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev->net,
+ "failed to allocate buf for LS msg\n");
+ return;
+ }
+ ifnum = priv->ifnum;
+ len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_CDC_GET_ENCAPSULATED_RESPONSE,
+ USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+ 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len < 0) {
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", len);
+ } else {
+ struct hip_hdr hh;
+
+ dev_dbg(&dev->udev->dev, "%s: Received status message,"
+ " %04x bytes", __func__, len);
+
+ err = parse_hip(buf, len, &hh);
+ if (err) {
+ netdev_err(dev->net, "%s: Bad packet,"
+ " parse result %d\n", __func__, err);
+ kfree(buf);
+ return;
+ }
+
+ /* Validate packet length */
+ if (len != hh.hdrlen + hh.payload_len.word) {
+ netdev_err(dev->net, "%s: Bad packet, received"
+ " %d, expected %d\n", __func__, len,
+ hh.hdrlen + hh.payload_len.word);
+ kfree(buf);
+ return;
+ }
+
+ /* Switch on received message types */
+ switch (hh.msgid.byte) {
+ case SIERRA_NET_HIP_LSI_UMTSID:
+ dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
+ hh.msgspecific.byte);
+ sierra_net_handle_lsi(dev, buf, &hh);
+ break;
+ case SIERRA_NET_HIP_RESTART_ID:
+ dev_dbg(&dev->udev->dev, "Restart reported: %d,"
+ " stopping sync timer",
+ hh.msgspecific.byte);
+ /* Got sync resp - stop timer & clear mask */
+ del_timer_sync(&priv->sync_timer);
+ clear_bit(SIERRA_NET_TIMER_EXPIRY,
+ &priv->kevent_flags);
+ break;
+ case SIERRA_NET_HIP_HSYNC_ID:
+ dev_dbg(&dev->udev->dev, "SYNC received");
+ err = sierra_net_send_sync(dev);
+ if (err < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed %d\n", err);
+ break;
+ case SIERRA_NET_HIP_EXTENDEDID:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "extmsgid 0x%04x\n", hh.extmsgid.word);
+ break;
+ case SIERRA_NET_HIP_RCGI:
+ /* Ignored */
+ break;
+ default:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "msgid 0x%02x\n", hh.msgid.byte);
+ break;
+ }
+ }
+ kfree(buf);
+ }
+ /* The sync timer bit might be set */
+ if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
+ dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
+ sierra_net_dosync(priv->usbnet);
+ }
+
+ if (priv->kevent_flags)
+ dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
+ "kevent_flags = 0x%lx", priv->kevent_flags);
+}
+
+static void sierra_net_defer_kevent(struct usbnet *dev, int work)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ set_bit(work, &priv->kevent_flags);
+ schedule_work(&priv->sierra_net_kevent);
+}
+
+/*
+ * Sync Retransmit Timer Handler. On expiry, kick the work queue
+ */
+static void sierra_sync_timer(unsigned long syncdata)
+{
+ struct usbnet *dev = (struct usbnet *)syncdata;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+ /* Kick the deferred kevent work */
+ sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
+}
+
+static void sierra_net_status(struct usbnet *dev, struct urb *urb)
+{
+ struct usb_cdc_notification *event;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ if (urb->actual_length < sizeof *event)
+ return;
+
+ /* Add cases to handle other standard notifications. */
+ event = urb->transfer_buffer;
+ switch (event->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ /* USB 305 sends those */
+ break;
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
+ break;
+ default:
+ netdev_err(dev->net, ": unexpected notification %02x!\n",
+ event->bNotificationType);
+ break;
+ }
+}
+
+static void sierra_net_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, driver_name, sizeof info->driver);
+ strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+}
+
+static u32 sierra_net_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ /* Report link is down whenever the interface is down */
+ return sierra_net_get_private(dev)->link_up && netif_running(net);
+}
+
+static struct ethtool_ops sierra_net_ethtool_ops = {
+ .get_drvinfo = sierra_net_get_drvinfo,
+ .get_link = sierra_net_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+/* MTU can not be more than 1500 bytes, enforce it. */
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
+ return -EINVAL;
+
+ return usbnet_change_mtu(net, new_mtu);
+}
+
+static int is_whitelisted(const u8 ifnum,
+ const struct sierra_net_iface_info *whitelist)
+{
+ if (whitelist) {
+ const u8 *list = whitelist->ifaceinfo;
+ int i;
+
+ for (i = 0; i < whitelist->infolen; i++) {
+ if (list[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+{
+ int result = 0;
+ u16 *attrdata;
+
+ attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
+ if (!attrdata)
+ return -ENOMEM;
+
+ result = usb_control_msg(
+ dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ /* __u8 vendor specific request */
+ SWI_USB_REQUEST_GET_FW_ATTR,
+ USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
+ 0x0000, /* __u16 value not used */
+ 0x0000, /* __u16 index not used */
+ attrdata, /* char *data */
+ sizeof(*attrdata), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ if (result < 0) {
+ kfree(attrdata);
+ return -EIO;
+ }
+
+ *datap = *attrdata;
+
+ kfree(attrdata);
+ return result;
+}
+
+/*
+ * Collects the bulk endpoints and the status endpoint.
+ */
+static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u8 ifacenum;
+ u8 numendpoints;
+ u16 fwattr = 0;
+ int status;
+ struct ethhdr *eth;
+ struct sierra_net_data *priv;
+ static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+ static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+
+ struct sierra_net_info_data *data =
+ (struct sierra_net_info_data *)dev->driver_info->data;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
+ /* We only accept certain interfaces */
+ if (!is_whitelisted(ifacenum, &data->whitelist)) {
+ dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
+ return -ENODEV;
+ }
+ numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
+ /* We have three endpoints, bulk in and out, and a status */
+ if (numendpoints != 3) {
+ dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
+ numendpoints);
+ return -ENODEV;
+ }
+ /* Status endpoint set in usbnet_get_endpoints() */
+ dev->status = NULL;
+ status = usbnet_get_endpoints(dev, intf);
+ if (status < 0) {
+ dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
+ status);
+ return -ENODEV;
+ }
+ /* Initialize sierra private data */
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->udev->dev, "No memory");
+ return -ENOMEM;
+ }
+
+ priv->usbnet = dev;
+ priv->ifnum = ifacenum;
+ dev->net->netdev_ops = &sierra_net_device_ops;
+
+ /* change MAC addr to include ifacenum, and to be unique */
+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+ /* we will have to manufacture ethernet headers, prepare template */
+ eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
+ memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ /* prepare shutdown message template */
+ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+ /* set context index initially to 0 - prepares tx hdr template */
+ sierra_net_set_ctx_index(priv, 0);
+
+ /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
+ dev->rx_urb_size = data->rx_urb_size;
+ if (dev->udev->speed != USB_SPEED_HIGH)
+ dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
+
+ dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ /* Set up the netdev */
+ dev->net->flags |= IFF_NOARP;
+ dev->net->ethtool_ops = &sierra_net_ethtool_ops;
+ netif_carrier_off(dev->net);
+
+ sierra_net_set_private(dev, priv);
+
+ priv->kevent_flags = 0;
+
+ /* Use the shared workqueue */
+ INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
+
+ /* Only need to do this once */
+ init_timer(&priv->sync_timer);
+
+ /* verify fw attributes */
+ status = sierra_net_get_fw_attr(dev, &fwattr);
+ dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
+
+ /* test whether firmware supports DHCP */
+ if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
+ /* found incompatible firmware version */
+ dev_err(&dev->udev->dev, "Incompatible driver and firmware"
+ " versions\n");
+ kfree(priv);
+ return -ENODEV;
+ }
+ /* prepare sync message from template */
+ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
+ return 0;
+}
+
+static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* Kill the timer then flush the work queue */
+ del_timer_sync(&priv->sync_timer);
+
+ flush_scheduled_work();
+
+ /* tell modem we are going away */
+ status = sierra_net_send_cmd(dev, priv->shdwn_msg,
+ sizeof(priv->shdwn_msg), "Shutdown");
+ if (status < 0)
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", status);
+
+ sierra_net_set_private(dev, NULL);
+
+ kfree(priv);
+}
+
+static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
+ struct sk_buff *skb, int len)
+{
+ struct sk_buff *new_skb;
+
+ /* clone skb */
+ new_skb = skb_clone(skb, GFP_ATOMIC);
+
+ /* remove len bytes from original */
+ skb_pull(skb, len);
+
+ /* trim next packet to its length */
+ if (new_skb) {
+ skb_trim(new_skb, len);
+ } else {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "failed to get skb\n");
+ dev->net->stats.rx_dropped++;
+ }
+
+ return new_skb;
+}
+
+/* ---------------------------- Receive data path ----------------------*/
+static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int err;
+ struct hip_hdr hh;
+ struct sk_buff *new_skb;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* could contain multiple packets */
+ while (likely(skb->len)) {
+ err = parse_hip(skb->data, skb->len, &hh);
+ if (err) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "Invalid HIP header %d\n",
+ err);
+ /* dev->net->stats.rx_errors incremented by caller */
+ dev->net->stats.rx_length_errors++;
+ return 0;
+ }
+
+ /* Validate Extended HIP header */
+ if (!hh.extmsgid.is_present
+ || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
+
+ dev->net->stats.rx_frame_errors++;
+ /* dev->net->stats.rx_errors incremented by caller */
+ return 0;
+ }
+
+ skb_pull(skb, hh.hdrlen);
+
+ /* We are going to accept this packet, prepare it */
+ memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
+ ETH_HLEN);
+
+ /* Last packet in batch handled by usbnet */
+ if (hh.payload_len.word == skb->len)
+ return 1;
+
+ new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
+ if (new_skb)
+ usbnet_skb_return(dev, new_skb);
+
+ } /* while */
+
+ return 0;
+}
+
+/* ---------------------------- Transmit data path ----------------------*/
+struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ u16 len;
+ bool need_tail;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+ if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
+ /* enough head room as is? */
+ if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
+ /* Save the Eth/IP length and set up HIP hdr */
+ len = skb->len;
+ skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
+ /* Handle ZLP issue */
+ need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
+ % dev->maxpacket == 0);
+ if (need_tail) {
+ if (unlikely(skb_tailroom(skb) == 0)) {
+ netdev_err(dev->net, "tx_fixup: "
+ "no room for packet\n");
+ dev_kfree_skb_any(skb);
+ return NULL;
+ } else {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ len = len + 1;
+ }
+ }
+ build_hip(skb->data, len, priv);
+ return skb;
+ } else {
+ /*
+ * compensate in the future if necessary
+ */
+ netdev_err(dev->net, "tx_fixup: no room for HIP\n");
+ } /* headroom */
+ }
+
+ if (!priv->link_up)
+ dev->net->stats.tx_carrier_errors++;
+
+ /* tx_dropped incremented by usbnet */
+
+ /* filter the packet out, release it */
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+ .rx_urb_size = 8 * 1024,
+ .whitelist = {
+ .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+ .ifaceinfo = sierra_net_ifnum_list
+ }
+};
+
+static const struct driver_info sierra_net_info_68A3 = {
+ .description = "Sierra Wireless USB-to-WWAN Modem",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = sierra_net_bind,
+ .unbind = sierra_net_unbind,
+ .status = sierra_net_status,
+ .rx_fixup = sierra_net_rx_fixup,
+ .tx_fixup = sierra_net_tx_fixup,
+ .data = (unsigned long)&sierra_net_info_data_68A3,
+};
+
+static const struct usb_device_id products[] = {
+ {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+ .driver_info = (unsigned long) &sierra_net_info_68A3},
+
+ {}, /* last item */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+/* We are based on usbnet, so let it handle the USB driver specifics */
+static struct usb_driver sierra_net_driver = {
+ .name = "sierra_net",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .no_dynamic_id = 1,
+};
+
+static int __init sierra_net_init(void)
+{
+ BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ < sizeof(struct cdc_state));
+
+ return usb_register(&sierra_net_driver);
+}
+
+static void __exit sierra_net_exit(void)
+{
+ usb_deregister(&sierra_net_driver);
+}
+
+module_exit(sierra_net_exit);
+module_init(sierra_net_init);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fb783c..b0577dd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
struct scatterlist sg[2];
int err;
+ sg_init_table(sg, 2);
skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
return -ENOMEM;
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
char *p;
int i, err, offset;
+ sg_init_table(sg, MAX_SKB_FRAGS + 2);
/* page in sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
first = get_a_page(vi, gfp);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b9b9d6b..941f053 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}
+static void ppp_close(struct net_device *dev)
+{
+ ppp_tx_flush();
+}
+
static struct hdlc_proto proto = {
.start = ppp_start,
.stop = ppp_stop,
+ .close = ppp_close,
.type_trans = ppp_type_trans,
.ioctl = ppp_ioctl,
.netif_rx = ppp_rx,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 67ca4e5..115e1ae 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
all_wiphys_idle = ath9k_all_wiphys_idle(sc);
ath9k_set_wiphy_idle(aphy, idle);
- if (!idle && all_wiphys_idle)
- enable_radio = true;
+ enable_radio = (!idle && all_wiphys_idle);
/*
* After we unlock here its possible another wiphy
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 83c52a6..8972166 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ if (qc)
+ iwl_free_tfds_in_queue(priv, sta_id,
+ tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ if (qc && likely(sta_id != IWL_INVALID_STATION))
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ else if (sta_id == IWL_INVALID_STATION)
+ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
-
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ if (qc && likely(sta_id != IWL_INVALID_STATION))
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c4844ad..92b3e64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -259,7 +259,7 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
@@ -323,7 +323,7 @@ static struct iwl_lib_ops iwl6050_lib = {
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 35f819a..1460116 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
!!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
/**
* rs_collect_tx_data - Update the success/failure sliding window
*
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
- int scale_index, s32 tpt, int attempts,
- int successes)
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
{
struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count;
+ s32 fail_count, tpt;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
/* Select window for current tx bit rate */
- window = &(windows[scale_index]);
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = get_expected_tpt(tbl, scale_index);
/*
* Keep track of only the latest 62 tx frame attempts in this rate's
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
(a->is_SGI == b->is_SGI);
}
-/*
- * Static function to get the expected throughput from an iwl_scale_tbl_info
- * that wraps a NULL pointer check
- */
-static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
/*
* mac80211 sends us Tx status
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_priv *priv = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_rate_scale_data *window = NULL;
enum mac80211_rate_control_flags mac_flags;
u32 tx_rate;
struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
- s32 tpt = 0;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
return;
}
- window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
/*
* Updating the frame history depends on whether packets were
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
- tpt = get_expected_tpt(curr_tbl, rs_index);
- rs_collect_tx_data(window, rs_index, tpt,
+ rs_collect_tx_data(curr_tbl, rs_index,
info->status.ampdu_ack_len,
info->status.ampdu_ack_map);
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* table as active/search.
*/
if (table_type_matches(&tbl_type, curr_tbl))
- tpt = get_expected_tpt(curr_tbl, rs_index);
+ tmp_tbl = curr_tbl;
else if (table_type_matches(&tbl_type, other_tbl))
- tpt = get_expected_tpt(other_tbl, rs_index);
+ tmp_tbl = other_tbl;
else
continue;
-
- /* Constants mean 1 transmission, 0 successes */
- if (i < retries)
- rs_collect_tx_data(window, rs_index, tpt, 1,
- 0);
- else
- rs_collect_tx_data(window, rs_index, tpt, 1,
- legacy_success);
+ rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8b8e3e1..bdff565 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3331,6 +3331,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
+ cancel_work_sync(&priv->start_internal_scan);
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index de3b3f4..8b516c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
}
}
+ /*
+ * The above algorithm sometimes fails when the ucode
+ * reports 0 for all chains. It's not clear why that
+ * happens to start with, but it is then causing trouble
+ * because this can make us enable more chains than the
+ * hardware really has.
+ *
+ * To be safe, simply mask out any chains that we know
+ * are not on the device.
+ */
+ active_chains &= priv->hw_params.valid_rx_ant;
+
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index db050b8..049b652 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
- /* Allocate and init all Tx and Command queues */
- ret = iwl_txq_ctx_reset(priv);
- if (ret)
- return ret;
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwl_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwl_txq_ctx_reset(priv);
set_bit(STATUS_INIT, &priv->status);
@@ -3355,7 +3358,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
*/
IWL_DEBUG_INFO(priv, "perform radio reset.\n");
iwl_internal_short_hw_scan(priv);
- return;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 4ef7739..36940a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
-int iwl_txq_ctx_reset(struct iwl_priv *priv);
+int iwl_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
@@ -503,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
-int iwl_internal_short_hw_scan(struct iwl_priv *priv);
+void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6054c5f..ef1720a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1296,6 +1296,7 @@ struct iwl_priv {
struct work_struct tt_work;
struct work_struct ct_enter;
struct work_struct ct_exit;
+ struct work_struct start_internal_scan;
struct tasklet_struct irq_tasklet;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 4e1ba82..8171c70 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
+/* 6000 regulatory - indirect access */
+#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
+ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
+
/* 6000 and up regulatory tx power - indirect access */
/* max. elements per section */
#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 9ab0e41..12e455a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -470,6 +470,8 @@ EXPORT_SYMBOL(iwl_init_scan_params);
static int iwl_scan_initiate(struct iwl_priv *priv)
{
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = false;
@@ -547,24 +549,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
* internal short scan, this function should only been called while associated.
* It will reset and tune the radio to prevent possible RF related problem
*/
-int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
- int ret = 0;
+ queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_bg_start_internal_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, start_internal_scan);
+
+ mutex_lock(&priv->mutex);
if (!iwl_is_ready_rf(priv)) {
- ret = -EIO;
IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
- goto out;
+ goto unlock;
}
+
if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out;
+ goto unlock;
}
+
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
- ret = -EAGAIN;
- goto out;
+ goto unlock;
}
priv->scan_bands = 0;
@@ -577,9 +586,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = true;
queue_work(priv->workqueue, &priv->request_scan);
-
-out:
- return ret;
+ unlock:
+ mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL(iwl_internal_short_hw_scan);
@@ -965,6 +973,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
}
EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index f0b7e6c..8dd0c03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
struct iwl_queue *q = &txq->q;
struct device *dev = &priv->pci_dev->dev;
int i;
+ bool huge = false;
if (q->n_bd == 0)
return;
+ for (; q->read_ptr != q->write_ptr;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ /* we have no way to tell if it is a huge cmd ATM */
+ i = get_cmd_index(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+ huge = true;
+ continue;
+ }
+
+ pci_unmap_single(priv->pci_dev,
+ pci_unmap_addr(&txq->meta[i], mapping),
+ pci_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ if (huge) {
+ i = q->n_window;
+ pci_unmap_single(priv->pci_dev,
+ pci_unmap_addr(&txq->meta[i], mapping),
+ pci_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+
/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
kfree(txq->cmd[i]);
@@ -410,6 +434,26 @@ out_free_arrays:
}
EXPORT_SYMBOL(iwl_tx_queue_init);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
/**
* iwl_hw_txq_ctx_free - Free TXQ Context
*
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
/* Tx queues */
if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
*
* @param priv
* @return error code
*/
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
{
- int ret = 0;
+ int ret;
int txq_id, slots_num;
unsigned long flags;
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
return ret;
}
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+ }
+}
+
/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
*/
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
1000);
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Deallocate memory for all Tx queues */
- iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
spin_lock_irqsave(&priv->hcmd_lock, flags);
+ /* If this is a huge cmd, mark the huge flag also on the meta.flags
+ * of the _original_ cmd. This is used for DMA mapping clean up.
+ */
+ if (cmd->flags & CMD_SIZE_HUGE) {
+ idx = get_cmd_index(q, q->write_ptr, 0);
+ txq->meta[idx].flags = CMD_SIZE_HUGE;
+ }
+
idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
+ struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
return;
}
- cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
- cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
- meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+ /* If this is a huge cmd, clear the huge flag on the meta.flags
+ * of the _original_ cmd, so that iwl_cmd_queue_free won't unmap
+ * the DMA buffer for the scan (huge) command.
+ */
+ if (huge) {
+ cmd_index = get_cmd_index(&txq->q, index, 0);
+ txq->meta[cmd_index].flags = 0;
+ }
+ cmd_index = get_cmd_index(&txq->q, index, huge);
+ cmd = txq->cmd[cmd_index];
+ meta = &txq->meta[cmd_index];
pci_unmap_single(priv->pci_dev,
pci_unmap_addr(meta, mapping),
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
+ meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5ea587e..3749912 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state > PCI_D0 ?
+ return state >= PCI_D0 ?
pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
return 0;
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index aa495ad..7a711ee 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -244,11 +244,17 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
/* Assert Secondary Bus Reset */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET;
+ p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+ /*
+ * we should send the hot reset message for 2ms to allow it time to
+ * propagate to all downstream ports
+ */
+ msleep(2);
+
/* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET;
+ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
/*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 882bd8d..c82548a 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
- if (!sz)
- goto fail; /* BAR not implemented */
-
/*
* All bits set in sz means the device isn't working properly.
- * If it's a memory BAR or a ROM, bit 0 must be clear; if it's
- * an io BAR, bit 1 must be clear.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+ * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
+ * 1 must be clear.
*/
- if (sz == 0xffffffff) {
- dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n",
- pos, sz);
+ if (!sz || sz == 0xffffffff)
goto fail;
- }
/*
* I don't know how l can have all bits set. Copied from old code.
@@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pos, res);
}
} else {
- u32 size = pci_size(l, sz, mask);
+ sz = pci_size(l, sz, mask);
- if (!size) {
- dev_err(&dev->dev, "reg %x: invalid size "
- "(l %#x sz %#x mask %#x); broken device?",
- pos, l, sz, mask);
+ if (!sz)
goto fail;
- }
res->start = l;
- res->end = l + size;
+ res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index f230f65..854959c 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
if (!s)
return -EINVAL;
+ if (s->functions) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
/* We do not want to validate the CIS cache... */
mutex_lock(&s->ops_mutex);
destroy_cis_cache(s);
@@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
count = 0;
else {
struct pcmcia_socket *s;
- unsigned int chains;
+ unsigned int chains = 1;
if (off + count > size)
count = size - off;
@@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
- if (pccard_validate_cis(s, &chains))
+ if (!s->functions && pccard_validate_cis(s, &chains))
return -EIO;
if (!chains)
return -ENODATA;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 6206408..2d48196 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
IRQF_DISABLED, "pcmcia_insert", sock);
- if (ret)
+ if (ret) {
+ local_irq_restore(flags);
goto out1;
+ }
ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
IRQF_DISABLED, "pcmcia_eject", sock);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index cb6036d..508f94a 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -335,7 +335,6 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
mutex_lock(&s->ops_mutex);
list_del(&p_dev->socket_device_list);
- p_dev->_removed = 1;
mutex_unlock(&s->ops_mutex);
dev_dbg(&p_dev->dev, "unregistering device\n");
@@ -654,14 +653,7 @@ static int pcmcia_requery_callback(struct device *dev, void * _data)
static void pcmcia_requery(struct pcmcia_socket *s)
{
- int present, has_pfc;
-
- mutex_lock(&s->ops_mutex);
- present = s->pcmcia_state.present;
- mutex_unlock(&s->ops_mutex);
-
- if (!present)
- return;
+ int has_pfc;
if (s->functions == 0) {
pcmcia_card_add(s);
@@ -687,12 +679,10 @@ static void pcmcia_requery(struct pcmcia_socket *s)
new_funcs = mfc.nfn;
else
new_funcs = 1;
- if (old_funcs > new_funcs) {
+ if (old_funcs != new_funcs) {
+ /* we need to re-start */
pcmcia_card_remove(s, NULL);
pcmcia_card_add(s);
- } else if (new_funcs > old_funcs) {
- s->functions = new_funcs;
- pcmcia_device_add(s, 1);
}
}
@@ -728,6 +718,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
struct pcmcia_socket *s = dev->socket;
const struct firmware *fw;
int ret = -ENOMEM;
+ cistpl_longlink_mfc_t mfc;
+ int old_funcs, new_funcs = 1;
if (!filename)
return -EINVAL;
@@ -750,6 +742,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
goto release;
}
+ /* we need to re-start if the number of functions changed */
+ old_funcs = s->functions;
+ if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC,
+ &mfc))
+ new_funcs = mfc.nfn;
+
+ if (old_funcs != new_funcs)
+ ret = -EBUSY;
/* update information */
pcmcia_device_query(dev);
@@ -820,11 +820,12 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
}
if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) {
- if (dev->device_no != did->device_no)
- return 0;
+ dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n");
mutex_lock(&dev->socket->ops_mutex);
dev->socket->pcmcia_state.has_pfc = 1;
mutex_unlock(&dev->socket->ops_mutex);
+ if (dev->device_no != did->device_no)
+ return 0;
}
if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) {
@@ -835,7 +836,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
/* if this is a pseudo-multi-function device,
* we need explicit matches */
- if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO)
+ if (dev->socket->pcmcia_state.has_pfc)
return 0;
if (dev->device_no)
return 0;
@@ -858,10 +859,8 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
dev_dbg(&dev->dev, "device needs a fake CIS\n");
if (!dev->socket->fake_cis)
- pcmcia_load_firmware(dev, did->cisfile);
-
- if (!dev->socket->fake_cis)
- return 0;
+ if (pcmcia_load_firmware(dev, did->cisfile))
+ return 0;
}
if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) {
@@ -1254,9 +1253,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
switch (event) {
case CS_EVENT_CARD_REMOVAL:
- mutex_lock(&s->ops_mutex);
- s->pcmcia_state.present = 0;
- mutex_unlock(&s->ops_mutex);
+ atomic_set(&skt->present, 0);
pcmcia_card_remove(skt, NULL);
handle_event(skt, event);
mutex_lock(&s->ops_mutex);
@@ -1265,9 +1262,9 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
break;
case CS_EVENT_CARD_INSERTION:
+ atomic_set(&skt->present, 1);
mutex_lock(&s->ops_mutex);
s->pcmcia_state.has_pfc = 0;
- s->pcmcia_state.present = 1;
destroy_cis_cache(s); /* to be on the safe side... */
mutex_unlock(&s->ops_mutex);
pcmcia_card_add(skt);
@@ -1307,7 +1304,13 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
return 0;
} /* ds_event */
-
+/*
+ * NOTE: This is racy. There's no guarantee the card will still be
+ * physically present, even if the call to this function returns
+ * non-NULL. Furthermore, the device driver most likely is unbound
+ * almost immediately, so the timeframe where pcmcia_dev_present
+ * returns NULL is probably really really small.
+ */
struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
{
struct pcmcia_device *p_dev;
@@ -1317,22 +1320,9 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
if (!p_dev)
return NULL;
- mutex_lock(&p_dev->socket->ops_mutex);
- if (!p_dev->socket->pcmcia_state.present)
- goto out;
-
- if (p_dev->socket->pcmcia_state.dead)
- goto out;
-
- if (p_dev->_removed)
- goto out;
-
- if (p_dev->suspended)
- goto out;
+ if (atomic_read(&p_dev->socket->present) != 0)
+ ret = p_dev;
- ret = p_dev;
- out:
- mutex_unlock(&p_dev->socket->ops_mutex);
pcmcia_put_dev(p_dev);
return ret;
}
@@ -1382,6 +1372,8 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
return ret;
}
+ atomic_set(&socket->present, 0);
+
return 0;
}
@@ -1393,10 +1385,6 @@ static void pcmcia_bus_remove_socket(struct device *dev,
if (!socket)
return;
- mutex_lock(&socket->ops_mutex);
- socket->pcmcia_state.dead = 1;
- mutex_unlock(&socket->ops_mutex);
-
pccard_register_pcmcia(socket, NULL);
/* unregister any unbound devices */
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index caec1de..7c3d03b 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
else
printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
-#ifdef CONFIG_PCMCIA_PROBE
-
- if (s->irq.AssignedIRQ != 0) {
- /* If the interrupt is already assigned, it must be the same */
+ /* If the interrupt is already assigned, it must be the same */
+ if (s->irq.AssignedIRQ != 0)
irq = s->irq.AssignedIRQ;
- } else {
+
+#ifdef CONFIG_PCMCIA_PROBE
+ if (!irq) {
int try;
u32 mask = s->irq_mask;
void *data = p_dev; /* something unique to this device */
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 559069a..a6eb7b5 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
return;
}
for (i = base, most = 0; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
if (!res)
continue;
hole = inb(i);
@@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
bad = any = 0;
for (i = base; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
- if (!res)
+ res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ if (!res) {
+ if (!any)
+ printk(" excluding");
+ if (!bad)
+ bad = any = i;
continue;
+ }
for (j = 0; j < 8; j++)
if (inb(i+j) != most)
break;
@@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
}
if (bad) {
if ((num > 16) && (bad == base) && (i == base+num)) {
+ sub_interval(&s_data->io_db, bad, i-bad);
printk(" nothing: probe failed.\n");
return;
} else {
@@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end)
{
struct socket_data *data = s->resource_data;
- unsigned long size = end - start + 1;
+ unsigned long size;
int ret = 0;
#if defined(CONFIG_X86)
@@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
start = 0x100;
#endif
+ size = end - start + 1;
+
if (end < start)
return -EINVAL;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7bec458..6c3320d 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -390,6 +390,7 @@ config EEEPC_WMI
depends on ACPI_WMI
depends on INPUT
depends on EXPERIMENTAL
+ select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 52262b0..efe8f63 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -79,15 +79,15 @@ static uint wapf = 1;
module_param(wapf, uint, 0644);
MODULE_PARM_DESC(wapf, "WAPF value");
-static uint wlan_status = 1;
-static uint bluetooth_status = 1;
+static int wlan_status = 1;
+static int bluetooth_status = 1;
-module_param(wlan_status, uint, 0644);
+module_param(wlan_status, int, 0644);
MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
-module_param(bluetooth_status, uint, 0644);
+module_param(bluetooth_status, int, 0644);
MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 6ba6c30..66f53c3 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -217,6 +217,7 @@ static void dell_wmi_notify(u32 value, void *context)
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
printk(KERN_INFO "dell-wmi: Received unknown WMI event"
" (0x%x)\n", buffer_entry[1]);
+ kfree(obj);
return;
}
@@ -234,7 +235,7 @@ static void dell_wmi_notify(u32 value, void *context)
key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
/* Don't report brightness notifications that will also
* come via ACPI */
- return;
+ ;
} else {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 54a0157..0306174 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -169,7 +169,6 @@ struct eeepc_laptop {
struct backlight_device *backlight_device;
struct input_dev *inputdev;
- struct key_entry *keymap;
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
@@ -1204,8 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
static void eeepc_input_exit(struct eeepc_laptop *eeepc)
{
if (eeepc->inputdev) {
+ sparse_keymap_free(eeepc->inputdev);
input_unregister_device(eeepc->inputdev);
- kfree(eeepc->keymap);
}
}
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 9f88226..b227eb4 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,22 +32,34 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define EEEPC_WMI_FILE "eeepc-wmi"
+
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
+MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define EEEPC_WMI_METHODID_DEVS 0x53564544
+#define EEEPC_WMI_METHODID_DSTS 0x53544344
+
+#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
+
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x5d, { KEY_WLAN } },
@@ -58,18 +72,198 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_END, 0},
};
-static struct input_dev *eeepc_wmi_input_dev;
+struct bios_args {
+ u32 dev_id;
+ u32 ctrl_param;
+};
+
+struct eeepc_wmi {
+ struct input_dev *inputdev;
+ struct backlight_device *backlight_device;
+};
+
+static struct platform_device *platform_device;
+
+static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
+{
+ int err;
+
+ eeepc->inputdev = input_allocate_device();
+ if (!eeepc->inputdev)
+ return -ENOMEM;
+
+ eeepc->inputdev->name = "Eee PC WMI hotkeys";
+ eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
+ eeepc->inputdev->id.bustype = BUS_HOST;
+ eeepc->inputdev->dev.parent = &platform_device->dev;
+
+ err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(eeepc->inputdev);
+ if (err)
+ goto err_free_keymap;
+
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(eeepc->inputdev);
+err_free_dev:
+ input_free_device(eeepc->inputdev);
+ return err;
+}
+
+static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->inputdev) {
+ sparse_keymap_free(eeepc->inputdev);
+ input_unregister_device(eeepc->inputdev);
+ }
+
+ eeepc->inputdev = NULL;
+}
+
+static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
+{
+ struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ u32 tmp;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
+ 1, EEEPC_WMI_METHODID_DSTS, &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32)obj->integer.value;
+ else
+ tmp = 0;
+
+ if (ctrl_param)
+ *ctrl_param = tmp;
+
+ kfree(obj);
+
+ return status;
+
+}
+
+static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
+{
+ struct bios_args args = {
+ .dev_id = dev_id,
+ .ctrl_param = ctrl_param,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
+ 1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
+
+ return status;
+}
+
+static int read_brightness(struct backlight_device *bd)
+{
+ static u32 ctrl_param;
+ acpi_status status;
+
+ status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+ else
+ return ctrl_param & 0xFF;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+
+ static u32 ctrl_param;
+ acpi_status status;
+
+ ctrl_param = bd->props.brightness;
+
+ status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+ else
+ return 0;
+}
+
+static const struct backlight_ops eeepc_wmi_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
+{
+ struct backlight_device *bd = eeepc->backlight_device;
+ int old = bd->props.brightness;
+ int new;
+
+ if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+ new = code - NOTIFY_BRNUP_MIN + 1;
+ else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ new = code - NOTIFY_BRNDOWN_MIN;
+
+ bd->props.brightness = new;
+ backlight_update_status(bd);
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 15;
+ bd = backlight_device_register(EEEPC_WMI_FILE,
+ &platform_device->dev, eeepc,
+ &eeepc_wmi_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ eeepc->backlight_device = bd;
+
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->backlight_device)
+ backlight_device_unregister(eeepc->backlight_device);
+
+ eeepc->backlight_device = NULL;
+}
static void eeepc_wmi_notify(u32 value, void *context)
{
+ struct eeepc_wmi *eeepc = context;
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int code;
+ int orig_code;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_err("EEEPC WMI: bad event status 0x%x\n", status);
+ pr_err("bad event status 0x%x\n", status);
return;
}
@@ -77,81 +271,142 @@ static void eeepc_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_INTEGER) {
code = obj->integer.value;
+ orig_code = code;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ else if (code >= NOTIFY_BRNDOWN_MIN &&
+ code <= NOTIFY_BRNDOWN_MAX)
code = NOTIFY_BRNDOWN_MIN;
- if (!sparse_keymap_report_event(eeepc_wmi_input_dev,
+ if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
+ if (!acpi_video_backlight_support())
+ eeepc_wmi_backlight_notify(eeepc, orig_code);
+ }
+
+ if (!sparse_keymap_report_event(eeepc->inputdev,
code, 1, true))
- pr_info("EEEPC WMI: Unknown key %x pressed\n", code);
+ pr_info("Unknown key %x pressed\n", code);
}
kfree(obj);
}
-static int eeepc_wmi_input_setup(void)
+static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
{
+ struct eeepc_wmi *eeepc;
int err;
+ acpi_status status;
- eeepc_wmi_input_dev = input_allocate_device();
- if (!eeepc_wmi_input_dev)
- return -ENOMEM;
-
- eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
- eeepc_wmi_input_dev->phys = "wmi/input0";
- eeepc_wmi_input_dev->id.bustype = BUS_HOST;
+ eeepc = platform_get_drvdata(device);
- err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL);
+ err = eeepc_wmi_input_init(eeepc);
if (err)
- goto err_free_dev;
+ goto error_input;
- err = input_register_device(eeepc_wmi_input_dev);
- if (err)
- goto err_free_keymap;
+ if (!acpi_video_backlight_support()) {
+ err = eeepc_wmi_backlight_init(eeepc);
+ if (err)
+ goto error_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
+
+ status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
+ eeepc_wmi_notify, eeepc);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to register notify handler - %d\n",
+ status);
+ err = -ENODEV;
+ goto error_wmi;
+ }
return 0;
-err_free_keymap:
- sparse_keymap_free(eeepc_wmi_input_dev);
-err_free_dev:
- input_free_device(eeepc_wmi_input_dev);
+error_wmi:
+ eeepc_wmi_backlight_exit(eeepc);
+error_backlight:
+ eeepc_wmi_input_exit(eeepc);
+error_input:
return err;
}
+static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
+{
+ struct eeepc_wmi *eeepc;
+
+ eeepc = platform_get_drvdata(device);
+ wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+ eeepc_wmi_backlight_exit(eeepc);
+ eeepc_wmi_input_exit(eeepc);
+
+ return 0;
+}
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = EEEPC_WMI_FILE,
+ .owner = THIS_MODULE,
+ },
+ .probe = eeepc_wmi_platform_probe,
+ .remove = __devexit_p(eeepc_wmi_platform_remove),
+};
+
static int __init eeepc_wmi_init(void)
{
+ struct eeepc_wmi *eeepc;
int err;
- acpi_status status;
- if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) {
- pr_warning("EEEPC WMI: No known WMI GUID found\n");
+ if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
+ !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) {
+ pr_warning("No known WMI GUID found\n");
return -ENODEV;
}
- err = eeepc_wmi_input_setup();
- if (err)
- return err;
+ eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
+ if (!eeepc)
+ return -ENOMEM;
- status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
- eeepc_wmi_notify, NULL);
- if (ACPI_FAILURE(status)) {
- sparse_keymap_free(eeepc_wmi_input_dev);
- input_unregister_device(eeepc_wmi_input_dev);
- pr_err("EEEPC WMI: Unable to register notify handler - %d\n",
- status);
- return -ENODEV;
+ platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
+ if (!platform_device) {
+ pr_warning("Unable to allocate platform device\n");
+ err = -ENOMEM;
+ goto fail_platform;
+ }
+
+ err = platform_device_add(platform_device);
+ if (err) {
+ pr_warning("Unable to add platform device\n");
+ goto put_dev;
+ }
+
+ platform_set_drvdata(platform_device, eeepc);
+
+ err = platform_driver_register(&platform_driver);
+ if (err) {
+ pr_warning("Unable to register platform driver\n");
+ goto del_dev;
}
return 0;
+
+del_dev:
+ platform_device_del(platform_device);
+put_dev:
+ platform_device_put(platform_device);
+fail_platform:
+ kfree(eeepc);
+
+ return err;
}
static void __exit eeepc_wmi_exit(void)
{
- wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
- sparse_keymap_free(eeepc_wmi_input_dev);
- input_unregister_device(eeepc_wmi_input_dev);
+ struct eeepc_wmi *eeepc;
+
+ eeepc = platform_get_drvdata(platform_device);
+ platform_driver_unregister(&platform_driver);
+ platform_device_unregister(platform_device);
+ kfree(eeepc);
}
module_init(eeepc_wmi_init);
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index b6218f1..552cad8 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -109,7 +109,7 @@ static int max8925_is_enabled(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
- ret = max8925_reg_read(info->i2c, info->vol_reg);
+ ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index a681f5e..ad036dd 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
dev_get_platdata(&pdev->dev);
int i;
+ platform_set_drvdata(pdev, NULL);
+
for (i = 0; i < pdata->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
+ kfree(priv);
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bbea90b..acf222f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1899,7 +1899,8 @@ restart:
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
erp_fn = base->discipline->erp_action(cqr);
- erp_fn(cqr);
+ if (IS_ERR(erp_fn(cqr)))
+ continue;
goto restart;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6927e75..6632649 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
cqr->retries);
dasd_block_set_timer(device->block, (HZ << 3));
}
- return cqr;
+ return erp;
}
ccw = cqr->cpaddr;
@@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
/* add erp and initialize with default TIC */
erp = dasd_3990_erp_add_erp(cqr);
+ if (IS_ERR(erp))
+ return erp;
+
/* inspect sense, determine specific ERP if possible */
if (erp != cqr) {
@@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
if (erp == NULL) {
/* no matching erp found - set up erp */
erp = dasd_3990_erp_additional_erp(cqr);
+ if (IS_ERR(erp))
+ return erp;
} else {
/* matching erp found - set all leading erp's to DONE */
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 18daf16a..7217966 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void)
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc) {
- free_page((unsigned long) ipl_block);
- return rc;
- }
- if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4038f5b4..ce7cb87 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@
#include "chsc.h"
static void *sei_page;
+static DEFINE_SPINLOCK(sda_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
@@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void)
kfree(sei_page);
}
-int __init
-chsc_enable_facility(int operation_code)
+int chsc_enable_facility(int operation_code)
{
int ret;
- struct {
+ static struct {
struct chsc_header request;
u8 reserved1:4;
u8 format:4;
@@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code)
u32 reserved5:4;
u32 format2:4;
u32 reserved6:24;
- } __attribute__ ((packed)) *sda_area;
+ } __attribute__ ((packed, aligned(4096))) sda_area;
- sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!sda_area)
- return -ENOMEM;
- sda_area->request.length = 0x0400;
- sda_area->request.code = 0x0031;
- sda_area->operation_code = operation_code;
+ spin_lock(&sda_lock);
+ memset(&sda_area, 0, sizeof(sda_area));
+ sda_area.request.length = 0x0400;
+ sda_area.request.code = 0x0031;
+ sda_area.operation_code = operation_code;
- ret = chsc(sda_area);
+ ret = chsc(&sda_area);
if (ret > 0) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
- switch (sda_area->response.code) {
+ switch (sda_area.response.code) {
case 0x0101:
ret = -EOPNOTSUPP;
break;
default:
- ret = chsc_error_from_response(sda_area->response.code);
+ ret = chsc_error_from_response(sda_area.response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
- operation_code, sda_area->response.code);
+ operation_code, sda_area.response.code);
out:
- free_page((unsigned long)sda_area);
+ spin_unlock(&sda_lock);
return ret;
}
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 404f630..3b6f4ad 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch)
* since we don't have a way to clear the subchannel and
* cannot disable it with a request running.
*/
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
if (!cc && scsw_stctl(&schib.scsw))
return -EAGAIN;
return 0;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f736cdc..5feea1a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch)
struct schib schib;
int ccode, retry, ret = 0;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
for (retry = 0; retry < 5; retry++) {
@@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch)
return ccode;
switch (ccode) {
case 0: /* successful */
- if (stsch(sch->schid, &schib) ||
+ if (stsch_err(sch->schid, &schib) ||
!css_sch_is_valid(&schib))
return -ENODEV;
if (cio_check_config(sch, &schib)) {
@@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch)
{
struct schib schib;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
@@ -771,7 +771,7 @@ cio_get_console_sch_no(void)
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch(schid, &console_subchannel.schib) != 0 ||
+ if (stsch_err(schid, &console_subchannel.schib) != 0 ||
(console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
!console_subchannel.schib.pmcw.dnv)
return -1;
@@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
cc = 0;
for (retry=0;retry<3;retry++) {
schib->pmcw.ena = 0;
- cc = msch(schid, schib);
+ cc = msch_err(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
+ if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
return -ENODEV;
if (!schib->pmcw.ena)
return 0;
@@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
pgm_check_occured = 0;
s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
+ rc = stsch_err(schid, addr);
s390_base_pgm_handler_fn = NULL;
/* The program check handler could have changed pgm_check_occured. */
@@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
/* No default clear strategy */
break;
}
- stsch(schid, &schib);
+ stsch_err(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
out:
@@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
- if (stsch(schid, &schib))
+ if (stsch_err(schid, &schib))
return -ENODEV;
if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
return -ENODEV;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2769da5..5116491 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -870,15 +870,10 @@ static int __init css_bus_init(void)
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
- switch (ret) {
- case 0: /* Success. */
- max_ssid = __MAX_SSID;
- break;
- case -ENOMEM:
- goto out;
- default:
+ if (ret)
max_ssid = 0;
- }
+ else /* Success. */
+ max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
@@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init_sync(void)
}
subsys_initcall_sync(channel_subsystem_init_sync);
+void channel_subsystem_reinit(void)
+{
+ chsc_enable_facility(CHSC_SDA_OC_MSS);
+}
+
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c56ab94..c9b8526 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
"device information:\n", get_clock());
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 1856489..b3b1d2f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
- if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
blktrc.flags |= ZFCP_BLK_LAT_VALID;
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
- zfcp_fsf_req_trace(req, scpnt);
-
skip_fsfstatus:
+ zfcp_fsf_req_trace(req, scpnt);
zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
scpnt->host_scribble = NULL;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 72617b6..e641922 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for"
"mgmt_invalidate_icds \n");
+ spin_unlock(&ctrl->mbox_lock);
return -1;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6cf9dc3..6b624e7 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -362,6 +362,7 @@ struct bnx2i_hba {
u32 num_ccell;
int ofld_conns_active;
+ wait_queue_head_t eh_wait;
int max_active_conns;
struct iscsi_cid_queue cid_que;
@@ -381,6 +382,7 @@ struct bnx2i_hba {
spinlock_t lock; /* protects hba structure access */
struct mutex net_dev_lock;/* sync net device access */
+ int hba_shutdown_tmo;
/*
* PCI related info.
*/
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6d8172e..5d9296c 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -177,11 +177,22 @@ void bnx2i_stop(void *handle)
struct bnx2i_hba *hba = handle;
/* check if cleanup happened in GOING_DOWN context */
- clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
&hba->adapter_state))
iscsi_host_for_each_session(hba->shost,
bnx2i_drop_session);
+
+ /* Wait for all endpoints to be torn down, Chip will be reset once
+ * control returns to network driver. So it is required to cleanup and
+ * release all connection resources before returning from this routine.
+ */
+ wait_event_interruptible_timeout(hba->eh_wait,
+ (hba->ofld_conns_active == 0),
+ hba->hba_shutdown_tmo);
+ /* This flag should be cleared last so that ep_disconnect() gracefully
+ * cleans up connection context
+ */
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
}
/**
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f2e9b18..fa68ab3 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
spin_lock_init(&hba->lock);
mutex_init(&hba->net_dev_lock);
+ init_waitqueue_head(&hba->eh_wait);
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ hba->hba_shutdown_tmo = 240 * HZ;
+ else /* 5706/5708/5709 */
+ hba->hba_shutdown_tmo = 30 * HZ;
if (iscsi_host_add(shost, &hba->pcidev->dev))
goto free_dump_mem;
@@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
*/
hba = bnx2i_check_route(dst_addr);
- if (!hba) {
- rc = -ENOMEM;
+ if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+ rc = -EINVAL;
goto check_busy;
}
@@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
(bnx2i_ep->state ==
EP_STATE_CONNECT_COMPL)),
msecs_to_jiffies(timeout_ms));
- if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+ if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
rc = -1;
if (rc > 0)
@@ -1957,6 +1962,8 @@ return_bnx2i_ep:
if (!hba->ofld_conns_active)
bnx2i_unreg_dev_all();
+
+ wake_up_interruptible(&hba->eh_wait);
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 4967643..0435d04 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
{
struct pci_dev *pDev = NULL;
- adpt_hba* pHba;
+ adpt_hba *pHba;
+ adpt_hba *next;
PINFO("Detecting Adaptec I2O RAID controllers...\n");
@@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht)
}
/* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
// Activate does get status , init outbound, and get hrt
if (adpt_i2o_activate_hba(pHba) < 0) {
adpt_i2o_delete_hba(pHba);
@@ -243,7 +245,8 @@ rebuild_sys_tab:
PDEBUG("HBA's in OPERATIONAL state\n");
printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
if (adpt_i2o_lct_get(pHba) < 0){
adpt_i2o_delete_hba(pHba);
@@ -263,7 +266,8 @@ rebuild_sys_tab:
adpt_sysfs_class = NULL;
}
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
if (adpt_scsi_host_alloc(pHba, sht) < 0){
adpt_i2o_delete_hba(pHba);
continue;
@@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
}
}
pci_dev_put(pHba->pDev);
- kfree(pHba);
-
if (adpt_sysfs_class)
device_destroy(adpt_sysfs_class,
MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+ kfree(pHba);
if(hba_count <= 0){
unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ff5ec5a..88bad0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -323,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
srp_cmd->buf_fmt = fmt;
}
-static void unmap_sg_list(int num_entries,
- struct device *dev,
- struct srp_direct_buf *md)
-{
- int i;
-
- for (i = 0; i < num_entries; ++i)
- dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
-}
-
/**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
@@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
return;
- else if (out_fmt == SRP_DATA_DESC_DIRECT ||
- in_fmt == SRP_DATA_DESC_DIRECT) {
- struct srp_direct_buf *data =
- (struct srp_direct_buf *) cmd->add_data;
- dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
- } else {
- struct srp_indirect_buf *indirect =
- (struct srp_indirect_buf *) cmd->add_data;
- int num_mapped = indirect->table_desc.len /
- sizeof(struct srp_direct_buf);
- if (num_mapped <= MAX_INDIRECT_BUFS) {
- unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
- return;
- }
-
- unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
- }
+ if (evt_struct->cmnd)
+ scsi_dma_unmap(evt_struct->cmnd);
}
static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0ee725c..02143af 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+ if (sock->sk->sk_sleep) {
sock->sk->sk_err = EIO;
wake_up_interruptible(sock->sk->sk_sleep);
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index ec37238..d62b3e4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
dd_data = cmdiocbq->context1;
/* normal completion and timeout crossed paths, already done */
if (!dd_data) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
@@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
dd_data = cmdiocbq->context1;
/* normal completion and timeout crossed paths, already done */
if (!dd_data) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 359e9a7..1c7ef55 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
return 0;
done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
kfree(sp->ctx);
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 09d6d4b..caeb7d1 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
if (conn_err_detail)
*conn_err_detail = mbox_sts[5];
if (tcp_source_port_num)
- *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16;
+ *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
if (connection_id)
*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
status = QLA_SUCCESS;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d0b7d2f..333580b 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
{
Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_lock_irq(SCpnt->device->host->host_lock);
if (wd7000_adapter_reset(host) < 0) {
spin_unlock_irq(SCpnt->device->host->host_lock);
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index 7bb5fee..b5aaef9 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
}
spin_lock_irqsave(&port->lock, flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
@@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
static void mcf_config_port(struct uart_port *port, int flags)
{
port->type = PORT_MCF;
+ port->fifosize = MCFUART_TXFIFOSIZE;
/* Clear mask, so no surprise interrupts. */
writeb(0, port->membase + MCFUART_UIMR);
@@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/*
* Define the basic serial functions we support.
*/
-static struct uart_ops mcf_uart_ops = {
+static const struct uart_ops mcf_uart_ops = {
.tx_empty = mcf_tx_empty,
.get_mctrl = mcf_get_mctrl,
.set_mctrl = mcf_set_mctrl,
@@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = {
.verify_port = mcf_verify_port,
};
-static struct mcf_uart mcf_ports[3];
+static struct mcf_uart mcf_ports[4];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 175d202..8cfa5b1 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -105,6 +105,10 @@ struct serial_cfg_mem {
* manfid 0x0160, 0x0104
* This card appears to have a 14.7456MHz clock.
*/
+/* Generic Modem: MD55x (GPRS/EDGE) have
+ * Elan VPU16551 UART with 14.7456MHz oscillator
+ * manfid 0x015D, 0x4C45
+ */
static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port)
{
port->uartclk = 14745600;
@@ -196,6 +200,11 @@ static const struct serial_quirk quirks[] = {
.multi = -1,
.setup = quirk_setup_brainboxes_0104,
}, {
+ .manfid = 0x015D,
+ .prodid = 0x4C45,
+ .multi = -1,
+ .setup = quirk_setup_brainboxes_0104,
+ }, {
.manfid = MANFID_IBM,
.prodid = ~0,
.multi = -1,
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c
index a67c622..e2c44ec 100644
--- a/drivers/staging/dt3155/dt3155_drv.c
+++ b/drivers/staging/dt3155/dt3155_drv.c
@@ -57,19 +57,8 @@ MA 02111-1307 USA
extern void printques(int);
-#ifdef MODULE
#include <linux/module.h>
#include <linux/interrupt.h>
-
-
-MODULE_LICENSE("GPL");
-
-#endif
-
-#ifndef CONFIG_PCI
-#error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)"
-#endif
-
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/poll.h>
@@ -84,6 +73,9 @@ MODULE_LICENSE("GPL");
#include "dt3155_io.h"
#include "allocator.h"
+
+MODULE_LICENSE("GPL");
+
/* Error variable. Zero means no error. */
int dt3155_errno = 0;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6a3b5ca..2f3dc4c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev)
intf->condition = USB_INTERFACE_BINDING;
- /* Bound interfaces are initially active. They are
+ /* Probed interfaces are initially active. They are
* runtime-PM-enabled only if the driver has autosuspend support.
* They are sensitive to their children's power states.
*/
@@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* Bound interfaces are initially active. They are
+ /* Claimed interfaces are initially inactive (suspended). They are
* runtime-PM-enabled only if the driver has autosuspend support.
* They are sensitive to their children's power states.
*/
- pm_runtime_set_active(dev);
+ pm_runtime_set_suspended(dev);
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
@@ -1170,7 +1170,7 @@ done:
static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
{
int status = 0;
- int i = 0;
+ int i = 0, n = 0;
struct usb_interface *intf;
if (udev->state == USB_STATE_NOTATTACHED ||
@@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
- for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ n = udev->actconfig->desc.bNumInterfaces;
+ for (i = n - 1; i >= 0; --i) {
intf = udev->actconfig->interface[i];
status = usb_suspend_interface(udev, intf, msg);
if (status != 0)
@@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
- while (--i >= 0) {
+ while (++i < n) {
intf = udev->actconfig->interface[i];
usb_resume_interface(udev, intf, msg, 0);
}
@@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
return status;
}
+static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
+{
+ int w, i;
+ struct usb_interface *intf;
+
+ /* Remote wakeup is needed only when we actually go to sleep.
+ * For things like FREEZE and QUIESCE, if the device is already
+ * autosuspended then its current wakeup setting is okay.
+ */
+ if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
+ if (udev->state != USB_STATE_SUSPENDED)
+ udev->do_remote_wakeup = 0;
+ return;
+ }
+
+ /* If remote wakeup is permitted, see whether any interface drivers
+ * actually want it.
+ */
+ w = 0;
+ if (device_may_wakeup(&udev->dev) && udev->actconfig) {
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
+ w |= intf->needs_remote_wakeup;
+ }
+ }
+
+ /* If the device is autosuspended with the wrong wakeup setting,
+ * autoresume now so the setting can be changed.
+ */
+ if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
+ pm_runtime_resume(&udev->dev);
+ udev->do_remote_wakeup = w;
+}
+
/* The device lock is held by the PM core */
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
do_unbind_rebind(udev, DO_UNBIND);
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ choose_wakeup(udev, msg);
return usb_suspend_both(udev, msg);
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 207e7a8..13ead00 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
*/
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
+ INIT_LIST_HEAD(&ehci->cached_sitd_list);
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1937267..c7178bc 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -801,7 +801,7 @@ static int ehci_hub_control (
* this bit; seems too long to spin routinely...
*/
retval = handshake(ehci, status_reg,
- PORT_RESET, 0, 750);
+ PORT_RESET, 0, 1000);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
wIndex + 1, retval);
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index aeda96e..1f3f01e 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
if (ehci->async)
qh_put (ehci->async);
ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a67a003..40a8583 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
snprintf(supply, sizeof(supply), "hsusb%d", i);
omap->regulator[i] = regulator_get(omap->dev, supply);
- if (IS_ERR(omap->regulator[i]))
+ if (IS_ERR(omap->regulator[i])) {
+ omap->regulator[i] = NULL;
dev_dbg(&pdev->dev,
"failed to get ehci port%d regulator\n", i);
- else
+ } else {
regulator_enable(omap->regulator[i]);
+ }
}
ret = omap_start_ehc(omap, hcd);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a0aaaaf..805ec63 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci)
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... */
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->next_uframe = -1;
return 0;
@@ -2139,13 +2139,27 @@ sitd_complete (
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
- /* OK to recycle this SITD now that its completion callback ran. */
+
done:
sitd->urb = NULL;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
-
+ if (ehci->clock_frame != sitd->frame) {
+ /* OK to recycle this SITD now. */
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ } else {
+ /* HW might remember this SITD, so we can't recycle it yet.
+ * Move it to a safe place until a new frame starts.
+ */
+ list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
+ if (stream->refcount == 2) {
+ /* If iso_stream_put() were called here, stream
+ * would be freed. Instead, just prevent reuse.
+ */
+ stream->ep->hcpriv = NULL;
+ stream->ep = NULL;
+ }
+ }
return retval;
}
@@ -2211,9 +2225,10 @@ done:
/*-------------------------------------------------------------------------*/
-static void free_cached_itd_list(struct ehci_hcd *ehci)
+static void free_cached_lists(struct ehci_hcd *ehci)
{
struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
struct ehci_iso_stream *stream = itd->stream;
@@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
list_move(&itd->itd_list, &stream->free_list);
iso_stream_put(ehci, stream);
}
+
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ struct ehci_iso_stream *stream = sitd->stream;
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ }
}
/*-------------------------------------------------------------------------*/
@@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci)
clock_frame = -1;
}
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
clock %= mod;
@@ -2414,7 +2436,7 @@ restart:
clock = now;
clock_frame = clock >> 3;
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
} else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b1dce96..556c0b4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
int next_uframe; /* scan periodic, start here */
unsigned periodic_sched; /* periodic activity count */
- /* list of itds completed while clock_frame was still active */
+ /* list of itds & sitds completed while clock_frame was still active */
struct list_head cached_itd_list;
+ struct list_head cached_sitd_list;
unsigned clock_frame;
/* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
clear_bit (action, &ehci->actions);
}
-static void free_cached_itd_list(struct ehci_hcd *ehci);
+static void free_cached_lists(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 4aa08d3..d22fb4d 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -23,7 +23,7 @@
#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
#endif
-#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
static struct clk *usb11_clk;
static struct clk *usb20_clk;
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index a9555cb..de8ef94 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -49,6 +49,7 @@ struct usb_sevsegdev {
u16 textlength;
u8 shadow_power; /* for PM */
+ u8 has_interface_pm;
};
/* sysfs_streq can't replace this completely
@@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
{
int rc;
- if (!mydev->shadow_power && mydev->powered) {
+ if (mydev->powered && !mydev->has_interface_pm) {
rc = usb_autopm_get_interface(mydev->intf);
if (rc < 0)
return;
+ mydev->has_interface_pm = 1;
}
+ if (mydev->shadow_power != 1)
+ return;
+
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
@@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
- if (mydev->shadow_power && !mydev->powered)
+ if (!mydev->powered && mydev->has_interface_pm) {
usb_autopm_put_interface(mydev->intf);
+ mydev->has_interface_pm = 0;
+ }
}
static void update_display_mode(struct usb_sevsegdev *mydev)
@@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface,
mydev->intf = interface;
usb_set_intfdata(interface, mydev);
+ /* PM */
+ mydev->shadow_power = 1; /* currently active */
+ mydev->has_interface_pm = 0; /* have not issued autopm_get */
+
/*set defaults */
mydev->textmode = 0x02; /* ascii mode */
mydev->mode_msb = 0x06; /* 6 characters */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 73d5f34..c97a0bb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -97,6 +97,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index d640dc9..a352d5f3 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -134,3 +134,7 @@
/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
#define SANWA_VENDOR_ID 0x11ad
#define SANWA_PRODUCT_ID 0x0001
+
+/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
+#define ADLINK_VENDOR_ID 0x0b63
+#define ADLINK_ND6530_PRODUCT_ID 0x6530
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 0b93620..7e3bea2 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -42,6 +42,14 @@
#define CMOTECH_PRODUCT_CDU550 0x5553
#define CMOTECH_PRODUCT_CDX650 0x6512
+/* LG devices */
+#define LG_VENDOR_ID 0x1004
+#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */
+
+/* Sanyo devices */
+#define SANYO_VENDOR_ID 0x0474
+#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
+
static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 9202f94..ef0bdb0 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
+ { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0afe5c7..880e990 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,7 +172,7 @@ static unsigned int product_5052_count;
/* the array dimension is the number of default entries plus */
/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
/* null entry */
-static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
@@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
};
-static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1]
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw");
MODULE_FIRMWARE("mts_cdma.fw");
MODULE_FIRMWARE("mts_gsm.fw");
MODULE_FIRMWARE("mts_edge.fw");
+MODULE_FIRMWARE("mts_mt9234mu.fw");
+MODULE_FIRMWARE("mts_mt9234zba.fw");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
@@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev)
const struct firmware *fw_p;
char buf[32];
+ dbg("%s\n", __func__);
/* try ID specific firmware first, then try generic firmware */
sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
dev->descriptor.idProduct);
@@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev)
case MTS_EDGE_PRODUCT_ID:
strcpy(buf, "mts_edge.fw");
break;
- }
+ case MTS_MT9234MU_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234mu.fw");
+ break;
+ case MTS_MT9234ZBA_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234zba.fw");
+ break;
+ case MTS_MT9234ZBAOLD_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234zba.fw");
+ break;
+ }
}
if (buf[0] == '\0') {
if (tdev->td_is_3410)
@@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev)
return -ENOENT;
}
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
- dev_err(&dev->dev, "%s - firmware too large\n", __func__);
+ dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
return -ENOENT;
}
@@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev)
status = ti_do_download(dev, pipe, buffer, fw_p->size);
kfree(buffer);
} else {
+ dbg("%s ENOMEM\n", __func__);
status = -ENOMEM;
}
release_firmware(fw_p);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index f323c60..2aac195 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -45,6 +45,9 @@
#define MTS_CDMA_PRODUCT_ID 0xF110
#define MTS_GSM_PRODUCT_ID 0xF111
#define MTS_EDGE_PRODUCT_ID 0xF112
+#define MTS_MT9234MU_PRODUCT_ID 0xF114
+#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
/* Commands */
#define TI_GET_VERSION 0x01
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 46e79d3..7ec24e4 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
keep_alives = 0;
for (cnt = 0;
- keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+ keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
cnt++) {
unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
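The one-character wusbcore change above (<= to <) fixes an off-by-one: keep_alives indexes an array with WUIE_ELT_MAX slots, so the loop must stop once that many entries have been filled. A tiny sketch of the same bound, with a hypothetical MAX_ELTS standing in for WUIE_ELT_MAX:

#include <stdio.h>

#define MAX_ELTS 4

int main(void)
{
	unsigned char elts[MAX_ELTS];
	int filled = 0, port;

	/* "filled < MAX_ELTS", not "<=": elts[MAX_ELTS] is out of bounds */
	for (port = 0; filled < MAX_ELTS && port < 16; port++) {
		if (port % 3 == 0)            /* pretend this port needs an entry */
			elts[filled++] = (unsigned char)port;
	}
	printf("filled %d of %d slots\n", filled, MAX_ELTS);
	return 0;
}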
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5be11c9..e69d238 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
int log_all)
{
int i;
+
+ if (!mem)
+ return 0;
+
for (i = 0; i < mem->nregions; ++i) {
struct vhost_memory_region *m = mem->regions + i;
unsigned long a = m->userspace_addr;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 581d2db..ecf4055 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -49,6 +49,7 @@ enum {
M_MBP_2, /* MacBook Pro 2nd gen */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro 5,1 */
M_UNKNOWN /* placeholder */
};
@@ -70,6 +71,7 @@ static struct efifb_dmi_info {
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
@@ -106,6 +108,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
{},
};
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 3aed388..bfec7c2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
num = min(num, ARRAY_SIZE(vb->pfns));
for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
- struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
+ struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN);
if (!page) {
if (printk_ratelimit())
dev_printk(KERN_INFO, &vb->vdev->dev,
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index ef36fca..3a7e9ff 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/irq.h>
#include <mach/hardware.h>
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1ed3d55..17726a0 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
- int t = ((s16)rom[1] << 8) | rom[0];
- t = t*1000/16;
- return t;
+ s16 t = le16_to_cpup((__le16 *)rom);
+ return t*1000/16;
}
static inline int w1_DS18S20_convert_temp(u8 rom[9])
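The DS18B20 fix above reads the 16-bit temperature register through le16_to_cpup() instead of shifting the bytes by hand, which documents the wire format (little-endian) and keeps the sign extension correct on all hosts. A userspace sketch of the same conversion, assuming the scratchpad layout rom[0] = LSB, rom[1] = MSB as in the driver:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for le16_to_cpup(): assemble a signed 16-bit
 * value from two little-endian bytes. */
static int16_t le16_to_host(const uint8_t *p)
{
	return (int16_t)((uint16_t)p[0] | ((uint16_t)p[1] << 8));
}

static int ds18b20_convert_temp(const uint8_t rom[9])
{
	int16_t t = le16_to_host(rom);
	return t * 1000 / 16;          /* 1/16 degC units -> millidegrees */
}

int main(void)
{
	uint8_t plus_25[9]  = { 0x91, 0x01 };   /* 0x0191 = +25.0625 C */
	uint8_t minus_10[9] = { 0x5e, 0xff };   /* 0xff5e = -10.125 C  */

	printf("%d mC\n", ds18b20_convert_temp(plus_25));   /* 25062  */
	printf("%d mC\n", ds18b20_convert_temp(minus_10));  /* -10125 */
	return 0;
}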
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0e8468f..0bf5020 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -194,10 +194,10 @@ config EP93XX_WATCHDOG
config OMAP_WATCHDOG
tristate "OMAP Watchdog"
- depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
help
- Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y'
- here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer.
+ Support for the TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog.
+ Say 'Y' here to enable the watchdog timer.
config PNX4008_WATCHDOG
tristate "PNX4008 Watchdog"
@@ -302,7 +302,7 @@ config TS72XX_WATCHDOG
config MAX63XX_WATCHDOG
tristate "Max63xx watchdog"
- depends on ARM
+ depends on ARM && HAS_IOMEM
help
Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 8b724aa..801ead1 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
#ifdef CONFIG_FSL_BOOKE
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
-#define WDTP_MASK (WDTP(0))
+#define WDTP_MASK (WDTP(0x3f))
#else
#define WDTP(x) (TCR_WP(x))
#define WDTP_MASK (TCR_WP_MASK)
@@ -121,7 +121,7 @@ static ssize_t booke_wdt_write(struct file *file, const char __user *buf,
return count;
}
-static const struct watchdog_info ident = {
+static struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "PowerPC Book-E Watchdog",
};
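The booke_wdt fix above matters because WDTP() on FSL Book-E scatters the period bits across two fields: WDTP(0) evaluates to 0, so the old WDTP_MASK cleared nothing and stale period bits survived in TCR. WDTP(0x3f) sets every bit the macro can ever produce, which is what a mask has to be. A quick check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Same bit-scattering as the FSL Book-E WDTP() macro. */
#define WDTP(x) (((((uint32_t)(x)) & 0x3) << 30) | ((((uint32_t)(x)) & 0x3c) << 15))

int main(void)
{
	printf("WDTP(0)    = 0x%08x\n", (unsigned)WDTP(0));    /* 0x00000000: useless as a mask   */
	printf("WDTP(0x3f) = 0x%08x\n", (unsigned)WDTP(0x3f)); /* 0xc01e0000: covers all the bits */
	printf("WDTP(5)    = 0x%08x\n", (unsigned)WDTP(5));    /* one concrete period value       */
	return 0;
}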
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 75f3a83..3053ff0 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry)
static void max63xx_wdt_disable(void)
{
+ u8 val;
+
spin_lock(&io_lock);
- __raw_writeb(3, wdt_base);
+ val = __raw_readb(wdt_base);
+ val &= ~MAX6369_WDSET;
+ val |= 3;
+ __raw_writeb(val, wdt_base);
spin_unlock(&io_lock);
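The max63xx change above turns a blind write of 3 into a read-modify-write, so only the watchdog SET field changes and the other bits in the shared register keep their values. A generic sketch of the pattern against an in-memory "register", with WDSET_MASK assumed to be a 3-bit field like the driver's MAX6369_WDSET:

#include <stdio.h>

#define WDSET_MASK 0x07                 /* assumed 3-bit watchdog SET field */

static unsigned char fake_reg = 0xa5;   /* other bits must survive */

static unsigned char reg_read(void)        { return fake_reg; }
static void reg_write(unsigned char v)     { fake_reg = v; }

static void wdt_disable(void)
{
	unsigned char val = reg_read();    /* read   */
	val &= ~WDSET_MASK;                /* modify: clear only our field */
	val |= 3;                          /* 3 = "disabled" encoding */
	reg_write(val);                    /* write  */
}

int main(void)
{
	wdt_disable();
	/* 0xa5 -> field cleared, 3 written back: 0xa3 */
	printf("reg = 0x%02x\n", fake_reg);
	return 0;
}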
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index c8eadd4..88c83aa 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -67,8 +67,8 @@ static DEFINE_SPINLOCK(sbwd_lock);
void sbwdog_set(char __iomem *wdog, unsigned long t)
{
spin_lock(&sbwd_lock);
- __raw_writeb(0, wdog - 0x10);
- __raw_writeq(t & 0x7fffffUL, wdog);
+ __raw_writeb(0, wdog);
+ __raw_writeq(t & 0x7fffffUL, wdog - 0x10);
spin_unlock(&sbwd_lock);
}
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 8d44c9b..c7d67e9 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -30,7 +30,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned int margin = 60; /* (secs) Default is 1 minute */
static unsigned long wdt_status;
-static DEFINE_SPINLOCK(wdt_lock);
+static DEFINE_MUTEX(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -45,26 +45,26 @@ static DEFINE_SPINLOCK(wdt_lock);
static void wdt_send_data(unsigned char command, unsigned char data)
{
- outb(command, COMMAND_PORT);
- msleep(100);
outb(data, DATA_PORT);
msleep(200);
+ outb(command, COMMAND_PORT);
+ msleep(100);
}
static void wdt_enable(void)
{
- spin_lock(&wdt_lock);
+ mutex_lock(&wdt_lock);
wdt_send_data(IFACE_ON_COMMAND, 1);
wdt_send_data(REBOOT_COMMAND, margin);
- spin_unlock(&wdt_lock);
+ mutex_unlock(&wdt_lock);
}
static void wdt_disable(void)
{
- spin_lock(&wdt_lock);
+ mutex_lock(&wdt_lock);
wdt_send_data(IFACE_ON_COMMAND, 0);
wdt_send_data(REBOOT_COMMAND, 0);
- spin_unlock(&wdt_lock);
+ mutex_unlock(&wdt_lock);
}
static int fitpc2_wdt_open(struct inode *inode, struct file *file)
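The sbc_fitpc2_wdt change above is needed because wdt_send_data() calls msleep(), and sleeping while holding a spinlock is not allowed in the kernel; a mutex is a sleeping lock and may be held across the delay. A userspace analogue of the shape using pthreads (illustrative only, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t wdt_lock = PTHREAD_MUTEX_INITIALIZER;

/* The real wdt_send_data() sleeps between port writes, which is why
 * the lock protecting it has to be one that tolerates sleeping. */
static void wdt_send_data(int command, int data)
{
	(void)command; (void)data;
	usleep(100 * 1000);                /* stands in for msleep(100) */
}

static void wdt_enable(int margin)
{
	pthread_mutex_lock(&wdt_lock);
	wdt_send_data(1 /* IFACE_ON */, 1);
	wdt_send_data(2 /* REBOOT   */, margin);
	pthread_mutex_unlock(&wdt_lock);
}

int main(void)
{
	wdt_enable(60);
	printf("watchdog armed\n");
	return 0;
}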
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 5c5bc84..f8b86e9 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -238,6 +238,13 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return ERR_PTR(-ENOMEM);
}
+ rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
+ if (rc) {
+ __putname(v9ses->aname);
+ __putname(v9ses->uname);
+ return ERR_PTR(rc);
+ }
+
spin_lock(&v9fs_sessionlist_lock);
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
@@ -301,6 +308,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return fid;
error:
+ bdi_destroy(&v9ses->bdi);
return ERR_PTR(retval);
}
@@ -326,6 +334,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
__putname(v9ses->uname);
__putname(v9ses->aname);
+ bdi_destroy(&v9ses->bdi);
+
spin_lock(&v9fs_sessionlist_lock);
list_del(&v9ses->slist);
spin_unlock(&v9fs_sessionlist_lock);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index a0a8d3d..bec4d0b 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -20,6 +20,7 @@
* Boston, MA 02111-1301 USA
*
*/
+#include <linux/backing-dev.h>
/**
* enum p9_session_flags - option flags for each 9P session
@@ -102,6 +103,7 @@ struct v9fs_session_info {
u32 uid; /* if ACCESS_SINGLE, the uid that has access */
struct p9_client *clnt; /* 9p client */
struct list_head slist; /* list of sessions registered with v9fs */
+ struct backing_dev_info bdi;
};
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 491108b..806da5d 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -77,6 +77,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_blocksize = 1 << sb->s_blocksize_bits;
sb->s_magic = V9FS_MAGIC;
sb->s_op = &v9fs_super_ops;
+ sb->s_bdi = &v9ses->bdi;
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
MS_NOATIME;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c54dad4e60..a10f258 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/fscache.h>
+#include <linux/backing-dev.h>
#include "afs.h"
#include "afs_vl.h"
@@ -313,6 +314,7 @@ struct afs_volume {
unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
struct afs_server *servers[8]; /* servers on which volume resides (ordered) */
struct rw_semaphore server_sem; /* lock for accessing current server */
+ struct backing_dev_info bdi;
};
/*
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5e813a8..b3feddc 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
struct afs_super_info *super;
struct vfsmount *mnt;
- struct page *page = NULL;
+ struct page *page;
size_t size;
- char *buf, *devname = NULL, *options = NULL;
+ char *buf, *devname, *options;
int ret;
_enter("{%s}", mntpt->d_name.name);
@@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
ret = -EINVAL;
size = mntpt->d_inode->i_size;
if (size > PAGE_SIZE - 1)
- goto error;
+ goto error_no_devname;
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
if (!devname)
- goto error;
+ goto error_no_devname;
options = (char *) get_zeroed_page(GFP_KERNEL);
if (!options)
- goto error;
+ goto error_no_options;
/* read the contents of the AFS special symlink */
page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto error;
+ goto error_no_page;
}
ret = -EIO;
@@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
return mnt;
error:
- if (page)
- page_cache_release(page);
- if (devname)
- free_page((unsigned long) devname);
- if (options)
- free_page((unsigned long) options);
+ page_cache_release(page);
+error_no_page:
+ free_page((unsigned long) options);
+error_no_options:
+ free_page((unsigned long) devname);
+error_no_devname:
_leave(" = %d", ret);
return ERR_PTR(ret);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 14f6431..e932e5a 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -311,6 +311,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
sb->s_fs_info = as;
+ sb->s_bdi = &as->volume->bdi;
/* allocate the root inode and dentry */
fid.vid = as->volume->vid;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index a353e69..401eeb2 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -106,6 +106,10 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
volume->cell = params->cell;
volume->vid = vlocation->vldb.vid[params->type];
+ ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY);
+ if (ret)
+ goto error_bdi;
+
init_rwsem(&volume->server_sem);
/* look up all the applicable server records */
@@ -151,6 +155,8 @@ error:
return ERR_PTR(ret);
error_discard:
+ bdi_destroy(&volume->bdi);
+error_bdi:
up_write(&params->cell->vl_sem);
for (loop = volume->nservers - 1; loop >= 0; loop--)
@@ -200,6 +206,7 @@ void afs_put_volume(struct afs_volume *volume)
for (loop = volume->nservers - 1; loop >= 0; loop--)
afs_put_server(volume->servers[loop]);
+ bdi_destroy(&volume->bdi);
kfree(volume);
_leave(" [destroyed]");
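The 9p and afs hunks above (and the cifs, coda and btrfs hunks further below) all follow the same recipe: embed a backing_dev_info in the per-mount structure, set it up with bdi_setup_and_register() while the mount is being built, point sb->s_bdi at it, and destroy it on every error path as well as when the mount goes away. A generic sketch of that init/teardown shape with goto-based unwinding (names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bdi   { int registered; };
struct mount { struct bdi bdi; char *name; };

static int  bdi_setup(struct bdi *b)    { b->registered = 1; return 0; }
static void bdi_teardown(struct bdi *b) { b->registered = 0; }

static struct mount *mount_create(const char *name)
{
	struct mount *m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	if (bdi_setup(&m->bdi))            /* set up early ...              */
		goto err_free;

	m->name = strdup(name);
	if (!m->name)
		goto err_bdi;              /* ... and unwind in reverse order */

	return m;

err_bdi:
	bdi_teardown(&m->bdi);
err_free:
	free(m);
	return NULL;
}

static void mount_destroy(struct mount *m)
{
	free(m->name);
	bdi_teardown(&m->bdi);             /* mirrors bdi_destroy() in put_super */
	free(m);
}

int main(void)
{
	struct mount *m = mount_create("example");
	if (!m)
		return 1;
	printf("mounted %s (bdi registered=%d)\n", m->name, m->bdi.registered);
	mount_destroy(m);
	return 0;
}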
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 7ab23e0..2c5f9a0 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1005,15 +1005,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
}
} else if (!mm->start_data) {
mm->start_data = seg->addr;
-#ifndef CONFIG_MMU
mm->end_data = seg->addr + phdr->p_memsz;
-#endif
}
-
-#ifdef CONFIG_MMU
- if (seg->addr + phdr->p_memsz > mm->end_data)
- mm->end_data = seg->addr + phdr->p_memsz;
-#endif
}
seg++;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index e0e769b..49566c1 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_code),(int)text_len);
+ (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
goto failed;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index dd76930..55dcb78 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -406,17 +406,23 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
- struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+ struct inode *bd_inode = filp->f_mapping->host;
+ struct block_device *bdev = I_BDEV(bd_inode);
int error;
- error = sync_blockdev(bdev);
- if (error)
- return error;
-
- error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
- (BLKDEV_IFL_WAIT));
+ /*
+ * There is no need to serialise calls to blkdev_issue_flush with
+ * i_mutex and doing so causes performance issues with concurrent
+ * O_SYNC writers to a block device.
+ */
+ mutex_unlock(&bd_inode->i_mutex);
+
+ error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
if (error == -EOPNOTSUPP)
error = 0;
+
+ mutex_lock(&bd_inode->i_mutex);
+
return error;
}
EXPORT_SYMBOL(blkdev_fsync);
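The block_dev.c change above drops i_mutex for the duration of blkdev_issue_flush(): the flush does not need the mutex, and holding it serialises concurrent O_SYNC writers behind a slow device operation. A userspace sketch of the "unlock around the slow part" shape using pthreads (illustrative only; in the kernel the caller of ->fsync holds i_mutex):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;

static int issue_flush(void)
{
	usleep(10 * 1000);     /* stands in for waiting on the device cache flush */
	return 0;
}

/* Called with i_mutex held, as ->fsync is. */
static int blkdev_fsync_like(void)
{
	int error;

	/* The flush does not rely on i_mutex, so drop it while we wait
	 * and let other O_SYNC writers make progress. */
	pthread_mutex_unlock(&i_mutex);
	error = issue_flush();
	pthread_mutex_lock(&i_mutex);

	return error;
}

int main(void)
{
	pthread_mutex_lock(&i_mutex);
	printf("fsync -> %d\n", blkdev_fsync_like());
	pthread_mutex_unlock(&i_mutex);
	return 0;
}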
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e7b8f2c..feca041 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,8 +44,6 @@ static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
-static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
-
/*
* end_io_wq structs are used to do processing in task context when an IO is
* complete. This is used during reads to verify checksums, and it is used
@@ -1375,19 +1373,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
- bdi->name = "btrfs";
bdi->capabilities = BDI_CAP_MAP_COPY;
- err = bdi_init(bdi);
+ err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
if (err)
return err;
- err = bdi_register(bdi, NULL, "btrfs-%d",
- atomic_inc_return(&btrfs_bdi_num));
- if (err) {
- bdi_destroy(bdi);
- return err;
- }
-
bdi->ra_pages = default_backing_dev_info.ra_pages;
bdi->unplug_io_fn = btrfs_unplug_io_fn;
bdi->unplug_io_data = info;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index aa3cd7c..4125937 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -337,16 +337,15 @@ out:
/*
* Get ref for the oldest snapc for an inode with dirty data... that is, the
* only snap context we are allowed to write back.
- *
- * Caller holds i_lock.
*/
-static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
- u64 *snap_size)
+static struct ceph_snap_context *get_oldest_context(struct inode *inode,
+ u64 *snap_size)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
+ spin_lock(&inode->i_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
break;
}
}
- if (!snapc && ci->i_snap_realm) {
- snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
+ if (!snapc && ci->i_head_snapc) {
+ snapc = ceph_get_snap_context(ci->i_head_snapc);
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
- return snapc;
-}
-
-static struct ceph_snap_context *get_oldest_context(struct inode *inode,
- u64 *snap_size)
-{
- struct ceph_snap_context *snapc = NULL;
-
- spin_lock(&inode->i_lock);
- snapc = __get_oldest_context(inode, snap_size);
spin_unlock(&inode->i_lock);
return snapc;
}
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
int len = PAGE_CACHE_SIZE;
loff_t i_size;
int err = 0;
- struct ceph_snap_context *snapc;
+ struct ceph_snap_context *snapc, *oldest;
u64 snap_size = 0;
long writeback_stat;
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %p page %p not dirty?\n", inode, page);
goto out;
}
- if (snapc != get_oldest_context(inode, &snap_size)) {
+ oldest = get_oldest_context(inode, &snap_size);
+ if (snapc->seq > oldest->seq) {
dout("writepage %p page %p snapc %p not writeable - noop\n",
inode, page, (void *)page->private);
/* we should only noop if called by kswapd */
WARN_ON((current->flags & PF_MEMALLOC) == 0);
+ ceph_put_snap_context(oldest);
goto out;
}
+ ceph_put_snap_context(oldest);
/* is this a partial page at end of file? */
if (snap_size)
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
ClearPagePrivate(page);
end_page_writeback(page);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
- ceph_put_snap_context(snapc);
+ ceph_put_snap_context(snapc); /* page's reference */
out:
return err;
}
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req,
dout("inode %p skipping page %p\n", inode, page);
wbc->pages_skipped++;
}
+ ceph_put_snap_context((void *)page->private);
page->private = 0;
ClearPagePrivate(page);
- ceph_put_snap_context(snapc);
dout("unlocking %d %p\n", i, page);
end_page_writeback(page);
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping,
int range_whole = 0;
int should_loop = 1;
pgoff_t max_pages = 0, max_pages_ever = 0;
- struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
struct pagevec pvec;
int done = 0;
int rc = 0;
@@ -770,9 +762,10 @@ get_more_pages:
}
/* only if matching snap context */
- if (snapc != (void *)page->private) {
- dout("page snapc %p != oldest %p\n",
- (void *)page->private, snapc);
+ pgsnapc = (void *)page->private;
+ if (pgsnapc->seq > snapc->seq) {
+ dout("page snapc %p %lld > oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq, snapc, snapc->seq);
unlock_page(page);
if (!locked_pages)
continue; /* keep looking for snap */
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode,
struct ceph_snap_context *snapc)
{
struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
- return !oldest || snapc->seq <= oldest->seq;
+ int ret = !oldest || snapc->seq <= oldest->seq;
+
+ ceph_put_snap_context(oldest);
+ return ret;
}
/*
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file,
int pos_in_page = pos & ~PAGE_CACHE_MASK;
int end_in_page = pos_in_page + len;
loff_t i_size;
- struct ceph_snap_context *snapc;
int r;
+ struct ceph_snap_context *snapc, *oldest;
retry_locked:
/* writepages currently holds page lock, but if we change that later, */
@@ -947,23 +943,24 @@ retry_locked:
BUG_ON(!ci->i_snap_realm);
down_read(&mdsc->snap_rwsem);
BUG_ON(!ci->i_snap_realm->cached_context);
- if (page->private &&
- (void *)page->private != ci->i_snap_realm->cached_context) {
+ snapc = (void *)page->private;
+ if (snapc && snapc != ci->i_head_snapc) {
/*
* this page is already dirty in another (older) snap
* context! is it writeable now?
*/
- snapc = get_oldest_context(inode, NULL);
+ oldest = get_oldest_context(inode, NULL);
up_read(&mdsc->snap_rwsem);
- if (snapc != (void *)page->private) {
+ if (snapc->seq > oldest->seq) {
+ ceph_put_snap_context(oldest);
dout(" page %p snapc %p not current or oldest\n",
- page, (void *)page->private);
+ page, snapc);
/*
* queue for writeback, and wait for snapc to
* be writeable or written
*/
- snapc = ceph_get_snap_context((void *)page->private);
+ snapc = ceph_get_snap_context(snapc);
unlock_page(page);
ceph_queue_writeback(inode);
r = wait_event_interruptible(ci->i_cap_wq,
@@ -973,6 +970,7 @@ retry_locked:
return r;
return -EAGAIN;
}
+ ceph_put_snap_context(oldest);
/* yay, writeable, do it now (without dropping page lock) */
dout(" page %p snapc %p not current, but oldest\n",
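A recurring theme in the ceph addr.c hunks above is that get_oldest_context() now returns a snap context with a reference already held, so every caller must pair it with ceph_put_snap_context(), including on the early-exit paths. A minimal refcounting sketch of that contract (simplified, not the ceph API):

#include <stdio.h>
#include <stdlib.h>

struct snapc { int refs; long long seq; };

static struct snapc *snapc_get(struct snapc *s) { if (s) s->refs++; return s; }
static void snapc_put(struct snapc *s)
{
	if (s && --s->refs == 0)
		free(s);
}

/* The embedded 'oldest' keeps its own long-lived reference, so it is
 * never actually freed in this sketch. */
static struct snapc oldest = { .refs = 1, .seq = 5 };

/* Returns a *referenced* context: the caller owns one put. */
static struct snapc *get_oldest_context(void)
{
	return snapc_get(&oldest);
}

static int context_is_writeable_or_written(long long seq)
{
	struct snapc *o = get_oldest_context();
	int ret = !o || seq <= o->seq;

	snapc_put(o);          /* drop the reference the getter took */
	return ret;
}

int main(void)
{
	printf("seq 4 writeable: %d\n", context_is_writeable_or_written(4));
	printf("seq 9 writeable: %d\n", context_is_writeable_or_written(9));
	printf("refs left on oldest: %d\n", oldest.refs);   /* back to 1 */
	return 0;
}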
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 3710e07..aa2239f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1205,6 +1205,12 @@ retry:
if (capsnap->dirty_pages || capsnap->writing)
continue;
+ /*
+ * if cap writeback already occurred, we should have dropped
+ * the capsnap in ceph_put_wrbuffer_cap_refs.
+ */
+ BUG_ON(capsnap->dirty == 0);
+
/* pick mds, take s_mutex */
mds = __ceph_get_cap_mds(ci, &mseq);
if (session && session->s_mds != mds) {
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
}
spin_unlock(&inode->i_lock);
- dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
- last ? "last" : "");
+ dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
+ last ? " last" : "", put ? " put" : "");
if (last && !flushsnaps)
ceph_check_caps(ci, 0, NULL);
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
{
struct inode *inode = &ci->vfs_inode;
int last = 0;
- int last_snap = 0;
+ int complete_capsnap = 0;
+ int drop_capsnap = 0;
int found = 0;
struct ceph_cap_snap *capsnap = NULL;
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->context == snapc) {
found = 1;
- capsnap->dirty_pages -= nr;
- last_snap = !capsnap->dirty_pages;
break;
}
}
BUG_ON(!found);
+ capsnap->dirty_pages -= nr;
+ if (capsnap->dirty_pages == 0) {
+ complete_capsnap = 1;
+ if (capsnap->dirty == 0)
+ /* cap writeback completed before we created
+ * the cap_snap; no FLUSHSNAP is needed */
+ drop_capsnap = 1;
+ }
dout("put_wrbuffer_cap_refs on %p cap_snap %p "
- " snap %lld %d/%d -> %d/%d %s%s\n",
+ " snap %lld %d/%d -> %d/%d %s%s%s\n",
inode, capsnap, capsnap->context->seq,
ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
ci->i_wrbuffer_ref, capsnap->dirty_pages,
last ? " (wrbuffer last)" : "",
- last_snap ? " (capsnap last)" : "");
+ complete_capsnap ? " (complete capsnap)" : "",
+ drop_capsnap ? " (drop capsnap)" : "");
+ if (drop_capsnap) {
+ ceph_put_snap_context(capsnap->context);
+ list_del(&capsnap->ci_item);
+ list_del(&capsnap->flushing_item);
+ ceph_put_cap_snap(capsnap);
+ }
}
spin_unlock(&inode->i_lock);
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
iput(inode);
- } else if (last_snap) {
+ } else if (complete_capsnap) {
ceph_flush_snaps(ci);
wake_up(&ci->i_cap_wq);
}
+ if (drop_capsnap)
+ iput(inode);
}
/*
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
break;
}
WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(" removing cap_snap %p follows %lld\n",
- capsnap, follows);
+ dout(" removing %p cap_snap %p follows %lld\n",
+ inode, capsnap, follows);
ceph_put_snap_context(capsnap->context);
list_del(&capsnap->ci_item);
list_del(&capsnap->flushing_item);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 7261dc6..ea8ee2e 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -171,11 +171,11 @@ more:
spin_lock(&inode->i_lock);
spin_lock(&dcache_lock);
+ last = dentry;
+
if (err < 0)
goto out_unlock;
- last = dentry;
-
p = p->prev;
filp->f_pos++;
@@ -312,7 +312,7 @@ more:
req->r_readdir_offset = fi->next_offset;
req->r_args.readdir.frag = cpu_to_le32(frag);
req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
- req->r_num_caps = max_entries;
+ req->r_num_caps = max_entries + 1;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err < 0) {
ceph_mdsc_put_request(req);
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct inode *inode = ceph_get_snapdir(parent);
dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
dentry, dentry->d_name.len, dentry->d_name.name, inode);
+ BUG_ON(!d_unhashed(dentry));
d_add(dentry, inode);
err = 0;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index aca82d5..26f883c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct inode *in = NULL;
struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
+ struct ceph_client *client = ceph_sb_to_client(sb);
int i = 0;
int err = 0;
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
return err;
}
- if (rinfo->head->is_dentry && !req->r_aborted) {
+ /*
+ * ignore null lease/binding on snapdir ENOENT, or else we
+ * will have trouble splicing in the virtual snapdir later
+ */
+ if (rinfo->head->is_dentry && !req->r_aborted &&
+ (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
+ client->mount_args->snapdir_name,
+ req->r_dentry->d_name.len))) {
/*
* lookup link rename : null -> possibly existing inode
* mknod symlink mkdir : null -> new inode
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 8f1715f..cdaaa13 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
+#ifdef CONFIG_LOCKDEP
+static struct lock_class_key socket_class;
+#endif
+
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
con->sock = sock;
sock->sk->sk_allocation = GFP_NOFS;
+#ifdef CONFIG_LOCKDEP
+ lockdep_set_class(&sock->sk->sk_lock, &socket_class);
+#endif
+
set_sock_callbacks(sock, con);
dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con)
con->out_msg = NULL;
}
con->in_seq = 0;
+ con->in_seq_acked = 0;
}
/*
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 21c6623..2e2c15e 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -314,71 +314,6 @@ bad:
return ERR_PTR(err);
}
-
-/*
- * osd map
- */
-void ceph_osdmap_destroy(struct ceph_osdmap *map)
-{
- dout("osdmap_destroy %p\n", map);
- if (map->crush)
- crush_destroy(map->crush);
- while (!RB_EMPTY_ROOT(&map->pg_temp)) {
- struct ceph_pg_mapping *pg =
- rb_entry(rb_first(&map->pg_temp),
- struct ceph_pg_mapping, node);
- rb_erase(&pg->node, &map->pg_temp);
- kfree(pg);
- }
- while (!RB_EMPTY_ROOT(&map->pg_pools)) {
- struct ceph_pg_pool_info *pi =
- rb_entry(rb_first(&map->pg_pools),
- struct ceph_pg_pool_info, node);
- rb_erase(&pi->node, &map->pg_pools);
- kfree(pi);
- }
- kfree(map->osd_state);
- kfree(map->osd_weight);
- kfree(map->osd_addr);
- kfree(map);
-}
-
-/*
- * adjust max osd value. reallocate arrays.
- */
-static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
-{
- u8 *state;
- struct ceph_entity_addr *addr;
- u32 *weight;
-
- state = kcalloc(max, sizeof(*state), GFP_NOFS);
- addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
- weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
- if (state == NULL || addr == NULL || weight == NULL) {
- kfree(state);
- kfree(addr);
- kfree(weight);
- return -ENOMEM;
- }
-
- /* copy old? */
- if (map->osd_state) {
- memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
- memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
- memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
- kfree(map->osd_state);
- kfree(map->osd_addr);
- kfree(map->osd_weight);
- }
-
- map->osd_state = state;
- map->osd_weight = weight;
- map->osd_addr = addr;
- map->max_osd = max;
- return 0;
-}
-
/*
* rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
* to a set of osds)
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
return NULL;
}
+static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
+{
+ rb_erase(&pi->node, root);
+ kfree(pi->name);
+ kfree(pi);
+}
+
void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
{
ceph_decode_copy(p, &pi->v, sizeof(pi->v));
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
}
+static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
+{
+ struct ceph_pg_pool_info *pi;
+ u32 num, len, pool;
+
+ ceph_decode_32_safe(p, end, num, bad);
+ dout(" %d pool names\n", num);
+ while (num--) {
+ ceph_decode_32_safe(p, end, pool, bad);
+ ceph_decode_32_safe(p, end, len, bad);
+ dout(" pool %d len %d\n", pool, len);
+ pi = __lookup_pg_pool(&map->pg_pools, pool);
+ if (pi) {
+ kfree(pi->name);
+ pi->name = kmalloc(len + 1, GFP_NOFS);
+ if (pi->name) {
+ memcpy(pi->name, *p, len);
+ pi->name[len] = '\0';
+ dout(" name is %s\n", pi->name);
+ }
+ }
+ *p += len;
+ }
+ return 0;
+
+bad:
+ return -EINVAL;
+}
+
+/*
+ * osd map
+ */
+void ceph_osdmap_destroy(struct ceph_osdmap *map)
+{
+ dout("osdmap_destroy %p\n", map);
+ if (map->crush)
+ crush_destroy(map->crush);
+ while (!RB_EMPTY_ROOT(&map->pg_temp)) {
+ struct ceph_pg_mapping *pg =
+ rb_entry(rb_first(&map->pg_temp),
+ struct ceph_pg_mapping, node);
+ rb_erase(&pg->node, &map->pg_temp);
+ kfree(pg);
+ }
+ while (!RB_EMPTY_ROOT(&map->pg_pools)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(rb_first(&map->pg_pools),
+ struct ceph_pg_pool_info, node);
+ __remove_pg_pool(&map->pg_pools, pi);
+ }
+ kfree(map->osd_state);
+ kfree(map->osd_weight);
+ kfree(map->osd_addr);
+ kfree(map);
+}
+
+/*
+ * adjust max osd value. reallocate arrays.
+ */
+static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
+{
+ u8 *state;
+ struct ceph_entity_addr *addr;
+ u32 *weight;
+
+ state = kcalloc(max, sizeof(*state), GFP_NOFS);
+ addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
+ weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
+ if (state == NULL || addr == NULL || weight == NULL) {
+ kfree(state);
+ kfree(addr);
+ kfree(weight);
+ return -ENOMEM;
+ }
+
+ /* copy old? */
+ if (map->osd_state) {
+ memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
+ memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
+ memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
+ kfree(map->osd_state);
+ kfree(map->osd_addr);
+ kfree(map->osd_weight);
+ }
+
+ map->osd_state = state;
+ map->osd_weight = weight;
+ map->osd_addr = addr;
+ map->max_osd = max;
+ return 0;
+}
+
/*
* decode a full map.
*/
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
- pi = kmalloc(sizeof(*pi), GFP_NOFS);
+ pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
pi->id = ceph_decode_32(p);
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__decode_pool(p, pi);
__insert_pg_pool(&map->pg_pools, pi);
}
+
+ if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+ goto bad;
+
ceph_decode_32_safe(p, end, map->pool_max, bad);
ceph_decode_32_safe(p, end, map->flags, bad);
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (!pi) {
- pi = kmalloc(sizeof(*pi), GFP_NOFS);
+ pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi) {
err = -ENOMEM;
goto bad;
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
__decode_pool(p, pi);
}
+ if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+ goto bad;
/* old_pool */
ceph_decode_32_safe(p, end, len, bad);
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
ceph_decode_32_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
- if (pi) {
- rb_erase(&pi->node, &map->pg_pools);
- kfree(pi);
- }
+ if (pi)
+ __remove_pg_pool(&map->pg_pools, pi);
}
/* new_up */
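The new __decode_pool_names() above is a typical bounds-checked decode of a length-prefixed list: read a count, then for each entry read a pool id and a name length, copy the name only if the pool is known, and always advance the cursor by the declared length. A userspace sketch of the same shape, with a hypothetical decode_u32() doing the bounds check that ceph_decode_32_safe() does in the driver:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Bounds-checked u32 read; returns -1 if the buffer is short.
 * Assumes a little-endian host for brevity. */
static int decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
	if (end - *p < 4)
		return -1;
	memcpy(v, *p, 4);
	*p += 4;
	return 0;
}

static int decode_pool_names(const uint8_t *p, const uint8_t *end)
{
	uint32_t num, pool, len;

	if (decode_u32(&p, end, &num))
		return -1;
	while (num--) {
		if (decode_u32(&p, end, &pool) || decode_u32(&p, end, &len))
			return -1;
		if ((uint32_t)(end - p) < len)
			return -1;                 /* name would run past the buffer */
		printf("pool %u name \"%.*s\"\n", pool, (int)len, (const char *)p);
		p += len;                          /* always consume the declared length */
	}
	return 0;
}

int main(void)
{
	/* one entry: pool 7, name "rbd" */
	uint8_t buf[] = { 1,0,0,0,  7,0,0,0,  3,0,0,0,  'r','b','d' };
	return decode_pool_names(buf, buf + sizeof(buf)) ? 1 : 0;
}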
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
index 1fb55af..8bc9f1e 100644
--- a/fs/ceph/osdmap.h
+++ b/fs/ceph/osdmap.h
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info {
int id;
struct ceph_pg_pool v;
int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+ char *name;
};
struct ceph_pg_mapping {
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index 26ac8b8..a1fc1d0 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -11,8 +11,10 @@
/*
* osdmap encoding versions
*/
-#define CEPH_OSDMAP_INC_VERSION 4
-#define CEPH_OSDMAP_VERSION 4
+#define CEPH_OSDMAP_INC_VERSION 5
+#define CEPH_OSDMAP_INC_VERSION_EXT 5
+#define CEPH_OSDMAP_VERSION 5
+#define CEPH_OSDMAP_VERSION_EXT 5
/*
* fs id
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e6f9bc5..2b88126 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num)
* Caller must hold snap_rwsem for read (i.e., the realm topology won't
* change).
*/
-void ceph_queue_cap_snap(struct ceph_inode_info *ci,
- struct ceph_snap_context *snapc)
+void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
- dout("queue_cap_snap %p snapc %p seq %llu used %d"
- " already pending\n", inode, snapc, snapc->seq, used);
+ dout("queue_cap_snap %p already pending\n", inode);
kfree(capsnap);
} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+ struct ceph_snap_context *snapc = ci->i_head_snapc;
+
igrab(inode);
atomic_set(&capsnap->nref, 1);
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
INIT_LIST_HEAD(&capsnap->flushing_item);
capsnap->follows = snapc->seq - 1;
- capsnap->context = ceph_get_snap_context(snapc);
capsnap->issued = __ceph_caps_issued(ci, NULL);
capsnap->dirty = __ceph_caps_dirty(ci);
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
snapshot. */
capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
ci->i_wrbuffer_ref_head = 0;
- ceph_put_snap_context(ci->i_head_snapc);
+ capsnap->context = snapc;
ci->i_head_snapc = NULL;
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->ctime = inode->i_ctime;
capsnap->time_warp_seq = ci->i_time_warp_seq;
if (capsnap->dirty_pages) {
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
"still has %d dirty pages\n", inode, capsnap,
capsnap->context, capsnap->context->seq,
- capsnap->size, capsnap->dirty_pages);
+ ceph_cap_string(capsnap->dirty), capsnap->size,
+ capsnap->dirty_pages);
return 0;
}
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
inode, capsnap, capsnap->context,
- capsnap->context->seq, capsnap->size);
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
@@ -602,7 +603,7 @@ more:
if (lastinode)
iput(lastinode);
lastinode = inode;
- ceph_queue_cap_snap(ci, realm->cached_context);
+ ceph_queue_cap_snap(ci);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
spin_unlock(&realm->inodes_with_caps_lock);
spin_unlock(&inode->i_lock);
- ceph_queue_cap_snap(ci,
- ci->i_snap_realm->cached_context);
+ ceph_queue_cap_snap(ci);
iput(inode);
continue;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ca702c6..e30dfbb 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
-extern void ceph_queue_cap_snap(struct ceph_inode_info *ci,
- struct ceph_snap_context *snapc);
+extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4797787..246a167 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -18,6 +18,8 @@
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
+#include <linux/backing-dev.h>
+
#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
#define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */
#define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */
@@ -50,5 +52,6 @@ struct cifs_sb_info {
#ifdef CONFIG_CIFS_DFS_UPCALL
char *mountdata; /* mount options received at mount time */
#endif
+ struct backing_dev_info bdi;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ded66be..ad235d6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -103,6 +103,12 @@ cifs_read_super(struct super_block *sb, void *data,
if (cifs_sb == NULL)
return -ENOMEM;
+ rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+ if (rc) {
+ kfree(cifs_sb);
+ return rc;
+ }
+
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
/* BB: should we move this after the mount so we
@@ -115,6 +121,7 @@ cifs_read_super(struct super_block *sb, void *data,
int len = strlen(data);
cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
if (cifs_sb->mountdata == NULL) {
+ bdi_destroy(&cifs_sb->bdi);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
return -ENOMEM;
@@ -135,6 +142,7 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
+ sb->s_bdi = &cifs_sb->bdi;
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize =
cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
@@ -183,6 +191,7 @@ out_mount_failed:
}
#endif
unload_nls(cifs_sb->local_nls);
+ bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb);
}
return rc;
@@ -214,6 +223,7 @@ cifs_put_super(struct super_block *sb)
#endif
unload_nls(cifs_sb->local_nls);
+ bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb);
unlock_kernel();
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index a1695dc..d97f993 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -167,6 +167,10 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return -EBUSY;
}
+ error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY);
+ if (error)
+ goto bdi_err;
+
vc->vc_sb = sb;
sb->s_fs_info = vc;
@@ -175,6 +179,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
sb->s_op = &coda_super_operations;
+ sb->s_bdi = &vc->bdi;
/* get root fid from Venus: this needs the root inode */
error = venus_rootfid(sb, &fid);
@@ -200,6 +205,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return 0;
error:
+ bdi_destroy(&vc->bdi);
+ bdi_err:
if (root)
iput(root);
if (vc)
@@ -210,6 +217,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
static void coda_put_super(struct super_block *sb)
{
+ bdi_destroy(&coda_vcp(sb)->bdi);
coda_vcp(sb)->vc_sb = NULL;
sb->s_fs_info = NULL;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index efb2b94..1cc0876 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -382,8 +382,8 @@ out:
static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
struct ecryptfs_crypt_stat *crypt_stat)
{
- (*offset) = (crypt_stat->num_header_bytes_at_front
- + (crypt_stat->extent_size * extent_num));
+ (*offset) = ecryptfs_lower_header_size(crypt_stat)
+ + (crypt_stat->extent_size * extent_num);
}
/**
@@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
set_extent_mask_and_shift(crypt_stat);
crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- crypt_stat->num_header_bytes_at_front = 0;
+ crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else {
if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
- crypt_stat->num_header_bytes_at_front =
+ crypt_stat->metadata_size =
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
- crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE;
+ crypt_stat->metadata_size = PAGE_CACHE_SIZE;
}
}
@@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written)
(*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
}
-static void
-write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
- size_t *written)
+void ecryptfs_write_crypt_stat_flags(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat,
+ size_t *written)
{
u32 flags = 0;
int i;
@@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt,
header_extent_size = (u32)crypt_stat->extent_size;
num_header_extents_at_front =
- (u16)(crypt_stat->num_header_bytes_at_front
- / crypt_stat->extent_size);
+ (u16)(crypt_stat->metadata_size / crypt_stat->extent_size);
put_unaligned_be32(header_extent_size, virt);
virt += 4;
put_unaligned_be16(num_header_extents_at_front, virt);
@@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
offset = ECRYPTFS_FILE_SIZE_BYTES;
write_ecryptfs_marker((page_virt + offset), &written);
offset += written;
- write_ecryptfs_flags((page_virt + offset), crypt_stat, &written);
+ ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
+ &written);
offset += written;
ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
&written);
@@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = -EINVAL;
goto out;
}
- virt_len = crypt_stat->num_header_bytes_at_front;
+ virt_len = crypt_stat->metadata_size;
order = get_order(virt_len);
/* Released in this function */
virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
@@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
header_extent_size = get_unaligned_be32(virt);
virt += sizeof(__be32);
num_header_extents_at_front = get_unaligned_be16(virt);
- crypt_stat->num_header_bytes_at_front =
- (((size_t)num_header_extents_at_front
- * (size_t)header_extent_size));
+ crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
+ * (size_t)header_extent_size));
(*bytes_read) = (sizeof(__be32) + sizeof(__be16));
if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
- && (crypt_stat->num_header_bytes_at_front
+ && (crypt_stat->metadata_size
< ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
rc = -EINVAL;
printk(KERN_WARNING "Invalid header size: [%zd]\n",
- crypt_stat->num_header_bytes_at_front);
+ crypt_stat->metadata_size);
}
return rc;
}
@@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
*/
static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
{
- crypt_stat->num_header_bytes_at_front =
- ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
+ crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
/**
@@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
ecryptfs_dentry,
ECRYPTFS_VALIDATE_HEADER_SIZE);
if (rc) {
+ memset(page_virt, 0, PAGE_CACHE_SIZE);
rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
if (rc) {
printk(KERN_DEBUG "Valid eCryptfs headers not found in "
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 542f625..bfc2e0f 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -35,6 +35,7 @@
#include <linux/scatterlist.h>
#include <linux/hash.h>
#include <linux/nsproxy.h>
+#include <linux/backing-dev.h>
/* Version verification for shared data structures w/ userspace */
#define ECRYPTFS_VERSION_MAJOR 0x00
@@ -273,7 +274,7 @@ struct ecryptfs_crypt_stat {
u32 flags;
unsigned int file_version;
size_t iv_bytes;
- size_t num_header_bytes_at_front;
+ size_t metadata_size;
size_t extent_size; /* Data extent size; default is 4096 */
size_t key_size;
size_t extent_shift;
@@ -393,6 +394,7 @@ struct ecryptfs_mount_crypt_stat {
struct ecryptfs_sb_info {
struct super_block *wsi_sb;
struct ecryptfs_mount_crypt_stat mount_crypt_stat;
+ struct backing_dev_info bdi;
};
/* file private data. */
@@ -464,6 +466,14 @@ struct ecryptfs_daemon {
extern struct mutex ecryptfs_daemon_hash_mux;
+static inline size_t
+ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
+{
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
+ return 0;
+ return crypt_stat->metadata_size;
+}
+
static inline struct ecryptfs_file_info *
ecryptfs_file_to_private(struct file *file)
{
@@ -651,6 +661,9 @@ int ecryptfs_decrypt_page(struct page *page);
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
+void ecryptfs_write_crypt_stat_flags(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat,
+ size_t *written);
int ecryptfs_read_and_validate_header_region(char *data,
struct inode *ecryptfs_inode);
int ecryptfs_read_and_validate_xattr_region(char *page_virt,
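The ecryptfs rename from num_header_bytes_at_front to metadata_size, together with the new ecryptfs_lower_header_size() helper above, makes the lower-file offset arithmetic explicit: the metadata occupies space at the front of the lower file only when it is stored in the file rather than in an xattr. A worked sketch of the offset formula under that assumption:

#include <stdio.h>
#include <stdlib.h>

#define METADATA_IN_XATTR 0x1

struct crypt_stat {
	unsigned int flags;
	size_t metadata_size;      /* e.g. 8192 */
	size_t extent_size;        /* e.g. 4096 */
};

/* Header bytes actually present at the front of the lower file. */
static size_t lower_header_size(const struct crypt_stat *cs)
{
	return (cs->flags & METADATA_IN_XATTR) ? 0 : cs->metadata_size;
}

/* Offset of data extent N in the lower file. */
static long long lower_offset_for_extent(const struct crypt_stat *cs, long long n)
{
	return (long long)lower_header_size(cs) + (long long)cs->extent_size * n;
}

int main(void)
{
	struct crypt_stat in_file  = { 0,                 8192, 4096 };
	struct crypt_stat in_xattr = { METADATA_IN_XATTR, 8192, 4096 };

	printf("extent 0, header in file : %lld\n", lower_offset_for_extent(&in_file, 0));  /* 8192  */
	printf("extent 2, header in file : %lld\n", lower_offset_for_extent(&in_file, 2));  /* 16384 */
	printf("extent 2, header in xattr: %lld\n", lower_offset_for_extent(&in_xattr, 2)); /* 8192  */
	return 0;
}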
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index d3362fa..e2d4418 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
rc = ecryptfs_read_and_validate_header_region(page_virt,
ecryptfs_dentry->d_inode);
if (rc) {
+ memset(page_virt, 0, PAGE_CACHE_SIZE);
rc = ecryptfs_read_and_validate_xattr_region(page_virt,
ecryptfs_dentry);
if (rc) {
@@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- file_size = (crypt_stat->num_header_bytes_at_front
+ file_size = (crypt_stat->metadata_size
+ i_size_read(lower_dentry->d_inode));
else
file_size = i_size_read(lower_dentry->d_inode);
@@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- printk(KERN_ERR "%s: lookup_one_len() returned [%d] on "
- "lower_dentry = [%s]\n", __func__, rc,
- ecryptfs_dentry->d_name.name);
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ "[%d] on lower_dentry = [%s]\n", __func__, rc,
+ encrypted_and_encoded_name);
goto out_d_drop;
}
if (lower_dentry->d_inode)
@@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- printk(KERN_ERR "%s: lookup_one_len() returned [%d] on "
- "lower_dentry = [%s]\n", __func__, rc,
- encrypted_and_encoded_name);
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ "[%d] on lower_dentry = [%s]\n", __func__, rc,
+ encrypted_and_encoded_name);
goto out_d_drop;
}
lookup_and_interpose:
@@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
if (rc)
goto out_lock;
- fsstack_copy_attr_times(dir, lower_new_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_new_dentry->d_inode);
+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
+ fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
old_dentry->d_inode->i_nlink =
ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink;
i_size_write(new_dentry->d_inode, file_size_save);
@@ -648,38 +649,17 @@ out_lock:
return rc;
}
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+ size_t *bufsiz)
{
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
char *lower_buf;
- size_t lower_bufsiz;
- struct dentry *lower_dentry;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- char *plaintext_name;
- size_t plaintext_name_size;
+ size_t lower_bufsiz = PATH_MAX;
mm_segment_t old_fs;
int rc;
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->readlink) {
- rc = -EINVAL;
- goto out;
- }
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- dentry->d_sb)->mount_crypt_stat;
- /*
- * If the lower filename is encrypted, it will result in a significantly
- * longer name. If needed, truncate the name after decode and decrypt.
- */
- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- lower_bufsiz = PATH_MAX;
- else
- lower_bufsiz = bufsiz;
- /* Released in this function */
lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
- if (lower_buf == NULL) {
- printk(KERN_ERR "%s: Out of memory whilst attempting to "
- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
+ if (!lower_buf) {
rc = -ENOMEM;
goto out;
}
@@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
(char __user *)lower_buf,
lower_bufsiz);
set_fs(old_fs);
- if (rc >= 0) {
- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
- &plaintext_name_size,
- dentry, lower_buf,
- rc);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to decode and "
- "decrypt filename; rc = [%d]\n", __func__,
- rc);
- goto out_free_lower_buf;
- }
- /* Check for bufsiz <= 0 done in sys_readlinkat() */
- rc = copy_to_user(buf, plaintext_name,
- min((size_t) bufsiz, plaintext_name_size));
- if (rc)
- rc = -EFAULT;
- else
- rc = plaintext_name_size;
- kfree(plaintext_name);
- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
- }
-out_free_lower_buf:
+ if (rc < 0)
+ goto out;
+ lower_bufsiz = rc;
+ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
+ lower_buf, lower_bufsiz);
+out:
kfree(lower_buf);
+ return rc;
+}
+
+static int
+ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+ char *kbuf;
+ size_t kbufsiz, copied;
+ int rc;
+
+ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+ if (rc)
+ goto out;
+ copied = min_t(size_t, bufsiz, kbufsiz);
+ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
+ kfree(kbuf);
+ fsstack_copy_attr_atime(dentry->d_inode,
+ ecryptfs_dentry_to_lower(dentry)->d_inode);
out:
return rc;
}
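
The split above leaves ecryptfs_readlink_lower() responsible for fetching the lower target into a kernel buffer, while ecryptfs_readlink() only copies the decrypted result out to userspace. The kernel-buffer fetch relies on the address-limit idiom that is partly visible in the hunk (old_fs/set_fs); a minimal sketch of that idiom, with read_lower_link being a name invented here rather than part of the patch:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Call the lower filesystem's readlink, which expects a __user pointer,
 * on a kernel-allocated buffer by temporarily widening the address limit. */
static int read_lower_link(struct dentry *lower_dentry, char *kbuf, int len)
{
	mm_segment_t old_fs;
	int rc;

	if (!lower_dentry->d_inode->i_op->readlink)
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());		/* "user" accesses may now hit kernel memory */
	rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
						   (char __user *)kbuf, len);
	set_fs(old_fs);			/* always restore the previous limit */
	return rc;			/* < 0 on error, else bytes placed in kbuf */
}
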
@@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
{
loff_t lower_size;
- lower_size = crypt_stat->num_header_bytes_at_front;
+ lower_size = ecryptfs_lower_header_size(crypt_stat);
if (upper_size != 0) {
loff_t num_extents;
@@ -1016,6 +998,28 @@ out:
return rc;
}
+int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ int rc = 0;
+
+ mount_crypt_stat = &ecryptfs_superblock_to_private(
+ dentry->d_sb)->mount_crypt_stat;
+ generic_fillattr(dentry->d_inode, stat);
+ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ char *target;
+ size_t targetsiz;
+
+ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
+ if (!rc) {
+ kfree(target);
+ stat->size = targetsiz;
+ }
+ }
+ return rc;
+}
+
int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->setxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
int rc = 0;
if (!lower_dentry->d_inode->i_op->getxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->listxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->removexattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
.put_link = ecryptfs_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr_link,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index af1a8f0..760983d 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -497,17 +497,25 @@ struct kmem_cache *ecryptfs_sb_info_cache;
static int
ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
{
+ struct ecryptfs_sb_info *esi;
int rc = 0;
/* Released in ecryptfs_put_super() */
ecryptfs_set_superblock_private(sb,
kmem_cache_zalloc(ecryptfs_sb_info_cache,
GFP_KERNEL));
- if (!ecryptfs_superblock_to_private(sb)) {
+ esi = ecryptfs_superblock_to_private(sb);
+ if (!esi) {
ecryptfs_printk(KERN_WARNING, "Out of memory\n");
rc = -ENOMEM;
goto out;
}
+
+ rc = bdi_setup_and_register(&esi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+ if (rc)
+ goto out;
+
+ sb->s_bdi = &esi->bdi;
sb->s_op = &ecryptfs_sops;
/* Released through deactivate_super(sb) from get_sb_nodev */
sb->s_root = d_alloc(NULL, &(const struct qstr) {
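
This hunk is one instance of a pattern that repeats in the exofs, ncpfs and smbfs hunks later in this diff: embed a struct backing_dev_info in the per-mount private data, register it during fill_super, point sb->s_bdi at it, and destroy it again in put_super. A minimal sketch of the pattern, with a hypothetical "examplefs" standing in for the individual filesystems and using only the bdi helpers declared in the backing-dev.h hunk near the end of this diff:

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct examplefs_sb_info {
	struct backing_dev_info bdi;
	/* ... other per-mount state ... */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct examplefs_sb_info *sbi;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Register the private BDI before any writeback can be triggered. */
	err = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
	if (err) {
		kfree(sbi);
		return err;
	}

	sb->s_fs_info = sbi;
	sb->s_bdi = &sbi->bdi;		/* writeback now has a backing device */
	/* ... the rest of the usual mount setup ... */
	return 0;
}

static void examplefs_put_super(struct super_block *sb)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;

	bdi_destroy(&sbi->bdi);		/* tear down in reverse order of setup */
	kfree(sbi);
	sb->s_fs_info = NULL;
}
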
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index d491237..2ee9a3a 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -83,6 +83,19 @@ out:
return rc;
}
+static void strip_xattr_flag(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat)
+{
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
+ size_t written;
+
+ crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
+ ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
+ &written);
+ crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
+ }
+}
+
/**
* Header Extent:
* Octets 0-7: Unencrypted file size (big-endian)
@@ -98,19 +111,6 @@ out:
* (big-endian)
* Octet 26: Begin RFC 2440 authentication token packet set
*/
-static void set_header_info(char *page_virt,
- struct ecryptfs_crypt_stat *crypt_stat)
-{
- size_t written;
- size_t save_num_header_bytes_at_front =
- crypt_stat->num_header_bytes_at_front;
-
- crypt_stat->num_header_bytes_at_front =
- ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
- ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
- crypt_stat->num_header_bytes_at_front =
- save_num_header_bytes_at_front;
-}
/**
* ecryptfs_copy_up_encrypted_with_header
@@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
* num_extents_per_page)
+ extent_num_in_page);
size_t num_header_extents_at_front =
- (crypt_stat->num_header_bytes_at_front
- / crypt_stat->extent_size);
+ (crypt_stat->metadata_size / crypt_stat->extent_size);
if (view_extent_num < num_header_extents_at_front) {
/* This is a header extent */
@@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
memset(page_virt, 0, PAGE_CACHE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
+ size_t written;
+
rc = ecryptfs_read_xattr_region(
page_virt, page->mapping->host);
- set_header_info(page_virt, crypt_stat);
+ strip_xattr_flag(page_virt + 16, crypt_stat);
+ ecryptfs_write_header_metadata(page_virt + 20,
+ crypt_stat,
+ &written);
}
kunmap_atomic(page_virt, KM_USER0);
flush_dcache_page(page);
@@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
/* This is an encrypted data extent */
loff_t lower_offset =
((view_extent_num * crypt_stat->extent_size)
- - crypt_stat->num_header_bytes_at_front);
+ - crypt_stat->metadata_size);
rc = ecryptfs_read_lower_page_segment(
page, (lower_offset >> PAGE_CACHE_SHIFT),
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index fcef41c..0c0ae49 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
if (lower_dentry->d_inode) {
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
- d_drop(lower_dentry);
}
}
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
@@ -123,6 +122,7 @@ static void ecryptfs_put_super(struct super_block *sb)
lock_kernel();
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
+ bdi_destroy(&sb_info->bdi);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
ecryptfs_set_superblock_private(sb, NULL);
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 8442e35..5437327 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -35,6 +35,7 @@
#include <linux/fs.h>
#include <linux/time.h>
+#include <linux/backing-dev.h>
#include "common.h"
/* FIXME: Remove once pnfs hits mainline
@@ -92,6 +93,7 @@ struct exofs_sb_info {
struct exofs_layout layout; /* Default files layout,
* contains the variable osd_dev
* array. Keep last */
+ struct backing_dev_info bdi;
struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 18e57ea..03149b9a 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -302,6 +302,7 @@ static void exofs_put_super(struct super_block *sb)
_exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
sbi->layout.s_pid);
+ bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
sb->s_fs_info = NULL;
}
@@ -546,6 +547,10 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
return -ENOMEM;
+ ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+ if (ret)
+ goto free_bdi;
+
/* use mount options to fill superblock */
od = osduld_path_lookup(opts->dev_name);
if (IS_ERR(od)) {
@@ -612,6 +617,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
}
/* set up operation vectors */
+ sb->s_bdi = &sbi->bdi;
sb->s_fs_info = sbi;
sb->s_op = &exofs_sops;
sb->s_export_op = &exofs_export_ops;
@@ -643,6 +649,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
free_sbi:
+ bdi_destroy(&sbi->bdi);
+free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
opts->dev_name, sbi->layout.s_pid, ret);
exofs_free_sbi(sbi);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 94c8ee8..236b834 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3879,6 +3879,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
} else { /* external block */
physical = EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5381802..81d6054 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5375,7 +5375,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
} else {
struct ext4_iloc iloc;
- err = ext4_get_inode_loc(inode, &iloc);
+ err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -5386,6 +5386,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
(unsigned long long)iloc.bh->b_blocknr);
err = -EIO;
}
+ brelse(iloc.bh);
}
return err;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bde9d0b..b423a36 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2535,6 +2535,17 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
+ if (test_opt(sb, DISCARD)) {
+ ext4_fsblk_t discard_block;
+
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long)discard_block,
+ entry->count);
+ sb_issue_discard(sb, discard_block, entry->count);
+ }
+
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
@@ -2556,16 +2567,6 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
-
- discard_block = entry->start_blk +
- ext4_group_first_block_no(sb, entry->group);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
kmem_cache_free(ext4_free_ext_cachep, entry);
ext4_mb_release_desc(&e4b);
}
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 6c75110..7faefb4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -228,14 +228,23 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
#ifdef CONFIG_BLOCK
-#define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits)
-#define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits);
+static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
+{
+ return (offset >> inode->i_blkbits);
+}
+
+static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
+{
+ return (blk << inode->i_blkbits);
+}
/**
* __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode - the inode to map
- * @arg - the pointer to userspace where we copy everything to
- * @get_block - the fs's get_block function
+ * @inode: the inode to map
+ * @fieinfo: the fiemap info struct that will be passed back to userspace
+ * @start: where to start mapping in the inode
+ * @len: how much space to map
+ * @get_block: the fs's get_block function
*
* This does FIEMAP for block based inodes. Basically it will just loop
* through get_block until we hit the number of extents we want to map, or we
@@ -250,58 +259,63 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
*/
int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block)
+ struct fiemap_extent_info *fieinfo, loff_t start,
+ loff_t len, get_block_t *get_block)
{
- struct buffer_head tmp;
- unsigned long long start_blk;
- long long length = 0, map_len = 0;
+ struct buffer_head map_bh;
+ sector_t start_blk, last_blk;
+ loff_t isize = i_size_read(inode);
u64 logical = 0, phys = 0, size = 0;
u32 flags = FIEMAP_EXTENT_MERGED;
- int ret = 0, past_eof = 0, whole_file = 0;
+ bool past_eof = false, whole_file = false;
+ int ret = 0;
- if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (ret)
return ret;
- start_blk = logical_to_blk(inode, start);
-
- length = (long long)min_t(u64, len, i_size_read(inode));
- if (length < len)
- whole_file = 1;
+ /*
+ * Either the i_mutex or other appropriate locking needs to be held
+ * since we expect isize to not change at all through the duration of
+ * this call.
+ */
+ if (len >= isize) {
+ whole_file = true;
+ len = isize;
+ }
- map_len = length;
+ start_blk = logical_to_blk(inode, start);
+ last_blk = logical_to_blk(inode, start + len - 1);
do {
/*
* we set b_size to the total size we want so it will map as
* many contiguous blocks as possible at once
*/
- memset(&tmp, 0, sizeof(struct buffer_head));
- tmp.b_size = map_len;
+ memset(&map_bh, 0, sizeof(struct buffer_head));
+ map_bh.b_size = len;
- ret = get_block(inode, start_blk, &tmp, 0);
+ ret = get_block(inode, start_blk, &map_bh, 0);
if (ret)
break;
/* HOLE */
- if (!buffer_mapped(&tmp)) {
- length -= blk_to_logical(inode, 1);
+ if (!buffer_mapped(&map_bh)) {
start_blk++;
/*
- * we want to handle the case where there is an
+ * We want to handle the case where there is an
* allocated block at the front of the file, and then
* nothing but holes up to the end of the file properly,
* to make sure that extent at the front gets properly
* marked with FIEMAP_EXTENT_LAST
*/
if (!past_eof &&
- blk_to_logical(inode, start_blk) >=
- blk_to_logical(inode, 0)+i_size_read(inode))
+ blk_to_logical(inode, start_blk) >= isize)
past_eof = 1;
/*
- * first hole after going past the EOF, this is our
+ * First hole after going past the EOF, this is our
* last extent
*/
if (past_eof && size) {
@@ -309,15 +323,18 @@ int __generic_block_fiemap(struct inode *inode,
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
- break;
+ } else if (size) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ size = 0;
}
/* if we have holes up to/past EOF then we're done */
- if (length <= 0 || past_eof)
+ if (start_blk > last_blk || past_eof || ret)
break;
} else {
/*
- * we have gone over the length of what we wanted to
+ * We have gone over the length of what we wanted to
* map, and it wasn't the entire file, so add the extent
* we got last time and exit.
*
@@ -331,7 +348,7 @@ int __generic_block_fiemap(struct inode *inode,
* are good to go, just add the extent to the fieinfo
* and break
*/
- if (length <= 0 && !whole_file) {
+ if (start_blk > last_blk && !whole_file) {
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
@@ -351,11 +368,10 @@ int __generic_block_fiemap(struct inode *inode,
}
logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, tmp.b_blocknr);
- size = tmp.b_size;
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
flags = FIEMAP_EXTENT_MERGED;
- length -= tmp.b_size;
start_blk += logical_to_blk(inode, size);
/*
@@ -363,15 +379,13 @@ int __generic_block_fiemap(struct inode *inode,
* soon as we find a hole that the last extent we found
* is marked with FIEMAP_EXTENT_LAST
*/
- if (!past_eof &&
- logical+size >=
- blk_to_logical(inode, 0)+i_size_read(inode))
- past_eof = 1;
+ if (!past_eof && logical + size >= isize)
+ past_eof = true;
}
cond_resched();
} while (1);
- /* if ret is 1 then we just hit the end of the extent array */
+ /* If ret is 1 then we just hit the end of the extent array */
if (ret == 1)
ret = 0;
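
Stripped of the extent reporting and the end-of-file bookkeeping, the rewritten __generic_block_fiemap() is a walk over block numbers driven by the filesystem's get_block callback, with buffer_head.b_size used both to request and to report the length of a contiguous mapping. A bare-bones sketch of just that walk (walk_extents is a made-up name; fiemap_fill_next_extent, the FIEMAP_EXTENT_LAST logic and the delalloc cases are deliberately left out):

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/string.h>

static int walk_extents(struct inode *inode, loff_t start, loff_t len,
			get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t blk = start >> inode->i_blkbits;
	sector_t last = (start + len - 1) >> inode->i_blkbits;

	while (blk <= last) {
		memset(&map_bh, 0, sizeof(map_bh));
		map_bh.b_size = len;	/* ask for as large a mapping as possible */
		if (get_block(inode, blk, &map_bh, 0))
			break;

		if (!buffer_mapped(&map_bh)) {
			blk++;		/* hole: advance one block and retry */
			continue;
		}

		/* mapped run: map_bh.b_blocknr and map_bh.b_size describe it */
		blk += map_bh.b_size >> inode->i_blkbits;
	}
	return 0;
}

The real function additionally remembers the previous mapping so that, once a hole past EOF is seen, the last extent can be reported with FIEMAP_EXTENT_LAST.
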
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9dd1262..ed9ba6f 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &jfs_aops;
} else {
- inode->i_op = &jfs_symlink_inode_operations;
+ inode->i_op = &jfs_fast_symlink_inode_operations;
/*
* The inline data should be null-terminated, but
* don't let on-disk corruption crash the kernel
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 6c4dfcbf..9e2f6a7 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap)
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
- bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
@@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap)
dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
- dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
+ dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
@@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
* tree index of this allocation group within the control page.
*/
agperlev =
- (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
+ (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
/* dmap control page trees fan-out by 4 and a single allocation
@@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
* the subtree to find the leftmost leaf that describes this
* free space.
*/
- for (k = bmp->db_agheigth; k > 0; k--) {
+ for (k = bmp->db_agheight; k > 0; k--) {
for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
if (l2nb <= dcp->stree[m + n]) {
ti = m + n;
@@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
}
/*
- * compute db_aglevel, db_agheigth, db_width, db_agstart:
+ * compute db_aglevel, db_agheight, db_width, db_agstart:
* an ag is covered in aglevel dmapctl summary tree,
* at agheight level height (from leaf) with agwidth number of nodes
* each, which starts at agstart index node of the summary tree node
@@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap)
bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
l2nl =
bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
- bmp->db_agheigth = l2nl >> 1;
- bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
- for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
+ bmp->db_agheight = l2nl >> 1;
+ bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
+ for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
i--) {
bmp->db_agstart += n;
n <<= 2;
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 1a6eb41..6dcb906 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -210,7 +210,7 @@ struct dbmap_disk {
__le32 dn_maxag; /* 4: max active alloc group number */
__le32 dn_agpref; /* 4: preferred alloc group (hint) */
__le32 dn_aglevel; /* 4: dmapctl level holding the AG */
- __le32 dn_agheigth; /* 4: height in dmapctl of the AG */
+ __le32 dn_agheight; /* 4: height in dmapctl of the AG */
__le32 dn_agwidth; /* 4: width in dmapctl of the AG */
__le32 dn_agstart; /* 4: start tree index at AG height */
__le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
@@ -229,7 +229,7 @@ struct dbmap {
int dn_maxag; /* max active alloc group number */
int dn_agpref; /* preferred alloc group (hint) */
int dn_aglevel; /* dmapctl level holding the AG */
- int dn_agheigth; /* height in dmapctl of the AG */
+ int dn_agheight; /* height in dmapctl of the AG */
int dn_agwidth; /* width in dmapctl of the AG */
int dn_agstart; /* start tree index at AG height */
int dn_agl2size; /* l2 num of blks per alloc group */
@@ -255,7 +255,7 @@ struct bmap {
#define db_agsize db_bmap.dn_agsize
#define db_agl2size db_bmap.dn_agl2size
#define db_agwidth db_bmap.dn_agwidth
-#define db_agheigth db_bmap.dn_agheigth
+#define db_agheight db_bmap.dn_agheight
#define db_agstart db_bmap.dn_agstart
#define db_numag db_bmap.dn_numag
#define db_maxlevel db_bmap.dn_maxlevel
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 79e2c79..9e6bda3 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations;
extern const struct inode_operations jfs_file_inode_operations;
extern const struct file_operations jfs_file_operations;
extern const struct inode_operations jfs_symlink_inode_operations;
+extern const struct inode_operations jfs_fast_symlink_inode_operations;
extern const struct dentry_operations jfs_ci_dentry_operations;
#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4a3e9f3..a9cf8e8 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
*/
if (ssize <= IDATASIZE) {
- ip->i_op = &jfs_symlink_inode_operations;
+ ip->i_op = &jfs_fast_symlink_inode_operations;
i_fastsymlink = JFS_IP(ip)->i_inline;
memcpy(i_fastsymlink, name, ssize);
@@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
else {
jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
- ip->i_op = &page_symlink_inode_operations;
+ ip->i_op = &jfs_symlink_inode_operations;
ip->i_mapping->a_ops = &jfs_aops;
/*
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 7f24a0b..1aba003 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
uint old_agsize;
+ int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
+
+ agsizechanged |= (bmp->db_agsize != old_agsize);
+
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
* will correctly identify the new ag);
*/
/* if new AG size the same as old AG size, done! */
- if (bmp->db_agsize != old_agsize) {
+ if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 4af1a05..205b946 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
return NULL;
}
-const struct inode_operations jfs_symlink_inode_operations = {
+const struct inode_operations jfs_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = jfs_follow_link,
+ .setattr = jfs_setattr,
+ .setxattr = jfs_setxattr,
+ .getxattr = jfs_getxattr,
+ .listxattr = jfs_listxattr,
+ .removexattr = jfs_removexattr,
+};
+
+const struct inode_operations jfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = jfs_setattr,
.setxattr = jfs_setxattr,
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
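
Taken together, the jfs hunks split symlinks into two kinds: targets short enough to be stored inline in the inode are served by the new jfs_fast_symlink_inode_operations, while longer targets live in an extent and go through the page cache via jfs_symlink_inode_operations. The decision in jfs_symlink(), reduced to its core (choose_symlink_ops is an illustrative name, not a function in the patch; it assumes jfs's own headers for IDATASIZE, JFS_IP() and jfs_aops):

static void choose_symlink_ops(struct inode *ip, const char *name, int ssize)
{
	if (ssize <= IDATASIZE) {
		/* short target: copy it into the inode's inline data area */
		ip->i_op = &jfs_fast_symlink_inode_operations;
		memcpy(JFS_IP(ip)->i_inline, name, ssize);
	} else {
		/* long target: external block, read back through the page cache */
		ip->i_op = &jfs_symlink_inode_operations;
		ip->i_mapping->a_ops = &jfs_aops;
	}
}
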
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 84e36f5..76c242f 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
struct logfs_block *block;
int round, progress, last_progress = 0;
+ /*
+ * Doing too many changes to the segfile at once would result
+ * in a large number of aliases. Write the journal before
+ * things get out of hand.
+ */
+ if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
+ logfs_write_anchor(sb);
+
if (no_free_segments(sb) >= target &&
super->s_no_object_aliases < MAX_OBJ_ALIASES)
return;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 33bd260..fb0a613 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area)
static int journal_erase_segment(struct logfs_area *area)
{
struct super_block *sb = area->a_sb;
- struct logfs_segment_header sh;
+ union {
+ struct logfs_segment_header sh;
+ unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
+ } u;
u64 ofs;
int err;
@@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area)
if (err)
return err;
- sh.pad = 0;
- sh.type = SEG_JOURNAL;
- sh.level = 0;
- sh.segno = cpu_to_be32(area->a_segno);
- sh.ec = cpu_to_be32(area->a_erase_count);
- sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
- sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
+ memset(&u, 0, sizeof(u));
+ u.sh.pad = 0;
+ u.sh.type = SEG_JOURNAL;
+ u.sh.level = 0;
+ u.sh.segno = cpu_to_be32(area->a_segno);
+ u.sh.ec = cpu_to_be32(area->a_erase_count);
+ u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
+ u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);
/* This causes a bug in segment.c. Not yet. */
//logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
ofs = dev_ofs(sb, area->a_segno, 0);
- area->a_used_bytes = ALIGN(sizeof(sh), 16);
- logfs_buf_write(area, ofs, &sh, sizeof(sh));
+ area->a_used_bytes = sizeof(u);
+ logfs_buf_write(area, ofs, &u, sizeof(u));
return 0;
}
@@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb)
btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
+ btree_grim_visitor32(&tree->segment_map, 0, NULL);
+ tree->no_shadowed_segments = 0;
if (li->li_block) {
/*
@@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
if (len == 0)
return logfs_write_header(super, header, 0, type);
+ BUG_ON(len > sb->s_blocksize);
compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
if (compr_len < 0 || type == JE_ANCHOR) {
- BUG_ON(len > sb->s_blocksize);
memcpy(data, buf, len);
compr_len = len;
compr = COMPR_NONE;
@@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
if (ofs < 0)
return ofs;
logfs_buf_write(area, ofs, super->s_compressed_je, len);
+ BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
return 0;
}
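
The journal_erase_segment() change above wraps the segment header in a union whose second member pads it out to a 16-byte multiple, and zeroes the whole union before filling in the fields, so the padding written to the device is well defined rather than stack garbage. The idiom in isolation (struct small_header and fill_padded_header are invented for the sketch):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/string.h>
#include <linux/types.h>

struct small_header {
	u8	type;
	__be32	crc;
};

static void fill_padded_header(void *dst, u8 type, __be32 crc)
{
	union {
		struct small_header h;
		unsigned char pad[ALIGN(sizeof(struct small_header), 16)];
	} u;

	memset(&u, 0, sizeof(u));	/* padding bytes become zeroes */
	u.h.type = type;
	u.h.crc = crc;
	memcpy(dst, &u, sizeof(u));	/* always writes a 16-byte multiple */
}
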
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index b84b0ee..0a3df1a 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -257,10 +257,14 @@ struct logfs_shadow {
* struct shadow_tree
* @new: shadows where old_ofs==0, indexed by new_ofs
* @old: shadows where old_ofs!=0, indexed by old_ofs
+ * @segment_map: bitfield of segments containing shadows
+ * @no_shadowed_segments: number of segments containing shadows
*/
struct shadow_tree {
struct btree_head64 new;
struct btree_head64 old;
+ struct btree_head32 segment_map;
+ int no_shadowed_segments;
};
struct object_alias_item {
@@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix,
level_t level, int child_no, __be64 val);
struct logfs_block_ops {
void (*write_block)(struct logfs_block *block);
- gc_level_t (*block_level)(struct logfs_block *block);
void (*free_block)(struct super_block *sb, struct logfs_block*block);
int (*write_alias)(struct super_block *sb,
struct logfs_block *block,
write_alias_t *write_one_alias);
};
+#define MAX_JOURNAL_ENTRIES 256
+
struct logfs_super {
struct mtd_info *s_mtd; /* underlying device */
struct block_device *s_bdev; /* underlying device */
@@ -378,7 +383,7 @@ struct logfs_super {
u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
u64 s_last_version;
struct logfs_area *s_journal_area; /* open journal segment */
- __be64 s_je_array[64];
+ __be64 s_je_array[MAX_JOURNAL_ENTRIES];
int s_no_je;
int s_sum_index; /* for the 12 summaries */
@@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb,
return logfs_super(sb)->s_area[(__force u8)gc_level];
}
+static inline void logfs_mempool_destroy(mempool_t *pool)
+{
+ if (pool)
+ mempool_destroy(pool);
+}
+
#endif
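
logfs_mempool_destroy() exists so that teardown paths can run unconditionally even when only some of the pools were ever created; mempool_destroy() at this point does not accept a NULL pool, so the check has to live in the caller. A hypothetical init error path showing the kind of caller the helper simplifies (pool counts and sizes are arbitrary):

static int example_init_pools(struct logfs_super *super)
{
	super->s_block_pool = mempool_create_kmalloc_pool(64, 128);
	super->s_shadow_pool = mempool_create_kmalloc_pool(64, 128);
	if (!super->s_block_pool || !super->s_shadow_pool) {
		/* either pointer may still be NULL; the helper copes with that */
		logfs_mempool_destroy(super->s_block_pool);
		logfs_mempool_destroy(super->s_shadow_pool);
		return -ENOMEM;
	}
	return 0;
}
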
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index bff4025..3159db6 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block)
}
}
-static gc_level_t inode_block_level(struct logfs_block *block)
-{
- BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
- return GC_LEVEL(LOGFS_MAX_LEVELS);
-}
-
-static gc_level_t indirect_block_level(struct logfs_block *block)
-{
- struct page *page;
- struct inode *inode;
- u64 bix;
- level_t level;
-
- page = block->page;
- inode = page->mapping->host;
- logfs_unpack_index(page->index, &bix, &level);
- return expand_level(inode->i_ino, level);
-}
-
/*
* This silences a false, yet annoying gcc warning. I hate it when my editor
* jumps into bitops.h each time I recompile this file.
@@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb,
static struct logfs_block_ops inode_block_ops = {
.write_block = inode_write_block,
- .block_level = inode_block_level,
.free_block = inode_free_block,
.write_alias = inode_write_alias,
};
struct logfs_block_ops indirect_block_ops = {
.write_block = indirect_write_block,
- .block_level = indirect_block_level,
.free_block = indirect_free_block,
.write_alias = indirect_write_alias,
};
@@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
mempool_free(shadow, super->s_shadow_pool);
}
+static void mark_segment(struct shadow_tree *tree, u32 segno)
+{
+ int err;
+
+ if (!btree_lookup32(&tree->segment_map, segno)) {
+ err = btree_insert32(&tree->segment_map, segno, (void *)1,
+ GFP_NOFS);
+ BUG_ON(err);
+ tree->no_shadowed_segments++;
+ }
+}
+
/**
* fill_shadow_tree - Propagate shadow tree changes due to a write
* @inode: Inode owning the page
@@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
super->s_dirty_used_bytes += shadow->new_len;
super->s_dirty_free_bytes += shadow->old_len;
+ mark_segment(tree, shadow->old_ofs >> super->s_segshift);
+ mark_segment(tree, shadow->new_ofs >> super->s_segshift);
}
}
@@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size)
return logfs_truncate_direct(inode, size);
}
-int logfs_truncate(struct inode *inode, u64 size)
+/*
+ * Truncate, by changing the segment file, can consume a fair amount
+ * of resources. So back off from time to time and do some GC.
+ * 8 or 2048 blocks should be well within safety limits even if
+ * every single block resided in a different segment.
+ */
+#define TRUNCATE_STEP (8 * 1024 * 1024)
+int logfs_truncate(struct inode *inode, u64 target)
{
struct super_block *sb = inode->i_sb;
- int err;
+ u64 size = i_size_read(inode);
+ int err = 0;
- logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, size);
- if (!err)
- err = __logfs_write_inode(inode, 0);
- logfs_put_wblocks(sb, NULL, 1);
+ size = ALIGN(size, TRUNCATE_STEP);
+ while (size > target) {
+ if (size > TRUNCATE_STEP)
+ size -= TRUNCATE_STEP;
+ else
+ size = 0;
+ if (size < target)
+ size = target;
+
+ logfs_get_wblocks(sb, NULL, 1);
+ err = __logfs_truncate(inode, target);
+ if (!err)
+ err = __logfs_write_inode(inode, 0);
+ logfs_put_wblocks(sb, NULL, 1);
+ }
if (!err)
- err = vmtruncate(inode, size);
+ err = vmtruncate(inode, target);
/* I don't trust error recovery yet. */
WARN_ON(err);
@@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb)
struct logfs_super *super = logfs_super(sb);
destroy_meta_inode(super->s_segfile_inode);
- if (super->s_block_pool)
- mempool_destroy(super->s_block_pool);
- if (super->s_shadow_pool)
- mempool_destroy(super->s_shadow_pool);
+ logfs_mempool_destroy(super->s_block_pool);
+ logfs_mempool_destroy(super->s_shadow_pool);
}
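
For a sense of scale, the step size chosen for the loop above works out as follows, assuming the usual 4 KiB block size:

	TRUNCATE_STEP = 8 * 1024 * 1024 bytes = 8 MiB per iteration
	8 MiB / 4 KiB per block = 2048 blocks per iteration at most

so the write lock is dropped, and GC gets a chance to run, after at most a couple of thousand blocks, which appears to be what the "8 or 2048 blocks" remark in the comment is getting at.
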
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 801a3a1..f77ce2b 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
return 0;
}
-static gc_level_t btree_block_level(struct logfs_block *block)
-{
- return expand_level(block->ino, block->level);
-}
-
static struct logfs_block_ops btree_block_ops = {
.write_block = btree_write_block,
- .block_level = btree_block_level,
.free_block = __free_block,
.write_alias = btree_write_alias,
};
@@ -919,7 +913,7 @@ err:
for (i--; i >= 0; i--)
free_area(super->s_area[i]);
free_area(super->s_journal_area);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
return -ENOMEM;
}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index b60bfac..5866ee6 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -12,6 +12,7 @@
#include "logfs.h"
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
@@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super)
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+ if (sb->s_bdev)
+ sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ if (sb->s_mtd)
+ sb->s_bdi = sb->s_mtd->backing_dev_info;
return 0;
}
@@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
+ btree_init_mempool32(&super->s_shadow_tree.segment_map,
+ super->s_btree_pool);
ret = logfs_init_mapping(sb);
if (ret)
@@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb)
if (super->s_erase_page)
__free_page(super->s_erase_page);
super->s_devops->put_device(sb);
- mempool_destroy(super->s_btree_pool);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_btree_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
kfree(super);
log_super("LogFS: Finished unmounting\n");
}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index cf98da1..fa33851 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -526,10 +526,15 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = NCP_SUPER_MAGIC;
sb->s_op = &ncp_sops;
+ sb->s_bdi = &server->bdi;
server = NCP_SBP(sb);
memset(server, 0, sizeof(*server));
+ error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
+ if (error)
+ goto out_bdi;
+
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
@@ -719,6 +724,8 @@ out_fput2:
if (server->info_filp)
fput(server->info_filp);
out_fput:
+ bdi_destroy(&server->bdi);
+out_bdi:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogus, since
@@ -756,6 +763,7 @@ static void ncp_put_super(struct super_block *sb)
kill_pid(server->m.wdog_pid, SIGTERM, 1);
put_pid(server->m.wdog_pid);
+ bdi_destroy(&server->bdi);
kfree(server->priv.data);
kfree(server->auth.object_name);
vfree(server->rxbuf);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2a3d352..a8766c4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
+ NFS_CAP_POSIX_LOCK;
server->options = data->options;
/* Get a client record */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c6f2750..db3ad84 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -837,6 +837,8 @@ out_zap_parent:
/* If we have submounts, don't unhash ! */
if (have_submounts(dentry))
goto out_valid;
+ if (dentry->d_flags & DCACHE_DISCONNECTED)
+ goto out_valid;
shrink_dcache_parent(dentry);
}
d_drop(dentry);
@@ -1025,12 +1027,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 737128f..50a56ed 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
list_for_each_entry(pos, &nfsi->open_files, list) {
if (cred != NULL && pos->cred != cred)
continue;
- if ((pos->mode & mode) == mode) {
- ctx = get_nfs_open_context(pos);
- break;
- }
+ if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
+ continue;
+ ctx = get_nfs_open_context(pos);
+ break;
}
spin_unlock(&inode->i_lock);
return ctx;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fe0cd9e..6380670 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
+ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+ server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53ff70e..de38d63 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
+ page_cache_get(page);
if (atomic_long_inc_return(&nfss->writeback) >
NFS_CONGESTION_ON_THRESH) {
set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
+ page_cache_release(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
@@ -421,6 +423,7 @@ static void
nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
+ __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
req = nfs_setup_write_request(ctx, page, offset, count);
if (IS_ERR(req))
return PTR_ERR(req);
+ nfs_mark_request_dirty(req);
/* Update file length */
nfs_grow_file(page, offset, count);
nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+ nfs_mark_request_dirty(req);
nfs_clear_page_tag_locked(req);
return 0;
}
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
nfs_set_pageerror(page);
- else
- __set_page_dirty_nobuffers(page);
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
static void nfs_writepage_release(struct nfs_page *req)
{
+ struct page *page = req->wb_page;
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
- nfs_end_page_writeback(req->wb_page);
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
nfs_inode_remove_request(req);
- } else
- nfs_end_page_writeback(req->wb_page);
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
static int flush_task_priority(int how)
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
int how)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
- int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = flags,
+ .flags = RPC_TASK_ASYNC,
.priority = priority,
};
+ int ret = 0;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ if (how & FLUSH_SYNC) {
+ ret = rpc_wait_for_completion_task(task);
+ if (ret == 0)
+ ret = task->tk_status;
+ }
rpc_put_task(task);
- return 0;
+out:
+ return ret;
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
+ struct page *page = req->wb_page;
+
nfs_mark_request_dirty(req);
- nfs_end_page_writeback(req->wb_page);
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
/*
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
if (nfs_write_need_commit(data)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
nfs_mark_request_commit(req);
- nfs_end_page_writeback(page);
dprintk(" marked for commit\n");
goto next;
}
dprintk(" OK\n");
remove_request:
- nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
next:
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
nfs_writedata_release(calldata);
}
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
{
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->path.dentry->d_inode;
- int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
.callback_ops = &nfs_commit_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = flags,
+ .flags = RPC_TASK_ASYNC,
.priority = priority,
};
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
+ if (how & FLUSH_SYNC)
+ rpc_wait_for_completion_task(task);
rpc_put_task(task);
return 0;
}
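
Both nfs_write_rpcsetup() and nfs_commit_rpcsetup() now always start the RPC asynchronously and, when FLUSH_SYNC is requested, wait for completion explicitly instead of passing a synchronous flag down to the RPC layer. The shape of that pattern, pulled out into a standalone helper (run_write_rpc is not a function in the patch; only the rpc_* calls it makes appear in the hunks above):

#include <linux/err.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/sched.h>

static int run_write_rpc(struct rpc_task_setup *task_setup_data, int how)
{
	struct rpc_task *task;
	int ret = 0;

	task_setup_data->flags = RPC_TASK_ASYNC;	/* never block inside setup */
	task = rpc_run_task(task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);

	if (how & FLUSH_SYNC) {
		/* synchronous semantics: wait here, then pick up the status */
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
	return ret;
}
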
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index e170317..34ccf81 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -161,10 +161,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
@@ -1426,10 +1426,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7621db8..8418fcc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2909,7 +2909,7 @@ out_no_task:
*/
static const struct pid_entry tid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
- DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
+ DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index dad7fb2..3e21b1e 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING
Note that this behavior is currently deprecated and may go away in
future. Please use notification via netlink socket instead.
+config QUOTA_DEBUG
+ bool "Additional quota sanity checks"
+ depends on QUOTA
+ default n
+ help
+ If you say Y here, the quota subsystem will perform some additional
+ sanity checks of quota internal structures. If unsure, say N.
+
# Generic support for tree structured quota files. Selected when needed.
config QUOTA_TREE
tristate
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a0a9405..788b580 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -80,8 +80,6 @@
#include <asm/uaccess.h>
-#define __DQUOT_PARANOIA
-
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
* and quota formats, dqstats structure containing statistics about the lists
@@ -695,7 +693,7 @@ void dqput(struct dquot *dquot)
if (!dquot)
return;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (!atomic_read(&dquot->dq_count)) {
printk("VFS: dqput: trying to free free dquot\n");
printk("VFS: device %s, dquot of %s %d\n",
@@ -748,7 +746,7 @@ we_slept:
goto we_slept;
}
atomic_dec(&dquot->dq_count);
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
/* sanity check */
BUG_ON(!list_empty(&dquot->dq_free));
#endif
@@ -845,7 +843,7 @@ we_slept:
dquot = NULL;
goto out;
}
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
out:
@@ -874,7 +872,7 @@ static int dqinit_needed(struct inode *inode, int type)
static void add_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode, *old_inode = NULL;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
int reserved = 0;
#endif
@@ -882,7 +880,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
continue;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
@@ -907,7 +905,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_unlock(&inode_lock);
iput(old_inode);
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened before quota"
" was turned on thus quota information is probably "
@@ -940,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
inode->i_dquot[type] = NULL;
if (dquot) {
if (dqput_blocks(dquot)) {
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (atomic_read(&dquot->dq_count) != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index f8a6075a..0793044 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -46,8 +46,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
struct reiserfs_de_head *deh)
{
struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
- if (reiserfs_expose_privroot(dir->d_sb))
- return 0;
return (dir == dir->d_parent && privroot->d_inode &&
deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 4f9586b..e7cc00e 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -554,7 +554,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (!err && new_size < i_size_read(dentry->d_inode)) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
- .ia_size = buffer_size,
+ .ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
@@ -973,21 +973,13 @@ int reiserfs_permission(struct inode *inode, int mask)
return generic_permission(inode, mask, NULL);
}
-/* This will catch lookups from the fs root to .reiserfs_priv */
-static int
-xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
+static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
- if (container_of(q1, struct dentry, d_name) == priv_root)
- return -ENOENT;
- if (q1->len == name->len &&
- !memcmp(q1->name, name->name, name->len))
- return 0;
- return 1;
+ return -EPERM;
}
static const struct dentry_operations xattr_lookup_poison_ops = {
- .d_compare = xattr_lookup_poison,
+ .d_revalidate = xattr_hide_revalidate,
};
int reiserfs_lookup_privroot(struct super_block *s)
@@ -1001,8 +993,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
- if (!reiserfs_expose_privroot(s))
- s->s_root->d_op = &xattr_lookup_poison_ops;
+ dentry->d_op = &xattr_lookup_poison_ops;
if (dentry->d_inode)
dentry->d_inode->i_flags |= S_PRIVATE;
} else
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 1c4c8f0..dfa1d67 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -479,6 +479,7 @@ smb_put_super(struct super_block *sb)
if (server->conn_pid)
kill_pid(server->conn_pid, SIGTERM, 1);
+ bdi_destroy(&server->bdi);
kfree(server->ops);
smb_unload_nls(server);
sb->s_fs_info = NULL;
@@ -525,6 +526,11 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
if (!server)
goto out_no_server;
sb->s_fs_info = server;
+
+ if (bdi_setup_and_register(&server->bdi, "smbfs", BDI_CAP_MAP_COPY))
+ goto out_bdi;
+
+ sb->s_bdi = &server->bdi;
server->super_block = sb;
server->mnt = NULL;
@@ -624,6 +630,8 @@ out_no_smbiod:
out_bad_option:
kfree(mem);
out_no_mem:
+ bdi_destroy(&server->bdi);
+out_bdi:
if (!server->mnt)
printk(KERN_ERR "smb_fill_super: allocation failure\n");
sb->s_fs_info = NULL;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 1cb0d81..653c030 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -87,9 +87,8 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
u64 cur_index = index >> msblk->devblksize_log2;
int bytes, compressed, b = 0, k = 0, page = 0, avail;
-
- bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1,
- sizeof(*bh), GFP_KERNEL);
+ bh = kcalloc(((srclength + msblk->devblksize - 1)
+ >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
if (bh == NULL)
return -ENOMEM;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 3550aec..48b6f4a 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -275,7 +275,8 @@ allocate_root:
err = squashfs_read_inode(root, root_inode);
if (err) {
- iget_failed(root);
+ make_bad_inode(root);
+ iput(root);
goto failed_mount;
}
insert_inode_hash(root);
@@ -353,6 +354,7 @@ static void squashfs_put_super(struct super_block *sb)
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
+ kfree(sbi->inode_lookup_table);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 15a03d0..7a60387 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -128,8 +128,9 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
goto release_mutex;
}
+ length = stream->total_out;
mutex_unlock(&msblk->read_data_mutex);
- return stream->total_out;
+ return length;
release_mutex:
mutex_unlock(&msblk->read_data_mutex);
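
The zlib_uncompress() fix is a small instance of a general rule: state protected by a lock must be copied into a local while the lock is still held if it is going to be used after the unlock. In generic form (locked_result and its arguments are invented for the sketch):

#include <linux/mutex.h>

static int locked_result(struct mutex *lock, int *shared_value)
{
	int ret;

	mutex_lock(lock);
	/* ... work that may update *shared_value ... */
	ret = *shared_value;	/* snapshot while still holding the lock */
	mutex_unlock(lock);

	return ret;		/* no further access to the shared state */
}
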
diff --git a/fs/super.c b/fs/super.c
index f35ac60..dc72491 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -693,6 +693,7 @@ int set_anon_super(struct super_block *s, void *data)
return -EMFILE;
}
s->s_dev = MKDEV(0, dev & MINORMASK);
+ s->s_bdi = &noop_backing_dev_info;
return 0;
}
@@ -954,10 +955,11 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (error < 0)
goto out_free_secdata;
BUG_ON(!mnt->mnt_sb);
+ WARN_ON(!mnt->mnt_sb->s_bdi);
- error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
- if (error)
- goto out_sb;
+ error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
+ if (error)
+ goto out_sb;
/*
* filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
diff --git a/fs/sync.c b/fs/sync.c
index fc5c3d7..92b2281 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include "internal.h"
#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
@@ -32,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
* This should be safe, as we require bdi backing to actually
* write out data in the first place
*/
- if (!sb->s_bdi)
+ if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
return 0;
if (sb->s_qcop && sb->s_qcop->quota_sync)
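
The set_anon_super() and __sync_filesystem() hunks work as a pair with the noop_backing_dev_info declaration added to backing-dev.h below: the intent (per the new WARN_ON() in vfs_kern_mount()) is that every superblock carries a non-NULL s_bdi, and "no real backing device" is expressed by pointing it at the no-op instance. The resulting test, written out as a helper purely for clarity (has_real_backing is not in the patch):

#include <linux/backing-dev.h>
#include <linux/fs.h>

static int has_real_backing(struct super_block *sb)
{
	/* anonymous superblocks are given &noop_backing_dev_info above */
	return sb->s_bdi && sb->s_bdi != &noop_backing_dev_info;
}
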
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 05cd853..fd96982 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -820,10 +820,10 @@ xfs_reclaim_inode(
* call into reclaim to find it in a clean state instead of waiting for
* it now. We also don't return errors here - if the error is transient
* then the next reclaim pass will flush the inode, and if the error
- * is permanent then the next sync reclaim will relcaim the inode and
+ * is permanent then the next sync reclaim will reclaim the inode and
* pass on the error.
*/
- if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_fs_cmn_err(CE_WARN, ip->i_mount,
"inode 0x%llx background reclaim flush failed with %d",
(long long)ip->i_ino, error);
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index cd27c9d..5bba29a 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -177,16 +177,26 @@ xfs_swap_extents_check_format(
XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
return EINVAL;
- /* Check root block of temp in btree form to max in target */
+ /*
+ * If we are in a btree format, check that the temp root block will fit
+ * in the target and that it has enough extents to be in btree format
+ * in the target.
+ *
+ * Note that we have to be careful to allow btree->extent conversions
+ * (a common defrag case) which will occur when the temp inode is in
+ * extent format...
+ */
if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_BOFF(ip) &&
- tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+ ((XFS_IFORK_BOFF(ip) &&
+ tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) ||
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= ip->i_df.if_ext_max))
return EINVAL;
- /* Check root block of target in btree form to max in temp */
+ /* Reciprocal target->temp btree format checks */
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_BOFF(tip) &&
- ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+ ((XFS_IFORK_BOFF(tip) &&
+ ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) ||
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= tip->i_df.if_ext_max))
return EINVAL;
return 0;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e8fba92..2be0191 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp,
/*
* Determine if we have a transaction that has gone to disk
- * that needs to be covered. Log activity needs to be idle (no AIL and
- * nothing in the iclogs). And, we need to be in the right state indicating
- * something has gone out.
+ * that needs to be covered. To begin the transition to the idle state
+ * firstly the log needs to be idle (no AIL and nothing in the iclogs).
+ * If we are then in a state where covering is needed, the caller is informed
+ * that dummy transactions are required to move the log into the idle state.
+ *
+ * Because this is called as part of the sync process, we should also indicate
+ * that dummy transactions should be issued in anything but the covered or
+ * idle states. This ensures that the log tail is accurately reflected in
+ * the log at the end of the sync, hence if a crash occurs we avoid replay
+ * of transactions where the metadata is already on disk.
*/
int
xfs_log_need_covered(xfs_mount_t *mp)
@@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp)
return 0;
spin_lock(&log->l_icloglock);
- if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
- (log->l_covered_state == XLOG_STATE_COVER_NEED2))
- && !xfs_trans_ail_tail(log->l_ailp)
- && xlog_iclogs_empty(log)) {
- if (log->l_covered_state == XLOG_STATE_COVER_NEED)
- log->l_covered_state = XLOG_STATE_COVER_DONE;
- else {
- ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2);
- log->l_covered_state = XLOG_STATE_COVER_DONE2;
+ switch (log->l_covered_state) {
+ case XLOG_STATE_COVER_DONE:
+ case XLOG_STATE_COVER_DONE2:
+ case XLOG_STATE_COVER_IDLE:
+ break;
+ case XLOG_STATE_COVER_NEED:
+ case XLOG_STATE_COVER_NEED2:
+ if (!xfs_trans_ail_tail(log->l_ailp) &&
+ xlog_iclogs_empty(log)) {
+ if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+ log->l_covered_state = XLOG_STATE_COVER_DONE;
+ else
+ log->l_covered_state = XLOG_STATE_COVER_DONE2;
}
+ /* FALLTHRU */
+ default:
needed = 1;
+ break;
}
spin_unlock(&log->l_icloglock);
return needed;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 04a6ebc..2d428b0 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 2742e1a..7534979 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -104,6 +104,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
+int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
@@ -249,6 +250,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#endif
extern struct backing_dev_info default_backing_dev_info;
+extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
int writeback_in_progress(struct backing_dev_info *bdi);
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 5b5d473..8859e2e 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -7,6 +7,8 @@
#define MAX_CODADEVS 5 /* how many do we allow */
#ifdef __KERNEL__
+#include <linux/backing-dev.h>
+
struct kstatfs;
/* communication pending/processing queues */
@@ -17,6 +19,7 @@ struct venus_comm {
struct list_head vc_processing;
int vc_inuse;
struct super_block *vc_sb;
+ struct backing_dev_info bdi;
};
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 40b1101..68f883b 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -1,21 +1,26 @@
/*
* Char device interface.
*
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef _LINUX_FIREWIRE_CDEV_H
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor {
* @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
* @header_size: Header size to strip for receive contexts
* @channel: Channel to bind to
- * @speed: Speed to transmit at
+ * @speed: Speed for transmit contexts
* @closure: To be returned in &fw_cdev_event_iso_interrupt
* @handle: Handle to context, written back by kernel
*
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor {
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
*
+ * For receive contexts, @header_size must be at least 4 and must be a multiple
+ * of 4.
+ *
* Note that the effect of a @header_size > 4 depends on
* &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
*/
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context {
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
*
- * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are
- * specified by IEEE 1394a and IEC 61883.
- *
- * FIXME - finish this documentation
+ * Use the FW_CDEV_ISO_ macros to fill in @control.
+ *
+ * For transmit packets, the header length must be a multiple of 4 and specifies
+ * the number of bytes in @header that will be prepended to the packet's
+ * payload; these bytes are copied into the kernel and will not be accessed
+ * after the ioctl has returned. The sy and tag fields are copied to the iso
+ * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
+ * The skip flag specifies that no packet is to be sent in a frame; when using
+ * this, all other fields except the interrupt flag must be zero.
+ *
+ * For receive packets, the header length must be a multiple of the context's
+ * header size; if the header length is larger than the context's header size,
+ * multiple packets are queued for this entry. The sy and tag fields are
+ * ignored. If the sync flag is set, the context drops all packets until
+ * a packet with a matching sy field is received (the sync value to wait for is
+ * specified in the &fw_cdev_start_iso structure). The payload length defines
+ * how many payload bytes can be received for one packet (in addition to payload
+ * quadlets that have been defined as headers and are stripped and returned in
+ * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the
+ * additional bytes are dropped. If fewer bytes are received, the remaining
+ * bytes in this part of the payload buffer will not be written to, not even by
+ * the next packet, i.e., packets received in consecutive frames will not
+ * necessarily be consecutive in memory. If an entry has queued multiple
+ * packets, the payload length is divided equally among them.
+ *
+ * When a packet with the interrupt flag set has been completed, the
+ * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
+ * multiple receive packets is completed when its last packet is completed.
*/
struct fw_cdev_iso_packet {
__u32 control;
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet {
* Queue a number of isochronous packets for reception or transmission.
* This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
* which describe how to transmit from or receive into a contiguous region
- * of a mmap()'ed payload buffer. As part of the packet descriptors,
+ * of a mmap()'ed payload buffer. As part of transmit packet descriptors,
* a series of headers can be supplied, which will be prepended to the
* payload during DMA.
*
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 {
* instead of allocated.
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
*
- * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources
- * for the lifetime of the fd or handle.
+ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or @handle.
* In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
* for the duration of a bus generation.
*
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
index b316770..9b4bb5f 100644
--- a/include/linux/firewire-constants.h
+++ b/include/linux/firewire-constants.h
@@ -1,3 +1,28 @@
+/*
+ * IEEE 1394 constants.
+ *
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
#ifndef _LINUX_FIREWIRE_CONSTANTS_H
#define _LINUX_FIREWIRE_CONSTANTS_H
@@ -21,7 +46,7 @@
#define EXTCODE_WRAP_ADD 0x6
#define EXTCODE_VENDOR_DEPENDENT 0x7
-/* Juju specific tcodes */
+/* Linux firewire-core (Juju) specific tcodes */
#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP)
#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP)
#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD)
@@ -36,7 +61,7 @@
#define RCODE_TYPE_ERROR 0x6
#define RCODE_ADDRESS_ERROR 0x7
-/* Juju specific rcodes */
+/* Linux firewire-core (Juju) specific rcodes */
#define RCODE_SEND_ERROR 0x10
#define RCODE_CANCELLED 0x11
#define RCODE_BUSY 0x12
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 31ee31b..f30970c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2316,8 +2316,9 @@ extern int vfs_fstatat(int , char __user *, struct kstat *, int);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
+ struct fiemap_extent_info *fieinfo,
+ loff_t start, loff_t len,
+ get_block_t *get_block);
extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 3bd018b..c964cd7 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -44,6 +44,7 @@ struct matrix_keymap_data {
* @active_low: gpio polarity
* @wakeup: controls whether the device should be set up as wakeup
* source
+ * @no_autorepeat: disable key autorepeat
*
* This structure represents platform-specific data that is used by
* matrix_keypad driver to perform proper initialization.
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data {
bool active_low;
bool wakeup;
+ bool no_autorepeat;
};
/**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a3fd0f9..169d077 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
*/
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
@@ -119,6 +119,11 @@ struct kvm_memory_slot {
int user_alloc;
};
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
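
The kvm_dirty_bitmap_bytes() helper above rounds the slot's page count up to a whole number of unsigned longs before converting bits to bytes. A stand-alone sketch of the same arithmetic; ALIGN and BITS_PER_LONG are re-defined here for illustration, not taken from the kernel headers:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Bytes needed for a dirty bitmap with one bit per page, rounded up so the
 * bitmap occupies a whole number of unsigned longs. */
static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	return ALIGN(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
	/* 1 page still needs a full long; 65 pages need two longs on 64-bit. */
	printf("%lu pages -> %lu bytes\n", 1UL, dirty_bitmap_bytes(1));
	printf("%lu pages -> %lu bytes\n", 65UL, dirty_bitmap_bytes(65));
	return 0;
}
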
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index 6330fc7..5ec9ca6 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -12,6 +12,7 @@
#include <linux/ncp_mount.h>
#include <linux/net.h>
#include <linux/mutex.h>
+#include <linux/backing-dev.h>
#ifdef __KERNEL__
@@ -127,6 +128,7 @@ struct ncp_server {
size_t len;
__u8 data[128];
} unexpected_packet;
+ struct backing_dev_info bdi;
};
extern void ncp_tcp_rcv_proc(struct work_struct *work);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 717a5e5..e82957a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -176,6 +176,7 @@ struct nfs_server {
#define NFS_CAP_ATIME (1U << 11)
#define NFS_CAP_CTIME (1U << 12)
#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_POSIX_LOCK (1U << 14)
/* maximum number of slots to use */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81..34066ff 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -48,6 +48,15 @@
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
+/********** mm/hugetlb.c **********/
+/*
+ * Private mappings of hugetlb pages use this poisoned value for
+ * page->mapping. The core VM should not be doing anything with this mapping
+ * but futex requires the existence of some page->mapping value even though it
+ * is unused if PAGE_MAPPING_ANON is set.
+ */
+#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
+
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 872a98e..07db2fe 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_release_sched() \
lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
-static inline int debug_lockdep_rcu_enabled(void)
-{
- return likely(rcu_scheduler_active && debug_locks);
-}
+extern int debug_lockdep_rcu_enabled(void);
/**
* rcu_read_lock_held - might we be in RCU read-side critical section?
@@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void)
/**
* rcu_dereference_check - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct. Typically the conditions indicate
+ * the various locking conditions that should be held at that point. The check
+ * should return true if the conditions are satisfied.
+ *
+ * For example:
+ *
+ * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ * lockdep_is_held(&foo->lock));
*
- * Do an rcu_dereference(), but check that the context is correct.
- * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
- * ensure that the rcu_dereference_check() executes within an RCU
- * read-side critical section. It is also possible to check for
- * locks being held, for example, by using lockdep_is_held().
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either the RCU read lock is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ * lockdep_is_held(&foo->lock) ||
+ * atomic_read(&foo->usage) == 0);
*/
#define rcu_dereference_check(p, c) \
({ \
@@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void)
rcu_dereference_raw(p); \
})
+/**
+ * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing. Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ */
+#define rcu_dereference_protected(p, c) \
+ ({ \
+ if (debug_lockdep_rcu_enabled() && !(c)) \
+ lockdep_rcu_dereference(__FILE__, __LINE__); \
+ (p); \
+ })
+
#else /* #ifdef CONFIG_PROVE_RCU */
#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
+#define rcu_dereference_protected(p, c) (p)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/**
+ * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL. This may also be used in cases where update-side locks prevent
+ * the value of the pointer from changing, but rcu_dereference_protected()
+ * is a lighter-weight primitive for this use case.
+ */
+#define rcu_access_pointer(p) ACCESS_ONCE(p)
+
+/**
* rcu_read_lock - mark the beginning of an RCU read-side critical section.
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
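
The new kernel-doc above distinguishes three ways of reaching an RCU-protected pointer. A minimal kernel-style sketch of how they fit together, assuming kernel context; struct foo, struct bar and these functions are hypothetical and not part of the patch:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical data structures, not from the patch. */
struct bar { int val; };
struct foo {
	spinlock_t lock;	/* protects updates to ->bar */
	struct bar *bar;	/* RCU-protected */
};

/* Reader: may run under rcu_read_lock() or under foo->lock. */
static int foo_read_val(struct foo *foo)
{
	struct bar *b;
	int val = -1;

	rcu_read_lock();
	b = rcu_dereference_check(foo->bar,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&foo->lock));
	if (b)
		val = b->val;
	rcu_read_unlock();
	return val;
}

/* Updater: foo->lock prevents ->bar from changing underneath us, so the
 * lighter rcu_dereference_protected() is sufficient here. */
static void foo_replace_bar(struct foo *foo, struct bar *new)
{
	struct bar *old;

	spin_lock(&foo->lock);
	old = rcu_dereference_protected(foo->bar,
					lockdep_is_held(&foo->lock));
	rcu_assign_pointer(foo->bar, new);
	spin_unlock(&foo->lock);

	if (old) {
		synchronize_rcu();	/* wait for readers still using old */
		kfree(old);
	}
}

/* rcu_access_pointer() only tests the pointer value and never dereferences
 * it, so no read-side critical section is required for a NULL check. */
static bool foo_has_bar(struct foo *foo)
{
	return rcu_access_pointer(foo->bar) != NULL;
}
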
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 28c9fd0..ebd7472 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev,
{
/* Nothing except the stubbed out regulator API should be
* looking at the value except to check if it is an error
- * value so the actual return value doesn't matter.
+ * value. Drivers are free to handle NULL specifically by
+ * skipping all regulator API calls, but they don't have to.
+ * Drivers which don't, should make sure they properly handle
+ * corner cases of the API, such as regulator_get_voltage()
+ * returning 0.
*/
- return (struct regulator *)id;
+ return NULL;
}
static inline void regulator_put(struct regulator *regulator)
{
diff --git a/include/linux/smb_fs_sb.h b/include/linux/smb_fs_sb.h
index 8a060a7..bb947dd 100644
--- a/include/linux/smb_fs_sb.h
+++ b/include/linux/smb_fs_sb.h
@@ -10,6 +10,7 @@
#define _SMB_FS_SB
#include <linux/types.h>
+#include <linux/backing-dev.h>
#include <linux/smb.h>
/*
@@ -74,6 +75,8 @@ struct smb_sb_info {
struct smb_ops *ops;
struct super_block *super_block;
+
+ struct backing_dev_info bdi;
};
static inline int
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 8be5135..2c55a7e 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole user message */
+ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 78740ec..fa6cde5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
+void sctp_data_ready(struct sock *sk, int len);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
diff --git a/include/net/x25.h b/include/net/x25.h
index 15ef962..468551e 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout;
extern int sysctl_x25_ack_holdback_timeout;
extern int sysctl_x25_forward;
+extern int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr);
+
extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
struct x25_address *);
extern int x25_addr_aton(unsigned char *, struct x25_address *,
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index d57847f..aab3c13 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -26,6 +26,7 @@
#ifdef __KERNEL__
#include <linux/device.h>
#include <pcmcia/ss.h>
+#include <asm/atomic.h>
/*
* PCMCIA device drivers (16-bit cards only; 32-bit cards require CardBus
@@ -94,10 +95,8 @@ struct pcmcia_device {
config_req_t conf;
window_handle_t win;
- /* Is the device suspended, or in the process of
- * being removed? */
+ /* Is the device suspended? */
u16 suspended:1;
- u16 _removed:1;
/* Flags whether io, irq, win configurations were
* requested, and whether the configuration is "locked" */
@@ -115,7 +114,7 @@ struct pcmcia_device {
u16 has_card_id:1;
u16 has_func_id:1;
- u16 reserved:3;
+ u16 reserved:4;
u8 func_id;
u16 manf_id;
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 2e488b6..344705c 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -224,18 +224,16 @@ struct pcmcia_socket {
/* 16-bit state: */
struct {
- /* PCMCIA card is present in socket */
- u8 present:1;
/* "master" ioctl is used */
u8 busy:1;
- /* pcmcia module is being unloaded */
- u8 dead:1;
/* the PCMCIA card consists of two pseudo devices */
u8 has_pfc:1;
- u8 reserved:4;
+ u8 reserved:6;
} pcmcia_state;
+ /* non-zero if PCMCIA card is present */
+ atomic_t present;
#ifdef CONFIG_PCMCIA_IOCTL
struct user_info_t *user;
diff --git a/init/initramfs.c b/init/initramfs.c
index 37d3859..4b9c202 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
compress_name);
message = msg_buf;
}
- }
+ } else
+ error("junk in compressed archive");
if (state != Reset)
error("junk in compressed archive");
this_header = saved_offset + my_inptr;
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9e..62af181 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void)
error:
put_cred(new);
+ return NULL;
+
free_tgcred:
#ifdef CONFIG_KEYS
kfree(tgcred);
@@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred)
{
if (cred->magic != CRED_MAGIC)
return true;
- if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
- return true;
#ifdef CONFIG_SECURITY_SELINUX
if (selinux_is_enabled()) {
if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4d22896..a8c9621 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
* User space encodes device types as two-byte values,
* so we need to recode them
*/
- swdev = old_decode_dev(swap_area.dev);
+ swdev = new_decode_dev(swap_area.dev);
if (swdev) {
offset = swap_area.offset;
data->swap = swap_type_of(swdev, offset, NULL);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 63fe254..03a7ea1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+int debug_lockdep_rcu_enabled(void)
+{
+ return rcu_scheduler_active && debug_locks &&
+ current->lockdep_recursion == 0;
+}
+EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+
/**
* rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
*
diff --git a/kernel/sys.c b/kernel/sys.c
index 6d1a7e0..7cb426a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem);
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
- (current->personality == PER_LINUX32 && \
+ (personality(current->personality) == PER_LINUX32 && \
copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
sizeof(COMPAT_UTS_MACHINE)))
#else
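
The override_architecture() change is needed because current->personality carries flag bits (ADDR_NO_RANDOMIZE and friends) alongside the base personality, so a direct equality test against PER_LINUX32 fails as soon as any flag is set; personality() masks the flags off first. A small user-space illustration; the constant values are mirrored from <linux/personality.h> and should be treated as illustrative:

#include <stdio.h>

/* Values mirrored from <linux/personality.h> for illustration. */
#define PER_MASK		0x00ff
#define PER_LINUX32		0x0008
#define ADDR_NO_RANDOMIZE	0x0040000
#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned int pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;

	/* The old test misses PER_LINUX32 once any flag bit is set ... */
	printf("direct compare: %d\n", pers == PER_LINUX32);
	/* ... while masking with personality() still matches. */
	printf("masked compare: %d\n", personality(pers) == PER_LINUX32);
	return 0;
}
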
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff01710..935248b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
- (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE)
+ (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index db521f4..bcb3a4b 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
- int obytes_processed = 0;
+ int ret = -1;
set_error_fn(error_fn);
@@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
/* decompress */
tmp = dst_len;
- r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+
+ /* When the input data is not compressed at all,
+ * lzo1x_decompress_safe will fail, so call memcpy()
+ * instead */
+ if (unlikely(dst_len == src_len))
+ memcpy(out_buf, in_buf, src_len);
+ else {
+ r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
out_buf, &tmp);
- if (r != LZO_E_OK || dst_len != tmp) {
- error("Compressed data violation");
- goto exit_2;
+ if (r != LZO_E_OK || dst_len != tmp) {
+ error("Compressed data violation");
+ goto exit_2;
+ }
}
- obytes_processed += dst_len;
if (flush)
flush(out_buf, dst_len);
if (output)
@@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
in_buf += src_len;
}
+ ret = 0;
exit_2:
if (!input)
free(in_buf);
@@ -203,7 +211,7 @@ exit_1:
if (!output)
free(out_buf);
exit:
- return obytes_processed;
+ return ret;
}
#define decompress unlzo
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ba8b670..01e6427 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
* Now parse out the first token and use it as the name for the
* driver to filter for.
*/
- for (i = 0; i < NAME_MAX_LEN; ++i) {
+ for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
current_driver_name[i] = buf[i];
if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
break;
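
The filter_write() fix stops the copy loop one byte early so the NUL terminator written after the loop always lands inside current_driver_name. A stand-alone sketch of the same pattern; the buffer name and size here are illustrative, not the driver's actual definitions:

#include <stdio.h>
#include <ctype.h>

#define NAME_MAX_LEN 64

/* Copy the first whitespace-delimited token of 'buf' into 'name', stopping
 * one byte early so the terminating NUL always fits. */
static void copy_token(char *name, const char *buf)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		name[i] = buf[i];
		if (isspace((unsigned char)buf[i]) || buf[i] == '\0')
			break;
	}
	name[i] = '\0';	/* 'i' is at most NAME_MAX_LEN - 1 here */
}

int main(void)
{
	char name[NAME_MAX_LEN];

	copy_token(name, "e1000e some trailing text");
	printf("driver filter: '%s'\n", name);
	return 0;
}
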
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e..41b1804 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
ret->element_size = element_size;
ret->total_nr_elements = total;
if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
- memset(ret->parts[0], FLEX_ARRAY_FREE,
+ memset(&ret->parts[0], FLEX_ARRAY_FREE,
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24112e5..46d34b0 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
return simple_strtoull(cp, endp, base);
}
+EXPORT_SYMBOL(simple_strtoll);
/**
* strict_strtoul - convert a string to an unsigned long strictly
@@ -408,12 +409,12 @@ enum format_type {
};
struct printf_spec {
- u16 type;
- s16 field_width; /* width of output field */
+ u8 type; /* format_type enum */
u8 flags; /* flags to number() */
- u8 base;
- s8 precision; /* # of digits/chars */
- u8 qualifier;
+ u8 base; /* number base, 8, 10 or 16 only */
+ u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
+ s16 field_width; /* width of output field */
+ s16 precision; /* # of digits/chars */
};
static char *number(char *buf, char *end, unsigned long long num,
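
Grouping the four u8 members of struct printf_spec ahead of the two s16 members keeps the structure at 8 bytes on common ABIs even though precision grows from s8 to s16; dropping the wider field into the old ordering would introduce padding. A small sketch comparing the two layouts; the typedefs and the first structure are illustrative:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short u16;
typedef short s16;

/* Widened precision dropped into the old field order: padding creeps in. */
struct spec_interleaved {
	u16 type;
	s16 field_width;
	u8 flags;
	u8 base;
	s16 precision;
	u8 qualifier;
};

/* The patch's layout: all u8 members first, then the two s16 members. */
struct spec_grouped {
	u8 type;
	u8 flags;
	u8 base;
	u8 qualifier;
	s16 field_width;
	s16 precision;
};

int main(void)
{
	printf("interleaved: %zu bytes\n", sizeof(struct spec_interleaved));
	printf("grouped:     %zu bytes\n", sizeof(struct spec_grouped));
	return 0;
}
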
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f13e067..707d0dc 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -11,6 +11,8 @@
#include <linux/writeback.h>
#include <linux/device.h>
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
@@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = {
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
+struct backing_dev_info noop_backing_dev_info = {
+ .name = "noop",
+};
+EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
static struct class *bdi_class;
/*
@@ -715,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(bdi_destroy);
+/*
+ * For use from filesystems to quickly init and register a bdi associated
+ * with dirty writeback
+ */
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+ unsigned int cap)
+{
+ char tmp[32];
+ int err;
+
+ bdi->name = name;
+ bdi->capabilities = cap;
+ err = bdi_init(bdi);
+ if (err)
+ return err;
+
+ sprintf(tmp, "%.28s%s", name, "-%d");
+ err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
+ if (err) {
+ bdi_destroy(bdi);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bdi_setup_and_register);
+
static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
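
bdi_setup_and_register() gives a filesystem a registered, uniquely named bdi in one call, matching the per-superblock bdi fields added to the coda, ncpfs and smbfs structures above. A hypothetical sketch of the calling convention, assuming kernel context; "examplefs" and its structures are made up for illustration:

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct examplefs_sb_info {
	struct backing_dev_info bdi;
	/* ... other per-superblock state ... */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct examplefs_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	int err;

	if (!sbi)
		return -ENOMEM;

	/* Registers the bdi under a unique "examplefs-%d" style name. */
	err = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
	if (err) {
		kfree(sbi);
		return err;
	}

	sb->s_fs_info = sbi;
	sb->s_bdi = &sbi->bdi;	/* sync(2) and friends now see a real bdi */
	/* ... the rest of the usual fill_super work ... */
	return 0;
}

static void examplefs_put_super(struct super_block *sb)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;

	bdi_destroy(&sbi->bdi);
	kfree(sbi);
}
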
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6034dc9..ffbdfc8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
+ page->mapping = NULL;
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
@@ -2447,8 +2448,10 @@ retry:
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- } else
+ } else {
lock_page(page);
+ page->mapping = HUGETLB_POISON;
+ }
}
/*
diff --git a/mm/ksm.c b/mm/ksm.c
index 8cdfc2a..956880f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
do {
cond_resched();
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
break;
if (PageKsm(page))
ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
goto out;
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
goto out;
if (PageAnon(page)) {
flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
cond_resched();
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
tree_page = get_mergeable_page(tree_rmap_item);
- if (!tree_page)
+ if (IS_ERR_OR_NULL(tree_page))
return NULL;
/*
@@ -1294,7 +1294,7 @@ next_mm:
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (*page && PageAnon(*page)) {
+ if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ next_mm:
up_read(&mm->mmap_sem);
return rmap_item;
}
- if (*page)
+ if (!IS_ERR_OR_NULL(*page))
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
@@ -1367,7 +1367,7 @@ next_mm:
static void ksm_do_scan(unsigned int scan_npages)
{
struct rmap_item *rmap_item;
- struct page *page;
+ struct page *uninitialized_var(page);
while (scan_npages--) {
cond_resched();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ede99..6c755de 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2429,11 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
}
unlock_page_cgroup(pc);
+ *ptr = mem;
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
css_put(&mem->css);
}
- *ptr = mem;
return ret;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index f90ea92..456ec6f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1977,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
return 0;
/* Clean everything up if vma_adjust failed. */
- new->vm_ops->close(new);
+ if (new->vm_ops && new->vm_ops->close)
+ new->vm_ops->close(new);
if (new->vm_file) {
if (vma->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(mm);
diff --git a/mm/rmap.c b/mm/rmap.c
index 4bad326..07fc947 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
goto out_enomem_free_avc;
allocated = anon_vma;
}
- spin_lock(&anon_vma->lock);
+ spin_lock(&anon_vma->lock);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
list_add(&avc->same_vma, &vma->anon_vma_chain);
list_add(&avc->same_anon_vma, &anon_vma->head);
allocated = NULL;
+ avc = NULL;
}
spin_unlock(&mm->page_table_lock);
-
spin_unlock(&anon_vma->lock);
- if (unlikely(allocated)) {
+
+ if (unlikely(allocated))
anon_vma_free(allocated);
+ if (unlikely(avc))
anon_vma_chain_free(avc);
- }
}
return 0;
@@ -730,23 +731,28 @@ void page_move_anon_rmap(struct page *page,
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
+ * @exclusive: the page is exclusively owned by the current process
*/
static void __page_set_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+ struct vm_area_struct *vma, unsigned long address, int exclusive)
{
- struct anon_vma_chain *avc;
- struct anon_vma *anon_vma;
+ struct anon_vma *anon_vma = vma->anon_vma;
- BUG_ON(!vma->anon_vma);
+ BUG_ON(!anon_vma);
/*
- * We must use the _oldest_ possible anon_vma for the page mapping!
+ * If the page isn't exclusively mapped into this vma,
+ * we must use the _oldest_ possible anon_vma for the
+ * page mapping!
*
- * So take the last AVC chain entry in the vma, which is the deepest
- * ancestor, and use the anon_vma from that.
+ * So take the last AVC chain entry in the vma, which is
+ * the deepest ancestor, and use the anon_vma from that.
*/
- avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
- anon_vma = avc->anon_vma;
+ if (!exclusive) {
+ struct anon_vma_chain *avc;
+ avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+ anon_vma = avc->anon_vma;
+ }
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
@@ -802,7 +808,7 @@ void page_add_anon_rmap(struct page *page,
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
if (first)
- __page_set_anon_rmap(page, vma, address);
+ __page_set_anon_rmap(page, vma, address, 0);
else
__page_check_anon_rmap(page, vma, address);
}
@@ -824,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page,
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
__inc_zone_page_state(page, NR_ANON_PAGES);
- __page_set_anon_rmap(page, vma, address);
+ __page_set_anon_rmap(page, vma, address, 1);
if (page_evictable(page, vma))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
else
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 99d68c3..9753b69 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
- err = l2cap_do_send(sk, skb);
+ if (IS_ERR(skb))
+ err = PTR_ERR(skb);
+ else
+ err = l2cap_do_send(sk, skb);
goto done;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6980625..eaa0e1b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -723,11 +723,11 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
if (!pskb_may_pull(skb, len))
return -EINVAL;
- grec = (void *)(skb->data + len);
+ grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca;
type = grec->grec_type;
- len += grec->grec_nsrcs * 4;
+ len += ntohs(grec->grec_nsrcs) * 4;
if (!pskb_may_pull(skb, len))
return -EINVAL;
@@ -957,9 +957,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
unsigned offset;
int err;
- BR_INPUT_SKB_CB(skb)->igmp = 0;
- BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
-
/* We treat OOM as packet loss for now. */
if (!pskb_may_pull(skb, sizeof(*iph)))
return -EINVAL;
@@ -1049,6 +1046,9 @@ err_out:
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb)
{
+ BR_INPUT_SKB_CB(skb)->igmp = 0;
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
+
if (br->multicast_disabled)
return 0;
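
The IGMPv3 fix above matters because grec_nsrcs is a 16-bit big-endian field on the wire; using it in host order on a little-endian machine wildly overstates the number of sources and therefore the pull length. A stand-alone illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* An IGMPv3 group record carrying two sources puts the count on the
	 * wire as the big-endian bytes 0x00 0x02. */
	const unsigned char wire[2] = { 0x00, 0x02 };
	uint16_t nsrcs;

	memcpy(&nsrcs, wire, sizeof(nsrcs));

	printf("raw (host order): %u sources -> %u bytes of sources\n",
	       (unsigned)nsrcs, (unsigned)nsrcs * 4);
	printf("ntohs():          %u sources -> %u bytes of sources\n",
	       (unsigned)ntohs(nsrcs), (unsigned)ntohs(nsrcs) * 4);
	return 0;
}
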
diff --git a/net/can/raw.c b/net/can/raw.c
index 3a7dffb..da99cf1 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
}
} else if (count == 1) {
- if (copy_from_user(&sfilter, optval, optlen))
+ if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
return -EFAULT;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 1c8a0ce..f769098 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1989,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
if (dev->real_num_tx_queues > 1)
queue_index = skb_tx_hash(dev, skb);
- if (sk && sk->sk_dst_cache)
- sk_tx_queue_set(sk, queue_index);
+ if (sk) {
+ struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
+
+ if (dst && skb_dst(skb) == dst)
+ sk_tx_queue_set(sk, queue_index);
+ }
}
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4568120..fe776c9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1270,10 +1270,11 @@ replay:
err = ops->newlink(net, dev, tb, data);
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev)) {
+
+ if (err < 0 && !IS_ERR(dev))
free_netdev(dev);
+ if (err < 0)
goto out;
- }
err = rtnl_configure_link(dev, ifm);
if (err < 0)
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index c7da600..93c91b6 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -151,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
dev_load(sock_net(sk), ifr.ifr_name);
dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
+ if (!dev)
+ return -ENODEV;
+
if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 59a8387..c98f115 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
struct node *ret = tnode_get_child(tn, i);
- return rcu_dereference(ret);
+ return rcu_dereference_check(ret,
+ rcu_read_lock_held() ||
+ lockdep_rtnl_is_held());
}
static inline int tnode_child_length(const struct tnode *tn)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c65f18e..d1bcc9f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
- netif_rx(newskb);
+ netif_rx_ni(newskb);
return 0;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 954bbfb..8fef859 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
if (hslot->count < hslot2->count)
goto begin;
- result = udp4_lib_lookup2(net, INADDR_ANY, sport,
- daddr, hnum, dif,
+ result = udp4_lib_lookup2(net, saddr, sport,
+ INADDR_ANY, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16c4391..75d5ef8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -108,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
- netif_rx(newskb);
+ netif_rx_ni(newskb);
return 0;
}
@@ -629,7 +629,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
/* We must not fragment if the socket is set to force MTU discovery
* or if the skb it not generated by a local socket.
*/
- if (!skb->local_df) {
+ if (!skb->local_df && skb->len > mtu) {
skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2438e8..05ebd78 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -815,7 +815,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
{
int flags = 0;
- if (rt6_need_strict(&fl->fl6_dst))
+ if (fl->oif || rt6_need_strict(&fl->fl6_dst))
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c92ebe8..075f540 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1015,7 +1015,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
t1 = (struct tcphdr *) skb_push(buff, tot_len);
- skb_reset_transport_header(skb);
+ skb_reset_transport_header(buff);
/* Swap the send and the receive. */
memset(t1, 0, sizeof(*t1));
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c177aea..9082485 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
if (hslot->count < hslot2->count)
goto begin;
- result = udp6_lib_lookup2(net, &in6addr_any, sport,
- daddr, hnum, dif,
+ result = udp6_lib_lookup2(net, saddr, sport,
+ &in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ae18165..00bf7c9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.dst.dev = dev;
dev_hold(dev);
- xdst->u.rt6.rt6i_idev = in6_dev_get(rt->u.dst.dev);
+ xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 96d2534..87782a4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -184,7 +184,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
- *state = HT_AGG_STATE_IDLE;
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
"(or no longer) expecting addBA response there",
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 06c33b6..b887e48 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
sdata->vif.bss_conf.enable_beacon =
- !!rcu_dereference(sdata->u.ap.beacon);
+ !!sdata->u.ap.beacon;
break;
case NL80211_IFTYPE_ADHOC:
sdata->vif.bss_conf.enable_beacon =
- !!rcu_dereference(sdata->u.ibss.presp);
+ !!sdata->u.ibss.presp;
break;
case NL80211_IFTYPE_MESH_POINT:
sdata->vif.bss_conf.enable_beacon = true;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 58e3e3a..859ee5f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_ACTION:
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
- return RX_DROP_MONITOR;
- /* fall through */
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c8cd169..4aefa6d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -168,6 +168,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
channel_type != local->hw.conf.channel_type;
+ if (local->tmp_channel)
+ local->tmp_channel_type = channel_type;
local->oper_channel_type = channel_type;
if (ht_changed) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f0accf6..04ea07f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
+ case MESH_PLINK_CATEGORY:
+ case MESH_PATH_SEL_CATEGORY:
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ break;
}
/*
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 56422d8..fb12cec 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
- sta = rcu_dereference(sta->hnext);
+ sta = rcu_dereference_check(sta->hnext,
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
}
return sta;
}
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
while (sta) {
if ((sta->sdata == sdata ||
sta->sdata->bss == sdata->bss) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
- sta = rcu_dereference(sta->hnext);
+ sta = rcu_dereference_check(sta->hnext,
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
}
return sta;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cc90363..243946d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2169,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFFLAGS:
- if (!net_eq(sock_net(sk), &init_net))
- return -ENOIOCTLCMD;
return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9ece910..7b15508 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -134,7 +134,7 @@ static int __init rds_rdma_listen_init(void)
ret = PTR_ERR(cm_id);
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_create_id() returned %d\n", ret);
- goto out;
+ return ret;
}
sin.sin_family = AF_INET,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index df5abbf..99c93ee 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
- if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
- sctp_assoc_del_peer(asoc, &trans->ipaddr);
+ if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+ sctp_assoc_rm_peer(asoc, trans);
+ continue;
+ }
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 905fda5..7ec09ba 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = sctp_sndbuf_policy;
+ sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 17cb400e..0fd5b4c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -208,7 +208,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
- chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+ chunksize = sizeof(init) + addrs_len;
+ chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
@@ -238,14 +239,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -255,7 +256,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
@@ -397,13 +399,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -412,7 +414,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -3315,21 +3318,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
- /* Send the next asconf chunk from the addip chunk queue. */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- asconf = list_entry(entry, struct sctp_chunk, list);
-
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-
return retval;
}
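
Each of these chunksize adjustments switches from adding a parameter's raw length to adding its padded length: SCTP lays chunk parameters out on 4-byte boundaries, so an unrounded sum undercounts the chunk whenever a parameter length is not already a multiple of 4. A small sketch of the rounding; WORD_ROUND is re-defined here to mirror the kernel's macro:

#include <stdio.h>

/* Mirrors SCTP's WORD_ROUND(): round a length up to a 4-byte boundary. */
#define WORD_ROUND(s)	(((s) + 3) & ~3)

int main(void)
{
	/* e.g. a 6-byte HMAC-ALGO parameter occupies 8 bytes on the wire,
	 * so the chunk size must account for the padding as well. */
	unsigned int lengths[] = { 4, 6, 7, 8, 10 };
	unsigned int i, plain = 0, rounded = 0;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		plain += lengths[i];
		rounded += WORD_ROUND(lengths[i]);
		printf("param %u bytes -> %u bytes on the wire\n",
		       lengths[i], WORD_ROUND(lengths[i]));
	}
	printf("unrounded total %u, padded total %u\n", plain, rounded);
	return 0;
}
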
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 4c5bed9..d5ae450 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -962,6 +962,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
}
+/* Send the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+ /* Send the next asconf chunk from the addip chunk
+ * queue.
+ */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ struct sctp_chunk *asconf = list_entry(entry,
+ struct sctp_chunk, list);
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
@@ -1617,6 +1640,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
+ case SCTP_CMD_SEND_NEXT_ASCONF:
+ sctp_cmd_send_asconf(asoc);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index abf601a..24b2cd5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack))
+ asconf_ack)) {
+ /* Successfully processed ASCONF_ACK. We can
+ * release the next asconf if we have one.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+ SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
+ }
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 007e8ba..44a1ab0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3719,12 +3719,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
- percpu_counter_inc(&sctp_sockets_allocated);
/* Set socket backlog limit. */
sk->sk_backlog.limit = sysctl_sctp_rmem[1];
local_bh_disable();
+ percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
@@ -3741,8 +3741,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
ep = sctp_sk(sk)->ep;
sctp_endpoint_free(ep);
- percpu_counter_dec(&sctp_sockets_allocated);
local_bh_disable();
+ percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
@@ -6189,6 +6189,16 @@ do_nonblock:
goto out;
}
+void sctp_data_ready(struct sock *sk, int len)
+{
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk_has_sleeper(sk))
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ POLLRDNORM | POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e56f711..36e84e1 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct {
};
#endif
+
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr)
+{
+ unsigned char len;
+ int needed;
+ int rc;
+
+ if (skb->len < 1) {
+ /* packet has no address block */
+ rc = 0;
+ goto empty;
+ }
+
+ len = *skb->data;
+ needed = 1 + (len >> 4) + (len & 0x0f);
+
+ if (skb->len < needed) {
+ /* packet is too short to hold the addresses it claims
+ to hold */
+ rc = -1;
+ goto empty;
+ }
+
+ return x25_addr_ntoa(skb->data, called_addr, calling_addr);
+
+empty:
+ *called_addr->x25_addr = 0;
+ *calling_addr->x25_addr = 0;
+
+ return rc;
+}
+
+
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
struct x25_address *calling_addr)
{
@@ -367,6 +402,7 @@ static void __x25_destroy_socket(struct sock *sk)
/*
* Queue the unaccepted socket for death
*/
+ skb->sk->sk_state = TCP_LISTEN;
sock_set_flag(skb->sk, SOCK_DEAD);
x25_start_heartbeat(skb->sk);
x25_sk(skb->sk)->state = X25_STATE_0;
@@ -554,7 +590,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
- x25->facilities.throughput = X25_DEFAULT_THROUGHPUT;
+ x25->facilities.throughput = 0; /* by default don't negotiate
+ throughput */
x25->facilities.reverse = X25_DEFAULT_REVERSE;
x25->dte_facilities.calling_len = 0;
x25->dte_facilities.called_len = 0;
@@ -922,16 +959,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
/*
* Extract the X.25 addresses and convert them to ASCII strings,
* and remove them.
+ *
+ * Address block is mandatory in call request packets
*/
- addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
+ if (addr_len <= 0)
+ goto out_clear_request;
skb_pull(skb, addr_len);
/*
* Get the length of the facilities, skip past them for the moment
* get the call user data because this is needed to determine
* the correct listener
+ *
+ * Facilities length is mandatory in call request packets
*/
+ if (skb->len < 1)
+ goto out_clear_request;
len = skb->data[0] + 1;
+ if (skb->len < len)
+ goto out_clear_request;
skb_pull(skb,len);
/*
@@ -1415,9 +1462,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (facilities.winsize_in < 1 ||
facilities.winsize_in > 127)
break;
- if (facilities.throughput < 0x03 ||
- facilities.throughput > 0xDD)
- break;
+ if (facilities.throughput) {
+ int out = facilities.throughput & 0xf0;
+ int in = facilities.throughput & 0x0f;
+ if (!out)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT << 4;
+ else if (out < 0x30 || out > 0xD0)
+ break;
+ if (!in)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT;
+ else if (in < 0x03 || in > 0x0D)
+ break;
+ }
if (facilities.reverse &&
(facilities.reverse & 0x81) != 0x81)
break;
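
A common thread in the af_x25.c changes above is not trusting lengths taken from the wire: the new x25_parse_address_block() checks that the skb really holds the bytes its length byte claims before calling x25_addr_ntoa(), and x25_rx_call_request() now bounds-checks the facilities length as well. A stand-alone sketch of the address-block check follows; it assumes, as the patch itself does, that 1 + high nibble + low nibble bytes is a sufficient budget for the claimed addresses, and the names are illustrative.

/* Validate an address block before parsing it. */
#include <stdio.h>
#include <stddef.h>

#define ADDR_BLOCK_EMPTY      0
#define ADDR_BLOCK_TRUNCATED -1

static int check_address_block(const unsigned char *buf, size_t buflen)
{
	unsigned char len;
	size_t needed;

	if (buflen < 1)
		return ADDR_BLOCK_EMPTY;      /* no address block at all */

	len = buf[0];
	/* one length field per nibble, summed, plus the length byte itself */
	needed = 1 + (len >> 4) + (len & 0x0f);

	if (buflen < needed)
		return ADDR_BLOCK_TRUNCATED;  /* claims more than it carries */

	return (int)needed;                   /* safe to parse this many bytes */
}

int main(void)
{
	const unsigned char pkt[] = { 0x23, 0x12, 0x34, 0x56, 0x78, 0x90 };

	printf("needed = %d of %zu bytes\n",
	       check_address_block(pkt, sizeof(pkt)), sizeof(pkt));
	return 0;
}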
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21f664..771bab0 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
unsigned char *p = skb->data;
- unsigned int len = *p++;
+ unsigned int len;
*vc_fac_mask = 0;
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
+ if (skb->len < 1)
+ return 0;
+
+ len = *p++;
+
+ if (len >= skb->len)
+ return -1;
+
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
memcpy(new, ours, sizeof(*new));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ if (len < 0)
+ return len;
/*
* They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
new->reverse = theirs.reverse;
if (theirs.throughput) {
- if (theirs.throughput < ours->throughput) {
- SOCK_DEBUG(sk, "X.25: throughput negotiated down\n");
- new->throughput = theirs.throughput;
+ int theirs_in = theirs.throughput & 0x0f;
+ int theirs_out = theirs.throughput & 0xf0;
+ int ours_in = ours->throughput & 0x0f;
+ int ours_out = ours->throughput & 0xf0;
+ if (!ours_in || theirs_in < ours_in) {
+ SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
+ new->throughput = (new->throughput & 0xf0) | theirs_in;
+ }
+ if (!ours_out || theirs_out < ours_out) {
+ SOCK_DEBUG(sk,
+ "X.25: outbound throughput negotiated\n");
+ new->throughput = (new->throughput & 0x0f) | theirs_out;
}
}
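
The x25_facilities.c hunk above completes the throughput rework: instead of comparing the whole facility byte, negotiation now happens per direction, with the outbound class in the high nibble, the inbound class in the low nibble, and 0 meaning "not specified" (in which case the peer's value is taken). A small sketch of that nibble-wise rule, with an illustrative main() to exercise it:

/* Per-direction throughput negotiation, as the patch treats the byte. */
#include <stdio.h>

static unsigned char negotiate_throughput(unsigned char ours,
					  unsigned char theirs)
{
	unsigned char result = ours;
	unsigned char theirs_in  = theirs & 0x0f, theirs_out = theirs & 0xf0;
	unsigned char ours_in    = ours   & 0x0f, ours_out   = ours   & 0xf0;

	/* take the peer's class when we specified nothing or they ask for less */
	if (!ours_in || theirs_in < ours_in)
		result = (result & 0xf0) | theirs_in;
	if (!ours_out || theirs_out < ours_out)
		result = (result & 0x0f) | theirs_out;

	return result;
}

int main(void)
{
	/* e.g. we offer 0xDD, the peer asks for 0xA3: both directions drop */
	printf("0x%02x\n", negotiate_throughput(0xDD, 0xA3));   /* 0xa3 */
	return 0;
}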
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index a31b3b9..372ac226 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct x25_address source_addr, dest_addr;
+ int len;
switch (frametype) {
case X25_CALL_ACCEPTED: {
@@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
* Parse the data in the frame.
*/
skb_pull(skb, X25_STD_MIN_LEN);
- skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
- skb_pull(skb,
- x25_parse_facilities(skb, &x25->facilities,
+
+ len = x25_parse_address_block(skb, &source_addr,
+ &dest_addr);
+ if (len > 0)
+ skb_pull(skb, len);
+
+ len = x25_parse_facilities(skb, &x25->facilities,
&x25->dte_facilities,
- &x25->vc_facil_mask));
+ &x25->vc_facil_mask);
+ if (len > 0)
+ skb_pull(skb, len);
/*
* Copy any Call User Data.
*/
diff --git a/security/inode.c b/security/inode.c
index c3a7938..1c812e8 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry)) {
+ if (!IS_ERR(*dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
error = mkdir(parent->d_inode, *dentry, mode);
else
error = create(parent->d_inode, *dentry, mode);
} else
- error = PTR_ERR(dentry);
+ error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
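
The security/inode.c fix above is a classic ERR_PTR slip: lookup_one_len() returns its result through *dentry, so IS_ERR()/PTR_ERR() must be applied to *dentry, not to the dentry variable itself, whose address never looks like an error value. A user-space illustration with simplified stand-ins for the kernel macros (not the real definitions):

/* Error-pointer idiom: test the returned value, not the variable's address. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int fail)
{
	static int object;
	return fail ? ERR_PTR(-ENOENT) : &object;
}

int main(void)
{
	void *obj = lookup(1);

	if (IS_ERR(obj))                  /* check the returned value ... */
		printf("error %ld\n", PTR_ERR(obj));
	if (!IS_ERR(&obj))                /* ... not the variable's address */
		printf("&obj never looks like an error\n");
	return 0;
}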
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e814d21..dd7cd0f 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -201,7 +201,7 @@ static long keyring_read(const struct key *keyring,
int loop, ret;
ret = 0;
- klist = rcu_dereference(keyring->payload.subscriptions);
+ klist = keyring->payload.subscriptions;
if (klist) {
/* calculate how much data we could return */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 03fe63e..d737cea 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -68,7 +68,8 @@ static int call_sbin_request_key(struct key_construction *cons,
{
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = cons->key, *authkey = cons->authkey, *keyring;
+ struct key *key = cons->key, *authkey = cons->authkey, *keyring,
+ *session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
@@ -112,10 +113,12 @@ static int call_sbin_request_key(struct key_construction *cons,
if (cred->tgcred->process_keyring)
prkey = cred->tgcred->process_keyring->serial;
- if (cred->tgcred->session_keyring)
- sskey = rcu_dereference(cred->tgcred->session_keyring)->serial;
- else
- sskey = cred->user->session_keyring->serial;
+ rcu_read_lock();
+ session = rcu_dereference(cred->tgcred->session_keyring);
+ if (!session)
+ session = cred->user->session_keyring;
+ sskey = session->serial;
+ rcu_read_unlock();
sprintf(keyring_str[2], "%d", sskey);
@@ -336,8 +339,10 @@ static int construct_alloc_key(struct key_type *type,
key_already_present:
mutex_unlock(&key_construction_mutex);
- if (dest_keyring)
+ if (dest_keyring) {
+ __key_link(dest_keyring, key_ref_to_ptr(key_ref));
up_write(&dest_keyring->sem);
+ }
mutex_unlock(&user->cons_lock);
key_put(key);
*_key = key = key_ref_to_ptr(key_ref);
@@ -428,6 +433,11 @@ struct key *request_key_and_link(struct key_type *type,
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ construct_get_dest_keyring(&dest_keyring);
+ key_link(dest_keyring, key);
+ key_put(dest_keyring);
+ }
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
} else {
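
The call_sbin_request_key() change above wraps the session-keyring access in rcu_read_lock()/rcu_read_unlock() and, crucially, loads the pointer once into a local before testing and using it; reading cred->tgcred->session_keyring twice could see it change (or become NULL) between the check and the use. The sketch below shows only that single-load pattern, with a C11 acquire load standing in for rcu_dereference(); it does not model RCU's grace-period guarantee that keeps the pointed-to object alive for the read side, and the names are illustrative.

/* Load the shared pointer once; test and use the same local. */
#include <stdatomic.h>
#include <stdio.h>

struct keyring { int serial; };

static _Atomic(struct keyring *) session_keyring;
static struct keyring fallback = { .serial = 42 };

static int session_serial(void)
{
	/* single load: the value we test is the value we use */
	struct keyring *k = atomic_load_explicit(&session_keyring,
						 memory_order_acquire);
	if (!k)
		k = &fallback;
	return k->serial;
}

int main(void)
{
	printf("%d\n", session_serial());        /* 42: fallback used */

	static struct keyring s = { .serial = 7 };
	atomic_store_explicit(&session_keyring, &s, memory_order_release);
	printf("%d\n", session_serial());        /* 7 */
	return 0;
}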
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 8da6a84..cd4f734 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
void avtab_cache_init(void);
void avtab_cache_destroy(void);
-#define MAX_AVTAB_HASH_BITS 13
+#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 656e474..91acc9a 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci)
struct snd_ac97 *ac97;
int ret;
- writel(0, aaci->base + AC97_POWERDOWN);
/*
* Assert AACIRESET for 2us
*/
@@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
writel(0x1fff, aaci->base + AACI_INTCLR);
writel(aaci->maincr, aaci->base + AACI_MAINCR);
-
+ /*
+ * Fix AC97 read-back failures by first reading from an
+ * arbitrary AACI register.
+ */
+ readl(aaci->base + AACI_CSCH1);
ret = aaci_probe_ac97(aaci);
if (ret)
goto out;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f8fd586..cec68152 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2272,6 +2272,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
};
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index af34606..e9fdfc4 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -519,14 +519,6 @@ static int ad198x_suspend(struct hda_codec *codec, pm_message_t state)
ad198x_power_eapd(codec);
return 0;
}
-
-static int ad198x_resume(struct hda_codec *codec)
-{
- ad198x_init(codec);
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
- return 0;
-}
#endif
static struct hda_codec_ops ad198x_patch_ops = {
@@ -539,7 +531,6 @@ static struct hda_codec_ops ad198x_patch_ops = {
#endif
#ifdef SND_HDA_NEEDS_RESUME
.suspend = ad198x_suspend,
- .resume = ad198x_resume,
#endif
.reboot_notify = ad198x_shutup,
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c7730db..7404dba 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -230,6 +230,7 @@ enum {
ALC888_ACER_ASPIRE_7730G,
ALC883_MEDION,
ALC883_MEDION_MD2,
+ ALC883_MEDION_WIM2160,
ALC883_LAPTOP_EAPD,
ALC883_LENOVO_101E_2ch,
ALC883_LENOVO_NB0763,
@@ -1389,22 +1390,31 @@ struct alc_fixup {
static void alc_pick_fixup(struct hda_codec *codec,
const struct snd_pci_quirk *quirk,
- const struct alc_fixup *fix)
+ const struct alc_fixup *fix,
+ int pre_init)
{
const struct alc_pincfg *cfg;
quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
if (!quirk)
return;
-
fix += quirk->value;
cfg = fix->pins;
- if (cfg) {
+ if (pre_init && cfg) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+ snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
+ codec->chip_name, quirk->name);
+#endif
for (; cfg->nid; cfg++)
snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
}
- if (fix->verbs)
+ if (!pre_init && fix->verbs) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+ snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
+ codec->chip_name, quirk->name);
+#endif
add_verb(codec->spec, fix->verbs);
+ }
}
static int alc_read_coef_idx(struct hda_codec *codec,
@@ -4133,7 +4143,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
- SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
+ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
@@ -4808,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec)
}
}
+static void alc880_auto_init_input_src(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ int c;
+
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ unsigned int mux_idx;
+ const struct hda_input_mux *imux;
+ mux_idx = c >= spec->num_mux_defs ? 0 : c;
+ imux = &spec->input_mux[mux_idx];
+ if (!imux->num_items && mux_idx > 0)
+ imux = &spec->input_mux[0];
+ if (imux)
+ snd_hda_codec_write(codec, spec->adc_nids[c], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ imux->items[0].index);
+ }
+}
+
/* parse the BIOS configuration and set up the alc_spec */
/* return 1 if successful, 0 if the proper config is not found,
* or a negative error code
@@ -4886,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec)
alc880_auto_init_multi_out(codec);
alc880_auto_init_extra_out(codec);
alc880_auto_init_analog_input(codec);
+ alc880_auto_init_input_src(codec);
if (spec->unsol_event)
alc_inithook(codec);
}
@@ -6397,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec)
}
}
+#define alc260_auto_init_input_src alc880_auto_init_input_src
+
/*
* generic initialization of ADC, input mixers and output mixers
*/
@@ -6483,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
alc260_auto_init_multi_out(codec);
alc260_auto_init_analog_input(codec);
+ alc260_auto_init_input_src(codec);
if (spec->unsol_event)
alc_inithook(codec);
}
@@ -8455,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
{ } /* end */
};
+static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = {
+ HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+ HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT),
+ HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT),
+ { } /* end */
+};
+
+static struct hda_verb alc883_medion_wim2160_verbs[] = {
+ /* Unmute front mixer */
+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
+
+ /* Set speaker pin to front mixer */
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+
+ /* Init headphone pin */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
+ {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+
+ { } /* end */
+};
+
+/* toggle speaker-output according to the hp-jack state */
+static void alc883_medion_wim2160_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->autocfg.hp_pins[0] = 0x1a;
+ spec->autocfg.speaker_pins[0] = 0x15;
+}
+
static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -9164,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = {
[ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g",
[ALC883_MEDION] = "medion",
[ALC883_MEDION_MD2] = "medion-md2",
+ [ALC883_MEDION_WIM2160] = "medion-wim2160",
[ALC883_LAPTOP_EAPD] = "laptop-eapd",
[ALC883_LENOVO_101E_2ch] = "lenovo-101e",
[ALC883_LENOVO_NB0763] = "lenovo-nb0763",
@@ -9280,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
@@ -9818,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = {
.setup = alc883_medion_md2_setup,
.init_hook = alc_automute_amp,
},
+ [ALC883_MEDION_WIM2160] = {
+ .mixers = { alc883_medion_wim2160_mixer },
+ .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
+ .adc_nids = alc883_adc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ .channel_mode = alc883_3ST_2ch_modes,
+ .input_mux = &alc883_capture_source,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .setup = alc883_medion_wim2160_setup,
+ .init_hook = alc_automute_amp,
+ },
[ALC883_LAPTOP_EAPD] = {
.mixers = { alc883_base_mixer },
.init_verbs = { alc883_init_verbs, alc882_eapd_verbs },
@@ -10363,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec)
board_config = ALC882_AUTO;
}
- alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups);
+ if (board_config == ALC882_AUTO)
+ alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1);
if (board_config == ALC882_AUTO) {
/* automatic parse from the BIOS config */
@@ -10436,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec)
set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (board_config == ALC882_AUTO)
+ alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0);
+
spec->vmaster_nid = 0x0c;
codec->patch_ops = alc_patch_ops;
@@ -12816,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
dac = 0x02;
break;
case 0x15:
+ case 0x21: /* ALC269vb has this pin, too */
dac = 0x03;
break;
default:
@@ -13735,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec,
}
}
-static void alc269_laptop_dmic_setup(struct hda_codec *codec)
+static void alc269_laptop_amic_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
spec->ext_mic.pin = 0x18;
spec->ext_mic.mux_idx = 0;
- spec->int_mic.pin = 0x12;
- spec->int_mic.mux_idx = 5;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
spec->auto_mic = 1;
}
-static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
+static void alc269_laptop_dmic_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
@@ -13755,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
spec->ext_mic.pin = 0x18;
spec->ext_mic.mux_idx = 0;
spec->int_mic.pin = 0x12;
- spec->int_mic.mux_idx = 6;
+ spec->int_mic.mux_idx = 5;
spec->auto_mic = 1;
}
-static void alc269_laptop_amic_setup(struct hda_codec *codec)
+static void alc269vb_laptop_amic_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- spec->autocfg.hp_pins[0] = 0x15;
+ spec->autocfg.hp_pins[0] = 0x21;
spec->autocfg.speaker_pins[0] = 0x14;
spec->ext_mic.pin = 0x18;
spec->ext_mic.mux_idx = 0;
@@ -13771,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec)
spec->auto_mic = 1;
}
+static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ spec->autocfg.hp_pins[0] = 0x21;
+ spec->autocfg.speaker_pins[0] = 0x14;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x12;
+ spec->int_mic.mux_idx = 6;
+ spec->auto_mic = 1;
+}
+
static void alc269_laptop_inithook(struct hda_codec *codec)
{
alc269_speaker_automute(codec);
@@ -13975,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec)
alc_inithook(codec);
}
+enum {
+ ALC269_FIXUP_SONY_VAIO,
+};
+
+static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
+ {}
+};
+
+static const struct alc_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_SONY_VAIO] = {
+ .verbs = alc269_sony_vaio_fixup_verbs
+ },
+};
+
+static struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ {}
+};
+
+
/*
* configuration and preset
*/
@@ -14034,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
ALC269_DMIC),
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC),
SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC),
- SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC),
+ SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK),
SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC),
SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU),
@@ -14108,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = {
.num_channel_mode = ARRAY_SIZE(alc269_modes),
.channel_mode = alc269_modes,
.unsol_event = alc269_laptop_unsol_event,
- .setup = alc269_laptop_amic_setup,
+ .setup = alc269vb_laptop_amic_setup,
.init_hook = alc269_laptop_inithook,
},
[ALC269VB_DMIC] = {
@@ -14188,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec)
board_config = ALC269_AUTO;
}
+ if (board_config == ALC269_AUTO)
+ alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1);
+
if (board_config == ALC269_AUTO) {
/* automatic parse from the BIOS config */
err = alc269_parse_auto_config(codec);
@@ -14240,6 +14367,9 @@ static int patch_alc269(struct hda_codec *codec)
set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ if (board_config == ALC269_AUTO)
+ alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0);
+
spec->vmaster_nid = 0x02;
codec->patch_ops = alc_patch_ops;
@@ -15328,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec)
board_config = ALC861_AUTO;
}
- alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups);
+ if (board_config == ALC861_AUTO)
+ alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1);
if (board_config == ALC861_AUTO) {
/* automatic parse from the BIOS config */
@@ -15365,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec)
spec->vmaster_nid = 0x03;
+ if (board_config == ALC861_AUTO)
+ alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0);
+
codec->patch_ops = alc_patch_ops;
if (board_config == ALC861_AUTO) {
spec->init_hook = alc861_auto_init;
@@ -16299,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec)
board_config = ALC861VD_AUTO;
}
- alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups);
+ if (board_config == ALC861VD_AUTO)
+ alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1);
if (board_config == ALC861VD_AUTO) {
/* automatic parse from the BIOS config */
@@ -16347,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec)
spec->vmaster_nid = 0x02;
+ if (board_config == ALC861VD_AUTO)
+ alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0);
+
codec->patch_ops = alc_patch_ops;
if (board_config == ALC861VD_AUTO)
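
The patch_realtek.c changes above split fixup application into two phases: pin-config overrides have to land before the BIOS auto-parser reads the pin defaults, while fix-up verbs can only be queued once the spec exists, so alc_pick_fixup() now takes a pre_init flag and is called twice, and only for the *_AUTO board configs. A minimal sketch of that two-phase idea, with illustrative names rather than the driver's real tables:

/* Apply pin overrides before auto-parsing, verbs after. */
#include <stdio.h>

struct fixup {
	const char *pin_override;    /* applied in the pre-init phase  */
	const char *verbs;           /* applied in the post-init phase */
};

static void apply_fixup(const struct fixup *fix, int pre_init)
{
	if (pre_init && fix->pin_override)
		printf("pre-init:  set pins  (%s)\n", fix->pin_override);
	if (!pre_init && fix->verbs)
		printf("post-init: add verbs (%s)\n", fix->verbs);
}

int main(void)
{
	struct fixup sony_vaio = { .pin_override = NULL,
				   .verbs = "ground mic VREF" };

	apply_fixup(&sony_vaio, 1);   /* before the BIOS auto-parse */
	/* ... auto-parser builds the spec here ... */
	apply_fixup(&sony_vaio, 0);   /* after the spec exists      */
	return 0;
}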
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index c4be3fa..7fb7d01 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1607,6 +1607,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
"Dell Studio 1555", STAC_DELL_M6_DMIC),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
"Dell Studio 1557", STAC_DELL_M6_DMIC),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+ "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ "Dell Studio 1558", STAC_DELL_M6_BOTH),
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 9ddc373..7345381 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec,
knew->name = kstrdup(tmpl->name, GFP_KERNEL);
if (!knew->name)
return NULL;
- return 0;
+ return knew;
}
static void via_free_kctls(struct hda_codec *codec)
@@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = {
},
};
-static int via_hp_build(struct via_spec *spec)
+static int via_hp_build(struct hda_codec *codec)
{
+ struct via_spec *spec = codec->spec;
struct snd_kcontrol_new *knew;
hda_nid_t nid;
-
- knew = via_clone_control(spec, &via_hp_mixer[0]);
- if (knew == NULL)
- return -ENOMEM;
+ int nums;
+ hda_nid_t conn[HDA_MAX_CONNECTIONS];
switch (spec->codec_type) {
case VT1718S:
@@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec)
break;
}
+ nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
+ if (nums <= 1)
+ return 0;
+
+ knew = via_clone_control(spec, &via_hp_mixer[0]);
+ if (knew == NULL)
+ return -ENOMEM;
+
knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
knew->private_value = nid;
@@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
return 1;
@@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
return 1;
@@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
return 1;
@@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
return 1;
@@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
return 1;
}
@@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
@@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
via_smart51_build(spec);
@@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
return 1;
}
@@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec,
/* Line-Out: PortE */
err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
- "Master Front Playback Volume",
+ "Front Playback Volume",
HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT));
if (err < 0)
return err;
err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE,
- "Master Front Playback Switch",
+ "Front Playback Switch",
HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT));
if (err < 0)
return err;
@@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- via_hp_build(spec);
+ via_hp_build(codec);
return 1;
}
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index b64e781..b56e336 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -849,6 +849,7 @@ struct snd_m3 {
struct snd_kcontrol *master_switch;
struct snd_kcontrol *master_volume;
struct tasklet_struct hwvol_tq;
+ unsigned int in_suspend;
#ifdef CONFIG_PM
u16 *suspend_mem;
@@ -884,6 +885,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = {
MODULE_DEVICE_TABLE(pci, snd_m3_ids);
static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
+ /* Ignore spurious HV interrupts during suspend / resume; this avoids
+ mistaking them for a mute button press. */
+ if (chip->in_suspend)
+ return;
+
if (!chip->master_switch || !chip->master_volume)
return;
@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
if (chip->suspend_mem == NULL)
return 0;
+ chip->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
snd_m3_hv_init(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ chip->in_suspend = 0;
return 0;
}
#endif /* CONFIG_PM */
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a34cbcf..002e289 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/firmware.h>
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index 2e79d71..2b31ac6 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data)
static void snd_imx_dma_err_callback(int channel, void *data, int err)
{
- pr_err("DMA error callback called\n");
+ struct snd_pcm_substream *substream = data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct imx_pcm_runtime_data *iprtd = runtime->private_data;
+ int ret;
pr_err("DMA timeout on channel %d -%s%s%s%s\n",
channel,
@@ -79,6 +84,14 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
err & IMX_DMA_ERR_REQUEST ? " request" : "",
err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
err & IMX_DMA_ERR_BUFFER ? " buffer" : "");
+
+ imx_dma_disable(iprtd->dma);
+ ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
+ IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ DMA_MODE_WRITE : DMA_MODE_READ);
+ if (!ret)
+ imx_dma_enable(iprtd->dma);
}
static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index f96a373..6b518e0 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -39,23 +39,24 @@ struct imx_pcm_runtime_data {
unsigned long offset;
unsigned long last_offset;
unsigned long size;
- struct timer_list timer;
- int poll_time;
+ struct hrtimer hrt;
+ int poll_time_ns;
+ struct snd_pcm_substream *substream;
+ atomic_t running;
};
-static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd)
+static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
- iprtd->timer.expires = jiffies + iprtd->poll_time;
-}
-
-static void imx_ssi_timer_callback(unsigned long data)
-{
- struct snd_pcm_substream *substream = (void *)data;
+ struct imx_pcm_runtime_data *iprtd =
+ container_of(hrt, struct imx_pcm_runtime_data, hrt);
+ struct snd_pcm_substream *substream = iprtd->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct imx_pcm_runtime_data *iprtd = runtime->private_data;
struct pt_regs regs;
unsigned long delta;
+ if (!atomic_read(&iprtd->running))
+ return HRTIMER_NORESTART;
+
get_fiq_regs(&regs);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data)
/* If we've transferred at least a period then report it and
* reset our poll time */
- if (delta >= runtime->period_size) {
+ if (delta >= iprtd->period) {
snd_pcm_period_elapsed(substream);
iprtd->last_offset = iprtd->offset;
-
- imx_ssi_set_next_poll(iprtd);
}
- /* Restart the timer; if we didn't report we'll run on the next tick */
- add_timer(&iprtd->timer);
+ hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));
+ return HRTIMER_RESTART;
}
static struct fiq_handler fh = {
@@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
iprtd->period = params_period_bytes(params) ;
iprtd->offset = 0;
iprtd->last_offset = 0;
- iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params));
-
+ iprtd->poll_time_ns = 1000000000 / params_rate(params) *
+ params_period_size(params);
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
return 0;
@@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- imx_ssi_set_next_poll(iprtd);
- add_timer(&iprtd->timer);
+ atomic_set(&iprtd->running, 1);
+ hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
+ HRTIMER_MODE_REL);
if (++fiq_enable == 1)
enable_fiq(imx_pcm_fiq);
@@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- del_timer(&iprtd->timer);
+ atomic_set(&iprtd->running, 0);
+
if (--fiq_enable == 0)
disable_fiq(imx_pcm_fiq);
-
break;
default:
return -EINVAL;
@@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = {
.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = 16 * 1024,
- .periods_min = 2,
+ .periods_min = 4,
.periods_max = 255,
.fifo_size = 0,
};
@@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
runtime->private_data = iprtd;
- init_timer(&iprtd->timer);
- iprtd->timer.data = (unsigned long)substream;
- iprtd->timer.function = imx_ssi_timer_callback;
+ iprtd->substream = substream;
+
+ atomic_set(&iprtd->running, 0);
+ hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ iprtd->hrt.function = snd_hrtimer_callback;
ret = snd_pcm_hw_constraint_integer(substream->runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
@@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
- del_timer_sync(&iprtd->timer);
+ hrtimer_cancel(&iprtd->hrt);
+
kfree(iprtd);
return 0;
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 0bcc6d7..80b4fee 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -656,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev)
dai->private_data = ssi;
if ((cpu_is_mx27() || cpu_is_mx21()) &&
- !(ssi->flags & IMX_SSI_USE_AC97)) {
+ !(ssi->flags & IMX_SSI_USE_AC97) &&
+ (ssi->flags & IMX_SSI_DMA)) {
ssi->flags |= IMX_SSI_DMA;
platform = imx_ssi_dma_mx2_init(pdev, ssi);
} else
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 2c59afd..9e28b20 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
DEFINE_WAIT(wait);
long timeout = msecs_to_jiffies(50);
+ if (ep->umidi->disconnected)
+ return;
/*
* The substream buffer is empty, but some data might still be in the
* currently active URBs, so we have to wait for those to complete.
@@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
* Frees an output endpoint.
* May be called when ep hasn't been initialized completely.
*/
-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
{
unsigned int i;
for (i = 0; i < OUTPUT_URBS; ++i)
- if (ep->urbs[i].urb)
+ if (ep->urbs[i].urb) {
free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
ep->max_transfer);
+ ep->urbs[i].urb = NULL;
+ }
+}
+
+static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
+{
+ snd_usbmidi_out_endpoint_clear(ep);
kfree(ep);
}
@@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
usb_kill_urb(ep->out->urbs[j].urb);
if (umidi->usb_protocol_ops->finish_out_endpoint)
umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
+ ep->out->active_urbs = 0;
+ if (ep->out->drain_urbs) {
+ ep->out->drain_urbs = 0;
+ wake_up(&ep->out->drain_wait);
+ }
}
if (ep->in)
for (j = 0; j < INPUT_URBS; ++j)
usb_kill_urb(ep->in->urbs[j]);
/* free endpoints here; later call can result in Oops */
- if (ep->out) {
- snd_usbmidi_out_endpoint_delete(ep->out);
- ep->out = NULL;
- }
+ if (ep->out)
+ snd_usbmidi_out_endpoint_clear(ep->out);
if (ep->in) {
snd_usbmidi_in_endpoint_delete(ep->in);
ep->in = NULL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd19..c82ae24 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
kvm_arch_flush_shadow(kvm);
+ srcu_read_unlock(&kvm->srcu, idx);
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
@@ -648,7 +652,7 @@ skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
new.dirty_bitmap = vmalloc(dirty_bytes);
if (!new.dirty_bitmap)
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
{
struct kvm_memory_slot *memslot;
int r, i;
- int n;
+ unsigned long n;
unsigned long any = 0;
r = -EINVAL;
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ unsigned long *p = memslot->dirty_bitmap +
+ rel_gfn / BITS_PER_LONG;
+ int offset = rel_gfn % BITS_PER_LONG;
/* avoid RMW */
- if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+ if (!generic_test_le_bit(offset, p))
+ generic___set_le_bit(offset, p);
}
}
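
The kvm_main.c hunk above sizes the dirty bitmap through kvm_dirty_bitmap_bytes() and, in mark_page_dirty(), splits the page's offset within the memslot into a word index and a bit offset, so the bit helpers are only ever handed an offset below BITS_PER_LONG. A user-space sketch of that addressing; host-endian shifts stand in for the generic little-endian bit helpers, and the sizing assumes the same align-to-a-long rule the old code spelled out inline.

/* Dirty-bitmap sizing and word/offset bit addressing. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static size_t bitmap_bytes(unsigned long npages)
{
	/* round up to a whole number of longs, then convert to bytes */
	return (npages + BITS_PER_LONG - 1) / BITS_PER_LONG *
	       sizeof(unsigned long);
}

static void mark_dirty(unsigned long *bitmap, unsigned long rel_gfn)
{
	unsigned long *word = bitmap + rel_gfn / BITS_PER_LONG;
	unsigned int offset = rel_gfn % BITS_PER_LONG;

	if (!(*word & (1UL << offset)))      /* avoid a needless RMW */
		*word |= 1UL << offset;
}

int main(void)
{
	unsigned long npages = 200;
	unsigned long *bitmap = calloc(1, bitmap_bytes(npages));

	mark_dirty(bitmap, 3);
	mark_dirty(bitmap, 130);
	printf("%zu bytes, word0=%#lx word2=%#lx\n",
	       bitmap_bytes(npages), bitmap[0], bitmap[2]);
	free(bitmap);
	return 0;
}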