-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-format14
-rw-r--r--Documentation/DMA-attributes.txt18
-rw-r--r--Documentation/DocBook/device-drivers.tmpl17
-rw-r--r--Documentation/cgroups/cpusets.txt2
-rw-r--r--Documentation/cpu-hotplug.txt22
-rw-r--r--Documentation/devicetree/bindings/regulator/anatop-regulator.txt29
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--Documentation/hwmon/k10temp2
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/scsi/00-INDEX2
-rw-r--r--Documentation/scsi/st.txt4
-rw-r--r--Documentation/scsi/ufs.txt133
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/include/asm/dma-mapping.h18
-rw-r--r--arch/alpha/kernel/pci-noop.c10
-rw-r--r--arch/alpha/kernel/pci_iommu.c10
-rw-r--r--arch/alpha/kernel/smp.c2
-rw-r--r--arch/arm/kernel/bios32.c1
-rw-r--r--arch/arm/kernel/insn.c1
-rw-r--r--arch/arm/kernel/kprobes.c2
-rw-r--r--arch/arm/kernel/ptrace.c4
-rw-r--r--arch/arm/kernel/smp.c7
-rw-r--r--arch/arm/mach-msm/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-omap2/include/mach/barriers.h2
-rw-r--r--arch/avr32/include/asm/barrier.h2
-rw-r--r--arch/avr32/include/asm/special_insns.h13
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h2
-rw-r--r--arch/blackfin/include/asm/cmpxchg.h3
-rw-r--r--arch/blackfin/kernel/setup.c7
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c1
-rw-r--r--arch/hexagon/include/asm/dma-mapping.h18
-rw-r--r--arch/hexagon/kernel/dma.c9
-rw-r--r--arch/hexagon/kernel/smp.c8
-rw-r--r--arch/ia64/hp/common/sba_iommu.c11
-rw-r--r--arch/ia64/include/asm/dma-mapping.h18
-rw-r--r--arch/ia64/kernel/acpi.c2
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c14
-rw-r--r--arch/ia64/sn/pci/pci_dma.c9
-rw-r--r--arch/m68k/include/asm/atomic.h1
-rw-r--r--arch/m68k/mac/config.c3
-rw-r--r--arch/m68k/q40/config.c3
-rw-r--r--arch/microblaze/include/asm/cmpxchg.h2
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h18
-rw-r--r--arch/microblaze/include/asm/futex.h2
-rw-r--r--arch/microblaze/include/asm/processor.h4
-rw-r--r--arch/microblaze/kernel/dma.c10
-rw-r--r--arch/microblaze/kernel/early_printk.c2
-rw-r--r--arch/microblaze/kernel/setup.c2
-rw-r--r--arch/microblaze/kernel/unwind.c1
-rw-r--r--arch/microblaze/lib/uaccess_old.S98
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c12
-rw-r--r--arch/mips/cavium-octeon/smp.c4
-rw-r--r--arch/mips/include/asm/dma-mapping.h18
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c2
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kernel/smp.c27
-rw-r--r--arch/mips/kernel/smtc.c2
-rw-r--r--arch/mips/mm/c-octeon.c6
-rw-r--r--arch/mips/mm/dma-default.c8
-rw-r--r--arch/mips/netlogic/common/smp.c6
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c8
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c2
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c7
-rw-r--r--arch/mips/sibyte/sb1250/smp.c7
-rw-r--r--arch/parisc/include/asm/atomic.h107
-rw-r--r--arch/parisc/include/asm/cmpxchg.h116
-rw-r--r--arch/parisc/include/asm/futex.h31
-rw-r--r--arch/parisc/kernel/smp.c3
-rw-r--r--arch/powerpc/boot/dts/p1020mbg-pc.dtsi151
-rw-r--r--arch/powerpc/boot/dts/p1020mbg-pc_32b.dts89
-rw-r--r--arch/powerpc/boot/dts/p1020mbg-pc_36b.dts89
-rw-r--r--arch/powerpc/boot/dts/p1020utm-pc.dtsi140
-rw-r--r--arch/powerpc/boot/dts/p1020utm-pc_32b.dts89
-rw-r--r--arch/powerpc/boot/dts/p1020utm-pc_36b.dts89
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts3
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts4
-rw-r--r--arch/powerpc/boot/dts/p3060qds.dts2
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts3
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts4
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h24
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h7
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h26
-rw-r--r--arch/powerpc/kernel/dma-iommu.c10
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c4
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/fadump.c2
-rw-r--r--arch/powerpc/kernel/ibmebus.c10
-rw-r--r--arch/powerpc/kernel/kgdb.c1
-rw-r--r--arch/powerpc/kernel/vio.c14
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c1
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c1
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c16
-rw-r--r--arch/powerpc/platforms/cell/qpace_setup.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c2
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c13
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c4
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c22
-rw-r--r--arch/sh/include/asm/dma-mapping.h28
-rw-r--r--arch/sh/kernel/dma-nommu.c4
-rw-r--r--arch/sh/mm/consistent.c6
-rw-r--r--arch/sparc/include/asm/dma-mapping.h18
-rw-r--r--arch/sparc/include/asm/pgtable_64.h4
-rw-r--r--arch/sparc/kernel/iommu.c10
-rw-r--r--arch/sparc/kernel/ioport.c18
-rw-r--r--arch/sparc/kernel/leon_kernel.c6
-rw-r--r--arch/sparc/kernel/pci_sun4v.c9
-rw-r--r--arch/tile/kernel/setup.c8
-rw-r--r--arch/um/kernel/skas/process.c2
-rw-r--r--arch/um/kernel/smp.c9
-rw-r--r--arch/unicore32/include/asm/dma-mapping.h18
-rw-r--r--arch/unicore32/mm/dma-swiotlb.c18
-rw-r--r--arch/x86/Makefile1
-rw-r--r--arch/x86/include/asm/dma-mapping.h26
-rw-r--r--arch/x86/include/asm/processor.h10
-rw-r--r--arch/x86/kernel/amd_gart_64.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event.c17
-rw-r--r--arch/x86/kernel/cpu/perf_event.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c18
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c36
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c19
-rw-r--r--arch/x86/kernel/irq.c7
-rw-r--r--arch/x86/kernel/kgdb.c60
-rw-r--r--arch/x86/kernel/pci-calgary_64.c9
-rw-r--r--arch/x86/kernel/pci-dma.c3
-rw-r--r--arch/x86/kernel/pci-nommu.c6
-rw-r--r--arch/x86/kernel/pci-swiotlb.c17
-rw-r--r--arch/x86/kernel/process.c24
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c4
-rw-r--r--crypto/ablkcipher.c4
-rw-r--r--crypto/aead.c4
-rw-r--r--crypto/crypto_user.c80
-rw-r--r--crypto/pcrypt.c8
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/base/firmware_class.c208
-rw-r--r--drivers/base/power/runtime.c3
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/block/floppy.c36
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/intel-gtt.c3
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c2
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/edac/mce_amd.c6
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fops.c7
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_prime.c304
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c21
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c9
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c23
-rw-r--r--drivers/gpu/drm/i915/intel_display.c37
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c3
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c2
-rw-r--r--drivers/gpu/drm/radeon/atom.c15
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c14
-rw-r--r--drivers/hsi/Kconfig19
-rw-r--r--drivers/hsi/Makefile6
-rw-r--r--drivers/hsi/clients/Kconfig13
-rw-r--r--drivers/hsi/clients/Makefile5
-rw-r--r--drivers/hsi/clients/hsi_char.c802
-rw-r--r--drivers/hsi/hsi.c494
-rw-r--r--drivers/hsi/hsi_boardinfo.c62
-rw-r--r--drivers/hsi/hsi_core.h35
-rw-r--r--drivers/hwmon/Kconfig7
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/adm1031.c20
-rw-r--r--drivers/hwmon/f75375s.c2
-rw-r--r--drivers/hwmon/k10temp.c4
-rw-r--r--drivers/hwmon/max6639.c15
-rw-r--r--drivers/hwmon/w83627ehf.c18
-rw-r--r--drivers/iommu/amd_iommu.c10
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/md/linear.c9
-rw-r--r--drivers/md/raid0.c27
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c59
-rw-r--r--drivers/misc/kgdbts.c160
-rw-r--r--drivers/mtd/mtdchar.c53
-rw-r--r--drivers/net/bonding/bond_main.c8
-rw-r--r--drivers/net/eql.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c464
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c12
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c40
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c7
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/renesas/Kconfig5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c22
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h15
-rw-r--r--drivers/net/ethernet/via/via-rhine.c12
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/rionet.c11
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/cdc_eem.c1
-rw-r--r--drivers/net/usb/rtl8150.c26
-rw-r--r--drivers/net/usb/zaurus.c5
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/wimax/i2400m/netdev.c3
-rw-r--r--drivers/net/wimax/i2400m/usb.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c31
-rw-r--r--drivers/net/wireless/orinoco/main.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/pci/pcie/aspm.c13
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/core.c4
-rw-r--r--drivers/regulator/fixed-helper.c3
-rw-r--r--drivers/regulator/mc13892-regulator.c10
-rw-r--r--drivers/regulator/s5m8767.c14
-rw-r--r--drivers/regulator/tps6586x-regulator.c5
-rw-r--r--drivers/regulator/wm831x-dcdc.c9
-rw-r--r--drivers/regulator/wm831x-isink.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c34
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/atp870u.c4
-rw-r--r--drivers/scsi/bfa/bfa.h9
-rw-r--r--drivers/scsi/bfa/bfa_core.c693
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c5
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c188
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h17
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c151
-rw-r--r--drivers/scsi/bfa/bfa_svc.c69
-rw-r--r--drivers/scsi/bfa/bfa_svc.h4
-rw-r--r--drivers/scsi/bfa/bfad_attr.c47
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h2
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfi_ms.h17
-rw-r--r--drivers/scsi/bfa/bfi_reg.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c83
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c38
-rw-r--r--drivers/scsi/ipr.c73
-rw-r--r--drivers/scsi/ipr.h16
-rw-r--r--drivers/scsi/libfc/fc_exch.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c10
-rw-r--r--drivers/scsi/lpfc/Makefile4
-rw-r--r--drivers/scsi/lpfc/lpfc.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c80
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c488
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c6
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c27
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/st.c21
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/ufs/Kconfig49
-rw-r--r--drivers/scsi/ufs/Makefile2
-rw-r--r--drivers/scsi/ufs/ufs.h207
-rw-r--r--drivers/scsi/ufs/ufshcd.c1978
-rw-r--r--drivers/scsi/ufs/ufshci.h376
-rw-r--r--drivers/scsi/vmw_pvscsi.c65
-rw-r--r--drivers/scsi/vmw_pvscsi.h109
-rw-r--r--drivers/tty/serial/sunzilog.c4
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/virtio/virtio_balloon.c14
-rw-r--r--drivers/virtio/virtio_pci.c74
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--fs/aio.c32
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext2/ext2.h631
-rw-r--r--fs/ext2/xattr_security.c5
-rw-r--r--fs/ext2/xattr_trusted.c5
-rw-r--r--fs/ext2/xip.c2
-rw-r--r--fs/ext3/acl.c8
-rw-r--r--fs/ext3/balloc.c10
-rw-r--r--fs/ext3/bitmap.c4
-rw-r--r--fs/ext3/dir.c7
-rw-r--r--fs/ext3/ext3.h (renamed from include/linux/ext3_fs.h)488
-rw-r--r--fs/ext3/ext3_jbd.c2
-rw-r--r--fs/ext3/file.c6
-rw-r--r--fs/ext3/fsync.c8
-rw-r--r--fs/ext3/hash.c4
-rw-r--r--fs/ext3/ialloc.c13
-rw-r--r--fs/ext3/inode.c12
-rw-r--r--fs/ext3/ioctl.c7
-rw-r--r--fs/ext3/namei.c14
-rw-r--r--fs/ext3/resize.c5
-rw-r--r--fs/ext3/super.c18
-rw-r--r--fs/ext3/symlink.c4
-rw-r--r--fs/ext3/xattr.c7
-rw-r--r--fs/ext3/xattr_security.c6
-rw-r--r--fs/ext3/xattr_trusted.c6
-rw-r--r--fs/ext3/xattr_user.c5
-rw-r--r--fs/gfs2/file.c1
-rw-r--r--fs/namei.c195
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/pstore/inode.c26
-rw-r--r--include/asm-generic/cmpxchg.h1
-rw-r--r--include/crypto/internal/aead.h2
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/drm/drm.h14
-rw-r--r--include/drm/drmP.h62
-rw-r--r--include/drm/intel-gtt.h4
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/cpumask.h6
-rw-r--r--include/linux/cryptouser.h3
-rw-r--r--include/linux/dma-attrs.h2
-rw-r--r--include/linux/dma-mapping.h13
-rw-r--r--include/linux/ext2_fs.h569
-rw-r--r--include/linux/ext2_fs_sb.h126
-rw-r--r--include/linux/ext3_fs_i.h151
-rw-r--r--include/linux/ext3_fs_sb.h91
-rw-r--r--include/linux/ext3_jbd.h229
-rw-r--r--include/linux/firewire.h3
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/hsi/Kbuild1
-rw-r--r--include/linux/hsi/hsi.h410
-rw-r--r--include/linux/hsi/hsi_char.h63
-rw-r--r--include/linux/if_eql.h2
-rw-r--r--include/linux/kernel.h15
-rw-r--r--include/linux/kgdb.h7
-rw-r--r--include/linux/kmod.h27
-rw-r--r--include/linux/lsm_audit.h96
-rw-r--r--include/linux/mtio.h1
-rw-r--r--include/linux/perf_event.h90
-rw-r--r--include/linux/platform_data/atmel.h6
-rw-r--r--include/linux/pm_qos.h4
-rw-r--r--include/linux/regulator/machine.h8
-rw-r--r--include/linux/ring_buffer.h3
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/scsi/iscsi_if.h19
-rw-r--r--include/scsi/libfcoe.h4
-rw-r--r--include/trace/events/sched.h2
-rw-r--r--include/xen/swiotlb-xen.h6
-rw-r--r--init/Kconfig4
-rw-r--r--init/do_mounts_initrd.c1
-rw-r--r--init/do_mounts_rd.c9
-rw-r--r--kernel/cpuset.c10
-rw-r--r--kernel/debug/debug_core.c53
-rw-r--r--kernel/debug/kdb/kdb_io.c2
-rw-r--r--kernel/events/core.c11
-rw-r--r--kernel/irq_work.c1
-rw-r--r--kernel/kmod.c117
-rw-r--r--kernel/padata.c13
-rw-r--r--kernel/power/hibernate.c18
-rw-r--r--kernel/power/process.c8
-rw-r--r--kernel/power/qos.c50
-rw-r--r--kernel/power/suspend.c7
-rw-r--r--kernel/power/user.c10
-rw-r--r--kernel/sched/core.c13
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c3
-rw-r--r--kernel/trace/ring_buffer.c157
-rw-r--r--kernel/trace/trace.c113
-rw-r--r--kernel/trace/trace.h3
-rw-r--r--kernel/trace/trace_entries.h16
-rw-r--r--kernel/trace/trace_export.c2
-rw-r--r--net/802/garp.c22
-rw-r--r--net/core/dev.c1
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv6/route.c34
-rw-r--r--net/mac80211/agg-rx.c3
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/rose/rose_dev.c4
-rw-r--r--net/wireless/nl80211.c29
-rwxr-xr-xscripts/tags.sh2
-rw-r--r--security/apparmor/audit.c42
-rw-r--r--security/apparmor/capability.c6
-rw-r--r--security/apparmor/file.c54
-rw-r--r--security/apparmor/include/audit.h28
-rw-r--r--security/apparmor/ipc.c10
-rw-r--r--security/apparmor/lib.c4
-rw-r--r--security/apparmor/lsm.c8
-rw-r--r--security/apparmor/policy.c10
-rw-r--r--security/apparmor/policy_unpack.c20
-rw-r--r--security/apparmor/resource.c12
-rw-r--r--security/lsm_audit.c80
-rw-r--r--security/selinux/avc.c101
-rw-r--r--security/selinux/hooks.c185
-rw-r--r--security/selinux/include/avc.h25
-rw-r--r--security/selinux/selinuxfs.c110
-rw-r--r--security/smack/smack.h23
-rw-r--r--security/smack/smack_access.c14
-rw-r--r--security/smack/smack_lsm.c29
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c8
-rw-r--r--sound/soc/fsl/p1022_ds.c10
-rw-r--r--tools/perf/Documentation/perf-report.txt5
-rw-r--r--tools/perf/Makefile66
-rw-r--r--tools/perf/builtin-diff.c60
-rw-r--r--tools/perf/builtin-report.c40
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-test.c174
-rw-r--r--tools/perf/config/feature-tests.mak15
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/cache.h12
-rw-r--r--tools/perf/util/evlist.c6
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/gtk/browser.c189
-rw-r--r--tools/perf/util/gtk/gtk.h8
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c200
-rw-r--r--tools/perf/util/hist.h19
-rw-r--r--tools/perf/util/include/linux/export.h (renamed from tools/perf/util/include/linux/module.h)0
-rw-r--r--tools/perf/util/parse-events.c603
-rw-r--r--tools/perf/util/parse-events.h49
-rw-r--r--tools/perf/util/parse-events.l127
-rw-r--r--tools/perf/util/parse-events.y229
-rw-r--r--tools/perf/util/pmu.c469
-rw-r--r--tools/perf/util/pmu.h41
-rw-r--r--tools/perf/util/pmu.l43
-rw-r--r--tools/perf/util/pmu.y93
-rw-r--r--tools/perf/util/probe-finder.c4
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/symbol.c3
-rw-r--r--tools/perf/util/trace-event-parse.c10
-rw-r--r--tools/perf/util/ui/browser.h2
-rw-r--r--tools/perf/util/ui/browsers/hists.c14
-rw-r--r--tools/perf/util/ui/keysyms.h2
-rw-r--r--tools/perf/util/ui/util.c82
500 files changed, 14119 insertions, 4883 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-format b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
new file mode 100644
index 0000000..079afc7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
@@ -0,0 +1,14 @@
+Where: /sys/bus/event_source/devices/<dev>/format
+Date: January 2012
+Kernel Version: 3.3
+Contact: Jiri Olsa <jolsa@redhat.com>
+Description:
+ Attribute group to describe the magic bits that go into
+ perf_event_attr::config[012] for a particular pmu.
+ Each attribute of this group defines the 'hardware' bitmask
+ we want to export, so that userspace can deal with sane
+ name/value pairs.
+
+ Example: 'config1:1,6-10,44'
+ Defines contents of attribute that occupies bits 1,6-10,44 of
+ perf_event_attr::config1.
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index b768cc0..5c72eed 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -31,3 +31,21 @@ may be weakly ordered, that is that reads and writes may pass each other.
Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING,
those that do not will simply ignore the attribute and exhibit default
behavior.
+
+DMA_ATTR_WRITE_COMBINE
+----------------------
+
+DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
+buffered to improve performance.
+
+Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
+those that do not will simply ignore the attribute and exhibit default
+behavior.
+
+DMA_ATTR_NON_CONSISTENT
+-----------------------
+
+DMA_ATTR_NON_CONSISTENT lets the platform choose to return either
+consistent or non-consistent memory as it sees fit. By using this API,
+you are guaranteeing to the platform that you have all the correct and
+necessary sync points for this memory in the driver.
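
As a rough sketch of how a driver might use the attributes documented above
together with the dma_alloc_attrs() interface that this series introduces
elsewhere in the patch; the wrapper function and its name are hypothetical:

    #include <linux/dma-mapping.h>
    #include <linux/dma-attrs.h>
    #include <linux/gfp.h>

    /* Hypothetical helper: request a write-combined coherent buffer. */
    static void *alloc_wc_buffer(struct device *dev, size_t size,
                                 dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

            /* Platforms that ignore the attribute simply fall back to
               the default coherent behaviour described above. */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }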
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 9c27e51..7514dbf 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -446,4 +446,21 @@ X!Idrivers/video/console/fonts.c
!Edrivers/i2c/i2c-core.c
</chapter>
+ <chapter id="hsi">
+ <title>High Speed Synchronous Serial Interface (HSI)</title>
+
+ <para>
+ High Speed Synchronous Serial Interface (HSI) is a
+ serial interface mainly used for connecting application
+ engines (APE) with cellular modem engines (CMT) in cellular
+ handsets.
+
+ HSI provides multiplexing for up to 16 logical channels,
+ low-latency and full duplex communication.
+ </para>
+
+!Iinclude/linux/hsi/hsi.h
+!Edrivers/hsi/hsi.c
+ </chapter>
+
</book>
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 5c51ed4..cefd3d8 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -217,7 +217,7 @@ and name space for cpusets, with a minimum of additional kernel code.
The cpus and mems files in the root (top_cpuset) cpuset are
read-only. The cpus file automatically tracks the value of
-cpu_online_map using a CPU hotplug notifier, and the mems file
+cpu_online_mask using a CPU hotplug notifier, and the mems file
automatically tracks the value of node_states[N_HIGH_MEMORY]--i.e.,
nodes with memory--using the cpuset_track_online_nodes() hook.
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a20bfd4..66ef8f3 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -47,7 +47,7 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
other cpus later online, read FAQ's for more info.
additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
- cpu_possible_map = cpu_present_map + additional_cpus
+ cpu_possible_mask = cpu_present_mask + additional_cpus
cede_offline={"off","on"} Use this option to disable/enable putting offlined
processors to an extended H_CEDE state on
@@ -64,11 +64,11 @@ should only rely on this to count the # of cpus, but *MUST* not rely
on the apicid values in those tables for disabled apics. In the event
BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
use this parameter "additional_cpus=x" to represent those cpus in the
-cpu_possible_map.
+cpu_possible_mask.
possible_cpus=n [s390,x86_64] use this to set hotpluggable cpus.
This option sets possible_cpus bits in
- cpu_possible_map. Thus keeping the numbers of bits set
+ cpu_possible_mask. Thus keeping the numbers of bits set
constant even if the machine gets rebooted.
CPU maps and such
@@ -76,7 +76,7 @@ CPU maps and such
[More on cpumaps and primitive to manipulate, please check
include/linux/cpumask.h that has more descriptive text.]
-cpu_possible_map: Bitmap of possible CPUs that can ever be available in the
+cpu_possible_mask: Bitmap of possible CPUs that can ever be available in the
system. This is used to allocate some boot time memory for per_cpu variables
that aren't designed to grow/shrink as CPUs are made available or removed.
Once set during boot time discovery phase, the map is static, i.e no bits
@@ -84,13 +84,13 @@ are added or removed anytime. Trimming it accurately for your system needs
upfront can save some boot time memory. See below for how we use heuristics
in x86_64 case to keep this under check.
-cpu_online_map: Bitmap of all CPUs currently online. Its set in __cpu_up()
+cpu_online_mask: Bitmap of all CPUs currently online. Its set in __cpu_up()
after a cpu is available for kernel scheduling and ready to receive
interrupts from devices. Its cleared when a cpu is brought down using
__cpu_disable(), before which all OS services including interrupts are
migrated to another target CPU.
-cpu_present_map: Bitmap of CPUs currently present in the system. Not all
+cpu_present_mask: Bitmap of CPUs currently present in the system. Not all
of them may be online. When physical hotplug is processed by the relevant
subsystem (e.g ACPI) can change and new bit either be added or removed
from the map depending on the event is hot-add/hot-remove. There are currently
@@ -99,22 +99,22 @@ at which time hotplug is disabled.
You really dont need to manipulate any of the system cpu maps. They should
be read-only for most use. When setting up per-cpu resources almost always use
-cpu_possible_map/for_each_possible_cpu() to iterate.
+cpu_possible_mask/for_each_possible_cpu() to iterate.
Never use anything other than cpumask_t to represent bitmap of CPUs.
#include <linux/cpumask.h>
- for_each_possible_cpu - Iterate over cpu_possible_map
- for_each_online_cpu - Iterate over cpu_online_map
- for_each_present_cpu - Iterate over cpu_present_map
+ for_each_possible_cpu - Iterate over cpu_possible_mask
+ for_each_online_cpu - Iterate over cpu_online_mask
+ for_each_present_cpu - Iterate over cpu_present_mask
for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
#include <linux/cpu.h>
get_online_cpus() and put_online_cpus():
The above calls are used to inhibit cpu hotplug operations. While the
-cpu_hotplug.refcount is non zero, the cpu_online_map will not change.
+cpu_hotplug.refcount is non zero, the cpu_online_mask will not change.
If you merely need to avoid cpus going away, you could also use
preempt_disable() and preempt_enable() for those sections.
Just remember the critical section cannot call any
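
A small sketch of the usage pattern described in the text above, holding off
CPU hotplug with get_online_cpus()/put_online_cpus() while walking
cpu_online_mask; the per-cpu counter is a made-up example variable:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, example_counter);

    static unsigned long sum_example_counters(void)
    {
            unsigned long total = 0;
            int cpu;

            get_online_cpus();      /* cpu_online_mask is stable here */
            for_each_online_cpu(cpu)
                    total += per_cpu(example_counter, cpu);
            put_online_cpus();

            return total;
    }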
diff --git a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
new file mode 100644
index 0000000..357758c
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
@@ -0,0 +1,29 @@
+Anatop Voltage regulators
+
+Required properties:
+- compatible: Must be "fsl,anatop-regulator"
+- anatop-reg-offset: Anatop MFD register offset
+- anatop-vol-bit-shift: Bit shift for the register
+- anatop-vol-bit-width: Number of bits used in the register
+- anatop-min-bit-val: Minimum value of this register
+- anatop-min-voltage: Minimum voltage of this regulator
+- anatop-max-voltage: Maximum voltage of this regulator
+
+Any property defined as part of the core regulator
+binding, defined in regulator.txt, can also be used.
+
+Example:
+
+ regulator-vddpu {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1300000>;
+ };
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c1be806..709e08e 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,14 +6,6 @@ be removed from this file.
---------------------------
-What: x86 floppy disable_hlt
-When: 2012
-Why: ancient workaround of dubious utility clutters the
- code used by everybody else.
-Who: Len Brown <len.brown@intel.com>
-
----------------------------
-
What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
When: 2012
Why: This optional sub-feature of APM is of dubious reliability,
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index a10f736..90956b6 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -11,7 +11,7 @@ Supported chips:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
-* AMD Family 15h processors: "Bulldozer"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity"
Prefix: 'k10temp'
Addresses scanned: PCI space
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 3b7488f..e34b531d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -225,6 +225,7 @@ Code Seq#(hex) Include File Comments
'j' 00-3F linux/joystick.h
'k' 00-0F linux/spi/spidev.h conflict!
'k' 00-05 video/kyro.h conflict!
+'k' 10-17 linux/hsi/hsi_char.h HSI character device
'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
<http://web.archive.org/web/*/http://mikonos.dia.unisa.it/tcfs>
'l' 40-7F linux/udf_fs_i.h in development:
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index b48ded5..b7dd650 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -94,3 +94,5 @@ sym53c8xx_2.txt
- info on second generation driver for sym53c8xx based adapters
tmscsim.txt
- info on driver for AM53c974 based adapters
+ufs.txt
+ - info on Universal Flash Storage (UFS) and UFS host controller driver.
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 691ca29..685bf35 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -390,6 +390,10 @@ MTSETDRVBUFFER
MT_ST_SYSV sets the SYSV semantics (mode)
MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
the command to finish) for some commands (e.g., rewind)
+ MT_ST_NOWAIT_EOF enables immediate filemark mode (i.e. when
+ writing a filemark, don't wait for it to complete). Please
+ see the BASICS note about MTWEOFI with respect to the
+ possible dangers of writing immediate filemarks.
MT_ST_SILI enables setting the SILI bit in SCSI commands when
reading in variable block mode to enhance performance when
reading blocks shorter than the byte count; set this only
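
A hedged userspace sketch of turning on the immediate-filemark option
described above; the device path is only an example, and MT_ST_NOWAIT_EOF is
only available from kernel headers that already carry the new flag:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mtio.h>

    /* Hypothetical helper: open a tape device and set MT_ST_NOWAIT_EOF. */
    static int open_tape_nowait_eof(const char *path) /* e.g. "/dev/nst0" */
    {
            struct mtop op = {
                    .mt_op    = MTSETDRVBUFFER,
                    .mt_count = MT_ST_SETBOOLEANS | MT_ST_NOWAIT_EOF,
            };
            int fd = open(path, O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, MTIOCTOP, &op) < 0) {
                    /* older drivers/headers reject the new option */
                    close(fd);
                    return -1;
            }
            return fd;
    }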
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
new file mode 100644
index 0000000..41a6164
--- /dev/null
+++ b/Documentation/scsi/ufs.txt
@@ -0,0 +1,133 @@
+ Universal Flash Storage
+ =======================
+
+
+Contents
+--------
+
+1. Overview
+2. UFS Architecture Overview
+ 2.1 Application Layer
+ 2.2 UFS Transport Protocol(UTP) layer
+ 2.3 UFS Interconnect(UIC) Layer
+3. UFSHCD Overview
+ 3.1 UFS controller initialization
+ 3.2 UTP Transfer requests
+ 3.3 UFS error handling
+ 3.4 SCSI Error handling
+
+
+1. Overview
+-----------
+
+Universal Flash Storage (UFS) is a storage specification for flash devices.
+It aims to provide a universal storage interface for both
+embedded and removable flash memory based storage in mobile
+devices such as smart phones and tablet computers. The specification
+is defined by JEDEC Solid State Technology Association. UFS is based
+on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
+physical layer and MIPI Unipro as the link layer.
+
+The main goals of UFS are to provide:
+ * Optimized performance:
+ For UFS version 1.0 and 1.1 the target performance is as follows,
+ Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
+ Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
+ Future version of the standard,
+ Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
+ * Low power consumption
+ * High random IOPs and low latency
+
+
+2. UFS Architecture Overview
+----------------------------
+
+UFS has a layered communication architecture which is based on SCSI
+SAM-5 architectural model.
+
+The UFS communication architecture consists of the following layers:
+
+2.1 Application Layer
+
+ The Application layer is composed of UFS command set layer(UCS),
+ Task Manager and Device manager. The UFS interface is designed to be
+ protocol agnostic, however SCSI has been selected as a baseline
+ protocol for versions 1.0 and 1.1 of UFS protocol layer.
+ UFS supports a subset of SCSI commands defined by SPC-4 and SBC-3.
+ * UCS: It handles SCSI commands supported by UFS specification.
+ * Task manager: It handles task management functions defined by the
+ UFS which are meant for command queue control.
+ * Device manager: It handles device level operations and device
+ configuration operations. Device level operations mainly involve
+ device power management operations and commands to Interconnect
+ layers. Device level configurations involve handling of query
+ requests which are used to modify and retrieve configuration
+ information of the device.
+
+2.2 UFS Transport Protocol(UTP) layer
+
+ UTP layer provides services for
+ the higher layers through Service Access Points. UTP defines 3
+ service access points for higher layers.
+ * UDM_SAP: Device manager service access point is exposed to device
+ manager for device level operations. These device level operations
+ are done through query requests.
+ * UTP_CMD_SAP: Command service access point is exposed to UFS command
+ set layer(UCS) to transport commands.
+ * UTP_TM_SAP: Task management service access point is exposed to task
+ manager to transport task management functions.
+ UTP transports messages through UFS protocol information unit(UPIU).
+
+2.3 UFS Interconnect(UIC) Layer
+
+ UIC is the lowest layer of UFS layered architecture. It handles
+ connection between UFS host and UFS device. UIC consists of
+ MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
+ to upper layer,
+ * UIC_SAP: To transport UPIU between UFS host and UFS device.
+ * UIO_SAP: To issue commands to Unipro layers.
+
+
+3. UFSHCD Overview
+------------------
+
+The UFS host controller driver is based on Linux SCSI Framework.
+UFSHCD is a low level device driver which acts as an interface between
+SCSI Midlayer and PCIe based UFS host controllers.
+
+The current UFSHCD implementation supports the following functionality:
+
+3.1 UFS controller initialization
+
+ The initialization module brings UFS host controller to active state
+ and prepares the controller to transfer commands/responses between
+ UFSHCD and UFS device.
+
+3.2 UTP Transfer requests
+
+ The transfer request handling module of UFSHCD receives SCSI commands
+ from the SCSI Midlayer, forms UPIUs and issues them to the UFS host
+ controller. The module also decodes responses received from the UFS
+ host controller in the form of UPIUs and informs the SCSI Midlayer of
+ the status of the command.
+
+3.3 UFS error handling
+
+ Error handling module handles Host controller fatal errors,
+ Device fatal errors and UIC interconnect layer related errors.
+
+3.4 SCSI Error handling
+
+ This is done through UFSHCD SCSI error handling routines registered
+ with the SCSI Midlayer. Examples of the error handling commands
+ issued by the SCSI Midlayer are Abort task, LUN reset and host reset.
+ UFSHCD Routines to perform these tasks are registered with
+ SCSI Midlayer through .eh_abort_handler, .eh_device_reset_handler and
+ .eh_host_reset_handler.
+
+In this version of UFSHCD, Query requests and power management
+functionality are not implemented.
+
+UFS Specifications can be found at,
+UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
+UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
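
A rough sketch of the SCSI Midlayer registration pattern that section 3.4
refers to; the template and handler names below are placeholders, not the
actual ufshcd symbols:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static int example_eh_abort(struct scsi_cmnd *cmd)
    {
            return SUCCESS;         /* a real handler would abort the UPIU */
    }

    static int example_eh_device_reset(struct scsi_cmnd *cmd)
    {
            return SUCCESS;
    }

    static int example_eh_host_reset(struct scsi_cmnd *cmd)
    {
            return SUCCESS;
    }

    static struct scsi_host_template example_ufs_sht = {
            .name                    = "example-ufshcd",
            .eh_abort_handler        = example_eh_abort,
            .eh_device_reset_handler = example_eh_device_reset,
            .eh_host_reset_handler   = example_eh_host_reset,
            /* .queuecommand, .can_queue, etc. omitted from this sketch */
    };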
diff --git a/MAINTAINERS b/MAINTAINERS
index eecf344..962232d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1251,7 +1251,6 @@ ATHEROS ATH5K WIRELESS DRIVER
M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
-M: Bob Copeland <me@bobcopeland.com>
L: linux-wireless@vger.kernel.org
L: ath5k-devel@lists.ath5k.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
@@ -3557,17 +3556,13 @@ L: linux-pm@vger.kernel.org
S: Supported
F: arch/x86/platform/mrst/pmu.*
-INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
+INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
+M: Stanislav Yakovlev <stas.yakovlev@gmail.com>
L: linux-wireless@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/networking/README.ipw2100
-F: drivers/net/wireless/ipw2x00/ipw2100.*
-
-INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
-L: linux-wireless@vger.kernel.org
-S: Orphan
F: Documentation/networking/README.ipw2200
-F: drivers/net/wireless/ipw2x00/ipw2200.*
+F: drivers/net/wireless/ipw2x00/
INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
M: Joseph Cihula <joseph.cihula@intel.com>
diff --git a/Makefile b/Makefile
index fd4b3d0..5e637c2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 3
+PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 4567aca..dfa32f0 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -12,16 +12,22 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
- return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+ return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
- get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+ get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 04eea48..df24b76 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -108,7 +108,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
}
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
@@ -123,7 +124,8 @@ static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
}
static void alpha_noop_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+ void *cpu_addr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -174,8 +176,8 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask)
}
struct dma_map_ops alpha_noop_ops = {
- .alloc_coherent = alpha_noop_alloc_coherent,
- .free_coherent = alpha_noop_free_coherent,
+ .alloc = alpha_noop_alloc_coherent,
+ .free = alpha_noop_free_coherent,
.map_page = alpha_noop_map_page,
.map_sg = alpha_noop_map_sg,
.mapping_error = alpha_noop_mapping_error,
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 4361080..cd63479 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -434,7 +434,8 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
else DMA_ADDRP is undefined. */
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
@@ -478,7 +479,8 @@ try_again:
DMA_ADDR past this call are illegal. */
static void alpha_pci_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+ void *cpu_addr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -952,8 +954,8 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask)
}
struct dma_map_ops alpha_pci_ops = {
- .alloc_coherent = alpha_pci_alloc_coherent,
- .free_coherent = alpha_pci_free_coherent,
+ .alloc = alpha_pci_alloc_coherent,
+ .free = alpha_pci_free_coherent,
.map_page = alpha_pci_map_page,
.unmap_page = alpha_pci_unmap_page,
.map_sg = alpha_pci_map_sg,
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4087a56..50d438d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -450,7 +450,7 @@ setup_smp(void)
smp_num_probed = 1;
}
- printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+ printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 632df9a..ede5f77 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -299,7 +299,6 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev)
*/
void pcibios_fixup_bus(struct pci_bus *bus)
{
- struct pci_sys_data *root = bus->sysdata;
struct pci_dev *dev;
u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
index ab312e5..b760340 100644
--- a/arch/arm/kernel/insn.c
+++ b/arch/arm/kernel/insn.c
@@ -1,3 +1,4 @@
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/opcodes.h>
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index ab1869d..4dd41fc 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -152,7 +152,7 @@ int __kprobes __arch_disarm_kprobe(void *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+ stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 45956c9..80abafb 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -256,7 +256,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
{
unsigned long tmp;
- if (off & 3 || off >= sizeof(struct user))
+ if (off & 3)
return -EIO;
tmp = 0;
@@ -268,6 +268,8 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
tmp = tsk->mm->end_code;
else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
+ else if (off >= sizeof(struct user))
+ return -EIO;
return put_user(tmp, ret);
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2cee7d1..addbbe8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -349,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* re-initialize the map in platform_smp_prepare_cpus() if
* present != possible (e.g. physical hotplug).
*/
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
/*
* Initialise the SCU if there are more than one CPU
@@ -581,8 +581,9 @@ void smp_send_stop(void)
unsigned long timeout;
if (num_online_cpus() > 1) {
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
+ struct cpumask mask;
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index 169a840..c14011f 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -16,6 +16,7 @@
#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
#define __ASM_ARCH_MSM_UNCOMPRESS_H
+#include <asm/barrier.h>
#include <asm/processor.h>
#include <mach/msm_iomap.h>
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
index 4fa72c7..1c582a8 100644
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ b/arch/arm/mach-omap2/include/mach/barriers.h
@@ -22,6 +22,8 @@
#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H
+#include <asm/outercache.h>
+
extern void omap_bus_sync(void);
#define rmb() dsb()
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
index 808001c..0961275 100644
--- a/arch/avr32/include/asm/barrier.h
+++ b/arch/avr32/include/asm/barrier.h
@@ -8,6 +8,8 @@
#ifndef __ASM_AVR32_BARRIER_H
#define __ASM_AVR32_BARRIER_H
+#define nop() asm volatile("nop")
+
#define mb() asm volatile("" : : : "memory")
#define rmb() mb()
#define wmb() asm volatile("sync 0" : : : "memory")
diff --git a/arch/avr32/include/asm/special_insns.h b/arch/avr32/include/asm/special_insns.h
deleted file mode 100644
index f922218..0000000
--- a/arch/avr32/include/asm/special_insns.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SPECIAL_INSNS_H
-#define __ASM_AVR32_SPECIAL_INSNS_H
-
-#define nop() asm volatile("nop")
-
-#endif /* __ASM_AVR32_SPECIAL_INSNS_H */
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 7173386..70742ec9 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/serial.h>
#include <linux/platform_data/macb.h>
-#include <linux/platform_data/atmel_nand.h>
+#include <linux/platform_data/atmel.h>
#define GPIO_PIN_NONE (-1)
diff --git a/arch/blackfin/include/asm/cmpxchg.h b/arch/blackfin/include/asm/cmpxchg.h
index ba2484f..c05868c 100644
--- a/arch/blackfin/include/asm/cmpxchg.h
+++ b/arch/blackfin/include/asm/cmpxchg.h
@@ -122,7 +122,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#include <asm-generic/cmpxchg.h>
+#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* !CONFIG_SMP */
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2aa0193..2ad747e 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -550,6 +550,7 @@ static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
unsigned long mtd_phys = 0;
+ unsigned long n;
#endif
unsigned long max_mem;
@@ -593,9 +594,9 @@ static __init void memory_setup(void)
mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
- if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
- mtd_size =
- PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
+ n = ext2_image_size((void *)(mtd_phys + 0x400));
+ if (n)
+ mtd_size = PAGE_ALIGN(n * 1024);
# endif
# if defined(CONFIG_CRAMFS)
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 41098a3..4f8d8bc 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 448b224..233ed3d 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -71,29 +71,35 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return (dma_addr == bad_dma_address);
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- ret = ops->alloc_coherent(dev, size, dma_handle, flag);
+ ret = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index e711ace..3730221 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -54,7 +54,8 @@ static struct gen_pool *coherent_pool;
/* Allocates from a pool of uncached memory that was reserved at boot time */
void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag)
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs)
{
void *ret;
@@ -81,7 +82,7 @@ void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
@@ -202,8 +203,8 @@ static void hexagon_sync_single_for_device(struct device *dev,
}
struct dma_map_ops hexagon_dma_ops = {
- .alloc_coherent = hexagon_dma_alloc_coherent,
- .free_coherent = hexagon_free_coherent,
+ .alloc = hexagon_dma_alloc_coherent,
+ .free = hexagon_free_coherent,
.map_sg = hexagon_map_sg,
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 15d1fd2..9b44a9e 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -35,7 +35,7 @@
#define BASE_IPI_IRQ 26
/*
- * cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
+ * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
* (which is prior to any of our smp_prepare_cpu crap), in order to set
* up the... per_cpu areas.
*/
@@ -208,7 +208,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
stack_start = ((void *) thread) + THREAD_SIZE;
__vmstart(start_secondary, stack_start);
- while (!cpu_isset(cpu, cpu_online_map))
+ while (!cpu_online(cpu))
barrier();
return 0;
@@ -229,7 +229,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Right now, let's just fake it. */
for (i = 0; i < max_cpus; i++)
- cpu_set(i, cpu_present_map);
+ set_cpu_present(i, true);
/* Also need to register the interrupts for IPI */
if (max_cpus > 1)
@@ -269,5 +269,5 @@ void smp_start_cpus(void)
int i;
for (i = 0; i < NR_CPUS; i++)
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
}
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f6ea3a3..bcda5b2 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1129,7 +1129,8 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
+sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, struct dma_attrs *attrs)
{
struct ioc *ioc;
void *addr;
@@ -1191,8 +1192,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
*
* See Documentation/DMA-API-HOWTO.txt
*/
-static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
free_pages((unsigned long) vaddr, get_order(size));
@@ -2212,8 +2213,8 @@ sba_page_override(char *str)
__setup("sbapagesize=",sba_page_override);
struct dma_map_ops sba_dma_ops = {
- .alloc_coherent = sba_alloc_coherent,
- .free_coherent = sba_free_coherent,
+ .alloc = sba_alloc_coherent,
+ .free = sba_free_coherent,
.map_page = sba_map_page,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 4336d08..4f5e814 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -23,23 +23,29 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *daddr, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *daddr, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
void *caddr;
- caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+ caddr = ops->alloc(dev, size, daddr, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *daddr, caddr);
return caddr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *caddr, dma_addr_t daddr)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *caddr, dma_addr_t daddr,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
debug_dma_free_coherent(dev, size, caddr, daddr);
- ops->free_coherent(dev, size, caddr, daddr);
+ ops->free(dev, size, caddr, daddr, attrs);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index ac795d3..6f38b61 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -839,7 +839,7 @@ static __init int setup_additional_cpus(char *s)
early_param("additional_cpus", setup_additional_cpus);
/*
- * cpu_possible_map should be static, it cannot change as CPUs
+ * cpu_possible_mask should be static, it cannot change as CPUs
* are onlined, or offlined. The reason is per-cpu data-structures
* are allocated by some modules at init time, and dont expect to
* do this dynamically on cpu arrival/departure.
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index d9485d9..939260a 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -15,16 +15,24 @@ int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
+static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
+{
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
struct dma_map_ops swiotlb_dma_ops = {
- .alloc_coherent = ia64_swiotlb_alloc_coherent,
- .free_coherent = swiotlb_free_coherent,
+ .alloc = ia64_swiotlb_alloc_coherent,
+ .free = ia64_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index a9d310d..3290d6e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -76,7 +76,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* more information.
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t flags)
+ dma_addr_t * dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *cpuaddr;
unsigned long phys_addr;
@@ -137,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -466,8 +467,8 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
}
static struct dma_map_ops sn_dma_ops = {
- .alloc_coherent = sn_dma_alloc_coherent,
- .free_coherent = sn_dma_free_coherent,
+ .alloc = sn_dma_alloc_coherent,
+ .free = sn_dma_free_coherent,
.map_page = sn_dma_map_page,
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 336e617..f4e32de 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/irqflags.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 96fa6ed..d9f62e0 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -980,6 +980,9 @@ int __init mac_platform_init(void)
{
u8 *swim_base;
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
/*
* Serial devices
*/
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 512adb6..8a1ce32 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -334,6 +334,9 @@ static __init int q40_add_kbd_device(void)
{
struct platform_device *pdev;
+ if (!MACH_IS_Q40)
+ return -ENODEV;
+
pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
index 0094859a..538afc0 100644
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ b/arch/microblaze/include/asm/cmpxchg.h
@@ -1,6 +1,8 @@
#ifndef _ASM_MICROBLAZE_CMPXCHG_H
#define _ASM_MICROBLAZE_CMPXCHG_H
+#include <linux/irqflags.h>
+
void __bad_xchg(volatile void *ptr, int size);
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3a3e5b8..01d2282 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -123,28 +123,34 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
BUG_ON(!ops);
- memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+ memory = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index b0526d2..ff8cde1 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -24,7 +24,7 @@
.word 1b,4b,2b,4b; \
.previous;" \
: "=&r" (oldval), "=&r" (ret) \
- : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ : "r" (uaddr), "i" (-EFAULT), "r" (oparg) \
); \
})
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 510a8e1..bffb545 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -31,6 +31,8 @@ extern const struct seq_operations cpuinfo_op;
/* Do necessary setup to start up a newly executed thread. */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
+extern void ret_from_fork(void);
+
# endif /* __ASSEMBLY__ */
# ifndef CONFIG_MMU
@@ -143,8 +145,6 @@ static inline void exit_thread(void)
unsigned long get_wchan(struct task_struct *p);
-extern void ret_from_fork(void);
-
/* The size allocated for kernel stacks. This _must_ be a power of two! */
# define KERNEL_STACK_SIZE 0x2000
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 65a4af4..a2bfa2c 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -33,7 +33,8 @@ static unsigned long get_dma_direct_offset(struct device *dev)
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
@@ -57,7 +58,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
@@ -176,8 +178,8 @@ dma_direct_sync_sg_for_device(struct device *dev,
}
struct dma_map_ops dma_direct_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index ec48587..aba1f9a9 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -176,6 +176,7 @@ void __init remap_early_printk(void)
base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
printk(KERN_CONT "0x%x\n", base_addr);
+#ifdef CONFIG_MMU
/*
* Early console is on the top of skipped TLB entries
* decrease tlb_skip size ensure that hardcoded TLB entry will be
@@ -189,6 +190,7 @@ void __init remap_early_printk(void)
* cmp rX, orig_base_addr
*/
tlb_skip -= 1;
+#endif
}
void __init disable_early_printk(void)
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 71af974..16d8dfd 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -206,6 +206,7 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
+# ifdef CONFIG_MMU
static int __init debugfs_tlb(void)
{
struct dentry *d;
@@ -218,6 +219,7 @@ static int __init debugfs_tlb(void)
return -ENOMEM;
}
device_initcall(debugfs_tlb);
+# endif
#endif
static int dflt_bus_notify(struct notifier_block *nb,
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 9781a52..6be4ae3 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/exceptions.h>
#include <asm/unwind.h>
+#include <asm/switch_to.h>
struct stack_trace;
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index f037266..f085995 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -122,22 +122,22 @@ __strnlen_user:
15: swi r24, r5, 0x0018 + offset; \
16: swi r25, r5, 0x001C + offset; \
.section __ex_table,"a"; \
- .word 1b, 0f; \
- .word 2b, 0f; \
- .word 3b, 0f; \
- .word 4b, 0f; \
- .word 5b, 0f; \
- .word 6b, 0f; \
- .word 7b, 0f; \
- .word 8b, 0f; \
- .word 9b, 0f; \
- .word 10b, 0f; \
- .word 11b, 0f; \
- .word 12b, 0f; \
- .word 13b, 0f; \
- .word 14b, 0f; \
- .word 15b, 0f; \
- .word 16b, 0f; \
+ .word 1b, 33f; \
+ .word 2b, 33f; \
+ .word 3b, 33f; \
+ .word 4b, 33f; \
+ .word 5b, 33f; \
+ .word 6b, 33f; \
+ .word 7b, 33f; \
+ .word 8b, 33f; \
+ .word 9b, 33f; \
+ .word 10b, 33f; \
+ .word 11b, 33f; \
+ .word 12b, 33f; \
+ .word 13b, 33f; \
+ .word 14b, 33f; \
+ .word 15b, 33f; \
+ .word 16b, 33f; \
.text
#define COPY_80(offset) \
@@ -190,14 +190,17 @@ w2: sw r4, r5, r3
.align 4 /* Alignment is important to keep icache happy */
page: /* Create room on stack and save registers for storing values */
- addik r1, r1, -32
- swi r19, r1, 4
- swi r20, r1, 8
- swi r21, r1, 12
- swi r22, r1, 16
- swi r23, r1, 20
- swi r24, r1, 24
- swi r25, r1, 28
+ addik r1, r1, -40
+ swi r5, r1, 0
+ swi r6, r1, 4
+ swi r7, r1, 8
+ swi r19, r1, 12
+ swi r20, r1, 16
+ swi r21, r1, 20
+ swi r22, r1, 24
+ swi r23, r1, 28
+ swi r24, r1, 32
+ swi r25, r1, 36
loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */
/* Loop unrolling to get performance boost */
COPY_80(0x000);
@@ -205,21 +208,44 @@ loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */
COPY_80(0x100);
COPY_80(0x180);
/* copy loop */
- addik r6, r6, 0x200
- addik r7, r7, -0x200
- bneid r7, loop
- addik r5, r5, 0x200
+ addik r6, r6, 0x200
+ addik r7, r7, -0x200
+ bneid r7, loop
+ addik r5, r5, 0x200
+
/* Restore register content */
- lwi r19, r1, 4
- lwi r20, r1, 8
- lwi r21, r1, 12
- lwi r22, r1, 16
- lwi r23, r1, 20
- lwi r24, r1, 24
- lwi r25, r1, 28
- addik r1, r1, 32
+ lwi r5, r1, 0
+ lwi r6, r1, 4
+ lwi r7, r1, 8
+ lwi r19, r1, 12
+ lwi r20, r1, 16
+ lwi r21, r1, 20
+ lwi r22, r1, 24
+ lwi r23, r1, 28
+ lwi r24, r1, 32
+ lwi r25, r1, 36
+ addik r1, r1, 40
/* return back */
+ addik r3, r0, 0
+ rtsd r15, 8
+ nop
+
+/* Fault case - return temp count */
+33:
addik r3, r7, 0
+ /* Restore register content */
+ lwi r5, r1, 0
+ lwi r6, r1, 4
+ lwi r7, r1, 8
+ lwi r19, r1, 12
+ lwi r20, r1, 16
+ lwi r21, r1, 20
+ lwi r22, r1, 24
+ lwi r23, r1, 28
+ lwi r24, r1, 32
+ lwi r25, r1, 36
+ addik r1, r1, 40
+ /* return back */
rtsd r15, 8
nop
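For readers following the assembler: the new label 33 gives the page-copy routine patched above a real fault path that restores the saved registers and returns the remaining byte count in r3, while the success path now explicitly returns 0. A C-level sketch of that contract, with the fault simulated by an invented copy_demo()/fault_after parameter rather than a real MMU fault:

#include <stdio.h>
#include <string.h>

static unsigned long copy_demo(void *to, const void *from, unsigned long n,
			       unsigned long fault_after)
{
	/* copy until done or until the simulated fault offset is reached */
	unsigned long copied = n < fault_after ? n : fault_after;

	memcpy(to, from, copied);
	return n - copied;		/* 0 on success, bytes left on fault */
}

int main(void)
{
	char src[16] = "hello, world!!!", dst[16] = { 0 };

	printf("left=%lu\n", copy_demo(dst, src, 16, 16));	/* 0 */
	printf("left=%lu\n", copy_demo(dst, src, 16, 10));	/* 6 */
	return 0;
}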
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index b6bb92c..41dd0088 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order = get_order(size);
@@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys);
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
.dma_map_ops = {
- .alloc_coherent = octeon_dma_alloc_coherent,
- .free_coherent = octeon_dma_free_coherent,
+ .alloc = octeon_dma_alloc_coherent,
+ .free = octeon_dma_free_coherent,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
@@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void)
#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
.dma_map_ops = {
- .alloc_coherent = octeon_dma_alloc_coherent,
- .free_coherent = octeon_dma_free_coherent,
+ .alloc = octeon_dma_alloc_coherent,
+ .free = octeon_dma_free_coherent,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c3e2b85..97e7ce9 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -78,7 +78,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
}
/**
- * Detect available CPUs, populate cpu_possible_map
+ * Detect available CPUs, populate cpu_possible_mask
*/
static void octeon_smp_hotplug_setup(void)
{
@@ -268,7 +268,7 @@ static int octeon_cpu_disable(void)
spin_lock(&smp_reserve_lock);
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_irq_disable();
fixup_irqs();
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 7aa37dd..be39a12 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
- ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
- ops->free_coherent(dev, size, vaddr, dma_handle);
+ ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e616..33f63ba 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+ cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e309665..f8b2c59 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
int i;
#ifdef CONFIG_SMP
- if (!cpu_isset(n, cpu_online_map))
+ if (!cpu_online(n))
return 0;
#endif
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ca67356..3046e29 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -317,7 +317,7 @@ static int bmips_cpu_disable(void)
pr_info("SMP: CPU%d is offline\n", cpu);
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9..ba9376bf 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
for (;;) {
if (cpu_wait)
(*cpu_wait)(); /* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
#endif
}
@@ -248,7 +248,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
return 0;
}
@@ -320,13 +320,12 @@ void flush_tlb_mm(struct mm_struct *mm)
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_mm(mm);
@@ -360,13 +359,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
@@ -407,13 +405,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, vma->vm_mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu_context(cpu, vma->vm_mm) = 0;
+ }
}
local_flush_tlb_page(vma, page);
preempt_enable();
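The three flush_tlb_*() hunks above apply the same rewrite: instead of copying cpu_online_map into a local mask, clearing the current CPU and walking the copy, the loop now walks online CPUs directly and skips the current one in the body. A standalone sketch of the new loop shape; the bitmap, the for_each_online_cpu() macro and smp_processor_id() below are simplified stand-ins for the demo, not the kernel versions.

#include <stdio.h>

#define NR_CPUS 8

static unsigned long cpu_online_bits = 0x2d;	/* CPUs 0, 2, 3 and 5 online */
static int smp_processor_id(void) { return 2; }

#define for_each_online_cpu(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (cpu_online_bits & (1UL << (cpu)))

int main(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())	/* was: cpu_clear(self, copy) */
			printf("would clear cpu_context on CPU %d\n", cpu);
	}
	return 0;
}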
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c4f75bb..f5dd38f 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -291,7 +291,7 @@ static void smtc_configure_tlb(void)
* possibly leave some TCs/VPEs as "slave" processors.
*
* Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
*/
int __init smtc_build_cpu_map(int start_cpu_slot)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07..47037ec 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
if (vma)
mask = *mm_cpumask(vma->vm_mm);
else
- mask = cpu_online_map;
- cpu_clear(cpu, mask);
- for_each_cpu_mask(cpu, mask)
+ mask = *cpu_online_mask;
+ cpumask_clear_cpu(cpu, &mask);
+ for_each_cpu(cpu, &mask)
octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
preempt_enable();
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 4608491..3fab204 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp)
+ dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dma_free_noncoherent);
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
int order = get_order(size);
@@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc_coherent = mips_dma_alloc_coherent,
- .free_coherent = mips_dma_free_coherent,
+ .alloc = mips_dma_alloc_coherent,
+ .free = mips_dma_free_coherent,
.map_page = mips_dma_map_page,
.unmap_page = mips_dma_unmap_page,
.map_sg = mips_dma_map_sg,
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49..fab316d 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -165,7 +165,7 @@ void __init nlm_smp_setup(void)
cpu_set(boot_cpu, phys_cpu_present_map);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
- cpu_set(0, cpu_possible_map);
+ set_cpu_possible(0, true);
num_cpus = 1;
for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@ void __init nlm_smp_setup(void)
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
- cpu_set(num_cpus, cpu_possible_map);
+ set_cpu_possible(num_cpus, true);
++num_cpus;
}
}
pr_info("Phys CPU present map: %lx, possible map %lx\n",
(unsigned long)phys_cpu_present_map.bits[0],
- (unsigned long)cpu_possible_map.bits[0]);
+ (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
pr_info("Detected %i Slave CPU(s)\n", num_cpus);
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 2608752..b71fae2 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -146,7 +146,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
}
/*
- * Detect available CPUs, populate cpu_possible_map before smp_init
+ * Detect available CPUs, populate cpu_possible_mask before smp_init
*
* We don't want to start the secondary CPU yet nor do we have a nice probing
* feature in PMON so we just assume presence of the secondary core.
@@ -155,10 +155,10 @@ static void __init yos_smp_setup(void)
{
int i;
- cpus_clear(cpu_possible_map);
+ init_cpu_possible(cpu_none_mask);
for (i = 0; i < 2; i++) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
@@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus)
/*
* Be paranoid. Enable the IPI only if we're really about to go SMP.
*/
- if (cpus_weight(cpu_possible_map))
+ if (num_possible_cpus())
set_c0_status(STATUSF_IP5);
}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index c6851df..735b43b 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
/* Only let it join in if it's marked enabled */
if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
(tot_cpus_found != NR_CPUS)) {
- cpu_set(cpuid, cpu_possible_map);
+ set_cpu_possible(cpuid, true);
alloc_cpupda(cpuid, tot_cpus_found);
cpus_found++;
tot_cpus_found++;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index d667875..de88e22 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -138,7 +138,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
/*
* Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
@@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void)
{
int i, num;
- cpus_clear(cpu_possible_map);
- cpu_set(0, cpu_possible_map);
+ init_cpu_possible(cpumask_of(0));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 38e7f6b..285cfef 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
/*
* Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
@@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void)
{
int i, num;
- cpus_clear(cpu_possible_map);
- cpu_set(0, cpu_possible_map);
+ init_cpu_possible(cpumask_of(0));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3ae5607..6c6defc 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -6,6 +6,7 @@
#define _ASM_PARISC_ATOMIC_H_
#include <linux/types.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -48,112 +49,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
-/* This should get optimized out since it's never called.
-** Or get a link error if xchg is used "wrong".
-*/
-extern void __xchg_called_with_bad_pointer(void);
-
-
-/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __xchg8(char, char *);
-extern unsigned long __xchg32(int, int *);
-#ifdef CONFIG_64BIT
-extern unsigned long __xchg64(unsigned long, unsigned long *);
-#endif
-
-/* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long
-__xchg(unsigned long x, __volatile__ void * ptr, int size)
-{
- switch(size) {
-#ifdef CONFIG_64BIT
- case 8: return __xchg64(x,(unsigned long *) ptr);
-#endif
- case 4: return __xchg32((int) x, (int *) ptr);
- case 1: return __xchg8((char) x, (char *) ptr);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-
-/*
-** REVISIT - Abandoned use of LDCW in xchg() for now:
-** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-** o and while we are at it, could CONFIG_64BIT code use LDCD too?
-**
-** if (__builtin_constant_p(x) && (x == NULL))
-** if (((unsigned long)p & 0xf) == 0)
-** return __ldcw(p);
-*/
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
- switch(size) {
-#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
- case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old,
- unsigned long new_, int size)
-{
- switch (size) {
-#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
- case 4: return __cmpxchg_u32(ptr, old, new_);
- default:
- return __cmpxchg_local_generic(ptr, old, new_, size);
- }
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
/*
* Note that we need not lock read accesses - aligned word writes/reads
* are atomic, so a reader never sees inconsistent values.
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..dbd1335
--- /dev/null
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -0,0 +1,116 @@
+/*
+ * forked from parisc asm/atomic.h which was:
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_CMPXCHG_H_
+#define _ASM_PARISC_CMPXCHG_H_
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static inline unsigned long
+__xchg(unsigned long x, __volatile__ void *ptr, int size)
+{
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8: return __xchg64(x, (unsigned long *) ptr);
+#endif
+ case 4: return __xchg32((int) x, (int *) ptr);
+ case 1: return __xchg8((char) x, (char *) ptr);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+** if (__builtin_constant_p(x) && (x == NULL))
+** if (((unsigned long)p & 0xf) == 0)
+** return __ldcw(p);
+*/
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+ unsigned int new_);
+extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
+ unsigned long old, unsigned long new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+ case 4: return __cmpxchg_u32((unsigned int *)ptr,
+ (unsigned int)old, (unsigned int)new_);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new_, int size)
+{
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+ case 4: return __cmpxchg_u32(ptr, old, new_);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new_, size);
+ }
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* _ASM_PARISC_CMPXCHG_H_ */
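The new header only moves existing code, but the caller-side pattern it serves is worth spelling out: cmpxchg() is used in read/compute/try-to-swap retry loops, with the macro dispatching on sizeof(*(ptr)) to __cmpxchg_u32/__cmpxchg_u64. A portable sketch of that retry loop using C11 atomics in place of the parisc primitives; cmpxchg_u32() and atomic_add_via_cmpxchg() are names invented for the demo.

#include <stdatomic.h>
#include <stdio.h>

static unsigned int cmpxchg_u32(_Atomic unsigned int *p,
				unsigned int old, unsigned int new)
{
	/* returns the value actually found, like the kernel's cmpxchg() */
	atomic_compare_exchange_strong(p, &old, new);
	return old;
}

static void atomic_add_via_cmpxchg(_Atomic unsigned int *p, unsigned int inc)
{
	unsigned int cur, seen;

	do {
		cur = atomic_load(p);
		seen = cmpxchg_u32(p, cur, cur + inc);
	} while (seen != cur);	/* lost a race: somebody else changed *p */
}

int main(void)
{
	_Atomic unsigned int v = 40;

	atomic_add_via_cmpxchg(&v, 2);
	printf("%u\n", atomic_load(&v));	/* 42 */
	return 0;
}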
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 2388bdb..49df148 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,6 +8,29 @@
#include <asm/atomic.h>
#include <asm/errno.h>
+/* The following has to match the LWS code in syscall.S. We have
+ sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+ extern u32 lws_lock_start[];
+ long index = ((long)uaddr & 0xf0) >> 2;
+ arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+ local_irq_save(*flags);
+ arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+ extern u32 lws_lock_start[];
+ long index = ((long)uaddr & 0xf0) >> 2;
+ arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+ arch_spin_unlock(s);
+ local_irq_restore(*flags);
+}
+
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_disable();
- _atomic_spin_lock_irqsave(uaddr, flags);
+ _futex_spin_lock_irqsave(uaddr, &flags);
switch (op) {
case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
- _atomic_spin_unlock_irqrestore(uaddr, flags);
+ _futex_spin_unlock_irqrestore(uaddr, &flags);
pagefault_enable();
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
* address. This should scale to a couple of CPUs.
*/
- _atomic_spin_lock_irqsave(uaddr, flags);
+ _futex_spin_lock_irqsave(uaddr, &flags);
ret = get_user(val, uaddr);
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
*uval = val;
- _atomic_spin_unlock_irqrestore(uaddr, flags);
+ _futex_spin_unlock_irqrestore(uaddr, &flags);
return ret;
}
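The helpers added above pick their spinlock by hashing the futex address, as the comment notes: bits 4-7 of uaddr select one of the sixteen locks, and the >> 2 turns that into a word index into lws_lock_start[], whose locks sit four 32-bit words apart. A standalone illustration of just that arithmetic; the sample addresses and the local lws_lock_start[] array are invented, and only the masking/shift mirrors the code above.

#include <stdio.h>
#include <stdint.h>

static unsigned int lws_lock_start[16 * 4];	/* 16 locks, 4 words apart */

static unsigned int *futex_lock_slot(uintptr_t uaddr)
{
	long index = (uaddr & 0xf0) >> 2;	/* 0, 4, 8, ..., 60 */
	return &lws_lock_start[index];
}

int main(void)
{
	uintptr_t samples[] = { 0x10004000, 0x10004010, 0x100040f4, 0x10004100 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("uaddr %#lx -> lock word %ld\n",
		       (unsigned long)samples[i],
		       (long)(futex_lock_slot(samples[i]) - lws_lock_start));
	return 0;	/* 0x10004000 and 0x10004100 share lock word 0 */
}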
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 5006e8e..0bb1d63 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -290,8 +290,7 @@ smp_cpu_init(int cpunum)
mb();
/* Well, support 2.4 linux scheme as well. */
- if (cpu_isset(cpunum, cpu_online_map))
- {
+ if (cpu_online(cpunum)) {
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc.dtsi b/arch/powerpc/boot/dts/p1020mbg-pc.dtsi
new file mode 100644
index 0000000..a24699c
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc.dtsi
@@ -0,0 +1,151 @@
+/*
+ * P1020 MBG-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x4000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* 128KB for DTB Image */
+ reg = <0x0 0x00020000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@20000 {
+ /* 3.875 MB for Linux Kernel Image */
+ reg = <0x00020000 0x003e0000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 58MB for Root File System */
+ reg = <0x00400000 0x03a00000>;
+ label = "NOR Root File System";
+ };
+
+ partition@3e00000 {
+ /* This location must not be altered */
+ /* 1M for Vitesse 7385 Switch firmware */
+ reg = <0x3e00000 0x00100000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@3f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x03f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+};
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts b/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts
new file mode 100644
index 0000000..ab8f076
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 MBG-PC Device Tree Source (32-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020MBG-PC";
+ compatible = "fsl,P1020MBG-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0x0 0xffe05000 0x0 0x1000>;
+
+ /* NOR and L2 switch */
+ ranges = <0x0 0x0 0x0 0xec000000 0x04000000
+ 0x1 0x0 0x0 0xffa00000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ reg = <0x0 0xffe09000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0x0 0xffe0a000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020mbg-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts b/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts
new file mode 100644
index 0000000..9e9f401
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 MBG-PC Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020MBG-PC";
+ compatible = "fsl,P1020MBG-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0x0 0x1000>;
+
+ /* NOR and L2 switch */
+ ranges = <0x0 0x0 0xf 0xec000000 0x04000000
+ 0x1 0x0 0xf 0xffa00000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020mbg-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020utm-pc.dtsi b/arch/powerpc/boot/dts/p1020utm-pc.dtsi
new file mode 100644
index 0000000..7ea85ea
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc.dtsi
@@ -0,0 +1,140 @@
+/*
+ * P1020 UTM-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x2000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* 256KB for DTB Image */
+ reg = <0x0 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@40000 {
+ /* 3.75 MB for Linux Kernel Image */
+ reg = <0x00040000 0x003c0000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 27MB for Root file System */
+ reg = <0x00400000 0x01b00000>;
+ label = "NOR Root File System";
+ };
+
+ partition@1f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x01f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <1 1 0 0>;
+ reg = <0x2>;
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+};
diff --git a/arch/powerpc/boot/dts/p1020utm-pc_32b.dts b/arch/powerpc/boot/dts/p1020utm-pc_32b.dts
new file mode 100644
index 0000000..4bfdd89
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc_32b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 UTM-PC Device Tree Source (32-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020UTM-PC";
+ compatible = "fsl,P1020UTM-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0x0 0xffe05000 0x0 0x1000>;
+
+ /* NOR */
+ ranges = <0x0 0x0 0x0 0xec000000 0x02000000
+ 0x1 0x0 0x0 0xffa00000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ reg = <0x0 0xffe09000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0x0 0xffe0a000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020utm-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020utm-pc_36b.dts b/arch/powerpc/boot/dts/p1020utm-pc_36b.dts
new file mode 100644
index 0000000..abec535
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 UTM-PC Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020UTM-PC";
+ compatible = "fsl,P1020UTM-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0x0 0x1000>;
+
+ /* NOR */
+ ranges = <0x0 0x0 0xf 0xec000000 0x02000000
+ 0x1 0x0 0xf 0xffa00000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020utm-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index 4f957db..2852139 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -135,7 +135,6 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -151,7 +150,6 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -167,7 +165,6 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index f469145..22a215e 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -173,7 +173,6 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -189,7 +188,6 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -205,7 +203,6 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -221,7 +218,6 @@
reg = <0xf 0xfe203000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p3060qds.dts b/arch/powerpc/boot/dts/p3060qds.dts
index 529042e..9ae875c 100644
--- a/arch/powerpc/boot/dts/p3060qds.dts
+++ b/arch/powerpc/boot/dts/p3060qds.dts
@@ -212,7 +212,6 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -228,7 +227,6 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 6d60e54..3e20460 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -141,7 +141,6 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -157,7 +156,6 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -173,7 +171,6 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index 1c25068..27c07ed 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -173,7 +173,6 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -189,7 +188,6 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -205,7 +203,6 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -221,7 +218,6 @@
reg = <0xf 0xfe203000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
- fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index f8aef20..91db656 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -116,6 +116,7 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 82b13bf..6798343 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -71,6 +71,8 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
# CONFIG_HID_SUPPORT is not set
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index cc87a84..d6b6df5 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -117,6 +117,7 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
CONFIG_NVRAM=y
CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
CONFIG_SPI=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 48d6682..5b0e292 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -119,6 +119,7 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
CONFIG_NVRAM=y
CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
CONFIG_SPI=y
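
The defconfig hunks above enable CONFIG_I2C_CHARDEV next to the existing I2C bus drivers, which exposes every registered adapter as a /dev/i2c-N character device. A hedged userspace sketch of what that makes possible follows; the bus number, slave address and register offset are invented for illustration.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

/* Read one register from an I2C device through the i2c-dev interface. */
static int example_read_i2c_byte(void)
{
	unsigned char reg = 0x00, val;
	int fd = open("/dev/i2c-0", O_RDWR);		/* adapter number is a guess */

	if (fd < 0)
		return -1;

	if (ioctl(fd, I2C_SLAVE, 0x48) < 0 ||		/* bind the fd to a slave address */
	    write(fd, &reg, 1) != 1 ||			/* select the register */
	    read(fd, &val, 1) != 1) {			/* read it back */
		close(fd);
		return -1;
	}

	close(fd);
	return val;
}
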
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index dd70fac..62678e3 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -22,9 +22,11 @@
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -130,23 +132,29 @@ static inline int dma_supported(struct device *dev, u64 mask)
extern int dma_set_mask(struct device *dev, u64 dma_mask);
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!dma_ops);
- cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+ cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
@@ -154,7 +162,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
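
This header change is part of the tree-wide move of the dma_map_ops allocation hooks from alloc_coherent/free_coherent to alloc/free, both of which now take a struct dma_attrs pointer; dma_alloc_coherent() and dma_free_coherent() become wrapper macros that pass NULL attrs, so existing call sites keep compiling unchanged. A minimal sketch of what a converted backend looks like, assuming a backend that simply ignores attrs (the example_* names are invented, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/io.h>

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   struct dma_attrs *attrs)
{
	/* Simple backends can ignore attrs; callers that go through the
	 * dma_alloc_coherent() wrapper macro always pass NULL here. */
	void *cpu_addr = (void *)__get_free_pages(flag, get_order(size));

	if (cpu_addr)
		*dma_handle = virt_to_phys(cpu_addr);
	return cpu_addr;
}

static void example_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

static struct dma_map_ops example_dma_ops = {
	.alloc	= example_alloc,	/* was .alloc_coherent */
	.free	= example_free,		/* was .free_coherent */
};

A driver keeps calling dma_alloc_coherent(dev, size, &handle, GFP_KERNEL); after this patch that expands to dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, NULL).
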
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index f3b0c2c..976835d 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -134,10 +134,15 @@
* whether they will be clobbered.
*
* Note that r11 can be used as an output parameter.
+ *
+ * The "memory" clobber is only necessary for hcalls where the Hypervisor
+ * will read or write guest memory. However, we add it to all hcalls because
+ * the impact is minimal, and we want to ensure that it's present for the
+ * hcalls that need it.
*/
/* List of common clobbered registers. Do not use this macro. */
-#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc"
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
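
The comment added above explains the new "memory" clobber: it tells the compiler that the inline asm may read or write memory, so stores that fill a buffer handed to the hypervisor cannot be kept in registers or reordered past the trap. A hedged, self-contained illustration of the same idea; the hcall number and argument are invented, only the clobber list mirrors EV_HCALL_CLOBBERS:

static inline int example_hcall_send(unsigned long buf_phys)
{
	register unsigned long r11 __asm__("r11") = 42;	/* invented hcall number */
	register unsigned long r3 __asm__("r3") = buf_phys;

	/*
	 * Without "memory" the compiler could delay the stores that fill the
	 * buffer at buf_phys until after the trap, since nothing else tells
	 * it that the hypervisor reads that memory.
	 */
	__asm__ __volatile__("sc 1"
			     : "+r" (r11), "+r" (r3)
			     :
			     : "r0", "r12", "xer", "ctr", "lr", "cc", "memory");

	return (int)r3;
}
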
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index ce04530..aa4c488 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -16,15 +16,6 @@
#define __ASM_POWERPC_FSL_GUTS_H__
#ifdef __KERNEL__
-/*
- * These #ifdefs are safe because it's not possible to build a kernel that
- * runs on e500 and e600 cores.
- */
-
-#if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx)
-#error Only 85xx and 86xx SOCs are supported
-#endif
-
/**
* Global Utility Registers.
*
@@ -36,11 +27,7 @@
* different names. In these cases, one name is chosen to avoid extraneous
* #ifdefs.
*/
-#ifdef CONFIG_PPC_85xx
-struct ccsr_guts_85xx {
-#else
-struct ccsr_guts_86xx {
-#endif
+struct ccsr_guts {
__be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
__be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
__be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
@@ -77,11 +64,8 @@ struct ccsr_guts_86xx {
u8 res0a8[0xb0 - 0xa8];
__be32 rstcr; /* 0x.00b0 - Reset Control Register */
u8 res0b4[0xc0 - 0xb4];
-#ifdef CONFIG_PPC_85xx
- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register */
-#else
- __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */
-#endif
+ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ Called 'elbcvselcr' on 86xx SOCs */
u8 res0c4[0x224 - 0xc4];
__be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
__be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
@@ -136,7 +120,7 @@ struct ccsr_guts_86xx {
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
*/
-static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
unsigned int co, unsigned int ch, unsigned int device)
{
unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -172,7 +156,7 @@ static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* value: the new value for the bit (0 or 1)
*/
-static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
unsigned int co, unsigned int ch, unsigned int value)
{
if ((ch == 0) || (ch == 3)) {
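
With the 85xx/86xx variants folded into a single struct ccsr_guts, platform code can map the global-utilities node once and use the same helpers on either family. A hedged usage sketch modelled on the callers converted later in this series; error handling is trimmed and the controller/channel/device arguments are placeholders (any CCSR_GUTS_DMACR_DEV_xx constant from the header):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <asm/fsl_guts.h>

static void example_route_dma(void)
{
	struct device_node *np;
	struct ccsr_guts __iomem *guts;

	np = of_find_node_by_name(NULL, "global-utilities");
	if (!np)
		return;

	guts = of_iomap(np, 0);
	of_node_put(np);
	if (!guts)
		return;

	/* Route DMA controller 0, channel 1 to the SSI, as an example. */
	guts_set_dmacr(guts, 0, 1, CCSR_GUTS_DMACR_DEV_SSI);

	iounmap(guts);
}
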
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 3f6464b..bcfdcd2 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -17,7 +17,8 @@
* to the dma address (mapping) of the first page.
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
@@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
}
static void dma_iommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
@@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
}
struct dma_map_ops dma_iommu_ops = {
- .alloc_coherent = dma_iommu_alloc_coherent,
- .free_coherent = dma_iommu_free_coherent,
+ .alloc = dma_iommu_alloc_coherent,
+ .free = dma_iommu_free_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 1ebc918..4ab88da 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* for everything else.
*/
struct dma_map_ops swiotlb_dma_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 7d0233c..b1ec983 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -26,7 +26,8 @@
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -54,7 +55,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
}
void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
@@ -150,8 +152,8 @@ static inline void dma_direct_sync_single(struct device *dev,
#endif
struct dma_map_ops dma_direct_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index cfe7a38..18bdf74 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -40,6 +40,8 @@
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
+#include <asm/debug.h>
+#include <asm/setup.h>
static struct fw_dump fw_dump;
static struct fadump_mem_struct fdm;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 79bb282..b01d14e 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -65,7 +65,8 @@ static struct of_device_id __initdata ibmebus_matches[] = {
static void *ibmebus_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
- gfp_t flag)
+ gfp_t flag,
+ struct dma_attrs *attrs)
{
void *mem;
@@ -77,7 +78,8 @@ static void *ibmebus_alloc_coherent(struct device *dev,
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
kfree(vaddr);
}
@@ -136,8 +138,8 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
}
static struct dma_map_ops ibmebus_dma_ops = {
- .alloc_coherent = ibmebus_alloc_coherent,
- .free_coherent = ibmebus_free_coherent,
+ .alloc = ibmebus_alloc_coherent,
+ .free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported,
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 76a6e40..782bd0a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -24,6 +24,7 @@
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
+#include <asm/debug.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b2f7c84..a3a9990 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -482,7 +482,8 @@ static void vio_cmo_balance(struct work_struct *work)
}
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
@@ -492,7 +493,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
return NULL;
}
- ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
+ ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
@@ -502,11 +503,12 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
+ dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
@@ -607,8 +609,8 @@ static u64 vio_dma_get_required_mask(struct device *dev)
}
struct dma_map_ops vio_dma_mapping_ops = {
- .alloc_coherent = vio_dma_iommu_alloc_coherent,
- .free_coherent = vio_dma_iommu_free_coherent,
+ .alloc = vio_dma_iommu_alloc_coherent,
+ .free = vio_dma_iommu_free_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index f1950d1..135663a 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -21,6 +21,7 @@
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
+#include <asm/switch_to.h>
#define OP_19_XOP_RFID 18
#define OP_19_XOP_RFI 50
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index e70ef2d..a59a25a 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -24,6 +24,7 @@
#include <asm/kvm_fpu.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
+#include <asm/switch_to.h>
#include <linux/vmalloc.h>
/* #define DEBUG */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7340e10..642d885 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -33,6 +33,7 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
+#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index bfb11e0..e2d401a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -93,7 +93,7 @@ struct mpc52xx_pci {
};
/* MPC5200 device tree match tables */
-const struct of_device_id mpc52xx_pci_ids[] __initdata = {
+const struct of_device_id mpc52xx_pci_ids[] __initconst = {
{ .type = "pci", .compatible = "fsl,mpc5200-pci", },
{ .type = "pci", .compatible = "mpc5200-pci", },
{}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 3754ddc..9a6f044 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -270,7 +270,7 @@ static void __init mpc85xx_mds_qe_init(void)
if (machine_is(p1021_mds)) {
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
np = of_find_node_by_name(NULL, "global-utilities");
if (np) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 9848f9e..313fce4 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -127,7 +127,7 @@ static void __init mpc85xx_rdb_setup_arch(void)
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(p1025_rdb)) {
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
np = of_find_node_by_name(NULL, "global-utilities");
if (np) {
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 0fe88e3..e74b7cd 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -150,7 +150,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
struct device_node *guts_node;
struct device_node *indirect_node = NULL;
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
u8 __iomem *lbc_lcs0_ba = NULL;
u8 __iomem *lbc_lcs1_ba = NULL;
u8 b;
@@ -269,7 +269,7 @@ exit:
void p1022ds_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
unsigned long freq;
u64 temp;
u32 pxclk;
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index bbc6152..62cd3c5 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -225,7 +225,7 @@ void mpc8610hpcd_set_monitor_port(enum fsl_diu_monitor_port port)
void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
- struct ccsr_guts_86xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
unsigned long freq;
u64 temp;
u32 pxclk;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ae9fc7b..b9f509a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -564,7 +564,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
/* A coherent allocation implies strong ordering */
static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak)
return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
@@ -572,18 +573,19 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
device_to_mask(dev), flag,
dev_to_node(dev));
else
- return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
- flag);
+ return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+ attrs);
}
static void dma_fixed_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak)
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
dma_handle);
else
- dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
+ dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -642,8 +644,8 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
struct dma_map_ops dma_iommu_fixed_ops = {
- .alloc_coherent = dma_fixed_alloc_coherent,
- .free_coherent = dma_fixed_free_coherent,
+ .alloc = dma_fixed_alloc_coherent,
+ .free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg,
.dma_supported = dma_fixed_dma_supported,
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 7f9b674..6e3409d 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -61,7 +61,7 @@ static void qpace_progress(char *s, unsigned short hex)
printk("*** %04x : %s\n", hex, s ? s : "");
}
-static const struct of_device_id qpace_bus_ids[] __initdata = {
+static const struct of_device_id qpace_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .type = "spider", },
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fa3e294..4ab0876 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -140,7 +140,7 @@ static int __devinit cell_setup_phb(struct pci_controller *phb)
return 0;
}
-static const struct of_device_id cell_bus_ids[] __initdata = {
+static const struct of_device_id cell_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .type = "spider", },
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 880eb9c..5606fe3 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -515,7 +515,8 @@ core_initcall(ps3_system_bus_init);
* to the dma address (mapping) of the first page.
*/
static void * ps3_alloc_coherent(struct device *_dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -552,7 +553,7 @@ clean_none:
}
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -701,8 +702,8 @@ static u64 ps3_dma_get_required_mask(struct device *_dev)
}
static struct dma_map_ops ps3_sb_dma_ops = {
- .alloc_coherent = ps3_alloc_coherent,
- .free_coherent = ps3_free_coherent,
+ .alloc = ps3_alloc_coherent,
+ .free = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported,
@@ -712,8 +713,8 @@ static struct dma_map_ops ps3_sb_dma_ops = {
};
static struct dma_map_ops ps3_ioc0_dma_ops = {
- .alloc_coherent = ps3_alloc_coherent,
- .free_coherent = ps3_free_coherent,
+ .alloc = ps3_alloc_coherent,
+ .free = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported,
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 4a47525..4cb375c 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -59,8 +59,7 @@ static int eeh_event_handler(void * dummy)
struct eeh_event *event;
struct eeh_dev *edev;
- daemonize("eehd");
- set_current_state(TASK_INTERRUPTIBLE);
+ set_task_comm(current, "eehd");
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
@@ -83,6 +82,7 @@ static int eeh_event_handler(void * dummy)
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
eeh_pci_name(edev->pdev));
+ set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
edev = handle_eeh_events(event);
eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index ceb09cb..818e763 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -266,7 +266,19 @@ EXPORT_SYMBOL(qe_clock_source);
static void qe_snums_init(void)
{
int i;
- static const u8 snum_init[] = {
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
@@ -274,9 +286,15 @@ static void qe_snums_init(void)
0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
};
+ static const u8 *snum_init;
qe_num_of_snum = qe_get_num_of_snums();
+ if (qe_num_of_snum == 76)
+ snum_init = snum_init_76;
+ else
+ snum_init = snum_init_46;
+
for (i = 0; i < qe_num_of_snum; i++) {
snums[i].num = snum_init[i];
snums[i].state = QE_SNUM_STATE_FREE;
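
The hunk above only changes which table seeds the serial-number pool (the 76-entry table when qe_get_num_of_snums() reports 76, the original 46-entry table otherwise); consumers are untouched. A hedged sketch of the consumer side, assuming the long-standing qe_get_snum()/qe_put_snum() allocator (the caller itself is invented):

#include <asm/qe.h>

static int example_claim_snum(void)
{
	int snum = qe_get_snum();	/* hands out a free entry from the table */

	if (snum < 0)
		return snum;		/* pool exhausted */

	/* ... program the UCC with 'snum' ... */

	qe_put_snum((u8)snum);		/* release it when done */
	return 0;
}
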
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 1a73c3e..8bd965e 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -52,25 +52,31 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == 0;
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
- if (!ops->alloc_coherent)
+ if (!ops->alloc)
return NULL;
- memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+ memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -78,14 +84,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return;
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
- if (ops->free_coherent)
- ops->free_coherent(dev, size, vaddr, dma_handle);
+ if (ops->free)
+ ops->free(dev, size, vaddr, dma_handle, attrs);
}
/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag);
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index 3c55b87..5b0bfcd 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -63,8 +63,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
#endif
struct dma_map_ops nommu_dma_ops = {
- .alloc_coherent = dma_generic_alloc_coherent,
- .free_coherent = dma_generic_free_coherent,
+ .alloc = dma_generic_alloc_coherent,
+ .free = dma_generic_free_coherent,
.map_page = nommu_map_page,
.map_sg = nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index f251b5f..b81d9db 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -33,7 +33,8 @@ static int __init dma_init(void)
fs_initcall(dma_init);
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret, *ret_nocache;
int order = get_order(size);
@@ -64,7 +65,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
}
void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
int order = get_order(size);
unsigned long pfn = dma_handle >> PAGE_SHIFT;
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 8c0e4f7..48a7c65 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -26,24 +26,30 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
- cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 6fa2f79..76e4a52 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,8 +12,6 @@
* the SpitFire page tables.
*/
-#include <asm-generic/pgtable-nopud.h>
-
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
@@ -22,6 +20,8 @@
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm-generic/pgtable-nopud.h>
+
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
* The page copy blockops can use 0x6000000 to 0x8000000.
* The TSB is mapped in the 0x8000000 to 0xa000000 range.
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 4643d68..070ed14 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -280,7 +280,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
unsigned long flags, order, first_page;
struct iommu *iommu;
@@ -330,7 +331,8 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
}
static void dma_4u_free_coherent(struct device *dev, size_t size,
- void *cpu, dma_addr_t dvma)
+ void *cpu, dma_addr_t dvma,
+ struct dma_attrs *attrs)
{
struct iommu *iommu;
unsigned long flags, order, npages;
@@ -825,8 +827,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
}
static struct dma_map_ops sun4u_dma_ops = {
- .alloc_coherent = dma_4u_alloc_coherent,
- .free_coherent = dma_4u_free_coherent,
+ .alloc = dma_4u_alloc_coherent,
+ .free = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
.unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index d0479e2..21bd739 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -261,7 +261,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
* CPU may access them without any explicit flushing.
*/
static void *sbus_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct platform_device *op = to_platform_device(dev);
unsigned long len_total = PAGE_ALIGN(len);
@@ -315,7 +316,7 @@ err_nopages:
}
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba)
+ dma_addr_t ba, struct dma_attrs *attrs)
{
struct resource *res;
struct page *pgv;
@@ -407,8 +408,8 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
struct dma_map_ops sbus_dma_ops = {
- .alloc_coherent = sbus_alloc_coherent,
- .free_coherent = sbus_free_coherent,
+ .alloc = sbus_alloc_coherent,
+ .free = sbus_free_coherent,
.map_page = sbus_map_page,
.unmap_page = sbus_unmap_page,
.map_sg = sbus_map_sg,
@@ -436,7 +437,8 @@ arch_initcall(sparc_register_ioport);
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
static void *pci32_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *pba, gfp_t gfp)
+ dma_addr_t *pba, gfp_t gfp,
+ struct dma_attrs *attrs)
{
unsigned long len_total = PAGE_ALIGN(len);
void *va;
@@ -489,7 +491,7 @@ err_nopages:
* past this call are illegal.
*/
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba)
+ dma_addr_t ba, struct dma_attrs *attrs)
{
struct resource *res;
@@ -645,8 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
}
struct dma_map_ops pci32_dma_ops = {
- .alloc_coherent = pci32_alloc_coherent,
- .free_coherent = pci32_free_coherent,
+ .alloc = pci32_alloc_coherent,
+ .free = pci32_free_coherent,
.map_page = pci32_map_page,
.unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index a19c8a0..35e4367 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -104,11 +104,11 @@ static int irq_choose_cpu(const struct cpumask *affinity)
{
cpumask_t mask;
- cpus_and(mask, cpu_online_map, *affinity);
- if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+ cpumask_and(&mask, cpu_online_mask, affinity);
+ if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
return boot_cpu_id;
else
- return first_cpu(mask);
+ return cpumask_first(&mask);
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index af5755d..7661e84 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -128,7 +128,8 @@ static inline long iommu_batch_end(void)
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
unsigned long flags, order, first_page, npages, n;
struct iommu *iommu;
@@ -198,7 +199,7 @@ range_alloc_fail:
}
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
- dma_addr_t dvma)
+ dma_addr_t dvma, struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -527,8 +528,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
static struct dma_map_ops sun4v_dma_ops = {
- .alloc_coherent = dma_4v_alloc_coherent,
- .free_coherent = dma_4v_free_coherent,
+ .alloc = dma_4v_alloc_coherent,
+ .free = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 5f85d8b..92a94f4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(hash_for_home_map);
/*
* cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf. It is set to cpu_possible_map OR'ed with
+ * flush on our behalf. It is set to cpu_possible_mask OR'ed with
* hash_for_home_map, and it is what should be passed to
* hv_flush_remote() to flush all caches. Note that if there are
* dedicated hypervisor driver tiles that have authorized use of their
@@ -1186,7 +1186,7 @@ static void __init setup_cpu_maps(void)
sizeof(cpu_lotar_map));
if (rc < 0) {
pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
- cpu_lotar_map = cpu_possible_map;
+ cpu_lotar_map = *cpu_possible_mask;
}
#if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1196,9 @@ static void __init setup_cpu_maps(void)
sizeof(hash_for_home_map));
if (rc < 0)
early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
- cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
+ cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
#else
- cpu_cacheable_map = cpu_possible_map;
+ cpu_cacheable_map = *cpu_possible_mask;
#endif
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2e9852c..0a9e57e 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -41,7 +41,7 @@ static int __init start_kernel_proc(void *unused)
cpu_tasks[0].pid = pid;
cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
- cpu_online_map = cpumask_of_cpu(0);
+ init_cpu_online(get_cpu_mask(0));
#endif
start_kernel();
return 0;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 155206a..6f588e1 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -76,7 +76,7 @@ static int idle_proc(void *cpup)
cpu_relax();
notify_cpu_starting(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
default_idle();
return 0;
}
@@ -110,8 +110,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
- cpu_clear(me, cpu_online_map);
- cpu_set(me, cpu_online_map);
+ set_cpu_online(me, true);
cpu_set(me, cpu_callin_map);
err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
@@ -138,13 +137,13 @@ void smp_prepare_cpus(unsigned int maxcpus)
void smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), true);
}
int __cpu_up(unsigned int cpu)
{
cpu_set(cpu, smp_commenced_mask);
- while (!cpu_isset(cpu, cpu_online_map))
+ while (!cpu_online(cpu))
mb();
return 0;
}
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 9258e59..366460a 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -82,20 +82,26 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+ return dma_ops->alloc(dev, size, dma_handle, flag, attrs);
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
index bfa9fbb..16c08b2 100644
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -17,9 +17,23 @@
#include <asm/dma.h>
+static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
+{
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
struct dma_map_ops swiotlb_dma_map_ops = {
- .alloc_coherent = swiotlb_alloc_coherent,
- .free_coherent = swiotlb_free_coherent,
+ .alloc = unicore_swiotlb_alloc_coherent,
+ .free = unicore_swiotlb_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 968dbe2..41a7237 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -129,6 +129,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# prevent gcc from generating any FP code by mistake
KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ed3065f..4b4331d 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -59,7 +59,8 @@ extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag);
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
@@ -111,9 +112,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
return gfp;
}
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
@@ -129,18 +132,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (!is_device_dma_capable(dev))
return NULL;
- if (!ops->alloc_coherent)
+ if (!ops->alloc)
return NULL;
- memory = ops->alloc_coherent(dev, size, dma_handle,
- dma_alloc_coherent_gfp_flags(dev, gfp));
+ memory = ops->alloc(dev, size, dma_handle,
+ dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -150,8 +156,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return;
debug_dma_free_coherent(dev, size, vaddr, bus);
- if (ops->free_coherent)
- ops->free_coherent(dev, size, vaddr, bus);
+ if (ops->free)
+ ops->free(dev, size, vaddr, bus, attrs);
}
#endif
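
On the caller side the conversion is invisible until a driver actually wants an attribute: dma_alloc_coherent() expands to dma_alloc_attrs(..., NULL), while a caller with special needs builds a dma_attrs on the stack. A hedged sketch, not from this patch; DMA_ATTR_WEAK_ORDERING is just one example attribute and the function names are invented:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/gfp.h>

static void *example_alloc_weakly_ordered(struct device *dev, size_t size,
					  dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}

static void *example_alloc_plain(struct device *dev, size_t size,
				 dma_addr_t *handle)
{
	/* Unchanged call sites: NULL attrs via the compatibility macro. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}
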
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7284c9a..4fa7dcc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -974,16 +974,6 @@ extern bool cpu_has_amd_erratum(const int *);
#define cpu_has_amd_erratum(x) (false)
#endif /* CONFIG_CPU_SUP_AMD */
-#ifdef CONFIG_X86_32
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-#endif
-
-void disable_hlt(void);
-void enable_hlt(void);
-
void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b1e7c7f..e6631120 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -477,7 +477,7 @@ error:
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
- gfp_t flag)
+ gfp_t flag, struct dma_attrs *attrs)
{
dma_addr_t paddr;
unsigned long align_mask;
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
}
__free_pages(page, get_order(size));
} else
- return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+ return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+ attrs);
return NULL;
}
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
{
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
.unmap_page = gart_unmap_page,
- .alloc_coherent = gart_alloc_coherent,
- .free_coherent = gart_free_coherent,
+ .alloc = gart_alloc_coherent,
+ .free = gart_free_coherent,
.mapping_error = gart_mapping_error,
};
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 40883ff..bb8e034 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1313,6 +1313,11 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n");
}
+static struct attribute_group x86_pmu_format_group = {
+ .name = "format",
+ .attrs = NULL,
+};
+
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@@ -1387,6 +1392,7 @@ static int __init init_hw_perf_events(void)
}
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+ x86_pmu_format_group.attrs = x86_pmu.format_attrs;
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
@@ -1615,6 +1621,9 @@ static int x86_pmu_event_idx(struct perf_event *event)
{
int idx = event->hw.idx;
+ if (!x86_pmu.attr_rdpmc)
+ return 0;
+
if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
idx -= X86_PMC_IDX_FIXED;
idx |= 1 << 30;
@@ -1667,6 +1676,7 @@ static struct attribute_group x86_pmu_attr_group = {
static const struct attribute_group *x86_pmu_attr_groups[] = {
&x86_pmu_attr_group,
+ &x86_pmu_format_group,
NULL,
};
@@ -1698,14 +1708,19 @@ static struct pmu pmu = {
.flush_branch_stack = x86_pmu_flush_branch_stack,
};
-void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
+ userpg->cap_usr_time = 0;
+ userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+ userpg->pmc_width = x86_pmu.cntval_bits;
+
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
return;
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
+ userpg->cap_usr_time = 1;
userpg->time_mult = this_cpu_read(cyc2ns);
userpg->time_shift = CYC2NS_SCALE_FACTOR;
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
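
arch_perf_update_userpage() now advertises cap_usr_rdpmc and pmc_width in the mmap'ed perf_event_mmap_page, so a self-monitoring task can read its own counter with RDPMC instead of a read() syscall. A hedged userspace sketch of the seqlock-style read, not part of this patch; event setup and the mmap of the first ring-buffer page are omitted, and 'pc' is assumed to point at that page:

#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)hi << 32 | lo;
}

static uint64_t read_self_count(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */

		idx = pc->index;
		count = pc->offset;
		if (pc->cap_usr_rdpmc && idx) {
			uint64_t width = pc->pmc_width;
			uint64_t pmc = rdpmc(idx - 1);
			uint64_t mask = ((uint64_t)1 << width) - 1;

			/* The counter is 'width' bits wide; sign-extend the
			 * raw value before adding it to the saved offset. */
			pmc &= mask;
			if (pmc & ((uint64_t)1 << (width - 1)))
				pmc |= ~mask;
			count += pmc;
		}

		__asm__ __volatile__("" ::: "memory");
	} while (pc->lock != seq);

	return count;
}
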
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8484e77..6638aaf 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -339,6 +339,7 @@ struct x86_pmu {
* sysfs attrs
*/
int attr_rdpmc;
+ struct attribute **format_attrs;
/*
* CPU Hotplug hooks
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dd002fa..95e7fe1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -404,6 +404,21 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
+PMU_FORMAT_ATTR(event, "config:0-7,32-35");
+PMU_FORMAT_ATTR(umask, "config:8-15" );
+PMU_FORMAT_ATTR(edge, "config:18" );
+PMU_FORMAT_ATTR(inv, "config:23" );
+PMU_FORMAT_ATTR(cmask, "config:24-31" );
+
+static struct attribute *amd_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask.attr,
+ NULL,
+};
+
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
@@ -426,6 +441,8 @@ static __initconst const struct x86_pmu amd_pmu = {
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,
+ .format_attrs = amd_format_attr,
+
.cpu_prepare = amd_pmu_cpu_prepare,
.cpu_starting = amd_pmu_cpu_starting,
.cpu_dead = amd_pmu_cpu_dead,
@@ -596,6 +613,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
.cpu_dead = amd_pmu_cpu_dead,
#endif
.cpu_starting = amd_pmu_cpu_starting,
+ .format_attrs = amd_format_attr,
};
__init int amd_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a84e7f..26b3e2f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1431,6 +1431,24 @@ static void core_pmu_enable_all(int added)
}
}
+PMU_FORMAT_ATTR(event, "config:0-7" );
+PMU_FORMAT_ATTR(umask, "config:8-15" );
+PMU_FORMAT_ATTR(edge, "config:18" );
+PMU_FORMAT_ATTR(pc, "config:19" );
+PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
+PMU_FORMAT_ATTR(inv, "config:23" );
+PMU_FORMAT_ATTR(cmask, "config:24-31" );
+
+static struct attribute *intel_arch_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_pc.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask.attr,
+ NULL,
+};
+
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
@@ -1455,6 +1473,7 @@ static __initconst const struct x86_pmu core_pmu = {
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
.guest_get_msrs = core_guest_get_msrs,
+ .format_attrs = intel_arch_formats_attr,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
@@ -1553,6 +1572,21 @@ static void intel_pmu_flush_branch_stack(void)
intel_pmu_lbr_reset();
}
+PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+
+static struct attribute *intel_arch3_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_pc.attr,
+ &format_attr_any.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask.attr,
+
+ &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
+ NULL,
+};
+
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
@@ -1576,6 +1610,8 @@ static __initconst const struct x86_pmu intel_pmu = {
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
+ .format_attrs = intel_arch3_formats_attr,
+
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ef484d9..a2dfacf 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1271,6 +1271,17 @@ done:
return num ? -EINVAL : 0;
}
+PMU_FORMAT_ATTR(cccr, "config:0-31" );
+PMU_FORMAT_ATTR(escr, "config:32-62");
+PMU_FORMAT_ATTR(ht, "config:63" );
+
+static struct attribute *intel_p4_formats_attr[] = {
+ &format_attr_cccr.attr,
+ &format_attr_escr.attr,
+ &format_attr_ht.attr,
+ NULL,
+};
+
static __initconst const struct x86_pmu p4_pmu = {
.name = "Netburst P4/Xeon",
.handle_irq = p4_pmu_handle_irq,
@@ -1305,6 +1316,8 @@ static __initconst const struct x86_pmu p4_pmu = {
* the former idea is taken from OProfile code
*/
.perfctr_second_write = 1,
+
+ .format_attrs = intel_p4_formats_attr,
};
__init int p4_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index c7181be..32bcfc7 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -87,6 +87,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
(void)checking_wrmsrl(hwc->config_base, val);
}
+PMU_FORMAT_ATTR(event, "config:0-7" );
+PMU_FORMAT_ATTR(umask, "config:8-15" );
+PMU_FORMAT_ATTR(edge, "config:18" );
+PMU_FORMAT_ATTR(pc, "config:19" );
+PMU_FORMAT_ATTR(inv, "config:23" );
+PMU_FORMAT_ATTR(cmask, "config:24-31" );
+
+static struct attribute *intel_p6_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_pc.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask.attr,
+ NULL,
+};
+
static __initconst const struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = x86_pmu_handle_irq,
@@ -115,6 +132,8 @@ static __initconst const struct x86_pmu p6_pmu = {
.cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
+
+ .format_attrs = intel_p6_formats_attr,
};
__init int p6_pmu_init(void)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7943e0c..3dafc60 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -282,8 +282,13 @@ void fixup_irqs(void)
else if (!(warned++))
set_affinity = 0;
+ /*
+ * We unmask if the irq was not marked masked by the
+ * core code. That respects the lazy irq disable
+ * behaviour.
+ */
if (!irqd_can_move_in_process_context(data) &&
- !irqd_irq_disabled(data) && chip->irq_unmask)
+ !irqd_irq_masked(data) && chip->irq_unmask)
chip->irq_unmask(data);
raw_spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index db6720e..8bfb614 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -43,6 +43,8 @@
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
#include <asm/debugreg.h>
#include <asm/apicdef.h>
@@ -741,6 +743,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
regs->ip = ip;
}
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+ char opc[BREAK_INSTR_SIZE];
+
+ bpt->type = BP_BREAKPOINT;
+ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ err = probe_kernel_write((char *)bpt->bpt_addr,
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+#ifdef CONFIG_DEBUG_RODATA
+ if (!err)
+ return err;
+ /*
+ * It is safe to call text_poke() because normal kernel execution
+ * is stopped on all cores, so long as the text_mutex is not locked.
+ */
+ if (mutex_is_locked(&text_mutex))
+ return -EBUSY;
+ text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+ return -EINVAL;
+ bpt->type = BP_POKE_BREAKPOINT;
+#endif /* CONFIG_DEBUG_RODATA */
+ return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+#ifdef CONFIG_DEBUG_RODATA
+ int err;
+ char opc[BREAK_INSTR_SIZE];
+
+ if (bpt->type != BP_POKE_BREAKPOINT)
+ goto knl_write;
+ /*
+ * It is safe to call text_poke() because normal kernel execution
+ * is stopped on all cores, so long as the text_mutex is not locked.
+ */
+ if (mutex_is_locked(&text_mutex))
+ goto knl_write;
+ text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+ err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+ if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+ goto knl_write;
+ return err;
+knl_write:
+#endif /* CONFIG_DEBUG_RODATA */
+ return probe_kernel_write((char *)bpt->bpt_addr,
+ (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
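The net effect of the two kgdb helpers above: write the breakpoint with probe_kernel_write() while kernel text is writable, and fall back to text_poke() when CONFIG_DEBUG_RODATA has made the text read-only. A condensed sketch of that ordering (illustrative only, not a drop-in replacement for the code above):

/* Sketch of the fallback order used by the kgdb helpers above. */
static int kgdb_write_text(void *addr, const void *insn, size_t len)
{
	if (!probe_kernel_write(addr, insn, len))
		return 0;			/* text was writable */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;			/* text_poke() would be unsafe here */
	text_poke(addr, insn, len);		/* patch through a writable alias */
	return 0;
}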
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 6ac5782..d0b2fb9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -430,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
}
static void* calgary_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
void *ret = NULL;
dma_addr_t mapping;
@@ -463,7 +463,8 @@ error:
}
static void calgary_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
unsigned int npages;
struct iommu_table *tbl = find_iommu_table(dev);
@@ -476,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size,
}
static struct dma_map_ops calgary_dma_ops = {
- .alloc_coherent = calgary_alloc_coherent,
- .free_coherent = calgary_free_coherent,
+ .alloc = calgary_alloc_coherent,
+ .free = calgary_free_coherent,
.map_sg = calgary_map_sg,
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 28e5e06..3003250 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void)
}
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag)
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs)
{
unsigned long dma_mask;
struct page *page;
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3af4af81..f960506 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
}
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev,
}
struct dma_map_ops nommu_dma_ops = {
- .alloc_coherent = dma_generic_alloc_coherent,
- .free_coherent = nommu_free_coherent,
+ .alloc = dma_generic_alloc_coherent,
+ .free = nommu_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 8f972cb..6c483ba 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -15,21 +15,30 @@
int swiotlb __read_mostly;
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *vaddr;
- vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+ vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+ attrs);
if (vaddr)
return vaddr;
return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
+{
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
static struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
- .alloc_coherent = x86_swiotlb_alloc_coherent,
- .free_coherent = swiotlb_free_coherent,
+ .alloc = x86_swiotlb_alloc_coherent,
+ .free = x86_swiotlb_free_coherent,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
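The calgary, nommu and swiotlb conversions above follow the tree-wide switch of struct dma_map_ops from alloc_coherent/free_coherent to alloc/free hooks that also take a struct dma_attrs pointer. A driver-side sketch of how the attrs-aware entry points are reached, assuming the dma_alloc_attrs()/dma_free_attrs() wrappers that accompany this interface (a NULL attrs pointer preserves the old dma_alloc_coherent() behaviour):

/* Sketch only: allocating and freeing a coherent buffer via the new hooks. */
static int example_dma_buffer(struct device *dev, size_t size)
{
	struct dma_attrs attrs;
	dma_addr_t dma_handle;
	void *cpu_addr;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);	/* illustrative attribute */

	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand cpu_addr/dma_handle to the device ... */

	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
	return 0;
}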
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index a33afaa..1d92a5a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -362,34 +362,10 @@ void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#endif
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static inline int hlt_use_halt(void)
-{
- return (!hlt_counter && boot_cpu_data.hlt_works_ok);
-}
-#else
static inline int hlt_use_halt(void)
{
return 1;
}
-#endif
#ifndef CONFIG_SMP
static inline void play_dead(void)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5671752..5a5b6e4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -289,7 +289,7 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x24, K & 0xFF); /* and imm8,%al */
} else if (K >= 0xFFFF0000) {
EMIT2(0x66, 0x25); /* and imm16,%ax */
- EMIT2(K, 2);
+ EMIT(K, 2);
} else {
EMIT1_off32(0x25, K); /* and imm32,%eax */
}
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 4793683..218cdb1 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -225,13 +225,13 @@ static void __restore_processor_state(struct saved_context *ctxt)
fix_processor_context();
do_fpu_end();
+ x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
}
/* Needed by apm.c */
void restore_processor_state(void)
{
- x86_platform.restore_sched_clock_state();
__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b132ade..4f51beb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -967,7 +967,7 @@ void xen_setup_shared_info(void)
xen_setup_mfn_list_list();
}
-/* This is called once we have the cpu_possible_map */
+/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
int cpu;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index b480d42..967633a 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -12,8 +12,8 @@ int xen_swiotlb __read_mostly;
static struct dma_map_ops xen_swiotlb_dma_ops = {
.mapping_error = xen_swiotlb_dma_mapping_error,
- .alloc_coherent = xen_swiotlb_alloc_coherent,
- .free_coherent = xen_swiotlb_free_coherent,
+ .alloc = xen_swiotlb_alloc_coherent,
+ .free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c..8d3a056 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -613,8 +613,7 @@ out:
return err;
}
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
- u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
@@ -652,6 +651,7 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask)
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dc..e4cb351 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -470,8 +470,7 @@ out:
return err;
}
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
- u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
@@ -503,6 +502,7 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
return ERR_PTR(crypto_nivaead_default(alg, type, mask));
}
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask)
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f76e42b..f1ea0a0 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -21,9 +21,13 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
+#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
#include "internal.h"
DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
return crypto_unregister_instance(alg);
}
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+ u32 mask)
+{
+ int err;
+ struct crypto_alg *alg;
+
+ type = crypto_skcipher_type(type);
+ mask = crypto_skcipher_mask(mask);
+
+ for (;;) {
+ alg = crypto_lookup_skcipher(name, type, mask);
+ if (!IS_ERR(alg))
+ return alg;
+
+ err = PTR_ERR(alg);
+ if (err != -EAGAIN)
+ break;
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+ u32 mask)
+{
+ int err;
+ struct crypto_alg *alg;
+
+ type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type |= CRYPTO_ALG_TYPE_AEAD;
+ mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ for (;;) {
+ alg = crypto_lookup_aead(name, type, mask);
+ if (!IS_ERR(alg))
+ return alg;
+
+ err = PTR_ERR(alg);
+ if (err != -EAGAIN)
+ break;
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ return ERR_PTR(err);
+}
+
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@@ -325,7 +383,19 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
else
name = p->cru_name;
- alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+ switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+ break;
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+ break;
+ default:
+ alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+ }
+
if (IS_ERR(alg))
return PTR_ERR(alg);
@@ -387,12 +457,20 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
+ struct crypto_alg *alg;
+ u16 dump_alloc = 0;
+
if (link->dump == NULL)
return -EINVAL;
+
+ list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
+ .min_dump_alloc = dump_alloc,
};
return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 29a89da..b2c99dc 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -280,11 +280,11 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
ictx->tfm_count++;
- cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+ cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
- ctx->cb_cpu = cpumask_first(cpu_active_mask);
+ ctx->cb_cpu = cpumask_first(cpu_online_mask);
for (cpu = 0; cpu < cpu_index; cpu++)
- ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+ ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
@@ -472,7 +472,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
goto err_free_padata;
}
- cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+ cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
rcu_assign_pointer(pcrypt->cb_cpumask, mask);
pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6f0459c..d236aef 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -50,6 +50,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/hsi/Kconfig"
+
source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 262b19d..95952c8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
+obj-y += hsi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6c9387d..5401814 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -16,10 +16,11 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
@@ -81,6 +82,11 @@ enum {
static int loading_timeout = 60; /* In seconds */
+static inline long firmware_loading_timeout(void)
+{
+ return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
/* fw_lock could be moved to 'struct firmware_priv' but since it is just
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
{
struct firmware_priv *fw_priv;
struct device *f_dev;
- int error;
fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
- error = -ENOMEM;
- goto err_out;
+ return ERR_PTR(-ENOMEM);
}
fw_priv->fw = firmware;
@@ -463,98 +467,80 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
f_dev->parent = device;
f_dev->class = &firmware_class;
- dev_set_uevent_suppress(f_dev, true);
-
- /* Need to pin this module until class device is destroyed */
- __module_get(THIS_MODULE);
-
- error = device_add(f_dev);
- if (error) {
- dev_err(device, "%s: device_register failed\n", __func__);
- goto err_put_dev;
- }
-
- error = device_create_bin_file(f_dev, &firmware_attr_data);
- if (error) {
- dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
- goto err_del_dev;
- }
-
- error = device_create_file(f_dev, &dev_attr_loading);
- if (error) {
- dev_err(device, "%s: device_create_file failed\n", __func__);
- goto err_del_bin_attr;
- }
-
- if (uevent)
- dev_set_uevent_suppress(f_dev, false);
-
return fw_priv;
-
-err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
-err_out:
- return ERR_PTR(error);
}
-static void fw_destroy_instance(struct firmware_priv *fw_priv)
-{
- struct device *f_dev = &fw_priv->dev;
-
- device_remove_file(f_dev, &dev_attr_loading);
- device_remove_bin_file(f_dev, &firmware_attr_data);
- device_unregister(f_dev);
-}
-
-static int _request_firmware(const struct firmware **firmware_p,
- const char *name, struct device *device,
- bool uevent, bool nowait)
+static struct firmware_priv *
+_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
+ struct device *device, bool uevent, bool nowait)
{
- struct firmware_priv *fw_priv;
struct firmware *firmware;
- int retval = 0;
+ struct firmware_priv *fw_priv;
if (!firmware_p)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
if (fw_get_builtin_firmware(firmware, name)) {
dev_dbg(device, "firmware: using built-in firmware %s\n", name);
- return 0;
+ return NULL;
+ }
+
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv)) {
+ release_firmware(firmware);
+ *firmware_p = NULL;
}
+ return fw_priv;
+}
- read_lock_usermodehelper();
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+ release_firmware(*firmware_p);
+ *firmware_p = NULL;
+}
- if (WARN_ON(usermodehelper_is_disabled())) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- retval = -EBUSY;
- goto out;
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+ long timeout)
+{
+ int retval = 0;
+ struct device *f_dev = &fw_priv->dev;
+
+ dev_set_uevent_suppress(f_dev, true);
+
+ /* Need to pin this module until class device is destroyed */
+ __module_get(THIS_MODULE);
+
+ retval = device_add(f_dev);
+ if (retval) {
+ dev_err(f_dev, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
}
- if (uevent)
- dev_dbg(device, "firmware: requesting %s\n", name);
+ retval = device_create_bin_file(f_dev, &firmware_attr_data);
+ if (retval) {
+ dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+ goto err_del_dev;
+ }
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
- if (IS_ERR(fw_priv)) {
- retval = PTR_ERR(fw_priv);
- goto out;
+ retval = device_create_file(f_dev, &dev_attr_loading);
+ if (retval) {
+ dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+ goto err_del_bin_attr;
}
if (uevent) {
- if (loading_timeout > 0)
+ dev_set_uevent_suppress(f_dev, false);
+ dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+ if (timeout != MAX_SCHEDULE_TIMEOUT)
mod_timer(&fw_priv->timeout,
- round_jiffies_up(jiffies +
- loading_timeout * HZ));
+ round_jiffies_up(jiffies + timeout));
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -570,16 +556,13 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_priv->fw = NULL;
mutex_unlock(&fw_lock);
- fw_destroy_instance(fw_priv);
-
-out:
- read_unlock_usermodehelper();
-
- if (retval) {
- release_firmware(firmware);
- *firmware_p = NULL;
- }
-
+ device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
return retval;
}
@@ -602,7 +585,26 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- return _request_firmware(firmware_p, name, device, true, false);
+ struct firmware_priv *fw_priv;
+ int ret;
+
+ fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+ false);
+ if (IS_ERR_OR_NULL(fw_priv))
+ return PTR_RET(fw_priv);
+
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n", name);
+ } else {
+ ret = _request_firmware_load(fw_priv, true,
+ firmware_loading_timeout());
+ usermodehelper_read_unlock();
+ }
+ if (ret)
+ _request_firmware_cleanup(firmware_p);
+
+ return ret;
}
/**
@@ -629,25 +631,39 @@ struct firmware_work {
bool uevent;
};
-static int request_firmware_work_func(void *arg)
+static void request_firmware_work_func(struct work_struct *work)
{
- struct firmware_work *fw_work = arg;
+ struct firmware_work *fw_work;
const struct firmware *fw;
+ struct firmware_priv *fw_priv;
+ long timeout;
int ret;
- if (!arg) {
- WARN_ON(1);
- return 0;
+ fw_work = container_of(work, struct firmware_work, work);
+ fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
+ if (IS_ERR_OR_NULL(fw_priv)) {
+ ret = PTR_RET(fw_priv);
+ goto out;
+ }
+
+ timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
+ if (timeout) {
+ ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
+ usermodehelper_read_unlock();
+ } else {
+ dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
+ fw_work->name);
+ ret = -EAGAIN;
}
+ if (ret)
+ _request_firmware_cleanup(&fw);
- ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
+ out:
fw_work->cont(fw, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
-
- return ret;
}
/**
@@ -673,7 +689,6 @@ request_firmware_nowait(
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
- struct task_struct *task;
struct firmware_work *fw_work;
fw_work = kzalloc(sizeof (struct firmware_work), gfp);
@@ -692,15 +707,8 @@ request_firmware_nowait(
return -EFAULT;
}
- task = kthread_run(request_firmware_work_func, fw_work,
- "firmware/%s", name);
- if (IS_ERR(task)) {
- fw_work->cont(NULL, fw_work->context);
- module_put(fw_work->module);
- kfree(fw_work);
- return PTR_ERR(task);
- }
-
+ INIT_WORK(&fw_work->work, request_firmware_work_func);
+ schedule_work(&fw_work->work);
return 0;
}
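With the change above, request_firmware_nowait() queues a work item instead of spawning a kthread per request. The shape is the standard workqueue pattern; a minimal sketch of the same idea (names here are illustrative, not the actual firmware_class.c structures):

/* Sketch: embed a work_struct, queue it, recover the container in the handler. */
struct async_req {
	struct work_struct work;
	char name[32];
};

static void async_req_func(struct work_struct *work)
{
	struct async_req *req = container_of(work, struct async_req, work);

	pr_debug("handling request for %s\n", req->name);
	kfree(req);
}

static int queue_async_req(const char *name)
{
	struct async_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	strlcpy(req->name, name, sizeof(req->name));
	INIT_WORK(&req->work, async_req_func);
	schedule_work(&req->work);
	return 0;
}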
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 541f821..bd0f394 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.suspend_time = ktime_set(0, 0);
dev->power.max_time_suspended_ns = -1;
dev->power.deferred_resume = false;
+ wake_up_all(&dev->power.wait_queue);
+
if (retval == -EAGAIN || retval == -EBUSY) {
dev->power.runtime_error = 0;
@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
} else {
pm_runtime_cancel_pending(dev);
}
- wake_up_all(&dev->power.wait_queue);
goto out;
}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5157fa0..fb14a63 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -396,7 +396,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
map->cache_word_size);
/* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, i);
+ ret = regcache_lookup_reg(map, regtmp);
if (ret >= 0 && val == map->reg_defaults[ret].def)
continue;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 76a0823..b0b00d7 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1030,37 +1030,6 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
return 0;
}
-static DEFINE_SPINLOCK(floppy_hlt_lock);
-static int hlt_disabled;
-static void floppy_disable_hlt(void)
-{
- unsigned long flags;
-
- WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
- spin_lock_irqsave(&floppy_hlt_lock, flags);
- if (!hlt_disabled) {
- hlt_disabled = 1;
-#ifdef HAVE_DISABLE_HLT
- disable_hlt();
-#endif
- }
- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
-}
-
-static void floppy_enable_hlt(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_hlt_lock, flags);
- if (hlt_disabled) {
- hlt_disabled = 0;
-#ifdef HAVE_DISABLE_HLT
- enable_hlt();
-#endif
- }
- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
-}
-
static void setup_DMA(void)
{
unsigned long f;
@@ -1105,7 +1074,6 @@ static void setup_DMA(void)
fd_enable_dma();
release_dma_lock(f);
#endif
- floppy_disable_hlt();
}
static void show_floppy(void);
@@ -1707,7 +1675,6 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
fd_disable_dma();
release_dma_lock(f);
- floppy_enable_hlt();
do_floppy = NULL;
if (fdc >= N_FDC || FDCS->address == -1) {
/* we don't even know which FDC is the culprit */
@@ -1856,8 +1823,6 @@ static void floppy_shutdown(unsigned long data)
show_floppy();
cancel_activity();
- floppy_enable_hlt();
-
flags = claim_dma_lock();
fd_disable_dma();
release_dma_lock(flags);
@@ -4508,7 +4473,6 @@ static void floppy_release_irq_and_dma(void)
#if N_FDC > 1
set_dor(1, ~8, 0);
#endif
- floppy_enable_hlt();
if (floppy_track_buffer && max_buffer_sectors) {
tmpsize = max_buffer_sectors * 1024;
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5da67f1..7ea18a5 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -234,6 +234,7 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5cf47ac..7f025fb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1190,7 +1190,6 @@ static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- extern int intel_iommu_gfx_mapped;
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
@@ -1459,6 +1458,8 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index a22ffa5..0bf1b89 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -142,7 +142,7 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, &cpu_present_map);
+ cpumask_copy(policy->cpus, cpu_present_mask);
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 16a6b48..ec78cce 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -585,7 +585,7 @@ static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
- enum dma_transfer_direction dir, unsigned long flags)
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_desc *txd;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 36e1486..d0c372e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -754,9 +754,7 @@ static int __init mce_amd_init(void)
if (c->x86_vendor != X86_VENDOR_AMD)
return 0;
- if ((c->x86 < 0xf || c->x86 > 0x12) &&
- (c->x86 != 0x14 || c->x86_model > 0xf) &&
- (c->x86 != 0x15 || c->x86_model > 0xf))
+ if (c->x86 < 0xf || c->x86 > 0x15)
return 0;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -797,7 +795,7 @@ static int __init mce_amd_init(void)
break;
default:
- printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
+ printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
kfree(fam_ops);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cc11488..e354bc0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select DMA_SHARED_BUFFER
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a858532..c20da5b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_prime.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0b65fbc..6116e3b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -136,6 +136,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7740dd2..a0d6e89 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -559,9 +559,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
- "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+ var->xres, var->yres, var->bits_per_pixel,
+ var->xres_virtual, var->yres_virtual,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7348a3d..cdfbf27 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&priv->prime);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -571,6 +574,10 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file_priv->prime);
+
kfree(file_priv);
/* ========================================================
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0ef358e..83114b5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -35,6 +35,7 @@
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
#include "drmP.h"
/** @file drm_gem.c
@@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
+ if (obj->import_attach)
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->import_attach->dmabuf);
+
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
@@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (obj->import_attach)
+ drm_prime_remove_imported_buf_handle(&file_priv->prime,
+ obj->import_attach->dmabuf);
+
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 0000000..1bdf2b5
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include "drmP.h"
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When the imported object is destroyed,
+ * that attachment is destroyed as well; at that point we remove the
+ * attachment and drop the reference to the dma_buf.

+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return the GEM object
+ * from the dma-buf private data.
+ */
+
+struct drm_prime_member {
+ struct list_head entry;
+ struct dma_buf *dma_buf;
+ uint32_t handle;
+};
+
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct drm_gem_object *obj;
+ void *buf;
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ mutex_lock(&file_priv->prime.lock);
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ get_dma_buf(obj->import_attach->dmabuf);
+ *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+ }
+
+ if (obj->export_dma_buf) {
+ get_dma_buf(obj->export_dma_buf);
+ *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+ drm_gem_object_unreference_unlocked(obj);
+ } else {
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return PTR_ERR(buf);
+ }
+ obj->export_dma_buf = buf;
+ *prime_fd = dma_buf_fd(buf, flags);
+ }
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
+
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ mutex_lock(&file_priv->prime.lock);
+
+ ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (!ret) {
+ ret = 0;
+ goto out_put;
+ }
+
+ /* never seen this one, need to import */
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ goto out_put;
+
+ ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ if (ret)
+ goto fail;
+
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+
+fail:
+ /* hmm, if driver attached, we are relying on the free-object path
+ * to detach.. which seems ok..
+ */
+ drm_gem_object_handle_unreference_unlocked(obj);
+out_put:
+ dma_buf_put(dma_buf);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+ uint32_t flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~DRM_CLOEXEC)
+ return -EINVAL;
+
+ /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+ flags = args->flags & DRM_CLOEXEC;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
+
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
+}
+
+/*
+ * drm_prime_pages_to_sg
+ *
+ * This helper creates an sg table object from a set of pages;
+ * the driver is responsible for mapping the pages into the
+ * importer's address space.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ struct scatterlist *iter;
+ int i;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ for_each_sg(sg->sgl, iter, nr_pages, i)
+ sg_set_page(iter, pages[i], PAGE_SIZE, 0);
+
+ return sg;
+out:
+ kfree(sg);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/* helper function to cleanup a GEM/prime object */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+ attach = obj->import_attach;
+ if (sg)
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ /* remove the reference */
+ dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ INIT_LIST_HEAD(&prime_fpriv->head);
+ mutex_init(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_init_file_private);
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ struct drm_prime_member *member, *safe;
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ list_del(&member->entry);
+ kfree(member);
+ }
+}
+EXPORT_SYMBOL(drm_prime_destroy_file_private);
+
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ member = kmalloc(sizeof(*member), GFP_KERNEL);
+ if (!member)
+ return -ENOMEM;
+
+ member->dma_buf = dma_buf;
+ member->handle = handle;
+ list_add(&member->entry, &prime_fpriv->head);
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+{
+ struct drm_prime_member *member, *safe;
+
+ mutex_lock(&prime_fpriv->lock);
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ list_del(&member->entry);
+ kfree(member);
+ }
+ }
+ mutex_unlock(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
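drm_prime.c gives drivers common fd<->handle plumbing; a driver opts in by advertising DRIVER_PRIME and pointing the new driver hooks at these helpers. A sketch, under the assumption that the driver supplies its own gem_prime_export/gem_prime_import callbacks (the example_* names below are hypothetical):

/* Sketch: wiring a GEM driver up to the PRIME helpers added above. */
struct dma_buf *example_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags);
struct drm_gem_object *example_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf);

static struct drm_driver example_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= example_gem_prime_export,	/* driver hook */
	.gem_prime_import	= example_gem_prime_import,	/* driver hook */
	/* ... fops, GEM callbacks, etc. ... */
};

The HANDLE_TO_FD and FD_TO_HANDLE ioctls registered in drm_drv.c then route through these hooks, with the generic helpers handling re-export and duplicate-import bookkeeping.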
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8..785f67f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
static int i915_load_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1197,7 +1212,7 @@ static int i915_load_gem_init(struct drm_device *dev)
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
mutex_lock(&dev->struct_mutex);
- if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
@@ -1207,8 +1222,10 @@ static int i915_load_gem_init(struct drm_device *dev)
i915_gem_do_init(dev, 0, mappable_size, gtt_size);
ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
return ret;
+ }
} else {
/* Let GEM Manage all of the aperture.
*
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1a7559b..dfa55e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,7 +66,11 @@ MODULE_PARM_DESC(semaphores,
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -103,8 +107,8 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-bool i915_enable_ppgtt __read_mostly = 1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+int i915_enable_ppgtt __read_mostly = -1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
@@ -292,6 +296,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
{0, 0, 0}
};
@@ -533,7 +538,9 @@ static int i915_drm_thaw(struct drm_device *dev)
drm_irq_install(dev);
/* Resume the modeset for every activated CRTC */
+ mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f5..5fabc6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1053,6 +1053,27 @@ struct drm_i915_file_private {
#include "i915_trace.h"
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, down to 0V in that state. It is entered
+ * automatically when the GPU is idle and RC6 support is enabled, and the
+ * GPU wakes up automatically as soon as new work arrives.
+ *
+ * Intel GPUs provide several RC6 modes, which differ in the latency
+ * required to enter and leave RC6 and in the voltage the GPU draws in
+ * each state.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support varies with the GPU, BIOS,
+ * chipset and platform. RC6 is usually the safest one and the one that
+ * brings the most power savings; deeper states save more power, but cost
+ * higher latency to enter and to wake up from.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
@@ -1065,7 +1086,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
-extern bool i915_enable_ppgtt __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f441f5..4c65c63 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1472,16 +1472,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_rendering_seqno = seqno;
- if (obj->fenced_gpu_access) {
- struct drm_i915_fence_reg *reg;
-
- BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+ if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
obj->last_fenced_ring = ring;
- reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
}
}
@@ -3754,12 +3757,32 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
- pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 81687af..f51a696 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -498,8 +498,8 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (ret)
goto err_unpin;
}
+ obj->pending_fenced_gpu_access = true;
}
- obj->pending_fenced_gpu_access = need_fence;
}
entry->offset = obj->gtt_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78..a135c61 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -65,9 +65,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
- uint32_t pd_entry;
unsigned first_pd_entry_in_global_pt;
- uint32_t __iomem *pd_addr;
int i;
int ret = -ENOMEM;
@@ -100,7 +98,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
goto err_pt_alloc;
}
- pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar) {
@@ -117,13 +114,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
ppgtt->pt_dma_addr[i] = pt_addr;
} else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
}
- readl(pd_addr);
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3886cf0..2abf4eb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2385,6 +2385,7 @@
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1<<25)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8168d8f..b48fc2a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*
*/
+#include <linux/dmi.h>
#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
@@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->edp.bpp = 18;
}
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n",
+ id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
@@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev)
init_vbt_defaults(dev_priv);
/* XXX Should this validation be moved to intel_opregion.c? */
- if (dev_priv->opregion.vbt) {
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d514719..91b35fd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5539,7 +5539,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
temp |= DREF_SSC1_ENABLE;
- }
+ } else
+ temp &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -7580,6 +7581,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ for_each_pipe(pipe) {
+ reg = PIPECONF(pipe);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ }
+
if (HAS_PCH_SPLIT(dev))
return;
@@ -8215,7 +8222,7 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-static bool intel_enable_rc6(struct drm_device *dev)
+static int intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
@@ -8233,11 +8240,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
* Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
- return 0;
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
}
- DRM_DEBUG_DRIVER("RC6 enabled\n");
- return 1;
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -8247,6 +8254,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
+ int rc6_mode;
int i;
/* Here begins a magic sequence of register writes to enable
@@ -8284,9 +8292,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (intel_enable_rc6(dev_priv->dev))
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
- ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c5c0973..95db2e9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -755,6 +755,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc66af6..e25581a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -626,7 +626,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring)
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (IS_GEN7(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7aa0450..a464771 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -411,6 +411,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
old_obj = intel_plane->obj;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ca16399..97a8126 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -13,6 +13,7 @@ config DRM_NOUVEAU
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
select ACPI_WMI if ACPI
select MXM_WMI if ACPI
+ select POWER_SUPPLY
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 637afe7..80963d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -177,14 +177,15 @@ bios_shadow_pci(struct nvbios *bios)
if (!pci_enable_rom(pdev)) {
void __iomem *rom = pci_map_rom(pdev, &length);
- if (rom) {
+ if (rom && length) {
bios->data = kmalloc(length, GFP_KERNEL);
if (bios->data) {
memcpy_fromio(bios->data, rom, length);
bios->length = length;
}
- pci_unmap_rom(pdev, rom);
}
+ if (rom)
+ pci_unmap_rom(pdev, rom);
pci_disable_rom(pdev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 44e6416..846afb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -436,11 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
}
if (dev_priv->card_type < NV_C0) {
- init->subchan[0].handle = NvSw;
- init->subchan[0].grclass = NV_SW;
- init->nr_subchan = 1;
- } else {
- init->nr_subchan = 0;
+ init->subchan[0].handle = 0x00000000;
+ init->subchan[0].grclass = 0x0000;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
}
/* Named memory object area */
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index bcf0fd9..23d4edf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
- NvSubSw = 0,
- NvSubM2MF = 1,
+ NvSubM2MF = 0,
+ NvSubSw = 1,
NvSub2D = 2,
NvSubCtxSurf2D = 2,
NvSubGdiRect = 3,
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4886b3..c2a8511 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -642,7 +642,7 @@ nouveau_card_channel_init(struct drm_device *dev)
OUT_RING (chan, chan->vram_handle);
OUT_RING (chan, chan->gart_handle);
} else
- if (dev_priv->card_type <= NV_C0) {
+ if (dev_priv->card_type <= NV_D0) {
ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d1bd239..5ce9bf5 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1306,8 +1306,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
int atom_asic_init(struct atom_context *ctx)
{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
uint32_t ps[16];
+ int ret;
+
memset(ps, 0, 64);
ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
@@ -1317,7 +1320,17 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ if (ret)
+ return ret;
+
+ memset(ps, 0, 64);
+
+ if (rdev->family < CHIP_R600) {
+ if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+ }
+ return ret;
}
void atom_destroy(struct atom_context *ctx)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 93cfe20..25fea63 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -44,6 +44,7 @@
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
+#define ATOM_CMD_SPDFANCNTL 0x39
#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6f70158..df6a4db 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -241,7 +241,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
domain_start = bo->rdev->mc.vram_start;
else
domain_start = bo->rdev->mc.gtt_start;
- WARN_ON_ONCE((*gpu_addr - domain_start) > max_offset);
+ WARN_ON_ONCE(max_offset <
+ (radeon_bo_gpu_offset(bo) - domain_start));
}
return 0;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5340c5f..5367390 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -47,7 +47,7 @@ static struct vm_operations_struct udl_gem_vm_ops = {
static const struct file_operations udl_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .mmap = drm_gem_mmap,
+ .mmap = udl_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1612954..96820d0 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -121,6 +121,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 852642d..92f19ef 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -71,6 +71,20 @@ int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
return drm_gem_handle_delete(file, handle);
}
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ return ret;
+}
+
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig
new file mode 100644
index 0000000..d94e38d
--- /dev/null
+++ b/drivers/hsi/Kconfig
@@ -0,0 +1,19 @@
+#
+# HSI driver configuration
+#
+menuconfig HSI
+ tristate "HSI support"
+ ---help---
+ The "High speed synchronous Serial Interface" is
+ synchronous serial interface used mainly to connect
+ application engines and cellular modems.
+
+if HSI
+
+config HSI_BOARDINFO
+ bool
+ default y
+
+source "drivers/hsi/clients/Kconfig"
+
+endif # HSI
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
new file mode 100644
index 0000000..9d5d33f
--- /dev/null
+++ b/drivers/hsi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI
+#
+obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o
+obj-$(CONFIG_HSI) += hsi.o
+obj-y += clients/
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
new file mode 100644
index 0000000..3bacd27
--- /dev/null
+++ b/drivers/hsi/clients/Kconfig
@@ -0,0 +1,13 @@
+#
+# HSI clients configuration
+#
+
+comment "HSI clients"
+
+config HSI_CHAR
+ tristate "HSI/SSI character driver"
+ depends on HSI
+ ---help---
+ If you say Y here, you will enable the HSI/SSI character driver.
+ This driver provides a simple character device interface for
+ serial communication with the cellular modem over the HSI/SSI bus.
diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile
new file mode 100644
index 0000000..327c0e2
--- /dev/null
+++ b/drivers/hsi/clients/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for HSI clients
+#
+
+obj-$(CONFIG_HSI_CHAR) += hsi_char.o
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
new file mode 100644
index 0000000..88a050d
--- /dev/null
+++ b/drivers/hsi/clients/hsi_char.c
@@ -0,0 +1,802 @@
+/*
+ * HSI character device driver, implements the character device
+ * interface.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/ioctl.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/stat.h>
+#include <linux/hsi/hsi.h>
+#include <linux/hsi/hsi_char.h>
+
+#define HSC_DEVS 16 /* Num of channels */
+#define HSC_MSGS 4
+
+#define HSC_RXBREAK 0
+
+#define HSC_ID_BITS 6
+#define HSC_PORT_ID_BITS 4
+#define HSC_ID_MASK 3
+#define HSC_PORT_ID_MASK 3
+#define HSC_CH_MASK 0xf
+
+/*
+ * We support up to 4 controllers that can have up to 4
+ * ports, which should currently be more than enough.
+ */
+#define HSC_BASEMINOR(id, port_id) \
+ ((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \
+ (((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS))
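
As a sketch of how the minor numbering above works out, assuming controller id 1 and port 1 (values chosen only for illustration):

	unsigned int base  = HSC_BASEMINOR(1, 1);  /* ((1 & 3) << 6) | ((1 & 3) << 4) = 80 */
	unsigned int minor = base + 3;             /* hsc_open() below masks with HSC_CH_MASK, so minor 83 is channel 3 */
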
+
+enum {
+ HSC_CH_OPEN,
+ HSC_CH_READ,
+ HSC_CH_WRITE,
+ HSC_CH_WLINE,
+};
+
+enum {
+ HSC_RX,
+ HSC_TX,
+};
+
+struct hsc_client_data;
+/**
+ * struct hsc_channel - hsi_char internal channel data
+ * @ch: channel number
+ * @flags: Keeps state of the channel (open/close, reading, writing)
+ * @free_msgs_list: List of free HSI messages/requests
+ * @rx_msgs_queue: List of pending RX requests
+ * @tx_msgs_queue: List of pending TX requests
+ * @lock: Serialize access to the lists
+ * @cl: reference to the associated hsi_client
+ * @cl_data: reference to the client data that this channel belongs to
+ * @rx_wait: RX requests wait queue
+ * @tx_wait: TX requests wait queue
+ */
+struct hsc_channel {
+ unsigned int ch;
+ unsigned long flags;
+ struct list_head free_msgs_list;
+ struct list_head rx_msgs_queue;
+ struct list_head tx_msgs_queue;
+ spinlock_t lock;
+ struct hsi_client *cl;
+ struct hsc_client_data *cl_data;
+ wait_queue_head_t rx_wait;
+ wait_queue_head_t tx_wait;
+};
+
+/**
+ * struct hsc_client_data - hsi_char internal client data
+ * @cdev: Character device associated with the hsi_client
+ * @lock: Lock to serialize open/close access
+ * @flags: Keeps track of port state (rx hwbreak armed)
+ * @usecnt: Use count for claiming the HSI port (mutex protected)
+ * @cl: Reference to the HSI client
+ * @channels: Array of channels accessible by the client
+ */
+struct hsc_client_data {
+ struct cdev cdev;
+ struct mutex lock;
+ unsigned long flags;
+ unsigned int usecnt;
+ struct hsi_client *cl;
+ struct hsc_channel channels[HSC_DEVS];
+};
+
+/* Stores the major number dynamically allocated for hsi_char */
+static unsigned int hsc_major;
+/* Maximum buffer size that hsi_char will accept from userspace */
+static unsigned int max_data_size = 0x1000;
+module_param(max_data_size, uint, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
+
+static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
+ struct list_head *queue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->lock, flags);
+ list_add_tail(&msg->link, queue);
+ spin_unlock_irqrestore(&channel->lock, flags);
+}
+
+static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel,
+ struct list_head *queue)
+{
+ struct hsi_msg *msg = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->lock, flags);
+
+ if (list_empty(queue))
+ goto out;
+
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ list_del(&msg->link);
+out:
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ return msg;
+}
+
+static inline void hsc_msg_free(struct hsi_msg *msg)
+{
+ kfree(sg_virt(msg->sgt.sgl));
+ hsi_free_msg(msg);
+}
+
+static void hsc_free_list(struct list_head *list)
+{
+ struct hsi_msg *msg, *tmp;
+
+ list_for_each_entry_safe(msg, tmp, list, link) {
+ list_del(&msg->link);
+ hsc_msg_free(msg);
+ }
+}
+
+static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l)
+{
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&channel->lock, flags);
+ list_splice_init(l, &list);
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ hsc_free_list(&list);
+}
+
+static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size)
+{
+ struct hsi_msg *msg;
+ void *buf;
+
+ msg = hsi_alloc_msg(1, GFP_KERNEL);
+ if (!msg)
+ goto out;
+ buf = kmalloc(alloc_size, GFP_KERNEL);
+ if (!buf) {
+ hsi_free_msg(msg);
+ goto out;
+ }
+ sg_init_one(msg->sgt.sgl, buf, alloc_size);
+ /* Ignore false positive, due to sg pointer handling */
+ kmemleak_ignore(buf);
+
+ return msg;
+out:
+ return NULL;
+}
+
+static inline int hsc_msgs_alloc(struct hsc_channel *channel)
+{
+ struct hsi_msg *msg;
+ int i;
+
+ for (i = 0; i < HSC_MSGS; i++) {
+ msg = hsc_msg_alloc(max_data_size);
+ if (!msg)
+ goto out;
+ msg->channel = channel->ch;
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ }
+
+ return 0;
+out:
+ hsc_free_list(&channel->free_msgs_list);
+
+ return -ENOMEM;
+}
+
+static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg)
+{
+ return msg->sgt.sgl->length;
+}
+
+static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len)
+{
+ msg->sgt.sgl->length = len;
+}
+
+static void hsc_rx_completed(struct hsi_msg *msg)
+{
+ struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsc_channel *channel = cl_data->channels + msg->channel;
+
+ if (test_bit(HSC_CH_READ, &channel->flags)) {
+ hsc_add_tail(channel, msg, &channel->rx_msgs_queue);
+ wake_up(&channel->rx_wait);
+ } else {
+ hsc_add_tail(channel, msg, &channel->free_msgs_list);
+ }
+}
+
+static void hsc_rx_msg_destructor(struct hsi_msg *msg)
+{
+ msg->status = HSI_STATUS_ERROR;
+ hsc_msg_len_set(msg, 0);
+ hsc_rx_completed(msg);
+}
+
+static void hsc_tx_completed(struct hsi_msg *msg)
+{
+ struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsc_channel *channel = cl_data->channels + msg->channel;
+
+ if (test_bit(HSC_CH_WRITE, &channel->flags)) {
+ hsc_add_tail(channel, msg, &channel->tx_msgs_queue);
+ wake_up(&channel->tx_wait);
+ } else {
+ hsc_add_tail(channel, msg, &channel->free_msgs_list);
+ }
+}
+
+static void hsc_tx_msg_destructor(struct hsi_msg *msg)
+{
+ msg->status = HSI_STATUS_ERROR;
+ hsc_msg_len_set(msg, 0);
+ hsc_tx_completed(msg);
+}
+
+static void hsc_break_req_destructor(struct hsi_msg *msg)
+{
+ struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+
+ hsi_free_msg(msg);
+ clear_bit(HSC_RXBREAK, &cl_data->flags);
+}
+
+static void hsc_break_received(struct hsi_msg *msg)
+{
+ struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsc_channel *channel = cl_data->channels;
+ int i, ret;
+
+ /* Broadcast HWBREAK on all channels */
+ for (i = 0; i < HSC_DEVS; i++, channel++) {
+ struct hsi_msg *msg2;
+
+ if (!test_bit(HSC_CH_READ, &channel->flags))
+ continue;
+ msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list);
+ if (!msg2)
+ continue;
+ clear_bit(HSC_CH_READ, &channel->flags);
+ hsc_msg_len_set(msg2, 0);
+ msg2->status = HSI_STATUS_COMPLETED;
+ hsc_add_tail(channel, msg2, &channel->rx_msgs_queue);
+ wake_up(&channel->rx_wait);
+ }
+ hsi_flush(msg->cl);
+ ret = hsi_async_read(msg->cl, msg);
+ if (ret < 0)
+ hsc_break_req_destructor(msg);
+}
+
+static int hsc_break_request(struct hsi_client *cl)
+{
+ struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+ int ret;
+
+ if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags))
+ return -EBUSY;
+
+ msg = hsi_alloc_msg(0, GFP_KERNEL);
+ if (!msg) {
+ clear_bit(HSC_RXBREAK, &cl_data->flags);
+ return -ENOMEM;
+ }
+ msg->break_frame = 1;
+ msg->complete = hsc_break_received;
+ msg->destructor = hsc_break_req_destructor;
+ ret = hsi_async_read(cl, msg);
+ if (ret < 0)
+ hsc_break_req_destructor(msg);
+
+ return ret;
+}
+
+static int hsc_break_send(struct hsi_client *cl)
+{
+ struct hsi_msg *msg;
+ int ret;
+
+ msg = hsi_alloc_msg(0, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+ msg->break_frame = 1;
+ msg->complete = hsi_free_msg;
+ msg->destructor = hsi_free_msg;
+ ret = hsi_async_write(cl, msg);
+ if (ret < 0)
+ hsi_free_msg(msg);
+
+ return ret;
+}
+
+static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
+{
+ struct hsi_config tmp;
+ int ret;
+
+ if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME))
+ return -EINVAL;
+ if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS))
+ return -EINVAL;
+ if (rxc->channels & (rxc->channels - 1))
+ return -EINVAL;
+ if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE))
+ return -EINVAL;
+ tmp = cl->rx_cfg;
+ cl->rx_cfg.mode = rxc->mode;
+ cl->rx_cfg.channels = rxc->channels;
+ cl->rx_cfg.flow = rxc->flow;
+ ret = hsi_setup(cl);
+ if (ret < 0) {
+ cl->rx_cfg = tmp;
+ return ret;
+ }
+ if (rxc->mode == HSI_MODE_FRAME)
+ hsc_break_request(cl);
+
+ return ret;
+}
+
+static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
+{
+ rxc->mode = cl->rx_cfg.mode;
+ rxc->channels = cl->rx_cfg.channels;
+ rxc->flow = cl->rx_cfg.flow;
+}
+
+static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
+{
+ struct hsi_config tmp;
+ int ret;
+
+ if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME))
+ return -EINVAL;
+ if ((txc->channels == 0) || (txc->channels > HSC_DEVS))
+ return -EINVAL;
+ if (txc->channels & (txc->channels - 1))
+ return -EINVAL;
+ if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO))
+ return -EINVAL;
+ tmp = cl->tx_cfg;
+ cl->tx_cfg.mode = txc->mode;
+ cl->tx_cfg.channels = txc->channels;
+ cl->tx_cfg.speed = txc->speed;
+ cl->tx_cfg.arb_mode = txc->arb_mode;
+ ret = hsi_setup(cl);
+ if (ret < 0) {
+ cl->tx_cfg = tmp;
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
+{
+ txc->mode = cl->tx_cfg.mode;
+ txc->channels = cl->tx_cfg.channels;
+ txc->speed = cl->tx_cfg.speed;
+ txc->arb_mode = cl->tx_cfg.arb_mode;
+}
+
+static ssize_t hsc_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos __maybe_unused)
+{
+ struct hsc_channel *channel = file->private_data;
+ struct hsi_msg *msg;
+ ssize_t ret;
+
+ if (len == 0)
+ return 0;
+ if (!IS_ALIGNED(len, sizeof(u32)))
+ return -EINVAL;
+ if (len > max_data_size)
+ len = max_data_size;
+ if (channel->ch >= channel->cl->rx_cfg.channels)
+ return -ECHRNG;
+ if (test_and_set_bit(HSC_CH_READ, &channel->flags))
+ return -EBUSY;
+ msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
+ if (!msg) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ hsc_msg_len_set(msg, len);
+ msg->complete = hsc_rx_completed;
+ msg->destructor = hsc_rx_msg_destructor;
+ ret = hsi_async_read(channel->cl, msg);
+ if (ret < 0) {
+ hsc_add_tail(channel, msg, &channel->free_msgs_list);
+ goto out;
+ }
+
+ ret = wait_event_interruptible(channel->rx_wait,
+ !list_empty(&channel->rx_msgs_queue));
+ if (ret < 0) {
+ clear_bit(HSC_CH_READ, &channel->flags);
+ hsi_flush(channel->cl);
+ return -EINTR;
+ }
+
+ msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue);
+ if (msg) {
+ if (msg->status != HSI_STATUS_ERROR) {
+ ret = copy_to_user((void __user *)buf,
+ sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg));
+ if (ret)
+ ret = -EFAULT;
+ else
+ ret = hsc_msg_len_get(msg);
+ } else {
+ ret = -EIO;
+ }
+ hsc_add_tail(channel, msg, &channel->free_msgs_list);
+ }
+out:
+ clear_bit(HSC_CH_READ, &channel->flags);
+
+ return ret;
+}
+
+static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len,
+ loff_t *ppos __maybe_unused)
+{
+ struct hsc_channel *channel = file->private_data;
+ struct hsi_msg *msg;
+ ssize_t ret;
+
+ if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
+ return -EINVAL;
+ if (len > max_data_size)
+ len = max_data_size;
+ if (channel->ch >= channel->cl->tx_cfg.channels)
+ return -ECHRNG;
+ if (test_and_set_bit(HSC_CH_WRITE, &channel->flags))
+ return -EBUSY;
+ msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
+ if (!msg) {
+ clear_bit(HSC_CH_WRITE, &channel->flags);
+ return -ENOSPC;
+ }
+ if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ hsc_msg_len_set(msg, len);
+ msg->complete = hsc_tx_completed;
+ msg->destructor = hsc_tx_msg_destructor;
+ ret = hsi_async_write(channel->cl, msg);
+ if (ret < 0)
+ goto out;
+
+ ret = wait_event_interruptible(channel->tx_wait,
+ !list_empty(&channel->tx_msgs_queue));
+ if (ret < 0) {
+ clear_bit(HSC_CH_WRITE, &channel->flags);
+ hsi_flush(channel->cl);
+ return -EINTR;
+ }
+
+ msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue);
+ if (msg) {
+ if (msg->status == HSI_STATUS_ERROR)
+ ret = -EIO;
+ else
+ ret = hsc_msg_len_get(msg);
+
+ hsc_add_tail(channel, msg, &channel->free_msgs_list);
+ }
+out:
+ clear_bit(HSC_CH_WRITE, &channel->flags);
+
+ return ret;
+}
+
+static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hsc_channel *channel = file->private_data;
+ unsigned int state;
+ struct hsc_rx_config rxc;
+ struct hsc_tx_config txc;
+ long ret = 0;
+
+ switch (cmd) {
+ case HSC_RESET:
+ hsi_flush(channel->cl);
+ break;
+ case HSC_SET_PM:
+ if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
+ return -EFAULT;
+ if (state == HSC_PM_DISABLE) {
+ if (test_and_set_bit(HSC_CH_WLINE, &channel->flags))
+ return -EINVAL;
+ ret = hsi_start_tx(channel->cl);
+ } else if (state == HSC_PM_ENABLE) {
+ if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
+ return -EINVAL;
+ ret = hsi_stop_tx(channel->cl);
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case HSC_SEND_BREAK:
+ return hsc_break_send(channel->cl);
+ case HSC_SET_RX:
+ if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc)))
+ return -EFAULT;
+ return hsc_rx_set(channel->cl, &rxc);
+ case HSC_GET_RX:
+ hsc_rx_get(channel->cl, &rxc);
+ if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc)))
+ return -EFAULT;
+ break;
+ case HSC_SET_TX:
+ if (copy_from_user(&txc, (void __user *)arg, sizeof(txc)))
+ return -EFAULT;
+ return hsc_tx_set(channel->cl, &txc);
+ case HSC_GET_TX:
+ hsc_tx_get(channel->cl, &txc);
+ if (copy_to_user((void __user *)arg, &txc, sizeof(txc)))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static inline void __hsc_port_release(struct hsc_client_data *cl_data)
+{
+ BUG_ON(cl_data->usecnt == 0);
+
+ if (--cl_data->usecnt == 0) {
+ hsi_flush(cl_data->cl);
+ hsi_release_port(cl_data->cl);
+ }
+}
+
+static int hsc_open(struct inode *inode, struct file *file)
+{
+ struct hsc_client_data *cl_data;
+ struct hsc_channel *channel;
+ int ret = 0;
+
+ pr_debug("open, minor = %d\n", iminor(inode));
+
+ cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev);
+ mutex_lock(&cl_data->lock);
+ channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK);
+
+ if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Check if we have already claimed the port associated with the HSI
+ * client. If not, try to claim it; otherwise increase its refcount
+ */
+ if (cl_data->usecnt == 0) {
+ ret = hsi_claim_port(cl_data->cl, 0);
+ if (ret < 0)
+ goto out;
+ hsi_setup(cl_data->cl);
+ }
+ cl_data->usecnt++;
+
+ ret = hsc_msgs_alloc(channel);
+ if (ret < 0) {
+ __hsc_port_release(cl_data);
+ goto out;
+ }
+
+ file->private_data = channel;
+ mutex_unlock(&cl_data->lock);
+
+ return ret;
+out:
+ mutex_unlock(&cl_data->lock);
+
+ return ret;
+}
+
+static int hsc_release(struct inode *inode __maybe_unused, struct file *file)
+{
+ struct hsc_channel *channel = file->private_data;
+ struct hsc_client_data *cl_data = channel->cl_data;
+
+ mutex_lock(&cl_data->lock);
+ file->private_data = NULL;
+ if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
+ hsi_stop_tx(channel->cl);
+ __hsc_port_release(cl_data);
+ hsc_reset_list(channel, &channel->rx_msgs_queue);
+ hsc_reset_list(channel, &channel->tx_msgs_queue);
+ hsc_reset_list(channel, &channel->free_msgs_list);
+ clear_bit(HSC_CH_READ, &channel->flags);
+ clear_bit(HSC_CH_WRITE, &channel->flags);
+ clear_bit(HSC_CH_OPEN, &channel->flags);
+ wake_up(&channel->rx_wait);
+ wake_up(&channel->tx_wait);
+ mutex_unlock(&cl_data->lock);
+
+ return 0;
+}
+
+static const struct file_operations hsc_fops = {
+ .owner = THIS_MODULE,
+ .read = hsc_read,
+ .write = hsc_write,
+ .unlocked_ioctl = hsc_ioctl,
+ .open = hsc_open,
+ .release = hsc_release,
+};
+
+static void __devinit hsc_channel_init(struct hsc_channel *channel)
+{
+ init_waitqueue_head(&channel->rx_wait);
+ init_waitqueue_head(&channel->tx_wait);
+ spin_lock_init(&channel->lock);
+ INIT_LIST_HEAD(&channel->free_msgs_list);
+ INIT_LIST_HEAD(&channel->rx_msgs_queue);
+ INIT_LIST_HEAD(&channel->tx_msgs_queue);
+}
+
+static int __devinit hsc_probe(struct device *dev)
+{
+ const char devname[] = "hsi_char";
+ struct hsc_client_data *cl_data;
+ struct hsc_channel *channel;
+ struct hsi_client *cl = to_hsi_client(dev);
+ unsigned int hsc_baseminor;
+ dev_t hsc_dev;
+ int ret;
+ int i;
+
+ cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+ if (!cl_data) {
+ dev_err(dev, "Could not allocate hsc_client_data\n");
+ return -ENOMEM;
+ }
+ hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl));
+ if (!hsc_major) {
+ ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor,
+ HSC_DEVS, devname);
+ if (ret > 0)
+ hsc_major = MAJOR(hsc_dev);
+ } else {
+ hsc_dev = MKDEV(hsc_major, hsc_baseminor);
+ ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname);
+ }
+ if (ret < 0) {
+ dev_err(dev, "Device %s allocation failed %d\n",
+ hsc_major ? "minor" : "major", ret);
+ goto out1;
+ }
+ mutex_init(&cl_data->lock);
+ hsi_client_set_drvdata(cl, cl_data);
+ cdev_init(&cl_data->cdev, &hsc_fops);
+ cl_data->cdev.owner = THIS_MODULE;
+ cl_data->cl = cl;
+ for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) {
+ hsc_channel_init(channel);
+ channel->ch = i;
+ channel->cl = cl;
+ channel->cl_data = cl_data;
+ }
+
+ /* 1 hsi client -> N char devices (one for each channel) */
+ ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS);
+ if (ret) {
+ dev_err(dev, "Could not add char device %d\n", ret);
+ goto out2;
+ }
+
+ return 0;
+out2:
+ unregister_chrdev_region(hsc_dev, HSC_DEVS);
+out1:
+ kfree(cl_data);
+
+ return ret;
+}
+
+static int __devexit hsc_remove(struct device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
+ dev_t hsc_dev = cl_data->cdev.dev;
+
+ cdev_del(&cl_data->cdev);
+ unregister_chrdev_region(hsc_dev, HSC_DEVS);
+ hsi_client_set_drvdata(cl, NULL);
+ kfree(cl_data);
+
+ return 0;
+}
+
+static struct hsi_client_driver hsc_driver = {
+ .driver = {
+ .name = "hsi_char",
+ .owner = THIS_MODULE,
+ .probe = hsc_probe,
+ .remove = __devexit_p(hsc_remove),
+ },
+};
+
+static int __init hsc_init(void)
+{
+ int ret;
+
+ if ((max_data_size < 4) || (max_data_size > 0x10000) ||
+ (max_data_size & (max_data_size - 1))) {
+ pr_err("Invalid max read/write data size");
+ return -EINVAL;
+ }
+
+ ret = hsi_register_client_driver(&hsc_driver);
+ if (ret) {
+ pr_err("Error while registering HSI/SSI driver %d", ret);
+ return ret;
+ }
+
+ pr_info("HSI/SSI char device loaded\n");
+
+ return 0;
+}
+module_init(hsc_init);
+
+static void __exit hsc_exit(void)
+{
+ hsi_unregister_client_driver(&hsc_driver);
+ pr_info("HSI char device removed\n");
+}
+module_exit(hsc_exit);
+
+MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
+MODULE_ALIAS("hsi:hsi_char");
+MODULE_DESCRIPTION("HSI character device");
+MODULE_LICENSE("GPL v2");
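
A minimal user-space sketch of talking to one channel of this driver, assuming a device node created by hand for the dynamically allocated major (the driver registers a cdev but no device class, so udev will not create one) and assuming the hypothetical path /dev/hsi_char0 maps to channel 0; transfer lengths must be 32-bit aligned and no larger than max_data_size, as hsc_read()/hsc_write() above enforce:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node: mknod /dev/hsi_char0 c <major> <base minor + 0> */
	int fd = open("/dev/hsi_char0", O_RDWR);
	uint32_t buf[4] = { 0xcafebabe };

	if (fd < 0)
		return 1;
	/* Length must be a multiple of sizeof(u32), see hsc_write() above */
	if (write(fd, buf, sizeof(buf)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
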
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
new file mode 100644
index 0000000..4e2d79b
--- /dev/null
+++ b/drivers/hsi/hsi.c
@@ -0,0 +1,494 @@
+/*
+ * HSI core.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/compiler.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "hsi_core.h"
+
+static struct device_type hsi_ctrl = {
+ .name = "hsi_controller",
+};
+
+static struct device_type hsi_cl = {
+ .name = "hsi_client",
+};
+
+static struct device_type hsi_port = {
+ .name = "hsi_port",
+};
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *a __maybe_unused, char *buf)
+{
+ return sprintf(buf, "hsi:%s\n", dev_name(dev));
+}
+
+static struct device_attribute hsi_bus_dev_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL,
+};
+
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (dev->type == &hsi_cl)
+ add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
+
+ return 0;
+}
+
+static int hsi_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return strcmp(dev_name(dev), driver->name) == 0;
+}
+
+static struct bus_type hsi_bus_type = {
+ .name = "hsi",
+ .dev_attrs = hsi_bus_dev_attrs,
+ .match = hsi_bus_match,
+ .uevent = hsi_bus_uevent,
+};
+
+static void hsi_client_release(struct device *dev)
+{
+ kfree(to_hsi_client(dev));
+}
+
+static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
+{
+ struct hsi_client *cl;
+ unsigned long flags;
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return;
+ cl->device.type = &hsi_cl;
+ cl->tx_cfg = info->tx_cfg;
+ cl->rx_cfg = info->rx_cfg;
+ cl->device.bus = &hsi_bus_type;
+ cl->device.parent = &port->device;
+ cl->device.release = hsi_client_release;
+ dev_set_name(&cl->device, info->name);
+ cl->device.platform_data = info->platform_data;
+ spin_lock_irqsave(&port->clock, flags);
+ list_add_tail(&cl->link, &port->clients);
+ spin_unlock_irqrestore(&port->clock, flags);
+ if (info->archdata)
+ cl->device.archdata = *info->archdata;
+ if (device_register(&cl->device) < 0) {
+ pr_err("hsi: failed to register client: %s\n", info->name);
+ kfree(cl);
+ }
+}
+
+static void hsi_scan_board_info(struct hsi_controller *hsi)
+{
+ struct hsi_cl_info *cl_info;
+ struct hsi_port *p;
+
+ list_for_each_entry(cl_info, &hsi_board_list, list)
+ if (cl_info->info.hsi_id == hsi->id) {
+ p = hsi_find_port_num(hsi, cl_info->info.port);
+ if (!p)
+ continue;
+ hsi_new_client(p, &cl_info->info);
+ }
+}
+
+static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct hsi_port *port = to_hsi_port(dev->parent);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->clock, flags);
+ list_del(&cl->link);
+ spin_unlock_irqrestore(&port->clock, flags);
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
+{
+ device_for_each_child(dev, NULL, hsi_remove_client);
+ device_unregister(dev);
+
+ return 0;
+}
+
+static void hsi_controller_release(struct device *dev __maybe_unused)
+{
+}
+
+static void hsi_port_release(struct device *dev __maybe_unused)
+{
+}
+
+/**
+ * hsi_unregister_controller - Unregister an HSI controller
+ * @hsi: The HSI controller to unregister
+ */
+void hsi_unregister_controller(struct hsi_controller *hsi)
+{
+ device_for_each_child(&hsi->device, NULL, hsi_remove_port);
+ device_unregister(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_unregister_controller);
+
+/**
+ * hsi_register_controller - Register an HSI controller and its ports
+ * @hsi: The HSI controller to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_controller(struct hsi_controller *hsi)
+{
+ unsigned int i;
+ int err;
+
+ hsi->device.type = &hsi_ctrl;
+ hsi->device.bus = &hsi_bus_type;
+ hsi->device.release = hsi_controller_release;
+ err = device_register(&hsi->device);
+ if (err < 0)
+ return err;
+ for (i = 0; i < hsi->num_ports; i++) {
+ hsi->port[i].device.parent = &hsi->device;
+ hsi->port[i].device.bus = &hsi_bus_type;
+ hsi->port[i].device.release = hsi_port_release;
+ hsi->port[i].device.type = &hsi_port;
+ INIT_LIST_HEAD(&hsi->port[i].clients);
+ spin_lock_init(&hsi->port[i].clock);
+ err = device_register(&hsi->port[i].device);
+ if (err < 0)
+ goto out;
+ }
+ /* Populate HSI bus with HSI clients */
+ hsi_scan_board_info(hsi);
+
+ return 0;
+out:
+ hsi_unregister_controller(hsi);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_register_controller);
+
+/**
+ * hsi_register_client_driver - Register an HSI client to the HSI bus
+ * @drv: HSI client driver to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_client_driver(struct hsi_client_driver *drv)
+{
+ drv->driver.bus = &hsi_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(hsi_register_client_driver);
+
+static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
+{
+ return 0;
+}
+
+static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
+{
+ return 0;
+}
+
+/**
+ * hsi_alloc_controller - Allocate an HSI controller and its ports
+ * @n_ports: Number of ports on the HSI controller
+ * @flags: Kernel allocation flags
+ *
+ * Return NULL on failure or a pointer to an hsi_controller on success.
+ */
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
+{
+ struct hsi_controller *hsi;
+ struct hsi_port *port;
+ unsigned int i;
+
+ if (!n_ports)
+ return NULL;
+
+ port = kzalloc(sizeof(*port)*n_ports, flags);
+ if (!port)
+ return NULL;
+ hsi = kzalloc(sizeof(*hsi), flags);
+ if (!hsi)
+ goto out;
+ for (i = 0; i < n_ports; i++) {
+ dev_set_name(&port[i].device, "port%d", i);
+ port[i].num = i;
+ port[i].async = hsi_dummy_msg;
+ port[i].setup = hsi_dummy_cl;
+ port[i].flush = hsi_dummy_cl;
+ port[i].start_tx = hsi_dummy_cl;
+ port[i].stop_tx = hsi_dummy_cl;
+ port[i].release = hsi_dummy_cl;
+ mutex_init(&port[i].lock);
+ }
+ hsi->num_ports = n_ports;
+ hsi->port = port;
+
+ return hsi;
+out:
+ kfree(port);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_controller);
+
+/**
+ * hsi_free_controller - Free an HSI controller
+ * @hsi: Pointer to HSI controller
+ */
+void hsi_free_controller(struct hsi_controller *hsi)
+{
+ if (!hsi)
+ return;
+
+ kfree(hsi->port);
+ kfree(hsi);
+}
+EXPORT_SYMBOL_GPL(hsi_free_controller);
+
+/**
+ * hsi_free_msg - Free an HSI message
+ * @msg: Pointer to the HSI message
+ *
+ * The client is responsible for freeing the buffers pointed to by the scatterlists.
+ */
+void hsi_free_msg(struct hsi_msg *msg)
+{
+ if (!msg)
+ return;
+ sg_free_table(&msg->sgt);
+ kfree(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_free_msg);
+
+/**
+ * hsi_alloc_msg - Allocate an HSI message
+ * @nents: Number of memory entries
+ * @flags: Kernel allocation flags
+ *
+ * nents can be 0. This mainly makes sense for read transfers.
+ * In that case, HSI drivers will call the complete callback when
+ * there is data to be read without consuming it.
+ *
+ * Return NULL on failure or a pointer to an hsi_msg on success.
+ */
+struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
+{
+ struct hsi_msg *msg;
+ int err;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return NULL;
+
+ if (!nents)
+ return msg;
+
+ err = sg_alloc_table(&msg->sgt, nents, flags);
+ if (unlikely(err)) {
+ kfree(msg);
+ msg = NULL;
+ }
+
+ return msg;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_msg);
+
+/**
+ * hsi_async - Submit an HSI transfer to the controller
+ * @cl: HSI client sending the transfer
+ * @msg: The HSI transfer passed to controller
+ *
+ * The HSI message must have the channel, ttype, complete and destructor
+ * fields set beforehand. If nents > 0, the client must also initialize
+ * the scatterlists to point to the buffers to write to or read from.
+ *
+ * HSI controllers rely on pre-allocated buffers from their clients and they
+ * do not allocate buffers on their own.
+ *
+ * Once the HSI message transfer finishes, the HSI controller calls the
+ * complete callback with the status and actual_len fields of the HSI message
+ * updated. The complete callback can be called before returning from
+ * hsi_async.
+ *
+ * Returns -errno on failure or 0 on success
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+
+ WARN_ON_ONCE(!msg->destructor || !msg->complete);
+ msg->cl = cl;
+
+ return port->async(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_async);
+
+/**
+ * hsi_claim_port - Claim the HSI client's port
+ * @cl: HSI client that wants to claim its port
+ * @share: Flag to indicate if the client wants to share the port or not.
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_claim_port(struct hsi_client *cl, unsigned int share)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ int err = 0;
+
+ mutex_lock(&port->lock);
+ if ((port->claimed) && (!port->shared || !share)) {
+ err = -EBUSY;
+ goto out;
+ }
+ if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) {
+ err = -ENODEV;
+ goto out;
+ }
+ port->claimed++;
+ port->shared = !!share;
+ cl->pclaimed = 1;
+out:
+ mutex_unlock(&port->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_claim_port);
+
+/**
+ * hsi_release_port - Release the HSI client's port
+ * @cl: HSI client which previously claimed its port
+ */
+void hsi_release_port(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ mutex_lock(&port->lock);
+ /* Allow HW driver to do some cleanup */
+ port->release(cl);
+ if (cl->pclaimed)
+ port->claimed--;
+ BUG_ON(port->claimed < 0);
+ cl->pclaimed = 0;
+ if (!port->claimed)
+ port->shared = 0;
+ module_put(to_hsi_controller(port->device.parent)->owner);
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(hsi_release_port);
+
+static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+ if (cl->hsi_start_rx)
+ (*cl->hsi_start_rx)(cl);
+
+ return 0;
+}
+
+static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+ if (cl->hsi_stop_rx)
+ (*cl->hsi_stop_rx)(cl);
+
+ return 0;
+}
+
+static int hsi_port_for_each_client(struct hsi_port *port, void *data,
+ int (*fn)(struct hsi_client *cl, void *data))
+{
+ struct hsi_client *cl;
+
+ spin_lock(&port->clock);
+ list_for_each_entry(cl, &port->clients, link) {
+ spin_unlock(&port->clock);
+ (*fn)(cl, data);
+ spin_lock(&port->clock);
+ }
+ spin_unlock(&port->clock);
+
+ return 0;
+}
+
+/**
+ * hsi_event - Notifies clients about port events
+ * @port: Port where the event occurred
+ * @event: The event type
+ *
+ * Clients should not be concerned about wake line behavior. However, due
+ * to a race condition in the HSI HW protocol, clients need to be notified
+ * about wake line changes, so they can implement a workaround for it.
+ *
+ * Events:
+ * HSI_EVENT_START_RX - Incoming wake line high
+ * HSI_EVENT_STOP_RX - Incoming wake line down
+ */
+void hsi_event(struct hsi_port *port, unsigned int event)
+{
+ int (*fn)(struct hsi_client *cl, void *data);
+
+ switch (event) {
+ case HSI_EVENT_START_RX:
+ fn = hsi_start_rx;
+ break;
+ case HSI_EVENT_STOP_RX:
+ fn = hsi_stop_rx;
+ break;
+ default:
+ return;
+ }
+ hsi_port_for_each_client(port, NULL, fn);
+}
+EXPORT_SYMBOL_GPL(hsi_event);
+
+static int __init hsi_init(void)
+{
+ return bus_register(&hsi_bus_type);
+}
+postcore_initcall(hsi_init);
+
+static void __exit hsi_exit(void)
+{
+ bus_unregister(&hsi_bus_type);
+}
+module_exit(hsi_exit);
+
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
+MODULE_LICENSE("GPL v2");
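
A minimal in-kernel sketch of the client-side flow exported above (claim the port, apply the configuration, queue an asynchronous read), assuming an already-probed hsi_client; my_complete(), my_destructor() and my_submit_read() are hypothetical names:

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void my_complete(struct hsi_msg *msg)
{
	/* msg->actual_len bytes are ready; recycle or free the message */
	hsi_free_msg(msg);
}

static void my_destructor(struct hsi_msg *msg)
{
	/* Called instead of my_complete() when the transfer is flushed */
	hsi_free_msg(msg);
}

static int my_submit_read(struct hsi_client *cl, void *buf, size_t len)
{
	struct hsi_msg *msg;
	int err;

	err = hsi_claim_port(cl, 0);		/* exclusive claim */
	if (err < 0)
		return err;
	hsi_setup(cl);				/* apply cl->tx_cfg / cl->rx_cfg */

	msg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!msg) {
		hsi_release_port(cl);
		return -ENOMEM;
	}
	sg_init_one(msg->sgt.sgl, buf, len);
	msg->channel = 0;
	msg->complete = my_complete;
	msg->destructor = my_destructor;

	err = hsi_async_read(cl, msg);		/* completion arrives via my_complete() */
	if (err < 0) {
		hsi_free_msg(msg);
		hsi_release_port(cl);
	}
	return err;
}
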
diff --git a/drivers/hsi/hsi_boardinfo.c b/drivers/hsi/hsi_boardinfo.c
new file mode 100644
index 0000000..e56bc6d
--- /dev/null
+++ b/drivers/hsi/hsi_boardinfo.c
@@ -0,0 +1,62 @@
+/*
+ * HSI clients registration interface
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "hsi_core.h"
+
+/*
+ * hsi_board_list is only used internally by the HSI framework.
+ * No one else is allowed to make use of it.
+ */
+LIST_HEAD(hsi_board_list);
+EXPORT_SYMBOL_GPL(hsi_board_list);
+
+/**
+ * hsi_register_board_info - Register HSI clients information
+ * @info: Array of HSI clients on the board
+ * @len: Length of the array
+ *
+ * HSI clients are statically declared and registered on board files.
+ *
+ * HSI clients will be automatically registered to the HSI bus once the
+ * controller and the port where the clients wish to attach are registered
+ * to it.
+ *
+ * Return -errno on failure, 0 on success.
+ */
+int __init hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len)
+{
+ struct hsi_cl_info *cl_info;
+
+ cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL);
+ if (!cl_info)
+ return -ENOMEM;
+
+ for (; len; len--, info++, cl_info++) {
+ cl_info->info = *info;
+ list_add_tail(&cl_info->list, &hsi_board_list);
+ }
+
+ return 0;
+}
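
A hypothetical board-file sketch of feeding this table, with made-up configuration values; the field names follow struct hsi_board_info as consumed by hsi_new_client() and hsi_scan_board_info() above:

#include <linux/hsi/hsi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct hsi_board_info my_hsi_devices[] __initdata = {
	{
		.name	= "hsi_char",	/* bound by name, see hsi_bus_match() */
		.hsi_id	= 0,		/* controller 0 ... */
		.port	= 0,		/* ... port 0 */
		.tx_cfg	= {
			.mode		= HSI_MODE_FRAME,
			.channels	= 2,
			.speed		= 96000,	/* made-up rate */
			.arb_mode	= HSI_ARB_RR,
		},
		.rx_cfg	= {
			.mode		= HSI_MODE_FRAME,
			.channels	= 2,
			.flow		= HSI_FLOW_SYNC,
		},
	},
};

static int __init my_board_init(void)
{
	return hsi_register_board_info(my_hsi_devices,
				       ARRAY_SIZE(my_hsi_devices));
}
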
diff --git a/drivers/hsi/hsi_core.h b/drivers/hsi/hsi_core.h
new file mode 100644
index 0000000..ab5c2fb
--- /dev/null
+++ b/drivers/hsi/hsi_core.h
@@ -0,0 +1,35 @@
+/*
+ * HSI framework internal interfaces
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_CORE_H__
+#define __LINUX_HSI_CORE_H__
+
+#include <linux/hsi/hsi.h>
+
+struct hsi_cl_info {
+ struct list_head list;
+ struct hsi_board_info info;
+};
+
+extern struct list_head hsi_board_list;
+
+#endif /* __LINUX_HSI_CORE_H__ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5b32d56..8deedc1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -253,7 +253,8 @@ config SENSORS_K10TEMP
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
the AMD Family 10h and all revisions of the AMD Family 11h,
- 12h (Llano), 14h (Brazos) and 15h (Bulldozer) microarchitectures.
+ 12h (Llano), 14h (Brazos) and 15h (Bulldozer/Trinity)
+ microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -425,7 +426,7 @@ config SENSORS_GL520SM
config SENSORS_GPIO_FAN
tristate "GPIO fan"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
If you say yes here you get support for fans connected to GPIO lines.
@@ -883,7 +884,7 @@ source drivers/hwmon/pmbus/Kconfig
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
If you say yes here you get support for the Sensiron SHT10, SHT11,
SHT15, SHT71, SHT75 humidity and temperature sensors.
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 554f046..145f135 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -632,6 +632,7 @@ static int register_ro_attrs(struct acpi_power_meter_resource *resource,
sensors->dev_attr.show = ro->show;
sensors->index = ro->index;
+ sysfs_attr_init(&sensors->dev_attr.attr);
res = device_create_file(dev, &sensors->dev_attr);
if (res) {
sensors->dev_attr.attr.name = NULL;
@@ -661,6 +662,7 @@ static int register_rw_attrs(struct acpi_power_meter_resource *resource,
sensors->dev_attr.store = rw->set;
sensors->index = rw->index;
+ sysfs_attr_init(&sensors->dev_attr.attr);
res = device_create_file(dev, &sensors->dev_attr);
if (res) {
sensors->dev_attr.attr.name = NULL;
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 0e0cfcc..ce43642 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -128,6 +128,7 @@ static int __devinit ad7314_probe(struct spi_device *spi_dev)
ret = PTR_ERR(chip->hwmon_dev);
goto error_remove_group;
}
+ chip->spi_dev = spi_dev;
return 0;
error_remove_group:
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index ff37363..44e1fd7 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -233,18 +233,15 @@ static const auto_chan_table_t auto_channel_select_table_adm1030 = {
* nearest match if no exact match where found.
*/
static int
-get_fan_auto_nearest(struct adm1031_data *data,
- int chan, u8 val, u8 reg, u8 *new_reg)
+get_fan_auto_nearest(struct adm1031_data *data, int chan, u8 val, u8 reg)
{
int i;
int first_match = -1, exact_match = -1;
u8 other_reg_val =
(*data->chan_select_table)[FAN_CHAN_FROM_REG(reg)][chan ? 0 : 1];
- if (val == 0) {
- *new_reg = 0;
+ if (val == 0)
return 0;
- }
for (i = 0; i < 8; i++) {
if ((val == (*data->chan_select_table)[i][chan]) &&
@@ -264,13 +261,11 @@ get_fan_auto_nearest(struct adm1031_data *data,
}
if (exact_match >= 0)
- *new_reg = exact_match;
+ return exact_match;
else if (first_match >= 0)
- *new_reg = first_match;
- else
- return -EINVAL;
+ return first_match;
- return 0;
+ return -EINVAL;
}
static ssize_t show_fan_auto_channel(struct device *dev,
@@ -301,11 +296,12 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
- ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg);
- if (ret) {
+ ret = get_fan_auto_nearest(data, nr, val, data->conf1);
+ if (ret < 0) {
mutex_unlock(&data->update_lock);
return ret;
}
+ reg = ret;
data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1);
if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^
(old_fan_mode & ADM1031_CONF1_AUTO_MODE)) {
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 729499e..ece4159 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -276,6 +276,7 @@ static bool duty_mode_enabled(u8 pwm_enable)
return false;
default:
BUG();
+ return true;
}
}
@@ -291,6 +292,7 @@ static bool auto_mode_enabled(u8 pwm_enable)
return true;
default:
BUG();
+ return false;
}
}
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index aba29d6..307bb32 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,6 +33,9 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+/* PCI-IDs for Northbridge devices not used anywhere else */
+#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3 0x1403
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -210,6 +213,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 193067e..de8f7ad 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -596,8 +596,10 @@ static int max6639_remove(struct i2c_client *client)
return 0;
}
-static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int max6639_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
if (data < 0)
return data;
@@ -606,8 +608,9 @@ static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
}
-static int max6639_resume(struct i2c_client *client)
+static int max6639_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
if (data < 0)
return data;
@@ -615,6 +618,7 @@ static int max6639_resume(struct i2c_client *client)
return i2c_smbus_write_byte_data(client,
MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
}
+#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id max6639_id[] = {
{"max6639", 0},
@@ -623,15 +627,18 @@ static const struct i2c_device_id max6639_id[] = {
MODULE_DEVICE_TABLE(i2c, max6639_id);
+static const struct dev_pm_ops max6639_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume)
+};
+
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6639",
+ .pm = &max6639_pm_ops,
},
.probe = max6639_probe,
.remove = max6639_remove,
- .suspend = max6639_suspend,
- .resume = max6639_resume,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
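
For comparison, the same suspend/resume pair could be wrapped with the SIMPLE_DEV_PM_OPS helper, which expands to an equivalent dev_pm_ops initializer; a sketch reusing the names from the hunk above:

static SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
/* .driver.pm = &max6639_pm_ops as above; the callbacks still need the
 * CONFIG_PM_SLEEP guard (or __maybe_unused), since the helper only wires
 * them up when sleep support is enabled.
 */
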
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index a25350c..54922ed 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -2619,15 +2619,15 @@ static struct platform_driver w83627ehf_driver = {
static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
struct w83627ehf_sio_data *sio_data)
{
- static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
- static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
- static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
- static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
- static const char __initdata sio_name_W83627UHG[] = "W83627UHG";
- static const char __initdata sio_name_W83667HG[] = "W83667HG";
- static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
- static const char __initdata sio_name_NCT6775[] = "NCT6775F";
- static const char __initdata sio_name_NCT6776[] = "NCT6776F";
+ static const char sio_name_W83627EHF[] __initconst = "W83627EHF";
+ static const char sio_name_W83627EHG[] __initconst = "W83627EHG";
+ static const char sio_name_W83627DHG[] __initconst = "W83627DHG";
+ static const char sio_name_W83627DHG_P[] __initconst = "W83627DHG-P";
+ static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
+ static const char sio_name_W83667HG[] __initconst = "W83667HG";
+ static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
+ static const char sio_name_NCT6775[] __initconst = "NCT6775F";
+ static const char sio_name_NCT6776[] __initconst = "NCT6776F";
u16 val;
const char *sio_name;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ae2ec92..a5bee8e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2707,7 +2707,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
* The exported alloc_coherent function for dma_ops.
*/
static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag)
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs)
{
unsigned long flags;
void *virt_addr;
@@ -2765,7 +2766,8 @@ out_free:
* The exported free_coherent function for dma_ops.
*/
static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr)
+ void *virt_addr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
{
unsigned long flags;
struct protection_domain *domain;
@@ -2846,8 +2848,8 @@ static void __init prealloc_protection_domains(void)
}
static struct dma_map_ops amd_iommu_dma_ops = {
- .alloc_coherent = alloc_coherent,
- .free_coherent = free_coherent,
+ .alloc = alloc_coherent,
+ .free = free_coherent,
.map_page = map_page,
.unmap_page = unmap_page,
.map_sg = map_sg,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 132f93b..f93d5ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2949,7 +2949,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *vaddr;
int order;
@@ -2981,7 +2982,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -3126,8 +3127,8 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
}
struct dma_map_ops intel_dma_ops = {
- .alloc_coherent = intel_alloc_coherent,
- .free_coherent = intel_free_coherent,
+ .alloc = intel_alloc_coherent,
+ .free = intel_free_coherent,
.map_sg = intel_map_sg,
.unmap_sg = intel_unmap_sg,
.map_page = intel_map_page,
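
For context, the renamed .alloc/.free hooks gain the dma_attrs argument so attributes can flow in from callers through dma_alloc_attrs(); a hedged caller-side sketch, where the chosen attribute is only illustrative and dev is whatever struct device the caller owns:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static void *alloc_wc_buffer(struct device *dev, size_t size, dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);	/* illustrative attribute */
	/* Reaches the IOMMU driver's .alloc hook with &attrs as the extra argument */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}
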
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 05ed4d0c..c0b8c96 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -891,7 +891,7 @@ open_bchannel(struct fritzcard *fc, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index d055ae7..e2c83a2 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1962,7 +1962,7 @@ open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6023387..8cde2a0 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -486,7 +486,7 @@ open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index b47e9be..884369f 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1506,7 +1506,7 @@ open_bchannel(struct ipac_hw *ipac, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 10446ab..9a6da6e 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1670,7 +1670,7 @@ isar_open(struct isar_hw *isar, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index dd6de9f..c726e09 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -860,7 +860,7 @@ open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 7f1e7ba..2183357 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1015,7 +1015,7 @@ open_bchannel(struct w6692_hw *card, struct channel_req *rq)
{
struct bchannel *bch;
- if (rq->adr.channel > 2)
+ if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b0fcc7d..fa211d8 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -198,6 +198,7 @@ out:
static int linear_run (struct mddev *mddev)
{
struct linear_conf *conf;
+ int ret;
if (md_check_no_bitmap(mddev))
return -EINVAL;
@@ -211,7 +212,13 @@ static int linear_run (struct mddev *mddev)
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
- return md_integrity_register(mddev);
+
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ kfree(conf);
+ mddev->private = NULL;
+ }
+ return ret;
}
static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6f31f55..de63a1f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -407,6 +407,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
return array_sectors;
}
+static int raid0_stop(struct mddev *mddev);
+
static int raid0_run(struct mddev *mddev)
{
struct r0conf *conf;
@@ -454,7 +456,12 @@ static int raid0_run(struct mddev *mddev)
blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
dump_zones(mddev);
- return md_integrity_register(mddev);
+
+ ret = md_integrity_register(mddev);
+ if (ret)
+ raid0_stop(mddev);
+
+ return ret;
}
static int raid0_stop(struct mddev *mddev)
@@ -625,6 +632,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
static void *raid0_takeover_raid1(struct mddev *mddev)
{
struct r0conf *priv_conf;
+ int chunksect;
/* Check layout:
* - (N - 1) mirror drives must be already faulty
@@ -635,10 +643,25 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
return ERR_PTR(-EINVAL);
}
+ /*
+ * a raid1 doesn't have the notion of chunk size, so
+ * figure out the largest suitable size we can use.
+ */
+ chunksect = 64 * 2; /* 64K by default */
+
+ /* The array must be an exact multiple of chunksize */
+ while (chunksect && (mddev->array_sectors & (chunksect - 1)))
+ chunksect >>= 1;
+
+ if ((chunksect << 9) < PAGE_SIZE)
+ /* array size does not allow a suitable chunk size */
+ return ERR_PTR(-EINVAL);
+
/* Set new parameters */
mddev->new_level = 0;
mddev->new_layout = 0;
- mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+ mddev->new_chunk_sectors = chunksect;
+ mddev->chunk_sectors = chunksect;
mddev->delta_disks = 1 - mddev->raid_disks;
mddev->raid_disks = 1;
/* make sure it will not be marked as dirty */
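
As an aside, the chunk-size logic added to raid0_takeover_raid1() above amounts to: start from 128 sectors (64K), halve until the chunk evenly divides the array size, and refuse the takeover if the result drops below one page. A minimal standalone sketch of that selection, assuming 512-byte sectors and a 4K page purely for the demo:

#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors, as in the block layer */
#define DEMO_PAGE_SIZE 4096UL   /* assumed page size for the demo */

/* Return a suitable chunk size in sectors, or 0 if none is acceptable. */
static unsigned long pick_chunk_sectors(unsigned long long array_sectors)
{
	unsigned long chunksect = 64 * 2;   /* start at 64K, like the patch */

	/* halve until the array size is an exact multiple of the chunk */
	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	/* a chunk smaller than one page is not usable */
	if ((chunksect << SECTOR_SHIFT) < DEMO_PAGE_SIZE)
		return 0;

	return chunksect;
}

int main(void)
{
	unsigned long long sizes[] = { 1048576ULL, 1048576ULL + 16, 1048576ULL + 2 };

	for (int i = 0; i < 3; i++)
		printf("%llu sectors -> chunk %lu sectors\n",
		       sizes[i], pick_chunk_sectors(sizes[i]));
	return 0;
}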
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a40a20..d35e4c9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1738,7 +1738,7 @@ static int process_checks(struct r1bio *r1_bio)
s = sbio->bi_io_vec[j].bv_page;
if (memcmp(page_address(p),
page_address(s),
- PAGE_SIZE))
+ sbio->bi_io_vec[j].bv_len))
break;
}
} else
@@ -2386,8 +2386,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
int ok = 1;
for (i = 0 ; i < conf->raid_disks * 2 ; i++)
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
- struct md_rdev *rdev =
- rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
ok = rdev_set_badblocks(rdev, sector_nr,
min_bad, 0
) && ok;
@@ -2636,11 +2635,13 @@ static struct r1conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
+static int stop(struct mddev *mddev);
static int run(struct mddev *mddev)
{
struct r1conf *conf;
int i;
struct md_rdev *rdev;
+ int ret;
if (mddev->level != 1) {
printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2705,7 +2706,11 @@ static int run(struct mddev *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
}
- return md_integrity_register(mddev);
+
+ ret = md_integrity_register(mddev);
+ if (ret)
+ stop(mddev);
+ return ret;
}
static int stop(struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3540316..fff7821 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1821,7 +1821,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
for (j = 0; j < vcnt; j++)
if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
page_address(tbio->bi_io_vec[j].bv_page),
- PAGE_SIZE))
+ fbio->bi_io_vec[j].bv_len))
break;
if (j == vcnt)
continue;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 23ac880..f351422 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2471,39 +2471,41 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int abort = 0;
int i;
- md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
clear_bit(STRIPE_SYNCING, &sh->state);
s->syncing = 0;
s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
+ * Don't even need to abort as that is handled elsewhere
+ * if needed, and not always wanted e.g. if there is a known
+ * bad block here.
* For recover/replace we need to record a bad block on all
* non-sync devices, or abort the recovery
*/
- if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
- return;
- /* During recovery devices cannot be removed, so locking and
- * refcounting of rdevs is not needed
- */
- for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = conf->disks[i].rdev;
- if (rdev
- && !test_bit(Faulty, &rdev->flags)
- && !test_bit(In_sync, &rdev->flags)
- && !rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
- abort = 1;
- rdev = conf->disks[i].replacement;
- if (rdev
- && !test_bit(Faulty, &rdev->flags)
- && !test_bit(In_sync, &rdev->flags)
- && !rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
- abort = 1;
- }
- if (abort) {
- conf->recovery_disabled = conf->mddev->recovery_disabled;
- set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
+ if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
+ /* During recovery devices cannot be removed, so
+ * locking and refcounting of rdevs is not needed
+ */
+ for (i = 0; i < conf->raid_disks; i++) {
+ struct md_rdev *rdev = conf->disks[i].rdev;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ rdev = conf->disks[i].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ }
+ if (abort)
+ conf->recovery_disabled =
+ conf->mddev->recovery_disabled;
}
+ md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}
static int want_replace(struct stripe_head *sh, int disk_idx)
@@ -3203,7 +3205,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Not in-sync */;
else if (is_bad) {
/* also not in-sync */
- if (!test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+ test_bit(R5_UPTODATE, &dev->flags)) {
/* treat as in-sync, but with a read error
* which we can now try to correct
*/
@@ -3276,12 +3279,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* If there is a failed device being replaced,
* we must be recovering.
* else if we are after recovery_cp, we must be syncing
+ * else if MD_RECOVERY_REQUESTED is set, we are also syncing.
* else we can only be replacing
* sync and recovery both need to read all devices, and so
* use the same flag.
*/
if (do_recovery ||
- sh->sector >= conf->mddev->recovery_cp)
+ sh->sector >= conf->mddev->recovery_cp ||
+ test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
s->syncing = 1;
else
s->replacing = 1;
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3f7ad83..3aa9a96 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -134,12 +134,17 @@ static int force_hwbrks;
static int hwbreaks_ok;
static int hw_break_val;
static int hw_break_val2;
+static int cont_instead_of_sstep;
+static unsigned long cont_thread_id;
+static unsigned long sstep_thread_id;
#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
static int arch_needs_sstep_emulation = 1;
#else
static int arch_needs_sstep_emulation;
#endif
+static unsigned long cont_addr;
static unsigned long sstep_addr;
+static int restart_from_top_after_write;
static int sstep_state;
/* Storage for the registers, in GDB format. */
@@ -187,7 +192,8 @@ static int kgdbts_unreg_thread(void *ptr)
*/
while (!final_ack)
msleep_interruptible(1500);
-
+ /* Pause for any other threads to exit after final ack. */
+ msleep_interruptible(1000);
if (configured)
kgdb_unregister_io_module(&kgdbts_io_ops);
configured = 0;
@@ -211,7 +217,7 @@ static unsigned long lookup_addr(char *arg)
if (!strcmp(arg, "kgdbts_break_test"))
addr = (unsigned long)kgdbts_break_test;
else if (!strcmp(arg, "sys_open"))
- addr = (unsigned long)sys_open;
+ addr = (unsigned long)do_sys_open;
else if (!strcmp(arg, "do_fork"))
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
@@ -283,6 +289,16 @@ static void hw_break_val_write(void)
hw_break_val++;
}
+static int get_thread_id_continue(char *put_str, char *arg)
+{
+ char *ptr = &put_str[11];
+
+ if (put_str[1] != 'T' || put_str[2] != '0')
+ return 1;
+ kgdb_hex2long(&ptr, &cont_thread_id);
+ return 0;
+}
+
static int check_and_rewind_pc(char *put_str, char *arg)
{
unsigned long addr = lookup_addr(arg);
@@ -299,13 +315,21 @@ static int check_and_rewind_pc(char *put_str, char *arg)
if (addr + BREAK_INSTR_SIZE == ip)
offset = -BREAK_INSTR_SIZE;
#endif
- if (strcmp(arg, "silent") && ip + offset != addr) {
+
+ if (arch_needs_sstep_emulation && sstep_addr &&
+ ip + offset == sstep_addr &&
+ ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
+ /* This is special case for emulated single step */
+ v2printk("Emul: rewind hit single step bp\n");
+ restart_from_top_after_write = 1;
+ } else if (strcmp(arg, "silent") && ip + offset != addr) {
eprintk("kgdbts: BP mismatch %lx expected %lx\n",
ip + offset, addr);
return 1;
}
/* Readjust the instruction pointer if needed */
ip += offset;
+ cont_addr = ip;
#ifdef GDB_ADJUSTS_BREAK_OFFSET
instruction_pointer_set(&kgdbts_regs, ip);
#endif
@@ -315,6 +339,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
static int check_single_step(char *put_str, char *arg)
{
unsigned long addr = lookup_addr(arg);
+ static int matched_id;
+
/*
* From an arch independent point of view the instruction pointer
* should be on a different instruction
@@ -324,6 +350,29 @@ static int check_single_step(char *put_str, char *arg)
gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
v2printk("Singlestep stopped at IP: %lx\n",
instruction_pointer(&kgdbts_regs));
+
+ if (sstep_thread_id != cont_thread_id) {
+ /*
+ * Ensure we stopped in the same thread id as before, else the
+ * debugger should continue until the original thread that was
+ * single stepped is scheduled again, emulating gdb's behavior.
+ */
+ v2printk("ThrID does not match: %lx\n", cont_thread_id);
+ if (arch_needs_sstep_emulation) {
+ if (matched_id &&
+ instruction_pointer(&kgdbts_regs) != addr)
+ goto continue_test;
+ matched_id++;
+ ts.idx -= 2;
+ sstep_state = 0;
+ return 0;
+ }
+ cont_instead_of_sstep = 1;
+ ts.idx -= 4;
+ return 0;
+ }
+continue_test:
+ matched_id = 0;
if (instruction_pointer(&kgdbts_regs) == addr) {
eprintk("kgdbts: SingleStep failed at %lx\n",
instruction_pointer(&kgdbts_regs));
@@ -365,10 +414,40 @@ static int got_break(char *put_str, char *arg)
return 1;
}
+static void get_cont_catch(char *arg)
+{
+ /* Always send detach because the test is completed at this point */
+ fill_get_buf("D");
+}
+
+static int put_cont_catch(char *put_str, char *arg)
+{
+ /* This is at the end of the test and we catch any and all input */
+ v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
+ ts.idx--;
+ return 0;
+}
+
+static int emul_reset(char *put_str, char *arg)
+{
+ if (strncmp(put_str, "$OK", 3))
+ return 1;
+ if (restart_from_top_after_write) {
+ restart_from_top_after_write = 0;
+ ts.idx = -1;
+ }
+ return 0;
+}
+
static void emul_sstep_get(char *arg)
{
if (!arch_needs_sstep_emulation) {
- fill_get_buf(arg);
+ if (cont_instead_of_sstep) {
+ cont_instead_of_sstep = 0;
+ fill_get_buf("c");
+ } else {
+ fill_get_buf(arg);
+ }
return;
}
switch (sstep_state) {
@@ -398,9 +477,11 @@ static void emul_sstep_get(char *arg)
static int emul_sstep_put(char *put_str, char *arg)
{
if (!arch_needs_sstep_emulation) {
- if (!strncmp(put_str+1, arg, 2))
- return 0;
- return 1;
+ char *ptr = &put_str[11];
+ if (put_str[1] != 'T' || put_str[2] != '0')
+ return 1;
+ kgdb_hex2long(&ptr, &sstep_thread_id);
+ return 0;
}
switch (sstep_state) {
case 1:
@@ -411,8 +492,7 @@ static int emul_sstep_put(char *put_str, char *arg)
v2printk("Stopped at IP: %lx\n",
instruction_pointer(&kgdbts_regs));
/* Want to stop at IP + break instruction size by default */
- sstep_addr = instruction_pointer(&kgdbts_regs) +
- BREAK_INSTR_SIZE;
+ sstep_addr = cont_addr + BREAK_INSTR_SIZE;
break;
case 2:
if (strncmp(put_str, "$OK", 3)) {
@@ -424,6 +504,9 @@ static int emul_sstep_put(char *put_str, char *arg)
if (strncmp(put_str, "$T0", 3)) {
eprintk("kgdbts: failed continue sstep\n");
return 1;
+ } else {
+ char *ptr = &put_str[11];
+ kgdb_hex2long(&ptr, &sstep_thread_id);
}
break;
case 4:
@@ -502,10 +585,10 @@ static struct test_struct bad_read_test[] = {
static struct test_struct singlestep_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
- { "c", "T0*", }, /* Continue */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs }, /* Write registers */
- { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "kgdbts_break_test", NULL, check_single_step },
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
@@ -523,16 +606,16 @@ static struct test_struct singlestep_break_test[] = {
static struct test_struct do_fork_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
- { "c", "T0*", }, /* Continue */
- { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
- { "write", "OK", write_regs }, /* Write registers */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
{ "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+ { "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "do_fork", NULL, check_single_step },
{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
- { "", "" },
+ { "", "", get_cont_catch, put_cont_catch },
};
/* Test for hitting a breakpoint at sys_open for whatever the number
@@ -541,16 +624,16 @@ static struct test_struct do_fork_test[] = {
static struct test_struct sys_open_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
- { "c", "T0*", }, /* Continue */
- { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
- { "write", "OK", write_regs }, /* Write registers */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
{ "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+ { "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "sys_open", NULL, check_single_step },
{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
- { "", "" },
+ { "", "", get_cont_catch, put_cont_catch },
};
/*
@@ -693,8 +776,8 @@ static int run_simple_test(int is_get_char, int chr)
/* This callback is a put char which is when kgdb sends data to
* this I/O module.
*/
- if (ts.tst[ts.idx].get[0] == '\0' &&
- ts.tst[ts.idx].put[0] == '\0') {
+ if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
+ !ts.tst[ts.idx].get_handler) {
eprintk("kgdbts: ERROR: beyond end of test on"
" '%s' line %i\n", ts.name, ts.idx);
return 0;
@@ -907,6 +990,17 @@ static void kgdbts_run_tests(void)
if (ptr)
sstep_test = simple_strtol(ptr+1, NULL, 10);
+ /* All HW break point tests */
+ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+ hwbreaks_ok = 1;
+ v1printk("kgdbts:RUN hw breakpoint test\n");
+ run_breakpoint_test(1);
+ v1printk("kgdbts:RUN hw write breakpoint test\n");
+ run_hw_break_test(1);
+ v1printk("kgdbts:RUN access write breakpoint test\n");
+ run_hw_break_test(0);
+ }
+
/* required internal KGDB tests */
v1printk("kgdbts:RUN plant and detach test\n");
run_plant_and_detach_test(0);
@@ -924,35 +1018,11 @@ static void kgdbts_run_tests(void)
/* ===Optional tests=== */
- /* All HW break point tests */
- if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
- hwbreaks_ok = 1;
- v1printk("kgdbts:RUN hw breakpoint test\n");
- run_breakpoint_test(1);
- v1printk("kgdbts:RUN hw write breakpoint test\n");
- run_hw_break_test(1);
- v1printk("kgdbts:RUN access write breakpoint test\n");
- run_hw_break_test(0);
- }
-
if (nmi_sleep) {
v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
run_nmi_sleep_test(nmi_sleep);
}
-#ifdef CONFIG_DEBUG_RODATA
- /* Until there is an api to write to read-only text segments, use
- * HW breakpoints for the remainder of any tests, else print a
- * failure message if hw breakpoints do not work.
- */
- if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) {
- eprintk("kgdbts: HW breakpoints do not work,"
- "skipping remaining tests\n");
- return;
- }
- force_hwbrks = 1;
-#endif /* CONFIG_DEBUG_RODATA */
-
/* If the do_fork test is run it will be the last test that is
* executed because a kernel thread will be spawned at the very
* end to unregister the debug hooks.
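
The kgdbts changes above key several checks off the thread id reported in the debugger's stop replies: get_thread_id_continue() and the emulated single-step path both skip a fixed 11-character prefix of the "$T0..thread:" reply and parse the hex id that follows. A rough user-space sketch of that parsing, with strtoul standing in for kgdb_hex2long and the packet layout assumed for illustration:

#include <stdio.h>
#include <stdlib.h>

/*
 * Parse the thread id out of a GDB remote stop reply such as
 * "$T05thread:000007d1;...".  The offset of 11 mirrors the
 * &put_str[11] used by kgdbts; the exact layout is an assumption here.
 */
static int parse_stop_thread_id(const char *put_str, unsigned long *id)
{
	if (put_str[1] != 'T' || put_str[2] != '0')
		return 1;                    /* not a stop reply we handle */

	*id = strtoul(&put_str[11], NULL, 16);
	return 0;
}

int main(void)
{
	unsigned long cont_id = 0, sstep_id = 0;

	parse_stop_thread_id("$T05thread:000007d1;", &cont_id);
	parse_stop_thread_id("$T05thread:000007d2;", &sstep_id);

	if (sstep_id != cont_id)
		printf("stopped in a different thread (%lx vs %lx)\n",
		       sstep_id, cont_id);
	return 0;
}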
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 55d8232..94eb05b 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -39,7 +39,6 @@
#include <asm/uaccess.h>
static DEFINE_MUTEX(mtd_mutex);
-static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
@@ -75,7 +74,9 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
return -EINVAL;
}
-
+static int count;
+static struct vfsmount *mnt;
+static struct file_system_type mtd_inodefs_type;
static int mtdchar_open(struct inode *inode, struct file *file)
{
@@ -92,6 +93,10 @@ static int mtdchar_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
+ ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
+ if (ret)
+ return ret;
+
mutex_lock(&mtd_mutex);
mtd = get_mtd_device(NULL, devnum);
@@ -106,7 +111,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
goto out;
}
- mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+ mtd_ino = iget_locked(mnt->mnt_sb, devnum);
if (!mtd_ino) {
put_mtd_device(mtd);
ret = -ENOMEM;
@@ -141,6 +146,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&mtd_mutex);
+ simple_release_fs(&mnt, &count);
return ret;
} /* mtdchar_open */
@@ -162,6 +168,7 @@ static int mtdchar_close(struct inode *inode, struct file *file)
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
+ simple_release_fs(&mnt, &count);
return 0;
} /* mtdchar_close */
@@ -1175,10 +1182,15 @@ static const struct file_operations mtd_fops = {
#endif
};
+static const struct super_operations mtd_ops = {
+ .drop_inode = generic_delete_inode,
+ .statfs = simple_statfs,
+};
+
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
+ return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
@@ -1187,26 +1199,6 @@ static struct file_system_type mtd_inodefs_type = {
.kill_sb = kill_anon_super,
};
-static void mtdchar_notify_add(struct mtd_info *mtd)
-{
-}
-
-static void mtdchar_notify_remove(struct mtd_info *mtd)
-{
- struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
-
- if (mtd_ino) {
- /* Destroy the inode if it exists */
- clear_nlink(mtd_ino);
- iput(mtd_ino);
- }
-}
-
-static struct mtd_notifier mtdchar_notifier = {
- .add = mtdchar_notify_add,
- .remove = mtdchar_notify_remove,
-};
-
static int __init init_mtdchar(void)
{
int ret;
@@ -1224,19 +1216,8 @@ static int __init init_mtdchar(void)
pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
goto err_unregister_chdev;
}
-
- mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
- if (IS_ERR(mtd_inode_mnt)) {
- ret = PTR_ERR(mtd_inode_mnt);
- pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
- goto err_unregister_filesystem;
- }
- register_mtd_user(&mtdchar_notifier);
-
return ret;
-err_unregister_filesystem:
- unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
return ret;
@@ -1244,8 +1225,6 @@ err_unregister_chdev:
static void __exit cleanup_mtdchar(void)
{
- unregister_mtd_user(&mtdchar_notifier);
- kern_unmount(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
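
The mtdchar rework above drops the permanently kern_mount()ed internal filesystem in favour of lazy pinning: the first open mounts it via simple_pin_fs() and the last release drops it via simple_release_fs(). A condensed sketch of that open/release pairing, keeping only the pinning calls (the libfs helper names and signatures are the in-kernel ones; the surrounding driver is illustrative):

#include <linux/fs.h>
#include <linux/mount.h>

static int pin_count;
static struct vfsmount *pinned_mnt;
static struct file_system_type demo_fs_type;    /* registered elsewhere */

static int demo_open(struct inode *inode, struct file *file)
{
	/* mounts demo_fs_type on first use, bumps a refcount otherwise */
	int ret = simple_pin_fs(&demo_fs_type, &pinned_mnt, &pin_count);

	if (ret)
		return ret;

	/* ... per-open setup that may use pinned_mnt->mnt_sb ... */
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	/* ... per-open teardown ... */

	/* drops the refcount and unmounts again on the last release */
	simple_release_fs(&pinned_mnt, &pin_count);
	return 0;
}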
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0c76186..941b4e1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -891,9 +891,15 @@ static void bond_do_fail_over_mac(struct bonding *bond,
switch (bond->params.fail_over_mac) {
case BOND_FOM_ACTIVE:
- if (new_active)
+ if (new_active) {
memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
new_active->dev->addr_len);
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+ }
break;
case BOND_FOM_FOLLOW:
/*
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index a59cf96..f219d38 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -125,6 +125,7 @@
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
+#include <linux/pkt_sched.h>
#include <asm/uaccess.h>
@@ -143,7 +144,7 @@ static void eql_timer(unsigned long param)
equalizer_t *eql = (equalizer_t *) param;
struct list_head *this, *tmp, *head;
- spin_lock_bh(&eql->queue.lock);
+ spin_lock(&eql->queue.lock);
head = &eql->queue.all_slaves;
list_for_each_safe(this, tmp, head) {
slave_t *slave = list_entry(this, slave_t, list);
@@ -157,7 +158,7 @@ static void eql_timer(unsigned long param)
}
}
- spin_unlock_bh(&eql->queue.lock);
+ spin_unlock(&eql->queue.lock);
eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
add_timer(&eql->timer);
@@ -341,7 +342,7 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *slave_dev = slave->dev;
skb->dev = slave_dev;
- skb->priority = 1;
+ skb->priority = TC_PRIO_FILLER;
slave->bytes_queued += skb->len;
dev_queue_xmit(skb);
dev->stats.tx_packets++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e37161f..2c9ee55 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1173,6 +1173,13 @@ enum {
};
+struct bnx2x_prev_path_list {
+ u8 bus;
+ u8 slot;
+ u8 path;
+ struct list_head list;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f1f3ca6..44556b7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1721,6 +1721,29 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
} while (0)
#endif
+bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+{
+ /* build FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+
+ if (loaded_fw != my_fw) {
+ if (is_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
+ loaded_fw, my_fw);
+ return false;
+ }
+
+ return true;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1815,23 +1838,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
-
- /* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
-
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
- loaded_fw, my_fw);
-
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x already loaded, "
- "which mismatches my %x FW. aborting",
- loaded_fw, my_fw);
+ if (!bnx2x_test_firmware_version(bp, true)) {
rc = -EBUSY;
LOAD_ERROR_EXIT(bp, load_error2);
}
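
bnx2x_test_firmware_version() above packs the four firmware version components into a single dword (major in the low byte, engineering version in the high byte) and compares it against what the already-loaded firmware left in XSEM_REG_PRAM. A standalone sketch of that packing and comparison, with placeholder numbers standing in for the BCM_5710_FW_* constants and for the register read:

#include <stdio.h>
#include <stdint.h>

/* Placeholder version components; the real BCM_5710_FW_* values differ. */
#define DEMO_FW_MAJOR 7
#define DEMO_FW_MINOR 0
#define DEMO_FW_REV   23
#define DEMO_FW_ENG   0

static uint32_t fw_version_dword(uint8_t major, uint8_t minor,
				 uint8_t rev, uint8_t eng)
{
	/* same packing as the driver: major | minor<<8 | rev<<16 | eng<<24 */
	return major + (minor << 8) + (rev << 16) + ((uint32_t)eng << 24);
}

int main(void)
{
	uint32_t my_fw = fw_version_dword(DEMO_FW_MAJOR, DEMO_FW_MINOR,
					  DEMO_FW_REV, DEMO_FW_ENG);
	uint32_t loaded_fw = 0x00170007;   /* pretend value read from the chip */

	printf("loaded fw %x, my fw %x -> %s\n",
	       (unsigned)loaded_fw, (unsigned)my_fw,
	       loaded_fw == my_fw ? "match" : "mismatch, abort load");
	return 0;
}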
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 8b16338..5c27454 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -431,6 +431,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
+/* validate correct fw is loaded */
+bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
+
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5d71b7d..dbff591 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1251,6 +1251,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+ #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+ #define REQ_BC_VER_4_INITIATE_FLR 0x00070213
+
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index beb4cdb..efa557b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -35,7 +35,6 @@
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
-#define BMAC_CONTROL_RX_ENABLE 2
#define WC_LANE_MAX 4
#define I2C_SWITCH_WIDTH 2
#define I2C_BSC0 0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7ba557a..763535e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -89,6 +89,8 @@
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Structs */
/***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f7f9aa8..e077d25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -52,6 +52,7 @@
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
+#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
@@ -211,6 +212,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+/* Global resources for unloading a previously loaded device */
+#define BNX2X_PREV_WAIT_NEEDED 1
+static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static LIST_HEAD(bnx2x_prev_list);
/****************************************************************************
* General service functions
****************************************************************************/
@@ -8812,109 +8817,371 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
bnx2x_undi_int_disable_e1h(bp);
}
-static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
{
- u32 val;
+ u32 val, base_addr, offset, mask, reset_reg;
+ bool mac_stopped = false;
+ u8 port = BP_PORT(bp);
- /* possibly another driver is trying to reset the chip */
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
- /* check if doorbell queue is reset */
- if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
- & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+ if (!CHIP_IS_E3(bp)) {
+ val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+ mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+ if ((mask & reset_reg) && val) {
+ u32 wb_data[2];
+ BNX2X_DEV_INFO("Disable bmac Rx\n");
+ base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
+ : NIG_REG_INGRESS_BMAC0_MEM;
+ offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
+ : BIGMAC_REGISTER_BMAC_CONTROL;
- /*
- * Check if it is the UNDI driver
+ /*
+ * use rd/wr since we cannot use dmae. This is safe
+ * since MCP won't access the bus due to the request
+ * to unload, and no function on the path can be
+ * loaded at this time.
+ */
+ wb_data[0] = REG_RD(bp, base_addr + offset);
+ wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR(bp, base_addr + offset, wb_data[0]);
+ REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+
+ }
+ BNX2X_DEV_INFO("Disable emac Rx\n");
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
+
+ mac_stopped = true;
+ } else {
+ if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+ BNX2X_DEV_INFO("Disable xmac Rx\n");
+ base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val & ~(1 << 1));
+ REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val | (1 << 1));
+ REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ mac_stopped = true;
+ }
+ mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+ if (mask & reset_reg) {
+ BNX2X_DEV_INFO("Disable umac Rx\n");
+ base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ mac_stopped = true;
+ }
+ }
+
+ if (mac_stopped)
+ msleep(20);
+
+}
+
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+
+static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
+ u8 inc)
+{
+ u16 rcq, bd;
+ u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+
+ rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+ bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+ tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+ REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+
+ BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+ port, bd, rcq);
+}
+
+static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
+{
+ u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ if (!rc) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc = false;
+
+ if (down_trylock(&bnx2x_prev_sem))
+ return false;
+
+ list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
+ if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+ bp->pdev->bus->number == tmp_list->bus &&
+ BP_PATH(bp) == tmp_list->path) {
+ rc = true;
+ BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
+ BP_PATH(bp));
+ break;
+ }
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
+static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ tmp_list = (struct bnx2x_prev_path_list *)
+ kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+ if (!tmp_list) {
+ BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
+ return -ENOMEM;
+ }
+
+ tmp_list->bus = bp->pdev->bus->number;
+ tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
+ tmp_list->path = BP_PATH(bp);
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ kfree(tmp_list);
+ } else {
+ BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
+ list_add(&tmp_list->list, &bnx2x_prev_list);
+ up(&bnx2x_prev_sem);
+ }
+
+ return rc;
+}
+
+static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
+{
+ int pos;
+ u32 cap;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return false;
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return false;
+
+ return true;
+}
+
+static int __devinit bnx2x_do_flr(struct bnx2x *bp)
+{
+ int i, pos;
+ u16 status;
+ struct pci_dev *dev = bp->pdev;
+
+ /* probe the capability first */
+ if (!bnx2x_can_flr(bp))
+ return -ENOTTY;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return -ENOTTY;
+
+ /* Wait for Transaction Pending bit clean */
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
+
+ dev_err(&dev->dev,
+ "transaction is not cleared; proceeding with reset anyway\n");
+
+clear:
+ if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+ BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+ bp->common.bc_ver);
+ return -EINVAL;
+ }
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+ return 0;
+}
+
+static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+{
+ int rc;
+
+ BNX2X_DEV_INFO("Uncommon unload Flow\n");
+
+ /* Test if previous unload process was already finished for this path */
+ if (bnx2x_prev_is_path_marked(bp))
+ return bnx2x_prev_mcp_done(bp);
+
+ /* If function has FLR capabilities, and existing FW version matches
+ * the one required, then FLR will be sufficient to clean any residue
+ * left by previous driver
+ */
+ if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
+ return bnx2x_do_flr(bp);
+
+ /* Close the MCP request, return failure */
+ rc = bnx2x_prev_mcp_done(bp);
+ if (!rc)
+ rc = BNX2X_PREV_WAIT_NEEDED;
+
+ return rc;
+}
+
+static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
+{
+ u32 reset_reg, tmp_reg = 0, rc;
+ /* It is possible a previous function received 'common' answer,
+ * but hasn't loaded yet, therefore creating a scenario of
+ * multiple functions receiving 'common' on the same path.
+ */
+ BNX2X_DEV_INFO("Common unload Flow\n");
+
+ if (bnx2x_prev_is_path_marked(bp))
+ return bnx2x_prev_mcp_done(bp);
+
+ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+
+ /* Reset should be performed after BRB is emptied */
+ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+ u32 timer_count = 1000;
+ bool prev_undi = false;
+
+ /* Close the MAC Rx to prevent BRB from filling up */
+ bnx2x_prev_unload_close_mac(bp);
+
+ /* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (val == 0x7) {
- u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- /* save our pf_num */
- int orig_pf_num = bp->pf_num;
- int port;
- u32 swap_en, swap_val, value;
-
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
-
- BNX2X_DEV_INFO("UNDI is active! reset device\n");
-
- /* try unload UNDI on port 0 */
- bp->pf_num = 0;
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- reset_code = bnx2x_fw_command(bp, reset_code, 0);
-
- /* if UNDI is loaded on the other port */
- if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
-
- /* send "DONE" for previous unload */
- bnx2x_fw_command(bp,
- DRV_MSG_CODE_UNLOAD_DONE, 0);
-
- /* unload UNDI on port 1 */
- bp->pf_num = 1;
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
- bnx2x_fw_command(bp, reset_code, 0);
+ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+ tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+ if (tmp_reg == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
}
+ }
+ /* wait until BRB is empty */
+ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ while (timer_count) {
+ u32 prev_brb = tmp_reg;
- bnx2x_undi_int_disable(bp);
- port = BP_PORT(bp);
-
- /* close input traffic and wait for it */
- /* Do not rcv packets to BRB */
- REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
- NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
- /* Do not direct rcv packets that are not for MCP to
- * the BRB */
- REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
- NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
- /* clear AEU */
- REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
- MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
- msleep(10);
-
- /* save NIG port swap info */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- /* reset device */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
- 0xd3ffffff);
-
- value = 0x1400;
- if (CHIP_IS_E3(bp)) {
- value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
- value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
- }
+ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ if (!tmp_reg)
+ break;
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- value);
+ BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
- /* take the NIG out of reset and restore swap values */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
- MISC_REGISTERS_RESET_REG_1_RST_NIG);
- REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
- REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+ /* reset timer as long as BRB actually gets emptied */
+ if (prev_brb > tmp_reg)
+ timer_count = 1000;
+ else
+ timer_count--;
- /* send unload done to the MCP */
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ /* If UNDI resides in memory, manually increment it */
+ if (prev_undi)
+ bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
- /* restore our func and fw_seq */
- bp->pf_num = orig_pf_num;
+ udelay(10);
}
+
+ if (!timer_count)
+ BNX2X_ERR("Failed to empty BRB, hope for the best\n");
+
}
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /* No packets are in the pipeline, path is ready for reset */
+ bnx2x_reset_common(bp);
+
+ rc = bnx2x_prev_mark_path(bp);
+ if (rc) {
+ bnx2x_prev_mcp_done(bp);
+ return rc;
+ }
+
+ return bnx2x_prev_mcp_done(bp);
+}
+
+static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
+{
+ int time_counter = 10;
+ u32 rc, fw, hw_lock_reg, hw_lock_val;
+ BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
+
+ /* Release previously held locks */
+ hw_lock_reg = (BP_FUNC(bp) <= 5) ?
+ (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
+ (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+
+ hw_lock_val = (REG_RD(bp, hw_lock_reg));
+ if (hw_lock_val) {
+ if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
+ }
+
+ BNX2X_DEV_INFO("Release Previously held hw lock\n");
+ REG_WR(bp, hw_lock_reg, 0xffffffff);
+ } else
+ BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
+
+ if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
+ BNX2X_DEV_INFO("Release previously held alr\n");
+ REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+ }
+
+
+ do {
+ /* Lock MCP using an unload request */
+ fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+ if (!fw) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ break;
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = bnx2x_prev_unload_common(bp);
+ break;
+ }
+
+ /* non-common reply from MCP might require looping */
+ rc = bnx2x_prev_unload_uncommon(bp);
+ if (rc != BNX2X_PREV_WAIT_NEEDED)
+ break;
+
+ msleep(20);
+ } while (--time_counter);
+
+ if (!time_counter || rc) {
+ BNX2X_ERR("Failed unloading previous driver, aborting\n");
+ rc = -EBUSY;
+ }
+
+ BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
+
+ return rc;
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
@@ -10100,8 +10367,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp))
- bnx2x_undi_unload(bp);
+ if (!BP_NOMCP(bp)) {
+ /* init fw_seq */
+ bp->fw_seq =
+ SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ bnx2x_prev_unload(bp);
+ }
+
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -11431,9 +11706,18 @@ static int __init bnx2x_init(void)
static void __exit bnx2x_cleanup(void)
{
+ struct list_head *pos, *q;
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
+
+ /* Free globally allocated resources */
+ list_for_each_safe(pos, q, &bnx2x_prev_list) {
+ struct bnx2x_prev_path_list *tmp =
+ list_entry(pos, struct bnx2x_prev_path_list, list);
+ list_del(pos);
+ kfree(tmp);
+ }
}
void bnx2x_notify_link_changed(struct bnx2x *bp)
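
The BNX2X_PREV_UNDI_* helpers added to bnx2x_main.c above keep the UNDI BD and RCQ producers packed into one 32-bit value (BD in the high 16 bits, RCQ in the low 16), so bnx2x_prev_unload_undi_inc() can advance both with a single read-modify-write. A small standalone demonstration of the same packing against a plain variable instead of a device register:

#include <stdio.h>
#include <stdint.h>

#define BNX2X_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
#define BNX2X_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))

/* Advance both producers by 'inc', as bnx2x_prev_unload_undi_inc() does. */
static uint32_t undi_prod_inc(uint32_t reg, uint8_t inc)
{
	uint16_t rcq = BNX2X_PREV_UNDI_RCQ(reg) + inc;
	uint16_t bd  = BNX2X_PREV_UNDI_BD(reg) + inc;

	return BNX2X_PREV_UNDI_PROD(rcq, bd);
}

int main(void)
{
	uint32_t reg = BNX2X_PREV_UNDI_PROD(0x0010, 0x0020);

	reg = undi_prod_inc(reg, 1);
	printf("bd=0x%04x rcq=0x%04x\n",
	       (unsigned)BNX2X_PREV_UNDI_BD(reg),
	       (unsigned)BNX2X_PREV_UNDI_RCQ(reg));
	return 0;
}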
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fd7fb45..ab0a250 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -987,6 +987,7 @@
* clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
#define IGU_REG_WRITE_DONE_PENDING 0x130480
#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c
#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
#define MCP_REG_MCPR_GP_INPUTS 0x800c0
#define MCP_REG_MCPR_GP_OENABLE 0x800c8
@@ -1686,6 +1687,7 @@
[10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16]
rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_1 0xa580
#define MISC_REG_RESET_REG_2 0xa590
/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
shared with the driver resides */
@@ -5606,6 +5608,7 @@
/* [RC 32] Parity register #0 read clear */
#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
+#define MCPR_ACCESS_LOCK_LOCK (1L<<31)
#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -5732,6 +5735,7 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f52fad..5135733 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3847,7 +3847,7 @@ static bool bnx2x_credit_pool_get_entry(
continue;
/* If we've got here we are going to find a free entry */
- for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
+ for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
i < BIT_VEC64_ELEM_SZ; idx++, i++)
if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4e4bb38..062ac33 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2778,7 +2778,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ !tp->pci_fn))
return;
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9eb8159..f7f0bf5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -356,13 +356,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
if (prop)
tbiaddr = *prop;
- }
- if (tbiaddr == -1) {
- err = -EBUSY;
- goto err_free_irqs;
- } else {
- out_be32(tbipa, tbiaddr);
+ if (tbiaddr == -1) {
+ err = -EBUSY;
+ goto err_free_irqs;
+ } else {
+ out_be32(tbipa, tbiaddr);
+ }
}
err = of_mdiobus_register(new_bus, np);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4e3cd2f..17a46e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3945,6 +3945,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
}
if (max_speed == SPEED_1000) {
+ unsigned int snums = qe_get_num_of_snums();
+
/* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
@@ -3954,11 +3956,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
- /* If QE's snum number is 46 which means we need to support
+ /* If QE's snum number is 46/76 which means we need to support
* 4 UECs at 1000Base-T simultaneously, we need to allocate
* more Threads to Rx.
*/
- if (qe_get_num_of_snums() == 46)
+ if ((snums == 76) || (snums == 46))
ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
else
ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0e9aec8..4348b6f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -164,6 +164,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
netdev_features_t features);
+static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
@@ -215,7 +217,8 @@ MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
@@ -979,7 +982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->msg_enable = (1 << debug) - 1;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->bars = bars;
adapter->need_ioport = need_ioport;
@@ -1214,7 +1217,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- e1000_vlan_mode(netdev, netdev->features);
+ e1000_vlan_filter_on_off(adapter, false);
/* print bus type/speed/width info */
e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
@@ -4770,6 +4773,22 @@ static bool e1000_vlan_used(struct e1000_adapter *adapter)
return false;
}
+static void __e1000_vlan_mode(struct e1000_adapter *adapter,
+ netdev_features_t features)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = er32(CTRL);
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= E1000_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~E1000_CTRL_VME;
+ }
+ ew32(CTRL, ctrl);
+}
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
bool filter_on)
{
@@ -4779,6 +4798,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
+ __e1000_vlan_mode(adapter, adapter->netdev->features);
if (filter_on) {
/* enable VLAN receive filtering */
rctl = er32(RCTL);
@@ -4799,24 +4819,14 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
}
static void e1000_vlan_mode(struct net_device *netdev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl;
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
- ctrl = er32(CTRL);
- if (features & NETIF_F_HW_VLAN_RX) {
- /* enable VLAN tag insert/strip */
- ctrl |= E1000_CTRL_VME;
- } else {
- /* disable VLAN tag insert/strip */
- ctrl &= ~E1000_CTRL_VME;
- }
- ew32(CTRL, ctrl);
+ __e1000_vlan_mode(adapter, features);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
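
The msg_enable changes repeated across the Intel drivers above all adopt the same convention: a module parameter debug that defaults to -1, which netif_msg_init() maps to the driver's DEFAULT_MSG_ENABLE mask, while 0..31 enables the low N message classes. A user-space approximation of that helper, written here to illustrate the semantics rather than copied from netdevice.h:

#include <stdio.h>
#include <stdint.h>

#define NETIF_MSG_DRV   0x0001
#define NETIF_MSG_PROBE 0x0002
#define NETIF_MSG_LINK  0x0004
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/* Approximation of netif_msg_init(): out-of-range means "use the default". */
static uint32_t demo_msg_init(int debug_value, uint32_t default_bits)
{
	if (debug_value < 0 || debug_value >= 32)
		return default_bits;            /* debug=-1 -> driver default */
	if (debug_value == 0)
		return 0;                       /* silence all messages */
	return (1U << debug_value) - 1;         /* enable the low N classes */
}

int main(void)
{
	printf("debug=-1 -> 0x%04x\n", (unsigned)demo_msg_init(-1, DEFAULT_MSG_ENABLE));
	printf("debug=3  -> 0x%04x\n", (unsigned)demo_msg_init(3, DEFAULT_MSG_ENABLE));
	printf("debug=16 -> 0x%04x\n", (unsigned)demo_msg_init(16, DEFAULT_MSG_ENABLE));
	return 0;
}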
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7152eb1..2c38a65 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -60,6 +60,11 @@
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
static const struct e1000_info *e1000_info_tbl[] = {
@@ -6172,7 +6177,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.adapter = adapter;
adapter->hw.mac.type = ei->mac;
adapter->max_hw_frame_size = ei->max_hw_frame_size;
- adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c490241..5ec3159 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -238,6 +238,11 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
struct igb_reg_info {
u32 ofs;
char *name;
@@ -1893,7 +1898,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 217c143..d61ca2a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -55,6 +55,11 @@ static const char igbvf_driver_string[] =
static const char igbvf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
@@ -2649,7 +2654,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
adapter->flags = ei->flags;
adapter->hw.back = adapter;
adapter->hw.mac.type = ei->mac;
- adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* PCI config space info */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 82aaa79..5fce363 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -134,8 +134,8 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
@@ -442,7 +442,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
if (!adapter->hw.hw_addr) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 80e26ff..74e1921 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -544,7 +544,7 @@ struct ixgbe_fdir_filter {
u16 action;
};
-enum ixbge_state_t {
+enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 398fc22..3e26b1f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,8 +63,8 @@ static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
#define MAJ 3
-#define MIN 6
-#define BUILD 7
+#define MIN 8
+#define BUILD 21
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
@@ -141,13 +141,16 @@ module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -6834,7 +6837,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 581c659..307611a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -91,7 +91,10 @@ MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
@@ -3367,7 +3370,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/*
* call save state here in standalone driver because it relies on
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 423a1a2..b806d9b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1767,13 +1767,14 @@ static int sky2_open(struct net_device *dev)
sky2_hw_up(sky2);
+ /* Enable interrupts from phy/mac for port */
+ imask = sky2_read32(hw, B0_IMSK);
+
if (hw->chip_id == CHIP_ID_YUKON_OPT ||
hw->chip_id == CHIP_ID_YUKON_PRM ||
hw->chip_id == CHIP_ID_YUKON_OP_2)
imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
- /* Enable interrupts from phy/mac for port */
- imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_IMSK);
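The sky2 hunk is a read-modify-write ordering fix: B0_IMSK is now read before the chip-specific Y2_IS_PHY_QLNK bit is OR-ed in, so that bit is no longer clobbered by a later read. A toy sketch of the intended ordering; the register accessors and bit values are placeholders, not sky2 code:

#include <stdio.h>

static unsigned int hw_imask = 0x10;                 /* pretend hardware register */
static unsigned int read_reg(void)    { return hw_imask; }
static void write_reg(unsigned int v) { hw_imask = v; }

int main(void)
{
        unsigned int imask;

        imask  = read_reg();    /* 1. read the current mask first          */
        imask |= 0x01;          /* 2. chip-specific bit (PHY quick link)   */
        imask |= 0x02;          /* 3. per-port interrupt bit               */
        write_reg(imask);       /* 4. one combined write                   */

        printf("imask = 0x%x\n", hw_imask);   /* 0x13: nothing was dropped */
        return 0;
}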
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6944424..6dfc26d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1441,7 +1441,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
#endif
if (!is_valid_ether_addr(ndev->dev_addr))
- dev_hw_addr_random(ndev, ndev->dev_addr);
+ eth_hw_addr_random(ndev);
/* Reset the ethernet controller */
__lpc_eth_reset(pldat);
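eth_hw_addr_random() writes a random Ethernet address straight into ndev->dev_addr and records that the address was randomly assigned, replacing the older dev_hw_addr_random() helper. A standalone sketch of the constraints such an address must satisfy; this is illustrative only, not the kernel helper:

#include <stdio.h>
#include <stdlib.h>

static void random_ether_addr(unsigned char addr[6])
{
        int i;

        for (i = 0; i < 6; i++)
                addr[i] = (unsigned char)(rand() & 0xff);
        addr[0] &= 0xfe;        /* unicast: clear the multicast bit */
        addr[0] |= 0x02;        /* mark as locally administered     */
}

int main(void)
{
        unsigned char mac[6];

        random_ether_addr(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}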
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9755b49..3fb2355 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -7,7 +7,8 @@ config SH_ETH
depends on SUPERH && \
(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
- CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
+ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
+ CPU_SUBTYPE_SH7757)
select CRC32
select NET_CORE
select MII
@@ -16,4 +17,4 @@ config SH_ETH
---help---
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
+ - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8615961..d63e09b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,8 @@
/*
* SuperH Ethernet device driver
*
- * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2009 Renesas Solutions Corp.
+ * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2012 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/clk.h>
#include <linux/sh_eth.h>
#include "sh_eth.h"
@@ -279,8 +280,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
return &sh_eth_my_cpu_data;
}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
+static void sh_eth_reset_hw_crc(struct net_device *ndev);
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -314,6 +316,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
+
+ /* Reset HW CRC register */
+ sh_eth_reset_hw_crc(ndev);
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -370,8 +375,17 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+#if defined(CONFIG_CPU_SUBTYPE_SH7734)
+ .hw_crc = 1,
+#endif
};
+static void sh_eth_reset_hw_crc(struct net_device *ndev)
+{
+ if (sh_eth_my_cpu_data.hw_crc)
+ sh_eth_write(ndev, 0x0, CSMR);
+}
+
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT 1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -790,7 +804,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
-#if defined(__LITTLE_ENDIAN__)
+#if defined(__LITTLE_ENDIAN)
if (mdp->cd->hw_swap)
sh_eth_write(ndev, EDMR_EL, EDMR);
else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57dc262..0fa14afc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,8 +1,8 @@
/*
* SuperH Ethernet device driver
*
- * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2011 Renesas Solutions Corp.
+ * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2012 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ enum {
CEECR,
MAFCR,
RTRATE,
+ CSMR,
+ RMII_MII,
/* TSU Absolute address */
ARSTR,
@@ -172,6 +174,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[RMCR] = 0x0458,
[RPADIR] = 0x0460,
[FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
[ECMR] = 0x0500,
[ECSR] = 0x0510,
@@ -200,6 +203,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[CERCR] = 0x0768,
[CEECR] = 0x0770,
[MAFCR] = 0x0778,
+ [RMII_MII] = 0x0790,
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
@@ -377,7 +381,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
/*
* Register's bits
*/
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
/* EDSR */
enum EDSR_BIT {
EDSR_ENT = 0x01, EDSR_ENR = 0x02,
@@ -689,7 +693,7 @@ enum TSU_FWSLC_BIT {
*/
struct sh_eth_txdesc {
u32 status; /* TD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN)
u16 pad0; /* TD1 */
u16 buffer_length; /* TD1 */
#else
@@ -706,7 +710,7 @@ struct sh_eth_txdesc {
*/
struct sh_eth_rxdesc {
u32 status; /* RD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN)
u16 frame_length; /* RD1 */
u16 buffer_length; /* RD1 */
#else
@@ -751,6 +755,7 @@ struct sh_eth_cpu_data {
unsigned rpadir:1; /* E-DMAC have RPADIR */
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+ unsigned hw_crc:1; /* E-DMAC have CSMR */
};
struct sh_eth_private {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 39b8cf3..fcfa01f 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -503,30 +503,32 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_restart_tx(struct net_device *dev);
-static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
void __iomem *ioaddr = rp->base;
int i;
for (i = 0; i < 1024; i++) {
- if (high ^ !!(ioread8(ioaddr + reg) & mask))
+ bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
+
+ if (low ^ has_mask_bits)
break;
udelay(10);
}
if (i > 64) {
netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
- "count: %04d\n", high ? "high" : "low", reg, mask, i);
+ "count: %04d\n", low ? "low" : "high", reg, mask, i);
}
}
static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
- rhine_wait_bit(rp, reg, mask, true);
+ rhine_wait_bit(rp, reg, mask, false);
}
static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
- rhine_wait_bit(rp, reg, mask, false);
+ rhine_wait_bit(rp, reg, mask, true);
}
static u32 rhine_get_events(struct rhine_private *rp)
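The via-rhine change renames the polarity argument and flips the two wrappers: the poll loop exits once (low ^ has_mask_bits) is true, so rhine_wait_bit_high() must pass false and rhine_wait_bit_low() must pass true. A small truth-table check of that condition, written as standalone code under those assumed semantics:

#include <stdio.h>
#include <stdbool.h>

static const char *poll(bool low, bool bits_set)
{
        return (low ^ bits_set) ? "stop polling" : "keep polling";
}

int main(void)
{
        printf("wait_bit_high, bits set:   %s\n", poll(false, true));
        printf("wait_bit_high, bits clear: %s\n", poll(false, false));
        printf("wait_bit_low,  bits clear: %s\n", poll(true,  false));
        printf("wait_bit_low,  bits set:   %s\n", poll(true,  true));
        return 0;
}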
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index a0d1913..e250675 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -147,7 +147,7 @@ static void sa1100_irda_dma_start(struct sa1100_buf *buf,
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = buf->chan;
- desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir,
+ desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
desc->callback = cb;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a57f057..91d2588 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -375,8 +375,8 @@ static void rionet_remove(struct rio_dev *rdev)
struct net_device *ndev = rio_get_drvdata(rdev);
struct rionet_peer *peer, *tmp;
- free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
- __fls(sizeof(void *)) + 4 : 0);
+ free_pages((unsigned long)rionet_active, get_order(sizeof(void *) *
+ RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size)));
unregister_netdev(ndev);
free_netdev(ndev);
@@ -432,15 +432,16 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
int rc = 0;
struct rionet_private *rnet;
u16 device_id;
+ const size_t rionet_active_bytes = sizeof(void *) *
+ RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
- mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
+ get_order(rionet_active_bytes));
if (!rionet_active) {
rc = -ENOMEM;
goto out;
}
- memset((void *)rionet_active, 0, sizeof(void *) *
- RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
+ memset((void *)rionet_active, 0, rionet_active_bytes);
/* Set up private area */
rnet = netdev_priv(ndev);
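The rionet change computes the route-table size once and feeds get_order() of that size to both __get_free_pages() and free_pages(), so allocation and free agree instead of re-deriving the order from a hand-written shift. A sketch of the page-order rounding, assuming the usual get_order() behaviour; PAGE_SIZE and the table size below are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Smallest n such that PAGE_SIZE << n covers the requested size. */
static int order_for(unsigned long size)
{
        int order = 0;
        unsigned long span = PAGE_SIZE;

        while (span < size) {
                span <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        unsigned long bytes = sizeof(void *) * 512;   /* stand-in for the route table */

        printf("%lu bytes -> order %d\n", bytes, order_for(bytes));
        printf("%lu bytes -> order %d\n", bytes * 8, order_for(bytes * 8));
        return 0;
}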
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 3886b30..3e41b00 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -165,13 +165,13 @@ static void rx_complete(struct urb *req)
memcpy(skb_put(skb, 1), page_address(page), 1);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 1, req->actual_length,
- req->actual_length);
+ PAGE_SIZE);
page = NULL;
}
} else {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, req->actual_length,
- req->actual_length);
+ PAGE_SIZE);
page = NULL;
}
if (req->actual_length < PAGE_SIZE)
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 439690b..685a4e2 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -93,6 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
/* no jumbogram (16K) support for now */
dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
return 0;
}
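The cdc_eem hunk recomputes hard_mtu after hard_header_len has grown by the EEM header and the Ethernet FCS, so the USB transfer budget still covers a full frame. Illustrative arithmetic only; the exact header sizes are assumptions, not taken from this patch:

#include <stdio.h>

int main(void)
{
        int mtu = 1500;
        int eth_hlen = 14, eem_head = 2, eth_fcs_len = 4;
        int hard_header_len = eth_hlen + eem_head + eth_fcs_len;

        printf("hard_mtu = %d\n", mtu + hard_header_len);     /* 1520 */
        return 0;
}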
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6dda2fe..d363b31 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -85,32 +85,6 @@
#define INT_CRERR_CNT 0x06
#define INT_COL_CNT 0x07
-/* Transmit status register errors */
-#define TSR_ECOL (1<<5)
-#define TSR_LCOL (1<<4)
-#define TSR_LOSS_CRS (1<<3)
-#define TSR_JBR (1<<2)
-#define TSR_ERRORS (TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR)
-/* Receive status register errors */
-#define RSR_CRC (1<<2)
-#define RSR_FAE (1<<1)
-#define RSR_ERRORS (RSR_CRC | RSR_FAE)
-
-/* Media status register definitions */
-#define MSR_DUPLEX (1<<4)
-#define MSR_SPEED (1<<3)
-#define MSR_LINK (1<<2)
-
-/* Interrupt pipe data */
-#define INT_TSR 0x00
-#define INT_RSR 0x01
-#define INT_MSR 0x02
-#define INT_WAKSR 0x03
-#define INT_TXOK_CNT 0x04
-#define INT_RXLOST_CNT 0x05
-#define INT_CRERR_CNT 0x06
-#define INT_COL_CNT 0x07
-
#define RTL8150_MTU 1540
#define RTL8150_TX_TIMEOUT (HZ)
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index c3197ce..34db195 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -337,6 +337,11 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
},
{
+ /* Motorola Rokr E6 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &bogus_mdlm_info,
+}, {
/* Motorola MOTOMAGX phones */
USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 019da01..4de2760 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -625,12 +625,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
- if (net_ratelimit()) {
- if (likely(capacity == -ENOMEM)) {
+ if (likely(capacity == -ENOMEM)) {
+ if (net_ratelimit()) {
dev_warn(&dev->dev,
"TX queue failure: out of memory\n");
} else {
- dev->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
dev_warn(&dev->dev,
"Unexpected TX queue failure: %d\n",
capacity);
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 63e4b70..1d76ae8 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -597,7 +597,8 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
- strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
+ strncpy(info->fw_version,
+ i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
if (net_dev->dev.parent)
strncpy(info->bus_info, dev_name(net_dev->dev.parent),
sizeof(info->bus_info) - 1);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 2c1b8b6..29b1e03 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -339,6 +339,23 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
return result;
}
+static void i2400mu_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct usb_device *udev = i2400mu->usb_dev;
+
+ strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
+ strncpy(info->fw_version,
+ i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops i2400mu_ethtool_ops = {
+ .get_drvinfo = i2400mu_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
static
void i2400mu_netdev_setup(struct net_device *net_dev)
@@ -347,6 +364,7 @@ void i2400mu_netdev_setup(struct net_device *net_dev)
struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
i2400mu_init(i2400mu);
i2400m_netdev_setup(net_dev);
+ net_dev->ethtool_ops = &i2400mu_ethtool_ops;
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 2f4b48e..e5cceb0 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -20,7 +20,6 @@
/* Common calibration code */
-#define ATH9K_NF_TOO_HIGH -60
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
@@ -346,10 +345,10 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
"NF calibrated [%s] [chain %d] is %d\n",
(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
- if (nf[i] > ATH9K_NF_TOO_HIGH) {
+ if (nf[i] > limit->max) {
ath_dbg(common, CALIBRATE,
"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
- i, nf[i], ATH9K_NF_TOO_HIGH);
+ i, nf[i], limit->max);
nf[i] = limit->max;
} else if (nf[i] < limit->min) {
ath_dbg(common, CALIBRATE,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 60159f4..cb00645 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -680,7 +680,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->queues = 4;
hw->max_rates = 4;
hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
+ hw->max_listen_interval = 1;
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
hw->vif_data_size = sizeof(struct ath_vif);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3879485..215eb25 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -640,7 +640,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
an->sta = sta;
an->vif = vif;
- if (sta->ht_cap.ht_supported) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
@@ -659,7 +659,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
an->sta = NULL;
#endif
- if (sta->ht_cap.ht_supported)
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ath_tx_node_cleanup(sc, an);
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f4ae3ba..1c4583c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1913,13 +1913,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->rx.frag) {
int space = skb->len - skb_tailroom(hdr_skb);
- sc->rx.frag = NULL;
-
if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
dev_kfree_skb(skb);
goto requeue_drop_frag;
}
+ sc->rx.frag = NULL;
+
skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
skb->len);
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 4fcdac6..2b02257 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11507,9 +11507,9 @@ static int ipw_wdev_init(struct net_device *dev)
rc = -ENOMEM;
goto out;
}
- /* translate geo->bg to a_band.channels */
+ /* translate geo->a to a_band.channels */
for (i = 0; i < geo->a_channels; i++) {
- a_band->channels[i].band = IEEE80211_BAND_2GHZ;
+ a_band->channels[i].band = IEEE80211_BAND_5GHZ;
a_band->channels[i].center_freq = geo->a[i].freq;
a_band->channels[i].hw_value = geo->a[i].channel;
a_band->channels[i].max_power = geo->a[i].max_power;
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 0c12093..faec404 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -2673,8 +2673,6 @@ il3945_bg_restart(struct work_struct *data)
if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
mutex_lock(&il->mutex);
- /* FIXME: vif can be dereferenced */
- il->vif = NULL;
il->is_open = 0;
mutex_unlock(&il->mutex);
il3945_down(il);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 17f1c68..c46275a 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5652,8 +5652,6 @@ il4965_bg_restart(struct work_struct *data)
if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
mutex_lock(&il->mutex);
- /* FIXME: do we dereference vif without mutex locked ? */
- il->vif = NULL;
il->is_open = 0;
__il4965_down(il);
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e5ac047..eaf24945 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4508,6 +4508,7 @@ il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct il_priv *il = hw->priv;
int err;
+ bool reset;
mutex_lock(&il->mutex);
D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
@@ -4518,7 +4519,12 @@ il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- if (il->vif) {
+ /*
+ * We do not support multiple virtual interfaces, but on hardware reset
+ * we have to add the same interface again.
+ */
+ reset = (il->vif == vif);
+ if (il->vif && !reset) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4528,8 +4534,11 @@ il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
err = il_set_mode(il);
if (err) {
- il->vif = NULL;
- il->iw_mode = NL80211_IFTYPE_STATION;
+ IL_WARN("Fail to set mode %d\n", vif->type);
+ if (!reset) {
+ il->vif = NULL;
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ }
}
out:
@@ -5279,9 +5288,9 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
D_MAC80211("BSSID %pM\n", bss_conf->bssid);
/*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
+ * If there is currently a HW scan going on in the background,
+ * then we need to cancel it, otherwise sometimes we are not
+ * able to authenticate (FIXME: why ?)
*/
if (il_scan_cancel_timeout(il, 100)) {
D_MAC80211("leave - scan abort failed\n");
@@ -5290,14 +5299,10 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
/* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(il->staging.bssid_addr, bss_conf->bssid,
- ETH_ALEN);
+ memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
- /* currently needed in a few places */
- memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
- } else
- il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ /* FIXME: currently needed in a few places */
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
}
/*
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index dd6c64a..88e3ad2 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1336,6 +1336,10 @@ static void qbuf_scan(struct orinoco_private *priv, void *buf,
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
+ if (!sd) {
+ printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+ return;
+ }
sd->buf = buf;
sd->len = len;
sd->type = type;
@@ -1353,6 +1357,10 @@ static void qabort_scan(struct orinoco_private *priv)
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
+ if (!sd) {
+ printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+ return;
+ }
sd->len = -1; /* Abort */
spin_lock_irqsave(&priv->scan_lock, flags);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index cd490ab..001735f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -163,7 +163,13 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
/* Reschedule urb to read TX status again instantly */
return true;
- } else if (rt2800usb_txstatus_pending(rt2x00dev)) {
+ }
+
+ /* Check if there is any entry that timed out waiting on TX status */
+ if (rt2800usb_txstatus_timeout(rt2x00dev))
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+
+ if (rt2800usb_txstatus_pending(rt2x00dev)) {
/* Read register after 250 us */
hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
HRTIMER_MODE_REL);
@@ -178,7 +184,7 @@ stop_reading:
* here again if status reading is needed.
*/
if (rt2800usb_txstatus_pending(rt2x00dev) &&
- test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
+ !test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return true;
else
return false;
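The rt2800usb fix adds the missing negation on test_and_set_bit(): the helper returns the previous bit value, so the caller should keep the status reads going only when that previous value was zero, i.e. when this call actually claimed TX_STATUS_READING. A minimal, non-atomic model of the idiom (standalone, not the kernel bitops):

#include <stdio.h>

static unsigned long flags;

static int test_and_set(int nr, unsigned long *addr)
{
        int old = (*addr >> nr) & 1;    /* previous value of the bit */

        *addr |= 1UL << nr;             /* set it unconditionally    */
        return old;
}

int main(void)
{
        printf("first claim:  continue=%d\n", !test_and_set(0, &flags));  /* 1 */
        printf("second claim: continue=%d\n", !test_and_set(0, &flags));  /* 0 */
        return 0;
}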
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 1eec3a0..4c01624 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1893,7 +1893,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
- dm_digtable.cur_igvalue = 0x17;
+ dm_digtable.cur_igvalue = 0x37;
rtl92c_dm_write_dig(hw);
break;
default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 34591eeb..28fc5fb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3077,7 +3077,7 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue;
- de_digtable.cur_igvalue = 0x17;
+ de_digtable.cur_igvalue = 0x37;
rtl92d_dm_write_dig(hw);
break;
default:
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 4bdef24..b500840 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -508,9 +508,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
- if (aspm_disabled)
- return 0;
-
/*
* Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
@@ -519,6 +516,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
pos = pci_pcie_cap(child);
if (!pos)
return -EINVAL;
+
+ /*
+ * If ASPM is disabled then we're not going to change
+ * the BIOS state. It's safe to continue even if it's a
+ * pre-1.1 device
+ */
+
+ if (aspm_disabled)
+ continue;
+
/*
* Disable ASPM for pre-1.1 PCIe device, we follow MS to use
* RBER bit to determine if a function is 1.1 version device
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 17499a5..53969af 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -138,9 +138,10 @@ static int __devinit anatop_regulator_probe(struct platform_device *pdev)
rdesc->type = REGULATOR_VOLTAGE;
rdesc->owner = THIS_MODULE;
sreg->mfd = anatopmfd;
- ret = of_property_read_u32(np, "reg", &sreg->control_reg);
+ ret = of_property_read_u32(np, "anatop-reg-offset",
+ &sreg->control_reg);
if (ret) {
- dev_err(dev, "no reg property set\n");
+ dev_err(dev, "no anatop-reg-offset property set\n");
goto anatop_probe_end;
}
ret = of_property_read_u32(np, "anatop-vol-bit-width",
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c056abd..e70dd38 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2992,14 +2992,14 @@ void regulator_unregister(struct regulator_dev *rdev)
if (rdev == NULL)
return;
+ if (rdev->supply)
+ regulator_put(rdev->supply);
mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
flush_work_sync(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
- if (rdev->supply)
- regulator_put(rdev->supply);
kfree(rdev->constraints);
device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
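Releasing rdev->supply before taking regulator_list_mutex matters because regulator_put() itself acquires that mutex; doing it after the lock, as before, would deadlock the unregister path. A toy illustration of the ordering constraint, assuming an ordinary non-recursive lock:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void put_supply(void)
{
        pthread_mutex_lock(&list_lock);         /* same lock the caller wants */
        printf("supply released\n");
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        put_supply();                           /* safe: lock not yet held    */
        pthread_mutex_lock(&list_lock);         /* now do the list teardown   */
        printf("unregistered\n");
        pthread_mutex_unlock(&list_lock);
        return 0;
}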
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 30d0a15..cacd33c 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -18,7 +18,6 @@ static void regulator_fixed_release(struct device *dev)
/**
* regulator_register_fixed - register a no-op fixed regulator
- * @name: supply name
* @id: platform device id
* @supplies: consumers for this regulator
* @num_supplies: number of consumers
@@ -32,7 +31,7 @@ struct platform_device *regulator_register_fixed(int id,
if (!data)
return NULL;
- data->cfg.supply_name = "dummy";
+ data->cfg.supply_name = "fixed-dummy";
data->cfg.microvolts = 0;
data->cfg.gpio = -EINVAL;
data->cfg.enabled_at_boot = 1;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index e8cfc99..845aa22 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -552,7 +552,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
mc13xxx_lock(mc13892);
ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
if (ret)
- goto err_free;
+ goto err_unlock;
/* enable switch auto mode */
if ((val & 0x0000FFFF) == 0x45d0) {
@@ -562,7 +562,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
MC13892_SWITCHERS4_SW1MODE_AUTO |
MC13892_SWITCHERS4_SW2MODE_AUTO);
if (ret)
- goto err_free;
+ goto err_unlock;
ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
MC13892_SWITCHERS5_SW3MODE_M |
@@ -570,7 +570,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
MC13892_SWITCHERS5_SW3MODE_AUTO |
MC13892_SWITCHERS5_SW4MODE_AUTO);
if (ret)
- goto err_free;
+ goto err_unlock;
}
mc13xxx_unlock(mc13892);
@@ -612,10 +612,10 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
err:
while (--i >= 0)
regulator_unregister(priv->regulators[i]);
+ return ret;
-err_free:
+err_unlock:
mc13xxx_unlock(mc13892);
-
return ret;
}
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 58447db..4ca2db0 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -311,8 +311,7 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
const struct s5m_voltage_desc *desc;
int reg_id = rdev_get_id(rdev);
- int reg, mask, ret;
- int i;
+ int sel, reg, mask, ret;
u8 val;
switch (reg_id) {
@@ -333,19 +332,20 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
desc = reg_voltage_map[reg_id];
- i = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
- if (i < 0)
- return i;
+ sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
ret = s5m8767_get_voltage_register(rdev, &reg);
if (ret)
return ret;
s5m_reg_read(s5m8767->iodev, reg, &val);
- val = val & mask;
+ val &= ~mask;
+ val |= sel;
ret = s5m_reg_write(s5m8767->iodev, reg, val);
- *selector = i;
+ *selector = sel;
return ret;
}
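The s5m8767 fix is the standard register-field update: the old code masked the stale register value and never applied the new selector at all, while the new code clears the field with ~mask and then ORs the selector in. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned int reg  = 0xA5;       /* current register contents   */
        unsigned int mask = 0x3F;       /* voltage-selector field      */
        unsigned int sel  = 0x12;       /* newly computed selector     */

        reg &= ~mask;                   /* keep bits outside the field */
        reg |= sel;                     /* install the new selector    */

        printf("reg = 0x%02X\n", reg);  /* 0x92 */
        return 0;
}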
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 29b615c..cfc1f16 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -79,6 +79,11 @@ static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
unsigned selector)
{
struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
+ int rid = rdev_get_id(rdev);
+
+ /* LDO0 has a minimum voltage of 1.2V rather than 1.25V */
+ if ((rid == TPS6586X_ID_LDO_0) && (selector == 0))
+ return (info->voltages[0] - 50) * 1000;
return info->voltages[selector] * 1000;
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 4904a40..ff810e7 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -380,13 +380,15 @@ static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev,
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_dcdc_ilim); i++) {
- if (max_uA <= wm831x_dcdc_ilim[i])
+ if ((min_uA <= wm831x_dcdc_ilim[i]) &&
+ (wm831x_dcdc_ilim[i] <= max_uA))
break;
}
if (i == ARRAY_SIZE(wm831x_dcdc_ilim))
return -EINVAL;
- return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK, i);
+ return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK,
+ i << WM831X_DC1_HC_THR_SHIFT);
}
static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
@@ -400,7 +402,8 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
if (val < 0)
return val;
- return wm831x_dcdc_ilim[val & WM831X_DC1_HC_THR_MASK];
+ val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT;
+ return wm831x_dcdc_ilim[val];
}
static struct regulator_ops wm831x_buckv_ops = {
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 634aac3..b414e09 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -101,7 +101,7 @@ static int wm831x_isink_set_current(struct regulator_dev *rdev,
for (i = 0; i < ARRAY_SIZE(wm831x_isinkv_values); i++) {
int val = wm831x_isinkv_values[i];
- if (min_uA >= val && val <= max_uA) {
+ if (min_uA <= val && val <= max_uA) {
ret = wm831x_set_bits(wm831x, isink->reg,
WM831X_CS1_ISEL_MASK, i);
return ret;
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index f1e4ab0..641e9f6 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -506,22 +506,19 @@ static int wm831x_aldo_set_mode(struct regulator_dev *rdev,
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
- int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
int ret;
switch (mode) {
case REGULATOR_MODE_NORMAL:
- ret = wm831x_set_bits(wm831x, on_reg,
- WM831X_LDO7_ON_MODE, 0);
+ ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE, 0);
if (ret < 0)
return ret;
break;
case REGULATOR_MODE_IDLE:
- ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO7_ON_MODE,
+ ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE,
WM831X_LDO7_ON_MODE);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index ab1e183..05ecfb8 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -99,7 +99,7 @@ static int get_isink_val(int min_uA, int max_uA, u16 *setting)
{
int i;
- for (i = ARRAY_SIZE(isink_cur) - 1; i >= 0; i--) {
+ for (i = 0; i < ARRAY_SIZE(isink_cur); i++) {
if (min_uA <= isink_cur[i] && max_uA >= isink_cur[i]) {
*setting = i;
return 0;
@@ -186,7 +186,7 @@ static int wm8350_isink_get_current(struct regulator_dev *rdev)
return 0;
}
- return DIV_ROUND_CLOSEST(isink_cur[val], 100);
+ return isink_cur[val];
}
/* turn on ISINK followed by DCDC */
@@ -495,25 +495,25 @@ static int wm8350_dcdc_set_suspend_enable(struct regulator_dev *rdev)
val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER)
& ~WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
- wm8350->pmic.dcdc1_hib_mode);
+ val | wm8350->pmic.dcdc1_hib_mode);
break;
case WM8350_DCDC_3:
val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER)
& ~WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
- wm8350->pmic.dcdc3_hib_mode);
+ val | wm8350->pmic.dcdc3_hib_mode);
break;
case WM8350_DCDC_4:
val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER)
& ~WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
- wm8350->pmic.dcdc4_hib_mode);
+ val | wm8350->pmic.dcdc4_hib_mode);
break;
case WM8350_DCDC_6:
val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER)
& ~WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
- wm8350->pmic.dcdc6_hib_mode);
+ val | wm8350->pmic.dcdc6_hib_mode);
break;
case WM8350_DCDC_2:
case WM8350_DCDC_5:
@@ -535,25 +535,25 @@ static int wm8350_dcdc_set_suspend_disable(struct regulator_dev *rdev)
val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
wm8350->pmic.dcdc1_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
- WM8350_DCDC_HIB_MODE_DIS);
+ val | WM8350_DCDC_HIB_MODE_DIS);
break;
case WM8350_DCDC_3:
val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER);
wm8350->pmic.dcdc3_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
- WM8350_DCDC_HIB_MODE_DIS);
+ val | WM8350_DCDC_HIB_MODE_DIS);
break;
case WM8350_DCDC_4:
val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER);
wm8350->pmic.dcdc4_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
- WM8350_DCDC_HIB_MODE_DIS);
+ val | WM8350_DCDC_HIB_MODE_DIS);
break;
case WM8350_DCDC_6:
val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER);
wm8350->pmic.dcdc6_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
- WM8350_DCDC_HIB_MODE_DIS);
+ val | WM8350_DCDC_HIB_MODE_DIS);
break;
case WM8350_DCDC_2:
case WM8350_DCDC_5:
@@ -575,13 +575,13 @@ static int wm8350_dcdc25_set_suspend_enable(struct regulator_dev *rdev)
val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
& ~WM8350_DC2_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
- WM8350_DC2_HIB_MODE_ACTIVE);
+ (WM8350_DC2_HIB_MODE_ACTIVE << WM8350_DC2_HIB_MODE_SHIFT));
break;
case WM8350_DCDC_5:
val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
- & ~WM8350_DC2_HIB_MODE_MASK;
+ & ~WM8350_DC5_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
- WM8350_DC5_HIB_MODE_ACTIVE);
+ (WM8350_DC5_HIB_MODE_ACTIVE << WM8350_DC5_HIB_MODE_SHIFT));
break;
default:
return -EINVAL;
@@ -600,13 +600,13 @@ static int wm8350_dcdc25_set_suspend_disable(struct regulator_dev *rdev)
val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
& ~WM8350_DC2_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
- WM8350_DC2_HIB_MODE_DISABLE);
+ (WM8350_DC2_HIB_MODE_DISABLE << WM8350_DC2_HIB_MODE_SHIFT));
break;
case WM8350_DCDC_5:
val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
- & ~WM8350_DC2_HIB_MODE_MASK;
+ & ~WM8350_DC5_HIB_MODE_MASK;
wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
- WM8350_DC2_HIB_MODE_DISABLE);
+ (WM8350_DC5_HIB_MODE_DISABLE << WM8350_DC5_HIB_MODE_SHIFT));
break;
default:
return -EINVAL;
@@ -749,7 +749,7 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev)
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_HIB_MODE_MASK;
- wm8350_reg_write(wm8350, volt_reg, WM8350_LDO1_HIB_MODE_DIS);
+ wm8350_reg_write(wm8350, volt_reg, val | WM8350_LDO1_HIB_MODE_DIS);
return 0;
}
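Two things change in the wm8350 hunks: every suspend write becomes a read-modify-write (val | mode) so unrelated bits in the LOW_POWER/CONTROL registers survive, and get_isink_val() now scans isink_cur[] in ascending order, so the smallest current setting inside [min_uA, max_uA] is chosen rather than the largest. A sketch of that selection; the table values are invented, not the wm8350 data:

#include <stdio.h>

static const int cur_table[] = { 4, 8, 16, 32, 64, 128 };   /* uA, made up */

static int pick(int min_uA, int max_uA)
{
        unsigned int i;

        for (i = 0; i < sizeof(cur_table) / sizeof(cur_table[0]); i++)
                if (min_uA <= cur_table[i] && cur_table[i] <= max_uA)
                        return i;               /* first (smallest) match */
        return -1;
}

int main(void)
{
        printf("setting index for 10..100 uA: %d\n", pick(10, 100));  /* 2 -> 16 uA */
        return 0;
}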
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a06e608..29684c8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -619,6 +619,7 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ad24e06..8deedea 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index f29d512..68ce085 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2582,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* this than via the PCI device table
*/
if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
- error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+ atpdev->chip_ver = pdev->revision;
if (atpdev->chip_ver < 2)
goto err_eio;
}
@@ -2601,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
base_io &= 0xfffffff8;
if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
- error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+ atpdev->chip_ver = pdev->revision;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
host_id = inb(base_io + 0x39);
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index a796de9..4ad7e36 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -225,9 +225,9 @@ struct bfa_faa_args_s {
};
struct bfa_iocfc_s {
+ bfa_fsm_t fsm;
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
- int action;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
u8 hw_qid[BFI_IOC_MAX_CQS];
@@ -236,7 +236,9 @@ struct bfa_iocfc_s {
struct bfa_cb_qe_s dis_hcb_qe;
struct bfa_cb_qe_s en_hcb_qe;
struct bfa_cb_qe_s stats_hcb_qe;
- bfa_boolean_t cfgdone;
+ bfa_boolean_t submod_enabled;
+ bfa_boolean_t cb_reqd; /* Driver call back reqd */
+ bfa_status_t op_status; /* Status of bfa iocfc op */
struct bfa_dma_s cfg_info;
struct bfi_iocfc_cfg_s *cfginfo;
@@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
u32 *end);
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
-wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
-wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
struct bfi_pbc_vport_s *pbc_vport);
@@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4bd546b..456e576 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@ enum {
#define DEF_CFG_NUM_SBOOT_LUNS 16
/*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+ struct bfa_iocfc_s, enum iocfc_event);
+
+/*
* forward declaration for IOC FC functions
*/
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_INIT:
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_START:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ break;
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_fcport_init(iocfc->bfa);
+ bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+ bfa_iocfc_stop_cb, iocfc->bfa);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
/*
* BFA Interrupt handling functions
@@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-static inline void
+bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
u32 pi, ci;
struct list_head *waitq;
+ bfa_boolean_t ret;
ci = bfa_rspq_ci(bfa, qid);
pi = bfa_rspq_pi(bfa, qid);
+ ret = (ci != pi);
+
while (ci != pi) {
m = bfa_rspq_elem(bfa, qid, ci);
WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
@@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
+
+ return ret;
}
static inline void
@@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa)
{
u32 intr, qintr;
int queue;
+ bfa_boolean_t rspq_comp = BFA_FALSE;
intr = readl(bfa->iocfc.bfa_regs.intr_status);
@@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa)
*/
if (bfa->queue_process) {
for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
- bfa_isr_rspq(bfa, queue);
+ if (bfa_isr_rspq(bfa, queue))
+ rspq_comp = BFA_TRUE;
}
if (!intr)
- return BFA_TRUE;
+ return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
/*
* CPE completion queue interrupt
@@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg)
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
*/
- if (!iocfc->cfgdone)
+ if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
cfg_info->intr_attr.coalesce = BFA_TRUE;
- iocfc->cfgdone = BFA_FALSE;
-
/*
* dma map IOC configuration itself
*/
@@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa->bfad = bfad;
iocfc->bfa = bfa;
- iocfc->action = BFA_IOCFC_ACT_NONE;
-
iocfc->cfg = *cfg;
/*
@@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_TRUE;
}
/*
@@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
int i;
+ if (bfa->iocfc.submod_enabled == BFA_FALSE)
+ return;
+
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->iocdisable(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_FALSE;
}
static void
@@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
struct bfa_s *bfa = bfa_arg;
- if (complete) {
- if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
- bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
- else
- bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
- } else {
- if (bfa->iocfc.cfgdone)
- bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
- }
+ if (complete)
+ bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}
static void
@@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
if (compl)
complete(&bfad->comp);
- else
- bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
static void
@@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
- iocfc->cfgdone = BFA_TRUE;
-
/*
* configure queue register offsets as learnt from firmware
*/
@@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
*/
bfa_msix_queue_install(bfa);
- /*
- * Configuration is complete - initialize/start submodules
- */
- bfa_fcport_init(bfa);
-
- if (iocfc->action == BFA_IOCFC_ACT_INIT) {
- if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
- bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- } else {
- if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
- bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
- bfa_iocfc_enable_cb, bfa);
- bfa_iocfc_start_submod(bfa);
+ if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+ bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+ bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}
}
+
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
@@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
}
}
+/*
+ * Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+ cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+ bfa->ioc.attr->pwwn = msg->pwwn;
+ bfa->ioc.attr->nwwn = msg->nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
/* Fabric Assigned Address specific functions */
/*
@@ -855,84 +1280,13 @@ bfa_faa_validate_request(struct bfa_s *bfa)
if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
return BFA_STATUS_FEATURE_NOT_SUPPORTED;
} else {
- if (!bfa_ioc_is_acq_addr(&bfa->ioc))
- return BFA_STATUS_IOC_NON_OP;
+ return BFA_STATUS_IOC_NON_OP;
}
return BFA_STATUS_OK;
}
bfa_status_t
-bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
-{
- struct bfi_faa_en_dis_s faa_enable_req;
- struct bfa_iocfc_s *iocfc = &bfa->iocfc;
- bfa_status_t status;
-
- iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
- iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
- status = bfa_faa_validate_request(bfa);
- if (status != BFA_STATUS_OK)
- return status;
-
- if (iocfc->faa_args.busy == BFA_TRUE)
- return BFA_STATUS_DEVBUSY;
-
- if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
- return BFA_STATUS_FAA_ENABLED;
-
- if (bfa_fcport_is_trunk_enabled(bfa))
- return BFA_STATUS_ERROR_TRUNK_ENABLED;
-
- bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
- iocfc->faa_args.busy = BFA_TRUE;
-
- memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
- bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
- BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
-
- bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
- sizeof(struct bfi_faa_en_dis_s));
-
- return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
- void *cbarg)
-{
- struct bfi_faa_en_dis_s faa_disable_req;
- struct bfa_iocfc_s *iocfc = &bfa->iocfc;
- bfa_status_t status;
-
- iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
- iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
- status = bfa_faa_validate_request(bfa);
- if (status != BFA_STATUS_OK)
- return status;
-
- if (iocfc->faa_args.busy == BFA_TRUE)
- return BFA_STATUS_DEVBUSY;
-
- if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
- return BFA_STATUS_FAA_DISABLED;
-
- bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
- iocfc->faa_args.busy = BFA_TRUE;
-
- memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
- bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
- BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
-
- bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
- sizeof(struct bfi_faa_en_dis_s));
-
- return BFA_STATUS_OK;
-}
-
-bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
bfa_cb_iocfc_t cbfn, void *cbarg)
{
@@ -963,38 +1317,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
}
/*
- * FAA enable response
- */
-static void
-bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
- struct bfi_faa_en_dis_rsp_s *rsp)
-{
- void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
- bfa_status_t status = rsp->status;
-
- WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
- iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
- iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
- * FAA disable response
- */
-static void
-bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
- struct bfi_faa_en_dis_rsp_s *rsp)
-{
- void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
- bfa_status_t status = rsp->status;
-
- WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
- iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
- iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
* FAA query response
*/
static void
@@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
struct bfa_s *bfa = bfa_arg;
- if (status == BFA_STATUS_FAA_ACQ_ADDR) {
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- return;
- }
-
- if (status != BFA_STATUS_OK) {
- bfa_isr_disable(bfa);
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
- bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
- bfa_iocfc_enable_cb, bfa);
- return;
- }
-
- bfa_iocfc_send_cfg(bfa);
- bfa_dconf_modinit(bfa);
+ if (status == BFA_STATUS_OK)
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+ else
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}
/*
@@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
- bfa_isr_disable(bfa);
- bfa_iocfc_disable_submod(bfa);
-
- if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
- bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
- bfa);
- else {
- WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
- bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
- bfa);
- }
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}
/*
@@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
bfa->queue_process = BFA_FALSE;
-
- bfa_isr_disable(bfa);
- bfa_iocfc_disable_submod(bfa);
-
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
- bfa);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}
/*
@@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
bfa_isr_enable(bfa);
}
-
/*
* Query IOC memory requirement information.
*/
@@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&bfa->comp_q);
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+ bfa->iocfc.cb_reqd = BFA_FALSE;
+ bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa->iocfc.submod_enabled = BFA_FALSE;
+
+ bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}
/*
@@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
void
bfa_iocfc_init(struct bfa_s *bfa)
{
- bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
- bfa_ioc_enable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}
/*
@@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
void
bfa_iocfc_start(struct bfa_s *bfa)
{
- if (bfa->iocfc.cfgdone)
- bfa_iocfc_start_submod(bfa);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}
/*
@@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa)
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
- bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
bfa->queue_process = BFA_FALSE;
- bfa_dconf_modexit(bfa);
- if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
- bfa_ioc_disable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}
void
@@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
case BFI_IOCFC_I2H_UPDATEQ_RSP:
iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
break;
- case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
- bfa_faa_enable_reply(iocfc,
- (struct bfi_faa_en_dis_rsp_s *)msg);
- break;
- case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
- bfa_faa_disable_reply(iocfc,
- (struct bfi_faa_en_dis_rsp_s *)msg);
+ case BFI_IOCFC_I2H_ADDR_MSG:
+ bfa_iocfc_process_faa_addr(bfa,
+ (struct bfi_faa_addr_msg_s *)msg);
break;
case BFI_IOCFC_I2H_FAA_QUERY_RSP:
bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Enable");
- bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
- bfa_ioc_enable(&bfa->ioc);
+ bfa->iocfc.cb_reqd = BFA_TRUE;
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}
void
@@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Disable");
- bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
bfa->queue_process = BFA_FALSE;
- bfa_ioc_disable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}
-
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
- return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+ return bfa_ioc_is_operational(&bfa->ioc) &&
+ bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}
/*
@@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
}
}
-void
-bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
-{
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
- if (bfa->iocfc.cfgdone == BFA_TRUE)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- }
-}
-
/*
* Return the list of PCI vendor/device id lists supported by this
* BFA instance.
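The bfa_core.c hunks above replace the iocfc action/cfgdone flags with an explicit state machine driven through bfa_fsm_set_state() and bfa_fsm_send_event(). As a rough illustration of that function-pointer FSM style only (hypothetical names, not the driver's actual macros), a minimal standalone C sketch:

/*
 * Illustrative sketch of a function-pointer state machine in the style
 * the IOCFC code adopts above. All names here are made up for the example.
 */
#include <stdio.h>

struct fsm;
typedef void (*fsm_state_t)(struct fsm *f, int event);

struct fsm {
	fsm_state_t state;		/* current state handler */
};

static void sm_running(struct fsm *f, int event);

static void sm_stopped(struct fsm *f, int event)
{
	if (event == 1) {		/* e.g. E_START */
		f->state = sm_running;	/* "set state" */
		printf("stopped -> running\n");
	}
}

static void sm_running(struct fsm *f, int event)
{
	if (event == 2) {		/* e.g. E_STOP */
		f->state = sm_stopped;
		printf("running -> stopped\n");
	}
}

/* "send event": dispatch to whichever handler is currently installed */
static void fsm_send_event(struct fsm *f, int event)
{
	f->state(f, event);
}

int main(void)
{
	struct fsm f = { .state = sm_stopped };

	fsm_send_event(&f, 1);
	fsm_send_event(&f, 2);
	return 0;
}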
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index cb07c62..36756ce 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
- u8 rsvd[2];
+ u8 rsvd[6];
};
#pragma pack()
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index d4f951f..5d2a130 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
if (vport_drv->comp_del)
complete(vport_drv->comp_del);
+ else
+ kfree(vport_drv);
bfa_lps_delete(vport->lps);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 52628d5..fe0463a 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
* - MAX receive frame size
*/
rport->cisc = plogi->csp.cisc;
- rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+ if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+ rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+ else
+ rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
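For reference, the rport update above picks the smaller of the class-3 and common-service-parameter receive sizes; the same clamp could be written with the kernel's min_t() helper. This is only an equivalent sketch, not part of the patch:

#include <linux/kernel.h>	/* min_t() */
#include <asm/byteorder.h>	/* be16_to_cpu() */

/* Illustrative helper: return the smaller of two wire-order (big-endian)
 * frame sizes in host order, equivalent to the branch added above. */
static inline u16 smaller_rxsz(__be16 a, __be16 b)
{
	return min_t(u16, be16_to_cpu(a), be16_to_cpu(b));
}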
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index eca7ab7..14e6284 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
@@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
-
/*
* IOC state machine definitions/declarations
*/
@@ -114,7 +112,6 @@ enum ioc_event {
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
- IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
- {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
/*
@@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
switch (event) {
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
- bfa_ioc_check_attr_wwns(ioc);
- bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
- case IOC_E_FWRSP_ACQ_ADDR:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_hb_monitor(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
- break;
-
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
@@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
-/*
- * Acquiring address from fabric (entry function)
- */
-static void
-bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
-{
-}
-
-/*
- * Acquiring address from the fabric
- */
-static void
-bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
- bfa_trc(ioc, event);
-
- switch (event) {
- case IOC_E_FWRSP_GETATTR:
- bfa_ioc_check_attr_wwns(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
- break;
-
- case IOC_E_PFFAILED:
- case IOC_E_HWERROR:
- bfa_hb_timer_stop(ioc);
- case IOC_E_HBFAIL:
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
- if (event != IOC_E_PFFAILED)
- bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
- break;
-
- case IOC_E_DISABLE:
- bfa_hb_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
- break;
-
- case IOC_E_ENABLE:
- break;
-
- default:
- bfa_sm_fault(ioc, event);
- }
-}
-
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
@@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+ bfa_ioc_hb_monitor(ioc);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
@@ -738,26 +681,60 @@ static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
struct bfi_ioc_image_hdr_s fwhdr;
- u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+ u32 r32, fwstate, pgnum, pgoff, loff = 0;
+ int i;
+
+ /*
+ * Spin on init semaphore to serialize.
+ */
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ while (r32 & 0x1) {
+ udelay(20);
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ }
/* h/w sem init */
- if (fwstate == BFI_IOC_UNINIT)
+ fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+ if (fwstate == BFI_IOC_UNINIT) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
+ }
bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
- if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
+ }
+
+ /*
+ * Clear fwver hdr
+ */
+ pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+ bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+ loff += sizeof(u32);
+ }
bfa_trc(iocpf->ioc, fwstate);
- bfa_trc(iocpf->ioc, fwhdr.exec);
+ bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
/*
- * Try to lock and then unlock the semaphore.
+ * Unlock the hw semaphore. Should be here only once per boot.
*/
readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+
+ /*
+ * unlock init semaphore.
+ */
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
sem_get:
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 i;
u32 asicmode;
- /*
- * Initialize LMEM first before code download
- */
- bfa_ioc_lmem_init(ioc);
-
bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
@@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
bfa_ioc_pll_init_asic(ioc);
ioc->pllinit = BFA_TRUE;
+
+ /*
+ * Initialize LMEM
+ */
+ bfa_ioc_lmem_init(ioc);
+
/*
* release semaphore.
*/
@@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
bfa_ioc_getattr_reply(ioc);
break;
- case BFI_IOC_I2H_ACQ_ADDR_REPLY:
- bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
- break;
-
default:
bfa_trc(ioc, msg->mh.msg_id);
WARN_ON(1);
@@ -2416,15 +2390,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
}
/*
- * Return TRUE if IOC is in acquiring address state
- */
-bfa_boolean_t
-bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
-{
- return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
-}
-
-/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
@@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
-{
- if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
- return;
- if (ioc->attr->nwwn == 0)
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
- if (ioc->attr->pwwn == 0)
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
-}
-
/*
* BFA IOC PF private functions
*/
@@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
*/
#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
-#define BFA_DIAG_FWPING_TOV 1000 /* msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
/* IOC event handler */
static void
@@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
}
static void
-diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
bfa_trc(diag, diag->ledtest.lock);
diag->ledtest.lock = BFA_FALSE;
@@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
u32 pattern, struct bfa_diag_memtest_result *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
+ u32 memtest_tov;
+
bfa_trc(diag, pattern);
if (!bfa_ioc_adapter_is_disabled(diag->ioc))
@@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
/* download memtest code and take LPU0 out of reset */
bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+ memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+ CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
- bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+ bfa_diag_memtest_done, diag, memtest_tov);
diag->timer_active = 1;
return BFA_STATUS_OK;
}
@@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
case BFA_DCONF_SM_INIT:
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
+ bfa_fsm_send_event(&dconf->bfa->iocfc,
+ IOCFC_E_DCONF_DONE);
return;
}
bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
- dconf->flashdone = BFA_FALSE;
- bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf,
sizeof(struct bfa_dconf_s), 0,
bfa_dconf_init_cb, dconf->bfa);
if (bfa_status != BFA_STATUS_OK) {
+ bfa_timer_stop(&dconf->timer);
bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
return;
}
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
@@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
break;
default:
@@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_INIT:
case BFA_DCONF_SM_IOCDISABLE:
@@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
bfa_timer_stop(&dconf->timer);
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
- bfa_ioc_disable(&dconf->bfa->ioc);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
default:
bfa_sm_fault(dconf->bfa, event);
@@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
break;
@@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (cfg->drvcfg.min_cfg) {
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
dconf->min_cfg = BFA_TRUE;
- /*
- * Set the flashdone flag to TRUE explicitly as no flash
- * write will happen in min_cfg mode.
- */
- dconf->flashdone = BFA_TRUE;
} else {
dconf->min_cfg = BFA_FALSE;
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
@@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
struct bfa_s *bfa = arg;
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
- dconf->flashdone = BFA_TRUE;
- bfa_trc(bfa, dconf->flashdone);
- bfa_iocfc_cb_dconf_modinit(bfa, status);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
if (status == BFA_STATUS_OK) {
bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
dconf->dconf->hdr.version = BFI_DCONF_VERSION;
}
- bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
void
@@ -5977,7 +5933,5 @@ void
bfa_dconf_modexit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
- BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
- bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 546d46b..1a99d4b 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -373,6 +373,22 @@ struct bfa_cb_qe_s {
};
/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+ IOCFC_E_INIT = 1, /* IOCFC init request */
+ IOCFC_E_START = 2, /* IOCFC mod start request */
+ IOCFC_E_STOP = 3, /* IOCFC stop request */
+ IOCFC_E_ENABLE = 4, /* IOCFC enable request */
+ IOCFC_E_DISABLE = 5, /* IOCFC disable request */
+ IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
+ IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
+ IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
+ IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
+ IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
+};
+
+/*
* ASIC block configuration related
*/
@@ -706,7 +722,6 @@ struct bfa_dconf_s {
struct bfa_dconf_mod_s {
bfa_sm_t sm;
u8 instance;
- bfa_boolean_t flashdone;
bfa_boolean_t read_data_valid;
bfa_boolean_t min_cfg;
struct bfa_timer_s timer;
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index d1b8f0c..2eb0c6a 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
}
#define CT2_NFC_MAX_DELAY 1000
+#define CT2_NFC_VER_VALID 0x143
+#define BFA_IOC_PLL_POLL 1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+ u32 r32;
+ int i;
+
+ writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (!(r32 & __NFC_CONTROLLER_HALTED))
+ return;
+ udelay(1000);
+ }
+ WARN_ON(1);
+}
+
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
- u32 wgn, r32;
- int i;
+ u32 wgn, r32, nfc_ver, i;
- /*
- * Initialize PLL if not already done by NFC
- */
wgn = readl(rb + CT2_WGN_STATUS);
- if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+ nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+ if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+ (nfc_ver >= CT2_NFC_VER_VALID)) {
+ if (bfa_ioc_ct2_nfc_halted(rb))
+ bfa_ioc_ct2_nfc_resume(rb);
+
+ writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+ rb + CT2_CSI_FW_CTL_SET_REG);
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+ break;
+ }
+
+ WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+ break;
+ }
+
+ WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+ udelay(1000);
+
+ r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+ WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+ } else {
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
break;
udelay(1000);
}
- }
- /*
- * Mask the interrupts and clear any
- * pending interrupts.
- */
- writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
- writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
-
- r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
- if (r32 == 1) {
- writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
}
- r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
- if (r32 == 1) {
- writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
- }
-
- bfa_ioc_ct2_mac_reset(rb);
- bfa_ioc_ct2_sclk_init(rb);
- bfa_ioc_ct2_lclk_init(rb);
-
- /*
- * release soft reset on s_clk & l_clk
- */
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
- writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
- (rb + CT2_APP_PLL_SCLK_CTL_REG));
-
- /*
- * release soft reset on s_clk & l_clk
- */
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
- (rb + CT2_APP_PLL_LCLK_CTL_REG));
/*
* Announce flash device presence, if flash was corrupted.
*/
if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
- r32 = readl((rb + PSS_GPIO_OUT_REG));
+ r32 = readl(rb + PSS_GPIO_OUT_REG);
writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
- r32 = readl((rb + PSS_GPIO_OE_REG));
+ r32 = readl(rb + PSS_GPIO_OE_REG);
writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts.
+ */
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ /* For first time initialization, no need to clear interrupts */
+ r32 = readl(rb + HOST_SEM5_REG);
+ if (r32 & 0x1) {
+ r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ if (r32 == 1) {
+ writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ if (r32 == 1) {
+ writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ }
+ }
+
bfa_ioc_ct2_mem_init(rb);
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
+
return BFA_STATUS_OK;
}
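bfa_ioc_ct2_pll_init() above busy-waits on CT2_APP_PLL_LCLK_CTL_REG for up to BFA_IOC_PLL_POLL iterations, first for the reset/start bit to latch and then for it to clear. A generic bounded-poll helper in the same spirit might look like the sketch below; the helper name and back-off are assumptions, not code from this patch:

#include <linux/io.h>		/* readl() */
#include <linux/delay.h>	/* udelay() */
#include <linux/types.h>

/*
 * Illustrative only: poll a memory-mapped register until (value & mask)
 * reaches the wanted state or the iteration budget runs out.
 * Returns true on success, false on timeout.
 */
static bool poll_reg_bits(void __iomem *reg, u32 mask, bool want_set,
			  unsigned int max_polls)
{
	u32 r32;

	while (max_polls--) {
		r32 = readl(reg);
		if (!!(r32 & mask) == want_set)
			return true;
		udelay(1);		/* small delay between reads */
	}
	return false;
}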
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa8a0ea..2e856e6 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_RESUME:
bfa_sm_set_state(lps, bfa_lps_sm_login);
+ bfa_lps_send_login(lps);
break;
case BFA_LPS_SM_OFFLINE:
@@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
break;
case BFA_STATUS_VPORT_MAX:
- if (!rsp->ext_status)
+ if (rsp->ext_status)
bfa_lps_no_res(lps, rsp->ext_status);
break;
@@ -3084,33 +3085,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
}
static void
-bfa_fcport_send_txcredit(void *port_cbarg)
-{
-
- struct bfa_fcport_s *fcport = port_cbarg;
- struct bfi_fcport_set_svc_params_req_s *m;
-
- /*
- * check for room in queue to send request now
- */
- m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
- if (!m) {
- bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
- return;
- }
-
- bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
- bfa_fn_lpu(fcport->bfa));
- m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
- m->bb_scn = fcport->cfg.bb_scn;
-
- /*
- * queue I/O message to firmware
- */
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
-}
-
-static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
struct bfa_qos_stats_s *s)
{
@@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
return BFA_STATUS_UNSUPP_SPEED;
}
- /* For Mezz card, port speed entered needs to be checked */
- if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
- if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
- /* For CT2, 1G is not supported */
- if ((speed == BFA_PORT_SPEED_1GBPS) &&
- (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
- return BFA_STATUS_UNSUPP_SPEED;
+ /* Port speed entered needs to be checked */
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+ /* For CT2, 1G is not supported */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+ return BFA_STATUS_UNSUPP_SPEED;
- /* Already checked for Auto Speed and Max Speed supp */
- if (!(speed == BFA_PORT_SPEED_1GBPS ||
- speed == BFA_PORT_SPEED_2GBPS ||
- speed == BFA_PORT_SPEED_4GBPS ||
- speed == BFA_PORT_SPEED_8GBPS ||
- speed == BFA_PORT_SPEED_16GBPS ||
- speed == BFA_PORT_SPEED_AUTO))
- return BFA_STATUS_UNSUPP_SPEED;
- } else {
- if (speed != BFA_PORT_SPEED_10GBPS)
- return BFA_STATUS_UNSUPP_SPEED;
- }
+ /* Already checked for Auto Speed and Max Speed supp */
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
}
fcport->cfg.speed = speed;
@@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
fcport->cfg.bb_scn = bb_scn;
if (bb_scn)
fcport->bbsc_op_state = BFA_TRUE;
- bfa_fcport_send_txcredit(fcport);
}
/*
@@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
attr->port_state = BFA_PORT_ST_IOCDIS;
else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
attr->port_state = BFA_PORT_ST_FWMISMATCH;
- else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
- attr->port_state = BFA_PORT_ST_ACQ_ADDR;
}
/* FCoE vlan */
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index b52cbb6..f300675 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
/* FAA specific APIs */
-bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
- bfa_cb_iocfc_t cbfn, void *cbarg);
-bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
- bfa_cb_iocfc_t cbfn, void *cbarg);
bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
bfa_cb_iocfc_t cbfn, void *cbarg);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 1938fe0..7b1ecd2 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ uint32_t status;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ return 0;
+}
+
static int
bfad_im_vport_delete(struct fc_vport *fc_vport)
{
@@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
unsigned long flags;
struct completion fcomp;
- if (im_port->flags & BFAD_PORT_DELETE)
- goto free_scsi_host;
+ if (im_port->flags & BFAD_PORT_DELETE) {
+ bfad_scsi_host_free(bfad, im_port);
+ list_del(&vport->list_entry);
+ return 0;
+ }
port = im_port->port;
@@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
wait_for_completion(vport->comp_del);
-free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
list_del(&vport->list_entry);
kfree(vport);
@@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = {
.show_rport_dev_loss_tmo = 1,
.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
-
+ .issue_fc_host_lip = bfad_im_issue_fc_host_lip,
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
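The new bfad_im_issue_fc_host_lip() above follows a common bfad pattern: kick off an asynchronous HAL request under bfad_lock, then sleep on a completion that the HAL callback signals. A stripped-down sketch of that pattern, with a hypothetical start_async_op() standing in for bfa_port_disable()/bfa_port_enable():

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

/* Illustrative only; demo_* names are placeholders, not driver symbols. */
struct demo_comp {
	struct completion comp;
	int status;
};

static void demo_done_cb(void *arg, int status)
{
	struct demo_comp *c = arg;

	c->status = status;
	complete(&c->comp);	/* wakes the waiter below */
}

static int demo_sync_op(spinlock_t *lock,
			int (*start_async_op)(void (*cb)(void *, int),
					      void *arg))
{
	struct demo_comp c;
	unsigned long flags;
	int rc;

	init_completion(&c.comp);

	spin_lock_irqsave(lock, flags);
	rc = start_async_op(demo_done_cb, &c);	/* queues request to firmware */
	spin_unlock_irqrestore(lock, flags);
	if (rc)
		return -EIO;

	wait_for_completion(&c.comp);		/* callback runs from IRQ path */
	return c.status ? -EIO : 0;
}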
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 8005c6c..e1f4b10 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -1288,50 +1288,6 @@ out:
}
int
-bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
-{
- struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- unsigned long flags;
- struct bfad_hal_comp fcomp;
-
- init_completion(&fcomp.comp);
- iocmd->status = BFA_STATUS_OK;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
- if (iocmd->status != BFA_STATUS_OK)
- goto out;
-
- wait_for_completion(&fcomp.comp);
- iocmd->status = fcomp.status;
-out:
- return 0;
-}
-
-int
-bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
-{
- struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- unsigned long flags;
- struct bfad_hal_comp fcomp;
-
- init_completion(&fcomp.comp);
- iocmd->status = BFA_STATUS_OK;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
- if (iocmd->status != BFA_STATUS_OK)
- goto out;
-
- wait_for_completion(&fcomp.comp);
- iocmd->status = fcomp.status;
-out:
- return 0;
-}
-
-int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
@@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
void *iocmd_bufptr;
unsigned long flags;
+ u32 offset;
if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
@@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
spin_lock_irqsave(&bfad->bfad_lock, flags);
+ offset = iocmd->offset;
iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
- (u32 *)&iocmd->offset, &iocmd->bufsz);
+ &offset, &iocmd->bufsz);
+ iocmd->offset = offset;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
return 0;
@@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_FLASH_DISABLE_OPTROM:
rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
break;
- case IOCMD_FAA_ENABLE:
- rc = bfad_iocmd_faa_enable(bfad, iocmd);
- break;
- case IOCMD_FAA_DISABLE:
- rc = bfad_iocmd_faa_disable(bfad, iocmd);
- break;
case IOCMD_FAA_QUERY:
rc = bfad_iocmd_faa_query(bfad, iocmd);
break;
@@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) job->shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
+ struct request_queue *request_q = job->req->q;
void *payload_kbuf;
int rc = -EINVAL;
+ /*
+ * Set the BSG device request_queue size to 256 to support
+ * payloads larger than 512*1024 bytes (512 KB).
+ */
+ blk_queue_max_segments(request_q, 256);
+
/* Allocate a temp buffer to hold the passed in user space command */
payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index e859adb..17ad672 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -83,8 +83,6 @@ enum {
IOCMD_PORT_CFG_MODE,
IOCMD_FLASH_ENABLE_OPTROM,
IOCMD_FLASH_DISABLE_OPTROM,
- IOCMD_FAA_ENABLE,
- IOCMD_FAA_DISABLE,
IOCMD_FAA_QUERY,
IOCMD_CEE_GET_ATTR,
IOCMD_CEE_GET_STATS,
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index dc5b9d9..7f74f1d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.0.2.2"
+#define BFAD_DRIVER_VERSION "3.0.23.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 0d9f1fb..d4220e1 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs {
BFI_IOCFC_H2I_CFG_REQ = 1,
BFI_IOCFC_H2I_SET_INTR_REQ = 2,
BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
- BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4,
- BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5,
- BFI_IOCFC_H2I_FAA_QUERY_REQ = 6,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ = 4,
+ BFI_IOCFC_H2I_ADDR_REQ = 5,
};
enum bfi_iocfc_i2h_msgs {
BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
- BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4),
- BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5),
- BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6),
+ BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4),
+ BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5),
};
struct bfi_iocfc_cfg_s {
@@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s {
struct bfi_mhdr_s mh; /* common msg header */
};
+struct bfi_faa_addr_msg_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 rsvd[4];
+ wwn_t pwwn; /* Fabric acquired PWWN */
+ wwn_t nwwn; /* Fabric acquired NWWN */
+};
+
/*
* BFI_IOCFC_H2I_FAA_QUERY_REQ message
*/
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index d892064..ed5f159 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -335,11 +335,17 @@ enum {
#define __PMM_1T_PNDB_P 0x00000002
#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
#define CT2_WGN_STATUS 0x00014990
+#define __A2T_AHB_LOAD 0x00000800
#define __WGN_READY 0x00000400
#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_CLR_REG 0x00027420
#define CT2_NFC_CSR_SET_REG 0x00027424
#define __HALT_NFC_CONTROLLER 0x00000002
#define __NFC_CONTROLLER_HALTED 0x00001000
+#define CT2_RSC_GPR15_REG 0x0002765c
+#define CT2_CSI_FW_CTL_REG 0x00027080
+#define CT2_CSI_FW_CTL_SET_REG 0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
#define __CSI_MAC_RESET 0x00000010
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index abd72a0..c1c6a92 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -439,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
fr->fr_dev = lport;
bg = &bnx2fc_global;
- spin_lock_bh(&bg->fcoe_rx_list.lock);
+ spin_lock(&bg->fcoe_rx_list.lock);
__skb_queue_tail(&bg->fcoe_rx_list, skb);
if (bg->fcoe_rx_list.qlen == 1)
wake_up_process(bg->thread);
- spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ spin_unlock(&bg->fcoe_rx_list.lock);
return 0;
err:
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ae7d15c..335e851 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
goto err;
fps = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&fps->fcoe_rx_list.lock);
+ spin_lock(&fps->fcoe_rx_list.lock);
if (unlikely(!fps->thread)) {
/*
* The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
"ready for incoming skb- using first online "
"CPU.\n");
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
+ spin_unlock(&fps->fcoe_rx_list.lock);
cpu = cpumask_first(cpu_online_mask);
fps = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&fps->fcoe_rx_list.lock);
+ spin_lock(&fps->fcoe_rx_list.lock);
if (!fps->thread) {
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
+ spin_unlock(&fps->fcoe_rx_list.lock);
goto err;
}
}
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
* so we're free to queue skbs into it's queue.
*/
- /* If this is a SCSI-FCP frame, and this is already executing on the
- * correct CPU, and the queue for this CPU is empty, then go ahead
- * and process the frame directly in the softirq context.
- * This lets us process completions without context switching from the
- * NET_RX softirq, to our receive processing thread, and then back to
- * BLOCK softirq context.
+ /*
+ * Note: We used to have a set of conditions under which we would
+ * call fcoe_recv_frame directly, rather than queuing to the rx list
+ * as it could save a few cycles, but doing so is prohibited, as
+ * fcoe_recv_frame has several paths that may sleep, which is forbidden
+ * in softirq context.
*/
- if (fh->fh_type == FC_TYPE_FCP &&
- cpu == smp_processor_id() &&
- skb_queue_empty(&fps->fcoe_rx_list)) {
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
- fcoe_recv_frame(skb);
- } else {
- __skb_queue_tail(&fps->fcoe_rx_list, skb);
- if (fps->fcoe_rx_list.qlen == 1)
- wake_up_process(fps->thread);
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
- }
+ __skb_queue_tail(&fps->fcoe_rx_list, skb);
+ if (fps->thread->state == TASK_INTERRUPTIBLE)
+ wake_up_process(fps->thread);
+ spin_unlock(&fps->fcoe_rx_list.lock);
return 0;
err:
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
+ struct sk_buff_head tmp;
+
+ skb_queue_head_init(&tmp);
set_user_nice(current, -20);
while (!kthread_should_stop()) {
spin_lock_bh(&p->fcoe_rx_list.lock);
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+ skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+ while ((skb = __skb_dequeue(&tmp)) != NULL)
+ fcoe_recv_frame(skb);
+
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ if (!skb_queue_len(&p->fcoe_rx_list)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fcoe_rx_list.lock);
schedule();
set_current_state(TASK_RUNNING);
- if (kthread_should_stop())
- return 0;
- spin_lock_bh(&p->fcoe_rx_list.lock);
- }
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- fcoe_recv_frame(skb);
+ } else
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
}
return 0;
}
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport))
+ if (!fcoe_link_ok(lport)) {
+ rtnl_unlock();
fcoe_ctlr_link_up(&fcoe->ctlr);
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+ }
out_nodev:
rtnl_unlock();
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
- struct fcoe_rcv_info *fr;
- struct sk_buff_head *list;
- struct sk_buff *skb, *next;
- struct sk_buff *head;
+ struct sk_buff *skb;
unsigned int cpu;
for_each_possible_cpu(cpu) {
pp = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&pp->fcoe_rx_list.lock);
- list = &pp->fcoe_rx_list;
- head = list->next;
- for (skb = head; skb != (struct sk_buff *)list;
- skb = next) {
- next = skb->next;
- fr = fcoe_dev_from_skb(skb);
- if (fr->fr_dev == lport) {
- __skb_unlink(skb, list);
- kfree_skb(skb);
- }
- }
- if (!pp->thread || !cpu_online(cpu)) {
- spin_unlock_bh(&pp->fcoe_rx_list.lock);
+ if (!pp->thread || !cpu_online(cpu))
continue;
- }
skb = dev_alloc_skb(0);
if (!skb) {
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
}
skb->destructor = fcoe_percpu_flush_done;
+ spin_lock_bh(&pp->fcoe_rx_list.lock);
__skb_queue_tail(&pp->fcoe_rx_list, skb);
if (pp->fcoe_rx_list.qlen == 1)
wake_up_process(pp->thread);
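fcoe_percpu_receive_thread() above now splices the whole per-CPU rx list onto a private queue while holding the lock and processes frames with the lock dropped, since fcoe_recv_frame() may sleep. A minimal sketch of that splice-and-drain idiom, detached from the fcoe specifics (handle_one_skb() is hypothetical):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: move everything queued so far onto a local list in
 * one locked operation, then handle frames without holding the lock.
 */
static void drain_rx_queue(struct sk_buff_head *rx_list,
			   void (*handle_one_skb)(struct sk_buff *skb))
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);

	spin_lock_bh(&rx_list->lock);
	skb_queue_splice_init(rx_list, &tmp);	/* take the whole backlog */
	spin_unlock_bh(&rx_list->lock);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		handle_one_skb(skb);		/* may sleep; no lock held */
}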
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index e7522dc..249a106 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
printk(KERN_INFO "libfcoe: host%d: FIP selected "
"Fibre-Channel Forwarder MAC %pM\n",
fip->lp->host->host_no, sel->fcf_mac);
- memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+ memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
fip->map_dest = 0;
}
unlock:
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
memcpy(fcf->fcf_mac,
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
+ memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
if (!is_valid_ether_addr(fcf->fcf_mac)) {
LIBFCOE_FIP_DBG(fip,
"Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_desc *desc;
struct fip_encaps *els;
struct fcoe_dev_stats *stats;
+ struct fcoe_fcf *sel;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
/* Drop ELS if there are duplicate critical descriptors */
if (desc->fip_dtype < 32) {
- if (desc_mask & 1U << desc->fip_dtype) {
+ if ((desc->fip_dtype != FIP_DT_MAC) &&
+ (desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP ELS\n");
goto drop;
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
+ sel = fip->sel_fcf;
if (desc_cnt == 1) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors "
"received out of order\n");
goto drop;
}
+ /*
+ * Some switch implementations send two MAC descriptors,
+ * with the first MAC (granted_mac) being the FPMA and the
+ * second one (fcoe_mac) being used as the destination address
+ * for sending/receiving FCoE packets. FIP traffic is
+ * sent using fip_mac. For regular switches, both
+ * fip_mac and fcoe_mac would be the same.
+ */
+ if (desc_cnt == 2)
+ memcpy(granted_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
if (dlen != sizeof(struct fip_mac_desc))
goto len_err;
- memcpy(granted_mac,
- ((struct fip_mac_desc *)desc)->fd_mac,
- ETH_ALEN);
+
+ if ((desc_cnt == 3) && (sel))
+ memcpy(sel->fcoe_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
break;
case FIP_DT_FLOGI:
case FIP_DT_FDISC:
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* No Vx_Port description. Clear all NPIV ports,
* followed by physical port
*/
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list)
- fc_lport_reset(vn_port);
- mutex_unlock(&lport->lp_mutex);
-
mutex_lock(&fip->ctlr_mutex);
per_cpu_ptr(lport->dev_stats,
get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
} else {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cdfe5a1..e002cd4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
.mailbox = 0x0042C,
+ .max_cmds = 100,
.cache_line_size = 0x20,
+ .clear_isr = 1,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
},
{ /* Snipe and Scamp */
.mailbox = 0x0052C,
+ .max_cmds = 100,
.cache_line_size = 0x20,
+ .clear_isr = 1,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
},
{ /* CRoC */
.mailbox = 0x00044,
+ .max_cmds = 1000,
.cache_line_size = 0x20,
+ .clear_isr = 0,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
- mb();
-
ipr_send_command(ipr_cmd);
}
@@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
- mb();
-
ipr_send_command(ipr_cmd);
} else {
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if ((res->bus == starget->channel) &&
- (res->target == starget->id) &&
- (res->lun == 0)) {
+ (res->target == starget->id)) {
return res;
}
}
@@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget)
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
if (ioa_cfg->sis64) {
- if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
- clear_bit(starget->id, ioa_cfg->array_ids);
- else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
- clear_bit(starget->id, ioa_cfg->vset_ids);
- else if (starget->channel == 0)
- clear_bit(starget->id, ioa_cfg->target_ids);
+ if (!ipr_find_starget(starget)) {
+ if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->array_ids);
+ else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->vset_ids);
+ else if (starget->channel == 0)
+ clear_bit(starget->id, ioa_cfg->target_ids);
+ }
}
if (sata_port) {
@@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
del_timer(&ioa_cfg->reset_cmd->timer);
ipr_reset_ioa_job(ioa_cfg->reset_cmd);
} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
- if (ipr_debug && printk_ratelimit())
- dev_err(&ioa_cfg->pdev->dev,
- "Spurious interrupt detected. 0x%08X\n", int_reg);
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
- return IRQ_NONE;
+ if (ioa_cfg->clear_isr) {
+ if (ipr_debug && printk_ratelimit())
+ dev_err(&ioa_cfg->pdev->dev,
+ "Spurious interrupt detected. 0x%08X\n", int_reg);
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ return IRQ_NONE;
+ }
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
@@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
}
}
+ if (ipr_cmd && !ioa_cfg->clear_isr)
+ break;
+
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
@@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
}
- if (likely(rc == 0)) {
- mb();
- ipr_send_command(ipr_cmd);
- } else {
- list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(rc != 0)) {
+ list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
+ ipr_send_command(ipr_cmd);
return 0;
}
@@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
return AC_ERR_INVALID;
}
- mb();
-
ipr_send_command(ipr_cmd);
return 0;
@@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (ioa_cfg->ipr_cmd_pool)
pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+ kfree(ioa_cfg->ipr_cmnd_list);
+ kfree(ioa_cfg->ipr_cmnd_list_dma);
+ ioa_cfg->ipr_cmnd_list = NULL;
+ ioa_cfg->ipr_cmnd_list_dma = NULL;
ioa_cfg->ipr_cmd_pool = NULL;
}
@@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
int i;
ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
- sizeof(struct ipr_cmnd), 16, 0);
+ sizeof(struct ipr_cmnd), 512, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
+ ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
+ ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
+
+ if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
+ ipr_free_cmd_blks(ioa_cfg);
+ return -ENOMEM;
+ }
+
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
host->max_channel = IPR_MAX_BUS_TO_SCAN;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
+ host->can_queue = ioa_cfg->max_cmds;
pci_set_drvdata(pdev, ioa_cfg);
p = &ioa_cfg->chip_cfg->regs;
@@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
/* set SIS 32 or SIS 64 */
ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+ ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
+ ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
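ipr_alloc_cmd_blks()/ipr_free_cmd_blks() above move the command-block bookkeeping from fixed-size arrays to kcalloc()'d arrays sized by the chip's max_cmds, with both arrays released on any failure. The allocate-or-unwind shape is roughly the following sketch; struct and field names are placeholders, not ipr symbols:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Illustrative only: two parallel arrays sized at probe time. */
struct demo_cfg {
	unsigned int max_cmds;
	void **cmd_list;
	dma_addr_t *cmd_dma;
};

static int demo_alloc_cmd_blks(struct demo_cfg *cfg)
{
	cfg->cmd_list = kcalloc(cfg->max_cmds, sizeof(*cfg->cmd_list),
				GFP_KERNEL);
	cfg->cmd_dma = kcalloc(cfg->max_cmds, sizeof(*cfg->cmd_dma),
			       GFP_KERNEL);

	if (!cfg->cmd_list || !cfg->cmd_dma) {
		kfree(cfg->cmd_list);		/* kfree(NULL) is a no-op */
		kfree(cfg->cmd_dma);
		cfg->cmd_list = NULL;
		cfg->cmd_dma = NULL;
		return -ENOMEM;
	}
	return 0;
}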
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f94eaee..153b8bd 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.2"
-#define IPR_DRIVER_DATE "(April 27, 2011)"
+#define IPR_DRIVER_VERSION "2.5.3"
+#define IPR_DRIVER_DATE "(March 10, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -53,7 +53,7 @@
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
* ops the mid-layer can send to the adapter.
*/
-#define IPR_NUM_BASE_CMD_BLKS 100
+#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds)
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
@@ -153,7 +153,7 @@
#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
-#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS
+#define IPR_MAX_COMMANDS 100
#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
IPR_NUM_INTERNAL_CMD_BLKS)
@@ -1305,7 +1305,9 @@ struct ipr_interrupts {
struct ipr_chip_cfg_t {
u32 mailbox;
+ u16 max_cmds;
u8 cache_line_size;
+ u8 clear_isr;
struct ipr_interrupt_offsets regs;
};
@@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg {
u8 sis64:1;
u8 dump_timeout:1;
u8 cfg_locked:1;
+ u8 clear_isr:1;
u8 revid;
@@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg {
struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
- struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
- dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+ u32 max_cmds;
+ struct ipr_cmnd **ipr_cmnd_list;
+ dma_addr_t *ipr_cmnd_list_dma;
}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 630291f..aceffad 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2263,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
mp->class = class;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
- mp->max_xid = max_xid;
+
+ /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
+ pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+ sizeof(struct fc_exch *);
+ if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+ mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+ min_xid - 1;
+ } else {
+ mp->max_xid = max_xid;
+ pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+ (fc_cpu_mask + 1);
+ }
mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
if (!mp->ep_pool)
@@ -2274,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
* divided across all cpus. The exch pointers array memory is
* allocated for exch range per pool.
*/
- pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
mp->pool_max_index = pool_exch_range - 1;
/*
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index bd5d31d..ef9560d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1743,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
mfs = ntohs(flp->fl_csp.sp_bb_data) &
FC_SP_BB_DATA_MASK;
if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
- mfs < lport->mfs)
+ mfs <= lport->mfs) {
lport->mfs = mfs;
+ fc_host_maxframe_size(lport->host) = mfs;
+ } else {
+ FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+ "lport->mfs:%hu\n", mfs, lport->mfs);
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
csp_flags = ntohs(flp->fl_csp.sp_features);
r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 88928f0..fe5d396 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
@@ -22,6 +22,8 @@
ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
ccflags-$(GCOV) += -O0
+ccflags-y += -Werror
+
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 5fc044f..3a1ffdd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -840,6 +840,8 @@ struct lpfc_hba {
struct dentry *debug_dumpData; /* BlockGuard BPL */
struct dentry *debug_dumpDif; /* BlockGuard BPL */
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
+ struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
+ struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
struct dentry *debug_writeGuard; /* inject write guard_tag errors */
struct dentry *debug_writeApp; /* inject write app_tag errors */
struct dentry *debug_writeRef; /* inject write ref_tag errors */
@@ -854,6 +856,8 @@ struct lpfc_hba {
uint32_t lpfc_injerr_rgrd_cnt;
uint32_t lpfc_injerr_rapp_cnt;
uint32_t lpfc_injerr_rref_cnt;
+ uint32_t lpfc_injerr_nportid;
+ struct lpfc_name lpfc_injerr_wwpn;
sector_t lpfc_injerr_lba;
#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
@@ -908,6 +912,8 @@ struct lpfc_hba {
atomic_t fast_event_count;
uint32_t fcoe_eventtag;
uint32_t fcoe_eventtag_at_fcf_scan;
+ uint32_t fcoe_cvl_eventtag;
+ uint32_t fcoe_cvl_eventtag_attn;
struct lpfc_fcf fcf;
uint8_t fc_map[3];
uint8_t valid_vlan;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 296ad5b..5eb2bc1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2575,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
# objects that have been registered with the nameserver after login.
*/
-LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
"Deregister nameserver objects before LOGO");
/*
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 22e17be..5bdf2ee 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -1010,25 +1010,35 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
{
struct dentry *dent = file->f_dentry;
struct lpfc_hba *phba = file->private_data;
- char cbuf[16];
+ char cbuf[32];
+ uint64_t tmp = 0;
int cnt = 0;
if (dent == phba->debug_writeGuard)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
else if (dent == phba->debug_writeApp)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
else if (dent == phba->debug_writeRef)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
else if (dent == phba->debug_readGuard)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
else if (dent == phba->debug_readApp)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
else if (dent == phba->debug_readRef)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
- else if (dent == phba->debug_InjErrLBA)
- cnt = snprintf(cbuf, 16, "0x%lx\n",
- (unsigned long) phba->lpfc_injerr_lba);
- else
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+ else if (dent == phba->debug_InjErrNPortID)
+ cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+ else if (dent == phba->debug_InjErrWWPN) {
+ memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+ tmp = cpu_to_be64(tmp);
+ cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+ } else if (dent == phba->debug_InjErrLBA) {
+ if (phba->lpfc_injerr_lba == (sector_t)(-1))
+ cnt = snprintf(cbuf, 32, "off\n");
+ else
+ cnt = snprintf(cbuf, 32, "0x%llx\n",
+ (uint64_t) phba->lpfc_injerr_lba);
+ } else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0547 Unknown debugfs error injection entry\n");
@@ -1042,7 +1052,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
struct dentry *dent = file->f_dentry;
struct lpfc_hba *phba = file->private_data;
char dstbuf[32];
- unsigned long tmp;
+ uint64_t tmp = 0;
int size;
memset(dstbuf, 0, 32);
@@ -1050,7 +1060,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
if (copy_from_user(dstbuf, buf, size))
return 0;
- if (strict_strtoul(dstbuf, 0, &tmp))
+ if (dent == phba->debug_InjErrLBA) {
+ if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ tmp = (uint64_t)(-1);
+ }
+
+ if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
return 0;
if (dent == phba->debug_writeGuard)
@@ -1067,7 +1082,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
else if (dent == phba->debug_InjErrLBA)
phba->lpfc_injerr_lba = (sector_t)tmp;
- else
+ else if (dent == phba->debug_InjErrNPortID)
+ phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+ else if (dent == phba->debug_InjErrWWPN) {
+ tmp = cpu_to_be64(tmp);
+ memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+ } else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0548 Unknown debugfs error injection entry\n");
@@ -3949,6 +3969,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ snprintf(name, sizeof(name), "InjErrNPortID");
+ phba->debug_InjErrNPortID =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrNPortID) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0809 Cannot create debugfs InjErrNPortID\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "InjErrWWPN");
+ phba->debug_InjErrWWPN =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrWWPN) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0810 Cannot create debugfs InjErrWWPN\n");
+ goto debug_failed;
+ }
+
snprintf(name, sizeof(name), "writeGuardInjErr");
phba->debug_writeGuard =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4321,6 +4363,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
}
+ if (phba->debug_InjErrNPortID) { /* InjErrNPortID */
+ debugfs_remove(phba->debug_InjErrNPortID);
+ phba->debug_InjErrNPortID = NULL;
+ }
+ if (phba->debug_InjErrWWPN) {
+ debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+ phba->debug_InjErrWWPN = NULL;
+ }
if (phba->debug_writeGuard) {
debugfs_remove(phba->debug_writeGuard); /* writeGuard */
phba->debug_writeGuard = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8db2fb3..3407b39 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* due to new FCF discovery
*/
if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
- (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
- !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+ (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+ if (phba->link_state < LPFC_LINK_UP)
+ goto stop_rr_fcf_flogi;
+ if ((phba->fcoe_cvl_eventtag_attn ==
+ phba->fcoe_cvl_eventtag) &&
+ (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+ goto stop_rr_fcf_flogi;
+ else
+ phba->fcoe_cvl_eventtag_attn =
+ phba->fcoe_cvl_eventtag;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
"2611 FLOGI failed on FCF (x%x), "
"status:x%x/x%x, tmo:x%x, perform "
@@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+stop_rr_fcf_flogi:
/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 343d87b..b507536 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (mboxq->u.mb.mbxStatus) {
+ /*
+ * VFI not supported for interface type 0, so ignore any mailbox
+ * error (except VFI in use) and continue with the discovery.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"2018 REG_VFI mbxStatus error x%x "
"HBA state x%x\n",
@@ -5673,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
ret = 1;
spin_unlock_irq(shost->host_lock);
goto out;
- } else {
+ } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ ret = 1;
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2624 RPI %x DID %x flg %x still "
- "logged in\n",
- ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag);
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- ret = 1;
+ "2624 RPI %x DID %x flag %x "
+ "still logged in\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag);
}
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9e2b9b2..91f0976 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -338,6 +338,12 @@ struct lpfc_cqe {
#define CQE_CODE_XRI_ABORTED 0x5
#define CQE_CODE_RECEIVE_V1 0x9
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
+ */
+#define WCQE_PARAM_MASK 0x1FF;
+
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b38f99f..9598fdc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2704,16 +2704,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
}
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-
+ spin_unlock_irq(shost->host_lock);
/*
* Whenever an SLI4 port goes offline, free the
- * RPI. A new RPI when the adapter port comes
- * back online.
+ * RPI. Get a new RPI when the adapter port
+ * comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
-
- spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vports[i], ndlp);
}
}
@@ -2786,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
spin_lock(&phba->scsi_buf_list_lock);
- list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
sb->cur_iocbq.sli4_xritag =
phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+ set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
spin_unlock(&phba->scsi_buf_list_lock);
spin_unlock_irq(&phba->hbalock);
return 0;
@@ -3723,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2549 FCF (x%x) disconnected from network, "
"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3784,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
}
break;
case LPFC_FIP_EVENT_TYPE_CVL:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5226,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* rpi is normalized to a zero base because the physical rpi is
* port based.
*/
- curr_rpi_range = phba->sli4_hba.next_rpi -
- phba->sli4_hba.max_cfg_param.rpi_base;
+ curr_rpi_range = phba->sli4_hba.next_rpi;
spin_unlock_irq(&phba->hbalock);
/*
@@ -5818,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.
ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2888 Port Error Detected "
- "during POST: "
- "port status reg 0x%x, "
- "port_smphr reg 0x%x, "
+ "2888 Unrecoverable port error "
+ "following POST: port status reg "
+ "0x%x, port_smphr reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
reg_data.word0,
portsmphr_reg.word0,
@@ -6142,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
@@ -7231,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
uint32_t rdy_chk, num_resets = 0, reset_again = 0;
union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_register reg_data;
+ uint16_t devid;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
@@ -7277,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
LPFC_SLIPORT_INIT_PORT);
writel(reg_data.word0, phba->sli4_hba.u.if_type2.
CTRLregaddr);
-
+ /* flush */
+ pci_read_config_word(phba->pcidev,
+ PCI_DEVICE_ID, &devid);
/*
* Poll the Port Status Register and wait for RDY for
* up to 10 seconds. If the port doesn't respond, treat
@@ -7315,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
phba->work_status[1] = readl(
phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2890 Port Error Detected "
- "during Port Reset: "
- "port status reg 0x%x, "
+ "2890 Port error detected during port "
+ "reset(%d): port status reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
- reg_data.word0,
+ num_resets, reg_data.word0,
phba->work_status[0],
phba->work_status[1]);
rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7b6b2aa..15ca2a9a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -440,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
out:
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index efc055b..88f3a83 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -39,8 +39,8 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -51,13 +51,19 @@
int _dump_buf_done;
static char *dif_op_str[] = {
- "SCSI_PROT_NORMAL",
- "SCSI_PROT_READ_INSERT",
- "SCSI_PROT_WRITE_STRIP",
- "SCSI_PROT_READ_STRIP",
- "SCSI_PROT_WRITE_INSERT",
- "SCSI_PROT_READ_PASS",
- "SCSI_PROT_WRITE_PASS",
+ "PROT_NORMAL",
+ "PROT_READ_INSERT",
+ "PROT_WRITE_STRIP",
+ "PROT_READ_STRIP",
+ "PROT_WRITE_INSERT",
+ "PROT_READ_PASS",
+ "PROT_WRITE_PASS",
+};
+
+static char *dif_grd_str[] = {
+ "NO_GUARD",
+ "DIF_CRC",
+ "DIX_IP",
};
struct scsi_dif_tuple {
@@ -1281,10 +1287,14 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define BG_ERR_INIT 1
-#define BG_ERR_TGT 2
-#define BG_ERR_SWAP 3
-#define BG_ERR_CHECK 4
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
+#define BG_ERR_INIT 0x1
+/* Return BG_ERR_TGT if error injection is detected by Target */
+#define BG_ERR_TGT 0x2
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP 0x10
+/* Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK 0x20
/**
* lpfc_bg_err_inject - Determine if we should inject an error
@@ -1294,10 +1304,7 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
* @apptag: (out) BlockGuard application tag for transmitted data
* @new_guard (in) Value to replace CRC with if needed
*
- * Returns (1) if error injection is detected by Initiator
- * Returns (2) if error injection is detected by Target
- * Returns (3) if swapping CSUM->CRC is required for error injection
- * Returns (4) disabling Guard/Ref/App checking is required for error injection
+ * Returns BG_ERR_* bit mask or 0 if request ignored
**/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
@@ -1305,7 +1312,10 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
struct scatterlist *sgpe; /* s/g prot entry */
struct scatterlist *sgde; /* s/g data entry */
+ struct lpfc_scsi_buf *lpfc_cmd = NULL;
struct scsi_dif_tuple *src = NULL;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_rport_data *rdata;
uint32_t op = scsi_get_prot_op(sc);
uint32_t blksize;
uint32_t numblks;
@@ -1318,8 +1328,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgpe = scsi_prot_sglist(sc);
sgde = scsi_sglist(sc);
-
lba = scsi_get_lba(sc);
+
+ /* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
blksize = lpfc_cmd_blksize(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
@@ -1334,66 +1345,123 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sizeof(struct scsi_dif_tuple);
if (numblks < blockoff)
blockoff = numblks;
- src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- src += blockoff;
}
}
+ /* Next check if we need to match the remote NPortID or WWPN */
+ rdata = sc->device->hostdata;
+ if (rdata && rdata->pnode) {
+ ndlp = rdata->pnode;
+
+ /* Make sure we have the right NPortID if one is specified */
+ if (phba->lpfc_injerr_nportid &&
+ (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+ return 0;
+
+ /*
+ * Make sure we have the right WWPN if one is specified.
+ * wwn[0] should be a non-zero NAA in a good WWPN.
+ */
+ if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
+ (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+ sizeof(struct lpfc_name)) != 0))
+ return 0;
+ }
+
+ /* Setup a ptr to the protection data if the SCSI host provides it */
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ src += blockoff;
+ lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+ }
+
/* Should we change the Reference Tag */
if (reftag) {
if (phba->lpfc_injerr_wref_cnt) {
switch (op) {
case SCSI_PROT_WRITE_PASS:
- if (blockoff && src) {
- /* Insert error in middle of the IO */
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in middle of the IO.
+ */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9076 BLKGRD: Injecting reftag error: "
"write lba x%lx + x%x oldrefTag x%x\n",
(unsigned long)lba, blockoff,
- src->ref_tag);
+ be32_to_cpu(src->ref_tag));
/*
- * NOTE, this will change ref tag in
- * the memory location forever!
+ * Save the old ref_tag so we can
+ * restore it on completion.
*/
- src->ref_tag = 0xDEADBEEF;
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_REFTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->ref_tag;
+ }
+ src->ref_tag = cpu_to_be32(0xDEADBEEF);
phba->lpfc_injerr_wref_cnt--;
- phba->lpfc_injerr_lba =
- LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_CHECK;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
break;
}
/* Drop thru */
- case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
/*
- * For WRITE_STRIP and WRITE_PASS,
- * force the error on data
- * being copied from SLI-Host to SLI-Port.
+ * For WRITE_INSERT, force the error
+ * to be sent on the wire. It should be
+ * detected by the Target.
*/
+ /* DEADBEEF will be the reftag on the wire */
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_wref_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_INIT;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9077 BLKGRD: Injecting reftag error: "
+ "9078 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
- case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
/*
- * For WRITE_INSERT, force the
- * error to be sent on the wire. It should be
- * detected by the Target.
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
*/
- /* DEADBEEF will be the reftag on the wire */
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_wref_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_TGT;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9078 BLKGRD: Injecting reftag error: "
+ "9077 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
}
@@ -1401,11 +1469,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (phba->lpfc_injerr_rref_cnt) {
switch (op) {
case SCSI_PROT_READ_INSERT:
- /*
- * For READ_INSERT, it doesn't make sense
- * to change the reftag.
- */
- break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_READ_PASS:
/*
@@ -1415,7 +1478,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
*/
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_rref_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ if (phba->lpfc_injerr_rref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1431,56 +1500,87 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (phba->lpfc_injerr_wapp_cnt) {
switch (op) {
case SCSI_PROT_WRITE_PASS:
- if (blockoff && src) {
- /* Insert error in middle of the IO */
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in middle of the IO.
+ */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9080 BLKGRD: Injecting apptag error: "
"write lba x%lx + x%x oldappTag x%x\n",
(unsigned long)lba, blockoff,
- src->app_tag);
+ be16_to_cpu(src->app_tag));
/*
- * NOTE, this will change app tag in
- * the memory location forever!
+ * Save the old app_tag so we can
+ * restore it on completion.
*/
- src->app_tag = 0xDEAD;
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_APPTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->app_tag;
+ }
+ src->app_tag = cpu_to_be16(0xDEAD);
phba->lpfc_injerr_wapp_cnt--;
- phba->lpfc_injerr_lba =
- LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_CHECK;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
/* Drop thru */
- case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
/*
- * For WRITE_STRIP and WRITE_PASS,
- * force the error on data
- * being copied from SLI-Host to SLI-Port.
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
*/
+ /* DEAD will be the apptag on the wire */
*apptag = 0xDEAD;
phba->lpfc_injerr_wapp_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_INIT;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "0812 BLKGRD: Injecting apptag error: "
+ "0813 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
- case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
/*
- * For WRITE_INSERT, force the
- * error to be sent on the wire. It should be
- * detected by the Target.
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
*/
- /* DEAD will be the apptag on the wire */
*apptag = 0xDEAD;
phba->lpfc_injerr_wapp_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_TGT;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "0813 BLKGRD: Injecting apptag error: "
+ "0812 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
}
@@ -1488,11 +1588,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (phba->lpfc_injerr_rapp_cnt) {
switch (op) {
case SCSI_PROT_READ_INSERT:
- /*
- * For READ_INSERT, it doesn't make sense
- * to change the apptag.
- */
- break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_READ_PASS:
/*
@@ -1502,7 +1597,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
*/
*apptag = 0xDEAD;
phba->lpfc_injerr_rapp_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ if (phba->lpfc_injerr_rapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1519,57 +1620,51 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (phba->lpfc_injerr_wgrd_cnt) {
switch (op) {
case SCSI_PROT_WRITE_PASS:
- if (blockoff && src) {
- /* Insert error in middle of the IO */
-
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "0815 BLKGRD: Injecting guard error: "
- "write lba x%lx + x%x oldgrdTag x%x\n",
- (unsigned long)lba, blockoff,
- src->guard_tag);
-
- /*
- * NOTE, this will change guard tag in
- * the memory location forever!
- */
- src->guard_tag = 0xDEAD;
- phba->lpfc_injerr_wgrd_cnt--;
- phba->lpfc_injerr_lba =
- LPFC_INJERR_LBA_OFF;
- rc = BG_ERR_CHECK;
- break;
- }
+ rc = BG_ERR_CHECK;
/* Drop thru */
- case SCSI_PROT_WRITE_STRIP:
+
+ case SCSI_PROT_WRITE_INSERT:
/*
- * For WRITE_STRIP and WRITE_PASS,
- * force the error on data
- * being copied from SLI-Host to SLI-Port.
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
*/
phba->lpfc_injerr_wgrd_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
- rc = BG_ERR_SWAP;
+ rc |= BG_ERR_TGT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "0816 BLKGRD: Injecting guard error: "
+ "0817 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
- case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
/*
- * For WRITE_INSERT, force the
- * error to be sent on the wire. It should be
- * detected by the Target.
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
*/
phba->lpfc_injerr_wgrd_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
- rc = BG_ERR_SWAP;
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "0817 BLKGRD: Injecting guard error: "
+ "0816 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
}
@@ -1577,11 +1672,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (phba->lpfc_injerr_rgrd_cnt) {
switch (op) {
case SCSI_PROT_READ_INSERT:
- /*
- * For READ_INSERT, it doesn't make sense
- * to change the guard tag.
- */
- break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_READ_PASS:
/*
@@ -1589,11 +1679,16 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* error on data being read off the wire. It
* should force an IO error to the driver.
*/
- *apptag = 0xDEAD;
phba->lpfc_injerr_rgrd_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ if (phba->lpfc_injerr_rgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
- rc = BG_ERR_SWAP;
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1629,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CSUM_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CSUM_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
break;
case SCSI_PROT_NORMAL:
@@ -1658,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CRC_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CRC_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_NORMAL:
@@ -1710,20 +1805,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CRC_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CSUM;
*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CRC_OUT_CRC;
- *rxop = BG_OP_IN_CRC_OUT_CRC;
+ *rxop = BG_OP_IN_CSUM_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CSUM;
break;
case SCSI_PROT_NORMAL:
@@ -1735,20 +1830,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CSUM;
*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CSUM_OUT_CRC;
- *rxop = BG_OP_IN_CRC_OUT_CSUM;
+ *rxop = BG_OP_IN_CSUM_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CSUM;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CSUM_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
break;
case SCSI_PROT_NORMAL:
@@ -1817,11 +1912,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
if (rc) {
- if (rc == BG_ERR_SWAP)
+ if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
- if (rc == BG_ERR_CHECK)
+ if (rc & BG_ERR_CHECK)
checking = 0;
}
#endif
@@ -1964,11 +2059,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
if (rc) {
- if (rc == BG_ERR_SWAP)
+ if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
- if (rc == BG_ERR_CHECK)
+ if (rc & BG_ERR_CHECK)
checking = 0;
}
#endif
@@ -2172,11 +2267,11 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
if (rc) {
- if (rc == BG_ERR_SWAP)
+ if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
- if (rc == BG_ERR_CHECK)
+ if (rc & BG_ERR_CHECK)
checking = 0;
}
#endif
@@ -2312,11 +2407,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
if (rc) {
- if (rc == BG_ERR_SWAP)
+ if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
- if (rc == BG_ERR_CHECK)
+ if (rc & BG_ERR_CHECK)
checking = 0;
}
#endif
@@ -2788,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
/* No error was reported - problem in FW? */
cmd->result = ScsiResult(DID_ERROR, 0);
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9057 BLKGRD: no errors reported!\n");
+ "9057 BLKGRD: Unknown error reported!\n");
}
out:
@@ -3460,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* pick up SLI4 exhange busy status from HBA */
lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@@ -4061,15 +4187,6 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = err;
goto out_fail_command;
}
- /*
- * Do not let the mid-layer retry I/O too fast. If an I/O is retried
- * without waiting a bit then indicate that the device is busy.
- */
- if (cmnd->retries &&
- time_before(jiffies, (cmnd->jiffies_at_alloc +
- msecs_to_jiffies(LPFC_RETRY_PAUSE *
- cmnd->retries))))
- return SCSI_MLQUEUE_DEVICE_BUSY;
ndlp = rdata->pnode;
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
@@ -4119,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
- "str=%s\n",
- cmnd->cmnd[0], scsi_get_prot_op(cmnd),
- dif_op_str[scsi_get_prot_op(cmnd)]);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x\n",
- cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
- cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
- cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
- cmnd->cmnd[9]);
+ "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
+ "guard=%s\n", cmnd->cmnd[0],
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
- "count %u\n",
+ "cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request));
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
- "count %u cmd=%p\n",
+ "cnt %u, wpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
- cmnd);
+ (cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9038 BLKGRD: rcvd unprotected cmd:"
- "%02x op:%02x str=%s\n",
- cmnd->cmnd[0], scsi_get_prot_op(cmnd),
- dif_op_str[scsi_get_prot_op(cmnd)]);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9039 BLKGRD: CDB: %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x %02x\n",
- cmnd->cmnd[0], cmnd->cmnd[1],
- cmnd->cmnd[2], cmnd->cmnd[3],
- cmnd->cmnd[4], cmnd->cmnd[5],
- cmnd->cmnd[6], cmnd->cmnd[7],
- cmnd->cmnd[8], cmnd->cmnd[9]);
+ "9038 BLKGRD: rcvd unprotected cmd:"
+ "%02x op=%s guard=%s\n", cmnd->cmnd[0],
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
- "count %u\n",
+ "cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request));
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9041 dbg: WRITE @ sector %llu, "
- "count %u cmd=%p\n",
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request), cmnd);
- else
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9042 dbg: parser not implemented\n");
+ "9041 dbg: WRITE @ sector %llu, "
+ "cnt %u, wpt %d\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 9075a08..21a2ffe 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -150,9 +150,18 @@ struct lpfc_scsi_buf {
struct lpfc_iocbq cur_iocbq;
wait_queue_head_t *waitq;
unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* Used to restore any changes to protection data for error injection */
+ void *prot_data_segment;
+ uint32_t prot_data;
+ uint32_t prot_data_type;
+#define LPFC_INJERR_REFTAG 1
+#define LPFC_INJERR_APPTAG 2
+#define LPFC_INJERR_GUARD 3
+#endif
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
-#define LPFC_RETRY_PAUSE 300
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e0e4d8d..dbaf5b9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5578,8 +5578,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
for (i = 0; i < count; i++)
phba->sli4_hba.rpi_ids[i] = base + i;
- lpfc_sli4_node_prep(phba);
-
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -5613,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
rc = -ENOMEM;
goto free_vpi_ids;
}
+ phba->sli4_hba.max_cfg_param.xri_used = 0;
+ phba->sli4_hba.xri_count = 0;
phba->sli4_hba.xri_ids = kzalloc(count *
sizeof(uint16_t),
GFP_KERNEL);
@@ -6147,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_free_mbox;
}
+ lpfc_sli4_node_prep(phba);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -7251,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
out_not_finished:
spin_lock_irqsave(&phba->hbalock, iflags);
- mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
- __lpfc_mbox_cmpl_put(phba, mboxq);
- /* Release the token */
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- phba->sli.mbox_active = NULL;
+ if (phba->sli.mbox_active) {
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ /* Release the token */
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_NOT_FINISHED;
@@ -7743,6 +7746,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
*pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
*pcmd == ELS_CMD_PLOGI)) {
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
@@ -8385,6 +8389,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
struct lpfc_vport *vport;
+ uint32_t ext_status = 0;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8396,12 +8401,20 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
- "vpi %d rpi %d xri x%x status 0x%x\n",
+ "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
ndlp->vport->vpi, ndlp->nlp_rpi,
bf_get(lpfc_wcqe_xa_xri, axri),
- bf_get(lpfc_wcqe_xa_status, axri));
+ bf_get(lpfc_wcqe_xa_status, axri),
+ axri->parameter);
- if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+ /*
+ * Catch the ABTS protocol failure case. Older OCe FW releases returned
+ * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+ * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+ */
+ ext_status = axri->parameter & WCQE_PARAM_MASK;
+ if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+ ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
lpfc_sli_abts_recover_port(vport, ndlp);
}
@@ -9807,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
unsigned long timeout;
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
- spin_unlock_irq(&phba->hbalock);
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- spin_lock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
@@ -9831,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
*/
break;
}
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
+
lpfc_sli_mbox_sys_flush(phba);
}
@@ -13272,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, index;
uint32_t mbox_tmo;
- uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
uint16_t xritag_start = 0, lxri = 0;
struct lpfc_rsrc_blks *rsrc_blk;
int cnt, ttl_cnt, rc = 0;
@@ -13294,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
cnt = 0;
ttl_cnt = 0;
+ post_els_xri_cnt = els_xri_cnt;
list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
list) {
rsrc_start = rsrc_blk->rsrc_start;
@@ -13303,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
"3014 Working ELS Extent start %d, cnt %d\n",
rsrc_start, rsrc_size);
- loop_cnt = min(els_xri_cnt, rsrc_size);
- if (ttl_cnt + loop_cnt >= els_xri_cnt) {
- loop_cnt = els_xri_cnt - ttl_cnt;
- ttl_cnt = els_xri_cnt;
- }
+ loop_cnt = min(post_els_xri_cnt, rsrc_size);
+ if (loop_cnt < post_els_xri_cnt) {
+ post_els_xri_cnt -= loop_cnt;
+ ttl_cnt += loop_cnt;
+ } else
+ ttl_cnt += post_els_xri_cnt;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14203,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
* field and RX_ID from ABTS for RX_ID field.
*/
bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
- * XRI_TAG and RX_ID fields.
+ * XRI_TAG field.
*/
bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
}
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
@@ -15042,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
LPFC_MBOXQ_t *mboxq;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+ phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f2a2602..25cefc2 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.29"
+#define LPFC_DRIVER_VERSION "8.3.30"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 5e69f46..8a59a77 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
return;
/* eat the loginfos associated with task aborts */
- if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
0x31140000 || log_info == 0x31130000))
return;
@@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
{
int i = 0;
char desc[16];
- u8 revision;
u32 iounit_pg1_flags;
u32 bios_version;
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
"ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
@@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF,
- revision,
+ ioc->pdev->revision,
(bios_version & 0xFF000000) >> 24,
(bios_version & 0x00FF0000) >> 16,
(bios_version & 0x0000FF00) >> 8,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 7fceb89..3b9a28e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg)
{
struct mpt2_ioctl_iocinfo karg;
struct MPT2SAS_ADAPTER *ioc;
- u8 revision;
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
@@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg)
karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
if (ioc->pfacts)
karg.port_number = ioc->pfacts[0].PortNumber;
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
- karg.hw_rev = revision;
+ karg.hw_rev = ioc->pdev->revision;
karg.pci_id = ioc->pdev->device;
karg.subsystem_device = ioc->pdev->subsystem_device;
karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 3619f6e..9d82ee5 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2093,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct ata_task_resp *resp ;
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
+ unsigned long flags;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
@@ -2382,26 +2383,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_DEV_NO_RESPONSE;
break;
}
- spin_lock_irq(&t->task_state_lock);
+ spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else if (t->uldd_task) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irq(&pm8001_ha->lock);
@@ -2423,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
+ unsigned long flags;
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
@@ -2593,26 +2595,26 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irq(&t->task_state_lock);
+ spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else if (t->uldd_task) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
- spin_unlock_irq(&t->task_state_lock);
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irq(&pm8001_ha->lock);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7c9f28b..fc542a9 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -431,9 +431,9 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
mbox_sts_entry->out_mbox[6]));
if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
- status = QLA_SUCCESS;
+ status = ISCSI_PING_SUCCESS;
else
- status = QLA_ERROR;
+ status = mbox_sts_entry->out_mbox[6];
data_size = sizeof(mbox_sts_entry->out_mbox);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 3d94194..ee47820 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -834,7 +834,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
struct scsi_qla_host *ha = to_qla_host(shost);
- struct iscsi_cls_host *ihost = shost_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
qla4xxx_get_firmware_state(ha);
@@ -859,7 +859,7 @@ static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
struct scsi_qla_host *ha = to_qla_host(shost);
- struct iscsi_cls_host *ihost = shost_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
uint32_t state = ISCSI_PORT_STATE_DOWN;
if (test_bit(AF_LINK_UP, &ha->flags))
@@ -3445,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
int status = 0;
- uint8_t revision_id;
unsigned long mem_base, mem_len, db_base, db_len;
struct pci_dev *pdev = ha->pdev;
@@ -3457,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
goto iospace_error_exit;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
- __func__, revision_id));
- ha->revision_id = revision_id;
+ __func__, pdev->revision));
+ ha->revision_id = pdev->revision;
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index ede9af9..97b30c1 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k15"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5918561..182d5a5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
+#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
@@ -186,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int scsi_debug_lbpu = DEF_LBPU;
static unsigned int scsi_debug_lbpws = DEF_LBPWS;
static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
+static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
@@ -775,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr)
return 0x3c;
}
-/* Thin provisioning VPD page (SBC-3) */
+/* Logical block provisioning VPD page (SBC-3) */
static int inquiry_evpd_b2(unsigned char *arr)
{
- memset(arr, 0, 0x8);
+ memset(arr, 0, 0x4);
arr[0] = 0; /* threshold exponent */
if (scsi_debug_lbpu)
@@ -790,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr)
if (scsi_debug_lbpws10)
arr[1] |= 1 << 5;
- return 0x8;
+ if (scsi_debug_lbprz)
+ arr[1] |= 1 << 2;
+
+ return 0x4;
}
#define SDEBUG_LONG_INQ_SZ 96
@@ -1071,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[13] = scsi_debug_physblk_exp & 0xf;
arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
- if (scsi_debug_lbp())
+ if (scsi_debug_lbp()) {
arr[14] |= 0x80; /* LBPME */
+ if (scsi_debug_lbprz)
+ arr[14] |= 0x40; /* LBPRZ */
+ }
arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -2046,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len)
block = lba + alignment;
rem = do_div(block, granularity);
- if (rem == 0 && lba + granularity <= end &&
- block < map_size)
+ if (rem == 0 && lba + granularity <= end && block < map_size) {
clear_bit(block, map_storep);
-
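+ /* with lbprz set, also zero the backing store so unmapped blocks read back as zeros */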
+ if (scsi_debug_lbprz)
+ memset(fake_storep +
+ block * scsi_debug_sector_size, 0,
+ scsi_debug_sector_size);
+ }
lba += granularity - rem;
}
}
@@ -2731,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
@@ -2772,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac3173..1cf640e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1486,7 +1486,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
struct iscsi_uevent *ev;
int len = NLMSG_SPACE(sizeof(*ev) + data_size);
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = alloc_skb(len, GFP_NOIO);
if (!skb) {
printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
host_no, code);
@@ -1504,7 +1504,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
if (data_size)
memcpy((char *)ev + sizeof(*ev), data, data_size);
- iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
}
EXPORT_SYMBOL_GPL(iscsi_post_host_event);
@@ -1517,7 +1517,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
struct iscsi_uevent *ev;
int len = NLMSG_SPACE(sizeof(*ev) + data_size);
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = alloc_skb(len, GFP_NOIO);
if (!skb) {
printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
return;
@@ -1533,7 +1533,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
ev->r.ping_comp.data_size = data_size;
memcpy((char *)ev + sizeof(*ev), data, data_size);
- iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
}
EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 09e3df4..5ba5c2a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -664,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
}
/**
- * sd_init_command - build a scsi (read or write) command from
+ * sd_prep_fn - build a scsi (read or write) command from
* information in the request structure.
* @SCpnt: pointer to mid-level's per scsi command structure that
* contains request and into which the scsi command is written
@@ -711,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
ret = BLKPREP_KILL;
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
- "sd_init_command: block=%llu, "
+ "sd_prep_fn: block=%llu, "
"count=%d\n",
(unsigned long long)block,
this_count));
@@ -1212,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
retval = -ENODEV;
if (scsi_block_when_processing_errors(sdp)) {
+ retval = scsi_autopm_get_device(sdp);
+ if (retval)
+ goto out;
+
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
sshdr);
+ scsi_autopm_put_device(sdp);
}
/* failed to execute TUR, assume media not present */
@@ -2644,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
* (e.g. /dev/sda). More precisely it is the block device major
* and minor number that is chosen here.
*
- * Assume sd_attach is not re-entrant (for time being)
- * Also think about sd_attach() and sd_remove() running coincidentally.
+ * Assume sd_probe is not re-entrant (for time being)
+ * Also think about sd_probe() and sd_remove() running coincidentally.
**/
static int sd_probe(struct device *dev)
{
@@ -2660,7 +2665,7 @@ static int sd_probe(struct device *dev)
goto out;
SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
- "sd_attach\n"));
+ "sd_probe\n"));
error = -ENOMEM;
sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a15f691..e41998c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1105,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
STp->drv_buffer));
}
STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
+ if (!STp->drv_buffer && STp->immediate_filemark) {
+ printk(KERN_WARNING
+ "%s: non-buffered tape: disabling writing immediate filemarks\n",
+ name);
+ STp->immediate_filemark = 0;
+ }
}
st_release_request(SRpnt);
SRpnt = NULL;
@@ -1313,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_FILEMARKS;
+ if (STp->immediate_filemark)
+ cmd[1] = 1;
cmd[4] = 1 + STp->two_fm;
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -2180,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char
name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
STp->scsi2_logical);
printk(KERN_INFO
- "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate,
- STp->sili);
+ "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
+ name, STm->sysv, STp->immediate, STp->sili,
+ STp->immediate_filemark);
printk(KERN_INFO "%s: debugging: %d\n",
name, debugging);
}
@@ -2223,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
STp->immediate = (options & MT_ST_NOWAIT) != 0;
+ STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
STm->sysv = (options & MT_ST_SYSV) != 0;
STp->sili = (options & MT_ST_SILI) != 0;
DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
@@ -2254,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
STp->scsi2_logical = value;
if ((options & MT_ST_NOWAIT) != 0)
STp->immediate = value;
+ if ((options & MT_ST_NOWAIT_EOF) != 0)
+ STp->immediate_filemark = value;
if ((options & MT_ST_SYSV) != 0)
STm->sysv = value;
if ((options & MT_ST_SILI) != 0)
@@ -2713,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[0] = WRITE_FILEMARKS;
if (cmd_in == MTWSM)
cmd[1] = 2;
- if (cmd_in == MTWEOFI)
+ if (cmd_in == MTWEOFI ||
+ (cmd_in == MTWEOF && STp->immediate_filemark))
cmd[1] |= 1;
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
@@ -4092,6 +4105,7 @@ static int st_probe(struct device *dev)
tpnt->scsi2_logical = ST_SCSI2LOGICAL;
tpnt->sili = ST_SILI;
tpnt->immediate = ST_NOWAIT;
+ tpnt->immediate_filemark = 0;
tpnt->default_drvbuffer = 0xff; /* No forced buffering */
tpnt->partition = 0;
tpnt->new_partition = 0;
@@ -4477,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
options |= STm->sysv ? MT_ST_SYSV : 0;
options |= STp->immediate ? MT_ST_NOWAIT : 0;
+ options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
options |= STp->sili ? MT_ST_SILI : 0;
l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f91a67c..ea35632 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -120,6 +120,7 @@ struct scsi_tape {
unsigned char c_algo; /* compression algorithm */
unsigned char pos_unknown; /* after reset position unknown */
unsigned char sili; /* use SILI when reading in variable b mode */
+ unsigned char immediate_filemark; /* write filemark immediately */
int tape_type;
int long_timeout; /* timeout for commands known to take long time */
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644
index 0000000..8f27f9d
--- /dev/null
+++ b/drivers/scsi/ufs/Kconfig
@@ -0,0 +1,49 @@
+#
+# Kernel configuration file for the UFS Host Controller
+#
+# This code is based on drivers/scsi/ufs/Kconfig
+# Copyright (C) 2011 Samsung India Software Operations
+#
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage host controller driver"
+ depends on PCI && SCSI
+ ---help---
+ This is a generic driver which supports PCIe UFS Host controllers.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644
index 0000000..adf7895
--- /dev/null
+++ b/drivers/scsi/ufs/Makefile
@@ -0,0 +1,2 @@
+# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644
index 0000000..b207529
--- /dev/null
+++ b/drivers/scsi/ufs/ufs.h
@@ -0,0 +1,207 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufs.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#define MAX_CDB_SIZE 16
+
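+/* Pack four byte-wide fields into one UPIU header double word; byte3 is the most significant byte */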
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+ ((byte3 << 24) | (byte2 << 16) |\
+ (byte1 << 8) | (byte0))
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UPIU_TRANSACTION_COMMAND = 0x01,
+ UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UPIU_TRANSACTION_QUERY_REQ = 0x26,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UPIU_TRANSACTION_NOP_IN = 0x20,
+ UPIU_TRANSACTION_RESPONSE = 0x21,
+ UPIU_TRANSACTION_DATA_IN = 0x22,
+ UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UPIU_TRANSACTION_READY_XFER = 0x31,
+ UPIU_TRANSACTION_QUERY_RSP = 0x36,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_WRITE = 0x20,
+ UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UPIU_TASK_ATTR_ORDERED = 0x01,
+ UPIU_TASK_ATTR_HEADQ = 0x02,
+ UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum {
+ UPIU_QUERY_OPCODE_NOP = 0x0,
+ UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+enum {
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+};
+
+/* Task management service response */
+enum {
+ UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ u32 dword_0;
+ u32 dword_1;
+ u32 dword_2;
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ struct utp_upiu_header header;
+ u32 exp_data_transfer_len;
+ u8 cdb[MAX_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_rsp - Response UPIU structure
+ * @header: UPIU header DW-0 to DW-2
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ u32 residual_transfer_count;
+ u32 reserved[4];
+ u16 sense_data_len;
+ u8 sense_data[18];
+};
+
+/**
+ * struct utp_upiu_task_req - Task request UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @input_param1: Input parameter 1 DW-3
+ * @input_param2: Input parameter 2 DW-4
+ * @input_param3: Input parameter 3 DW-5
+ * @reserved: Reserved double words DW-6 to DW-7
+ */
+struct utp_upiu_task_req {
+ struct utp_upiu_header header;
+ u32 input_param1;
+ u32 input_param2;
+ u32 input_param3;
+ u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_task_rsp - Task Management Response UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @output_param1: Output parameter 1 DW-3
+ * @output_param2: Output parameter 2 DW-4
+ * @reserved: Reserved double words DW-5 to DW-7
+ */
+struct utp_upiu_task_rsp {
+ struct utp_upiu_header header;
+ u32 output_param1;
+ u32 output_param2;
+ u32 reserved[3];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644
index 0000000..52b96e8
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -0,0 +1,1978 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.1"
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_MAX_LUNS = 8,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+};
+
+/* Interrupt configuration options */
+enum {
+ UFSHCD_INT_DISABLE,
+ UFSHCD_INT_ENABLE,
+ UFSHCD_INT_CLEAR,
+};
+
+/* Interrupt aggregation options */
+enum {
+ INT_AGGR_RESET,
+ INT_AGGR_CONFIG,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @pdev: PCI device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba - Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ return readl(hba->mmio_base + REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ * the host controller
+ * @reg_hcs - host controller status register value
+ *
+ * Returns 0 if device present, non-zero if no device detected
+ */
+static inline int ufshcd_is_device_present(u32 reg_hcs)
+{
+ return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrb: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return task_req_descp->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ *
+ * Returns the free slot number, or the maximum number of task management
+ * request slots if the task management queue is full
+ */
+static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+{
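+ /* find_first_zero_bit() returns hba->nutmrs when all task management slots are in use */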
+ return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
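+ /* a slot in UTRLCLR is cleared by writing 0 to its bit, so keep all other bits set */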
+ writel(~(1 << pos),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns 0 on success, a positive value on failure
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ /*
+ * The mask 0xFF is for the following HCS register bits
+ * Bit Description
+ * 0 Device Present
+ * 1 UTRLRDY
+ * 2 UTMRLRDY
+ * 3 UCRDY
+ * 4 HEI
+ * 5 DEI
+ * 6-7 reserved
+ */
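+ /* drop the Device Present bit; the XOR yields 0 only when UTRLRDY, UTMRLRDY and UCRDY are set and HEI/DEI are clear */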
+ return (((reg) & (0xFF)) >> 1) ^ (0x07);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_free_hba_memory - Free allocated memory for LRB, request
+ * and task lists
+ * @hba: Pointer to adapter instance
+ */
+static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ kfree(hba->lrb);
+
+ if (hba->utmrdl_base_addr) {
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
+ }
+
+ if (hba->utrdl_base_addr) {
+ utrdl_size =
+ (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ hba->utrdl_base_addr, hba->utrdl_dma_addr);
+ }
+
+ if (hba->ucdl_base_addr) {
+ ucdl_size =
+ (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ hba->ucdl_base_addr, hba->ucdl_dma_addr);
+ }
+}
+
+/**
+ * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function checks the response UPIU for valid transaction type in
+ * response field
+ * Returns 0 on success, non-zero on failure
+ */
+static inline int
+ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
+ UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/**
+ * ufshcd_config_int_aggr - Configure interrupt aggregation values.
+ * Currently there is no use case where interrupt aggregation needs to
+ * be configured dynamically. So to configure interrupt aggregation,
+ * the INT_AGGR_COUNTER_THRESHOLD_VALUE and INT_AGGR_TIMEOUT_VALUE
+ * #defines are used.
+ * @hba: per adapter instance
+ * @option: Interrupt aggregation option
+ */
+static inline void
+ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+{
+ switch (option) {
+ case INT_AGGR_RESET:
+ writel((INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET),
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+ break;
+ case INT_AGGR_CONFIG:
+ writel((INT_AGGR_ENABLE |
+ INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THRESHOLD_VALUE |
+ INT_AGGR_TIMEOUT_VALUE),
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+ break;
+ }
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers
+ * When the run-stop registers are set to 1, the host controller
+ * can start processing the pending requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ (hba->mmio_base +
+ REG_UTP_TASK_REQ_LIST_RUN_STOP));
+ writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
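+ /* HCE (bit 0 of the controller enable register) reads 1 while the controller is active */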
+ return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
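+ /* mark the slot as outstanding and ring the transfer request doorbell for this tag */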
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ writel((1 << task_tag),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrb - pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer) {
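+ /* sense_data_len is big endian in the response UPIU; cap the copy at SCSI_SENSE_BUFFERSIZE */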
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sense_data,
+ min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ }
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities =
+ readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_send_uic_command - Send UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_command: UIC command
+ */
+static inline void
+ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
+{
+ /* Write Args */
+ writel(uic_cmnd->argument1,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
+ writel(uic_cmnd->argument2,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
+ writel(uic_cmnd->argument3,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
+
+ /* Write UIC Cmd */
+ writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
+ (hba->mmio_base + REG_UIC_COMMAND));
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @lrbp - pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
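+ /* the PRDT data byte count field is zero based, hence the length - 1 below */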
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_int_config - enable/disable interrupts
+ * @hba: per adapter instance
+ * @option: interrupt option
+ */
+static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
+{
+ switch (option) {
+ case UFSHCD_INT_ENABLE:
+ writel(hba->int_enable_mask,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ break;
+ case UFSHCD_INT_DISABLE:
+ if (hba->ufs_version == UFSHCI_VERSION_10)
+ writel(INTERRUPT_DISABLE_MASK_10,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ else
+ writel(INTERRUPT_DISABLE_MASK_11,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ break;
+ }
+}
+
+/**
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @lrb - pointer to local reference block
+ */
+static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_transfer_req_desc *req_desc;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ u32 data_direction;
+ u32 upiu_flags;
+
+ ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
+ req_desc = lrbp->utr_descriptor_ptr;
+
+ switch (lrbp->command_type) {
+ case UTP_CMD_TYPE_SCSI:
+ if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 =
+ cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
+
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* command descriptor fields */
+ ucd_cmd_ptr->header.dword_0 =
+ cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
+ upiu_flags,
+ lrbp->lun,
+ lrbp->task_tag));
+ ucd_cmd_ptr->header.dword_1 =
+ cpu_to_be32(
+ UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
+ 0,
+ 0,
+ 0));
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_cmd_ptr->header.dword_2 = 0;
+
+ ucd_cmd_ptr->exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->transfersize);
+
+ memcpy(ucd_cmd_ptr->cdb,
+ lrbp->cmd->cmnd,
+ (min_t(unsigned short,
+ lrbp->cmd->cmd_len,
+ MAX_CDB_SIZE)));
+ break;
+ case UTP_CMD_TYPE_DEV_MANAGE:
+ /* For query function implementation */
+ break;
+ case UTP_CMD_TYPE_UFS:
+ /* For UFS native command implementation */
+ break;
+ } /* end of switch */
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @cmd: command from SCSI Midlayer
+ * @done: call back function
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ lrbp = &hba->lrb[tag];
+
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = cmd->device->lun;
+
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+ /* form UPIU before issuing the command */
+ ufshcd_compose_upiu(lrbp);
+ err = ufshcd_map_sg(lrbp);
+ if (err)
+ goto out;
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+ /*
+ * UFSHCI requires UTP command descriptor to be 128 byte aligned.
+ * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
+ * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
+ * be aligned to 128 bytes as well
+ */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ ufshcd_free_hba_memory(hba);
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+ /* Response upiu and prdt offset should be in double words */
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].ucd_cmd_ptr =
+ (struct utp_upiu_cmd *)(cmd_descp + i);
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * The UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command *uic_cmd;
+ unsigned long flags;
+
+ /* check if controller is ready to accept UIC commands */
+ if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
+ UIC_COMMAND_READY) == 0x0) {
+ dev_err(&hba->pdev->dev,
+ "Controller not ready"
+ " to accept UIC commands\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* form UIC command */
+ uic_cmd = &hba->active_uic_cmd;
+ uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
+ uic_cmd->argument1 = 0;
+ uic_cmd->argument2 = 0;
+ uic_cmd->argument3 = 0;
+
+ /* enable UIC related interrupts */
+ hba->int_enable_mask |= UIC_COMMAND_COMPL;
+ ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+ /* sending UIC commands to controller */
+ ufshcd_send_uic_command(hba, uic_cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Check if device is present
+ * 2. Configure run-stop-registers
+ * 3. Enable required interrupts
+ * 4. Configure interrupt aggregation
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* check if device present */
+ reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
+ if (ufshcd_is_device_present(reg)) {
+ dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * UCRDY, UTMRLRDY and UTRLRDY bits must be 1
+ * DEI, HEI bits must be 0
+ */
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(&hba->pdev->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+ /* Enable required interrupts */
+ hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
+ UIC_ERROR |
+ UTP_TASK_REQ_COMPL |
+ DEVICE_FATAL_ERROR |
+ CONTROLLER_FATAL_ERROR |
+ SYSTEM_BUS_FATAL_ERROR);
+ ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+ /* Configure interrupt aggregation */
+ ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ scsi_unblock_requests(hba->host);
+
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ scsi_scan_host(hba->host);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int retry;
+
+ /*
+ * msleep of 1 and 5 used in this function might result in msleep(20),
+ * but it was necessary to send the UFS FPGA to reset mode during
+ * development and testing of this driver. msleep can be changed to
+ * mdelay and retry count can be reduced based on the controller.
+ */
+ if (!ufshcd_is_hba_active(hba)) {
+
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
+
+ /*
+ * This delay is based on the testing done with UFS host
+ * controller FPGA. The delay can be changed based on the
+ * host controller used.
+ */
+ msleep(5);
+ }
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction might be read back.
+ * This delay can be changed based on the controller.
+ */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(&hba->pdev->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+ return 0;
+}
+
+/**
+ * ufshcd_initialize_hba - start the initialization process
+ * @hba: per adapter instance
+ *
+ * 1. Enable the controller via ufshcd_hba_enable.
+ * 2. Program the Transfer Request List Address with the starting address of
+ * UTRDL.
+ * 3. Program the Task Management Request List Address with starting address
+ * of UTMRDL.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int ufshcd_initialize_hba(struct ufs_hba *hba)
+{
+ if (ufshcd_hba_enable(hba))
+ return -EIO;
+
+ /* Configure UTRL and UTMRL base address registers */
+ writel(lower_32_bits(hba->utrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+ writel(upper_32_bits(hba->utrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
+ writel(lower_32_bits(hba->utmrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
+ writel(upper_32_bits(hba->utmrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
+
+ /* Initialize unipro link startup procedure */
+ return ufshcd_dme_link_startup(hba);
+}
+
+/**
+ * ufshcd_do_reset - reset the host controller
+ * @hba: per adapter instance
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_do_reset(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+ int tag;
+
+ /* block commands from midlayer */
+ scsi_block_requests(hba->host);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+ /* send controller to reset state */
+ ufshcd_hba_stop(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* abort outstanding commands */
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ if (test_bit(tag, &hba->outstanding_reqs)) {
+ lrbp = &hba->lrb[tag];
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd->result = DID_RESET << 16;
+ lrbp->cmd->scsi_done(lrbp->cmd);
+ lrbp->cmd = NULL;
+ }
+ }
+
+ /* clear outstanding request/task bit maps */
+ hba->outstanding_reqs = 0;
+ hba->outstanding_tasks = 0;
+
+ /* start the initialization process */
+ if (ufshcd_initialize_hba(hba)) {
+ dev_err(&hba->pdev->dev,
+ "Reset: Controller initialization failed\n");
+ return FAILED;
+ }
+ return SUCCESS;
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ sdev->tagged_supported = 1;
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+ scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+
+ /*
+ * Inform SCSI Midlayer that the LUN queue depth is the same as the
+ * controller queue depth. If a LUN queue depth is less than the
+ * controller queue depth and if the LUN reports
+ * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
+ * with scsi_adjust_queue_depth.
+ */
+ scsi_activate_tcq(sdev, hba->nutrs);
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ scsi_deactivate_tcq(sdev, hba->nutrs);
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
+ task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+
+ if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
+ task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
+ task_result = FAILED;
+ } else {
+ task_result = FAILED;
+ dev_err(&hba->pdev->dev,
+ "trc: Invalid ocs = %x\n", ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return task_result;
+}
+
+/**
+ * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
+ * SAM_STAT_TASK_SET_FULL SCSI command status.
+ * @cmd: pointer to SCSI command
+ */
+static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
+{
+ struct ufs_hba *hba;
+ int i;
+ int lun_qdepth = 0;
+
+ hba = shost_priv(cmd->device->host);
+
+ /*
+ * LUN queue depth can be obtained by counting outstanding commands
+ * on the LUN.
+ */
+ for (i = 0; i < hba->nutrs; i++) {
+ if (test_bit(i, &hba->outstanding_reqs)) {
+
+ /*
+ * Check if the outstanding command belongs
+ * to the LUN which reported SAM_STAT_TASK_SET_FULL.
+ */
+ if (cmd->device->lun == hba->lrb[i].lun)
+ lun_qdepth++;
+ }
+ }
+
+ /*
+ * LUN queue depth will be total outstanding commands, except the
+ * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
+ */
+ scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrb: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ ufshcd_copy_sense_data(lrbp);
+ break;
+ case SAM_STAT_BUSY:
+ result |= SAM_STAT_BUSY;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+
+ /*
+ * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
+ * depth needs to be adjusted to the exact number of
+ * outstanding commands the LUN can handle at any given time.
+ */
+ ufshcd_adjust_lun_qdepth(lrbp->cmd);
+ result |= SAM_STAT_TASK_SET_FULL;
+ break;
+ case SAM_STAT_TASK_ABORTED:
+ result |= SAM_STAT_TASK_ABORTED;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrb: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+
+ /* check if the returned transfer response is valid */
+ result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
+ if (result) {
+ dev_err(&hba->pdev->dev,
+ "Invalid response = %x\n", result);
+ break;
+ }
+
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(&hba->pdev->dev,
+ "OCS error from controller = %x\n", ocs);
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrb;
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+ int result;
+ int index;
+
+ lrb = hba->lrb;
+ tr_doorbell =
+ readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
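+
+ /* bits set in outstanding_reqs but cleared in the doorbell are completed */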
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ for (index = 0; index < hba->nutrs; index++) {
+ if (test_bit(index, &completed_reqs)) {
+
+ result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
+
+ if (lrb[index].cmd) {
+ scsi_dma_unmap(lrb[index].cmd);
+ lrb[index].cmd->result = result;
+ lrb[index].cmd->scsi_done(lrb[index].cmd);
+
+ /* Mark completed command as NULL in LRB */
+ lrb[index].cmd = NULL;
+ }
+ } /* end of if */
+ } /* end of for */
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ /* Reset interrupt aggregation counters */
+ ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
+
+/**
+ * ufshcd_uic_cc_handler - handle UIC command completion
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_uic_cc_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+
+ hba = container_of(work, struct ufs_hba, uic_workq);
+
+ if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
+ !(ufshcd_get_uic_cmd_result(hba))) {
+
+ if (ufshcd_make_hba_operational(hba))
+ dev_err(&hba->pdev->dev,
+ "cc: hba not operational state\n");
+ return;
+ }
+}
+
+/**
+ * ufshcd_fatal_err_handler - handle fatal errors
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_fatal_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ hba = container_of(work, struct ufs_hba, feh_workq);
+
+ /* check if reset is already in progress */
+ if (hba->ufshcd_state != UFSHCD_STATE_RESET)
+ ufshcd_do_reset(hba);
+}
+
+/**
+ * ufshcd_err_handler - Check for fatal errors
+ * @hba: per adapter instance
+ */
+static void ufshcd_err_handler(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ goto fatal_eh;
+
+ if (hba->errors & UIC_ERROR) {
+
+ reg = readl(hba->mmio_base +
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ goto fatal_eh;
+ }
+ return;
+fatal_eh:
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ schedule_work(&hba->feh_workq);
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
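+
+ /* task slots whose doorbell bits have cleared have completed */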
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_err_handler(hba);
+
+ if (intr_status & UIC_COMMAND_COMPL)
+ schedule_work(&hba->uic_workq);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
+
+ if (intr_status) {
+ ufshcd_sl_intr(hba, intr_status);
+
+ /* If UFSHCI 1.0 then clear interrupt status register */
+ if (hba->ufs_version == UFSHCI_VERSION_10)
+ writel(intr_status,
+ (hba->mmio_base + REG_INTERRUPT_STATUS));
+ retval = IRQ_HANDLED;
+ }
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ * @tm_function: task management function opcode
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int
+ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp,
+ u8 tm_function)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot = 0;
+ int err;
+
+ host = hba->host;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* If task management queue is full */
+ free_slot = ufshcd_get_tm_free_slot(hba);
+ if (free_slot >= hba->nutmrs) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ dev_err(&hba->pdev->dev, "Task management queue full\n");
+ err = FAILED;
+ goto out;
+ }
+
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+ task_req_upiup->header.dword_0 =
+ cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lrbp->lun, lrbp->task_tag));
+ task_req_upiup->header.dword_1 =
+ cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+
+ task_req_upiup->input_param1 = cpu_to_be32(lrbp->lun);
+ task_req_upiup->input_param2 = cpu_to_be32(lrbp->task_tag);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+ writel((1 << free_slot),
+ (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /* wait until the task management command is completed */
+ err =
+ wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
+ (test_bit(free_slot,
+ &hba->tm_condition) != 0),
+ 60 * HZ);
+ if (!err) {
+ dev_err(&hba->pdev->dev,
+ "Task management command timed-out\n");
+ err = FAILED;
+ goto out;
+ }
+ clear_bit(free_slot, &hba->tm_condition);
+ return ufshcd_task_req_compl(hba, free_slot);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_device_reset - reset device and abort all the pending commands
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
+ if (err)
+ goto out;
+
+ for (pos = 0; pos < hba->nutrs; pos++) {
+ if (test_bit(pos, &hba->outstanding_reqs) &&
+ (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
+
+ /* clear the respective UTRLCLR register bit */
+ ufshcd_utrl_clear(hba, pos);
+
+ clear_bit(pos, &hba->outstanding_reqs);
+
+ if (hba->lrb[pos].cmd) {
+ scsi_dma_unmap(hba->lrb[pos].cmd);
+ hba->lrb[pos].cmd->result =
+ DID_ABORT << 16;
+ hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
+ hba->lrb[pos].cmd = NULL;
+ }
+ }
+ } /* end of for */
+out:
+ return err;
+}
+
+/**
+ * ufshcd_host_reset - Main reset function registered with scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_host_reset(struct scsi_cmnd *cmd)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ return SUCCESS;
+
+ return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* check if command is still pending */
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ err = FAILED;
+ spin_unlock_irqrestore(host->host_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
+ if (err)
+ goto out;
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* clear the respective UTRLCLR register bit */
+ ufshcd_utrl_clear(hba, tag);
+
+ __clear_bit(tag, &hba->outstanding_reqs);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+out:
+ return err;
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_destroy = ufshcd_slave_destroy,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_device_reset,
+ .eh_host_reset_handler = ufshcd_host_reset,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+};
+
+/**
+ * ufshcd_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Block SCSI requests from SCSI midlayer
+ * 2. Change the internal driver state to non operational
+ * 3. Set UTRLRSR and UTMRLRSR bits to zero
+ * 4. Wait until outstanding commands are completed
+ * 5. Set HCE to zero to send the UFS host controller to reset state
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Set HCE to 1, to start the UFS host controller
+ * initialization process
+ * 2. Set UTRLRSR and UTMRLRSR bits to 1
+ * 3. Change the internal driver state to operational
+ * 4. Unblock SCSI requests from SCSI midlayer
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_hba_free - free allocated memory for
+ * host memory space data structures
+ * @hba: per adapter instance
+ */
+static void ufshcd_hba_free(struct ufs_hba *hba)
+{
+ iounmap(hba->mmio_base);
+ ufshcd_free_hba_memory(hba);
+ pci_release_regions(hba->pdev);
+}
+
+/**
+ * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * data structure memory
+ * @pdev: pointer to PCI handle
+ */
+static void ufshcd_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ /* disable interrupts */
+ ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
+ free_irq(pdev->irq, hba);
+
+ ufshcd_hba_stop(hba);
+ ufshcd_hba_free(hba);
+
+ scsi_remove_host(hba->host);
+ scsi_host_put(hba->host);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ int err;
+ u64 dma_mask;
+
+ /*
+ * If controller supports 64 bit addressing mode, then set the DMA
+ * mask to 64-bit, else set the DMA mask to 32-bit
+ */
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
+ dma_mask = DMA_BIT_MASK(64);
+ else
+ dma_mask = DMA_BIT_MASK(32);
+
+ err = pci_set_dma_mask(hba->pdev, dma_mask);
+ if (err)
+ return err;
+
+ err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
+
+ return err;
+}
+
+/**
+ * ufshcd_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int __devinit
+ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_disable;
+ }
+ hba = shost_priv(host);
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ hba->mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!hba->mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ hba->host = host;
+ hba->pdev = pdev;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ goto out_iounmap;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFSHCD_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+ /* Initialize wait queue for task management */
+ init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
+ INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+
+ /* IRQ registration */
+ err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ goto out_lrb_free;
+ }
+
+ /* Enable SCSI tag mapping */
+ err = scsi_init_shared_tag_map(host, host->can_queue);
+ if (err) {
+ dev_err(&pdev->dev, "init shared queue failed\n");
+ goto out_free_irq;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ err = scsi_add_host(host, &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "scsi_add_host failed\n");
+ goto out_free_irq;
+ }
+
+ /* Initialization routine */
+ err = ufshcd_initialize_hba(hba);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_free_irq;
+ }
+
+ return 0;
+
+out_free_irq:
+ free_irq(pdev->irq, hba);
+out_lrb_free:
+ ufshcd_free_hba_memory(hba);
+out_iounmap:
+ iounmap(hba->mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ scsi_host_put(host);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_probe,
+ .remove = __devexit_p(ufshcd_remove),
+ .shutdown = ufshcd_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_suspend,
+ .resume = ufshcd_resume,
+#endif
+};
+
+/**
+ * ufshcd_init - Driver registration routine
+ */
+static int __init ufshcd_init(void)
+{
+ return pci_register_driver(&ufshcd_pci_driver);
+}
+module_init(ufshcd_init);
+
+/**
+ * ufshcd_exit - Driver exit clean-up routine
+ */
+static void __exit ufshcd_exit(void)
+{
+ pci_unregister_driver(&ufshcd_pci_driver);
+}
+module_exit(ufshcd_exit);
+
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
+ "Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644
index 0000000..6e3510f
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci.h
@@ -0,0 +1,376 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshci.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+enum {
+ TASK_REQ_UPIU_SIZE_DWORDS = 8,
+ TASK_RSP_UPIU_SIZE_DWORDS = 8,
+ ALIGNED_UPIU_SIZE = 128,
+};
+
+/* UFSHCI Registers */
+enum {
+ REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_UFS_VERSION = 0x08,
+ REG_CONTROLLER_DEV_ID = 0x10,
+ REG_CONTROLLER_PROD_ID = 0x14,
+ REG_INTERRUPT_STATUS = 0x20,
+ REG_INTERRUPT_ENABLE = 0x24,
+ REG_CONTROLLER_STATUS = 0x30,
+ REG_CONTROLLER_ENABLE = 0x34,
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
+ REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
+ REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
+ REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
+ REG_UIC_ERROR_CODE_DME = 0x48,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+ REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
+ REG_UIC_COMMAND = 0x90,
+ REG_UIC_COMMAND_ARG_1 = 0x94,
+ REG_UIC_COMMAND_ARG_2 = 0x98,
+ REG_UIC_COMMAND_ARG_3 = 0x9C,
+};
+
+/* Controller capability masks */
+enum {
+ MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_64_ADDRESSING_SUPPORT = 0x01000000,
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+};
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
+
+/* Controller UFSHCI version */
+enum {
+ UFSHCI_VERSION_10 = 0x00010000,
+ UFSHCI_VERSION_11 = 0x00010100,
+};
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ * - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ * - Product/Manufacturer ID 14h
+ */
+#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
+
+#define UFS_BIT(x) (1L << (x))
+
+#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
+#define UIC_DME_END_PT_RESET UFS_BIT(1)
+#define UIC_ERROR UFS_BIT(2)
+#define UIC_TEST_MODE UFS_BIT(3)
+#define UIC_POWER_MODE UFS_BIT(4)
+#define UIC_HIBERNATE_EXIT UFS_BIT(5)
+#define UIC_HIBERNATE_ENTER UFS_BIT(6)
+#define UIC_LINK_LOST UFS_BIT(7)
+#define UIC_LINK_STARTUP UFS_BIT(8)
+#define UTP_TASK_REQ_COMPL UFS_BIT(9)
+#define UIC_COMMAND_COMPL UFS_BIT(10)
+#define DEVICE_FATAL_ERROR UFS_BIT(11)
+#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
+#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+
+#define UFSHCD_ERROR_MASK (UIC_ERROR |\
+ DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
+#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
+#define UIC_COMMAND_READY UFS_BIT(3)
+#define HOST_ERROR_INDICATOR UFS_BIT(4)
+#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CONTROLLER_DISABLE 0x0
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR UFS_BIT(31)
+#define UIC_DME_ERROR_CODE_MASK 0x1
+
+#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
+#define INT_AGGR_STATUS_BIT UFS_BIT(20)
+#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
+#define INT_AGGR_ENABLE UFS_BIT(31)
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK 0xFF
+#define GEN_SELECTOR_INDEX_MASK 0xFFFF
+
+#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL 0xFF
+
+#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK 0xFF
+#define GENERIC_ERROR_CODE_MASK 0xFF
+
+/* UIC Commands */
+enum {
+ UIC_CMD_DME_GET = 0x01,
+ UIC_CMD_DME_SET = 0x02,
+ UIC_CMD_DME_PEER_GET = 0x03,
+ UIC_CMD_DME_PEER_SET = 0x04,
+ UIC_CMD_DME_POWERON = 0x10,
+ UIC_CMD_DME_POWEROFF = 0x11,
+ UIC_CMD_DME_ENABLE = 0x12,
+ UIC_CMD_DME_RESET = 0x14,
+ UIC_CMD_DME_END_PT_RST = 0x15,
+ UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UIC_CMD_RESULT_SUCCESS = 0x00,
+ UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UIC_CMD_RESULT_FAILURE = 0x01,
+ UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UIC_CMD_RESULT_BUSY = 0x09,
+ UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT 0xFF
+
+#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8)
+#define INT_AGGR_TIMEOUT_VALUE (0x02)
+
+/* Interrupt disable masks */
+enum {
+ /* Interrupt disable mask for UFSHCI v1.0 */
+ INTERRUPT_DISABLE_MASK_10 = 0xFFFF,
+
+ /* Interrupt disable mask for UFSHCI v1.1 */
+ INTERRUPT_DISABLE_MASK_11 = 0x0,
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UTP_CMD_TYPE_SCSI = 0x0,
+ UTP_CMD_TYPE_UFS = 0x1,
+ UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+enum {
+ UTP_SCSI_COMMAND = 0x00000000,
+ UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UTP_REQ_DESC_INT_CMD = 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UTP_NO_DATA_TRANSFER = 0x00000000,
+ UTP_HOST_TO_DEVICE = 0x02000000,
+ UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+ OCS_SUCCESS = 0x0,
+ OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ OCS_INVALID_PRDT_ATTR = 0x2,
+ OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ OCS_PEER_COMM_FAILURE = 0x5,
+ OCS_ABORTED = 0x6,
+ OCS_FATAL_ERROR = 0x7,
+ OCS_INVALID_COMMAND_STATUS = 0x0F,
+ MASK_OCS = 0x0F,
+};
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @base_addr: Lower 32bit physical address DW-0
+ * @upper_addr: Upper 32bit physical address DW-1
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+ u32 base_addr;
+ u32 upper_addr;
+ u32 reserved;
+ u32 size;
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+ u8 command_upiu[ALIGNED_UPIU_SIZE];
+ u8 response_upiu[ALIGNED_UPIU_SIZE];
+ struct ufshcd_sg_entry prd_table[SG_ALL];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+ u32 dword_0;
+ u32 dword_1;
+ u32 dword_2;
+ u32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-5 */
+ u32 command_desc_base_addr_lo;
+ u32 command_desc_base_addr_hi;
+
+ /* DW 6 */
+ u16 response_upiu_length;
+ u16 response_upiu_offset;
+
+ /* DW 7 */
+ u16 prd_table_length;
+ u16 prd_table_offset;
+};
+
+/**
+ * struct utp_task_req_desc - UTMRD structure
+ * @header: UTMRD header DW-0 to DW-3
+ * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
+ * @task_rsp_upiu: Pointer to task response UPIU DW-12 to DW-19
+ */
+struct utp_task_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-11 */
+ u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+
+ /* DW 12-19 */
+ u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7264116..4411d42 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
return 0;
}
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter. In case of
+ * failure due to any reason return default i.e. 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+ struct PVSCSICmdDescConfigCmd cmd;
+ struct PVSCSIConfigPageHeader *header;
+ struct device *dev;
+ dma_addr_t configPagePA;
+ void *config_page;
+ u32 numPhys = 16;
+
+ dev = pvscsi_dev(adapter);
+ config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &configPagePA);
+ if (!config_page) {
+ dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+ goto exit;
+ }
+ BUG_ON(configPagePA & ~PAGE_MASK);
+
+ /* Fetch config info from the device. */
+ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+ cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+ cmd.cmpAddr = configPagePA;
+ cmd._pad = 0;
+
+ /*
+ * Mark the completion page header with error values. If the device
+ * completes the command successfully, it sets the status values to
+ * indicate success.
+ */
+ header = config_page;
+ memset(header, 0, sizeof *header);
+ header->hostStatus = BTSTAT_INVPARAM;
+ header->scsiStatus = SDSTAT_CHECK;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+ if (header->hostStatus == BTSTAT_SUCCESS &&
+ header->scsiStatus == SDSTAT_GOOD) {
+ struct PVSCSIConfigPageController *config;
+
+ config = config_page;
+ numPhys = config->numPhys;
+ } else
+ dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+ header->hostStatus, header->scsiStatus);
+ pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+ return numPhys;
+}
+
static int __devinit pvscsi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
+ struct device *dev;
unsigned int i;
unsigned long flags = 0;
int error;
@@ -1272,6 +1328,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev,
}
/*
+ * Ask the device for max number of targets.
+ */
+ host->max_id = pvscsi_get_max_targets(adapter);
+ dev = pvscsi_dev(adapter);
+ dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
+
+ /*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 62e36e7..3546e86 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
@@ -39,28 +39,45 @@
* host adapter status/error codes
*/
enum HostBusAdapterStatus {
- BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
- BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
- BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
- BTSTAT_DATA_UNDERRUN = 0x0c,
- BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
- BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
- BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
- BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
- BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
- BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
- BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
- BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
- BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
- BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
- BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
- BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
- BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
- BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
- BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
- BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
- BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
- BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+ BTSTAT_DATA_UNDERRUN = 0x0c,
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence
+ * requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from
+ * first CCB */
+ BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
+ * list */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message
+ * rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the
+ * host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN,
+ * sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI
+ * RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly
+ * (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+ SDSTAT_GOOD = 0x00, /* No errors. */
+ SDSTAT_CHECK = 0x02, /* Check condition. */
};
/*
@@ -114,6 +131,29 @@ struct PVSCSICmdDescResetDevice {
} __packed;
/*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+ u64 cmpAddr;
+ u64 configPageAddress;
+ u32 configPageNum;
+ u32 _pad;
+} __packed;
+
+enum PVSCSIConfigPageType {
+ PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+ PVSCSI_CONFIG_PAGE_PHY = 0x1959,
+ PVSCSI_CONFIG_PAGE_DEVICE = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+ PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+ PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121,
+ PVSCSI_CONFIG_PHY_ADDRESS = 0x2122,
+};
+
+/*
* Command descriptor for PVSCSI_CMD_ABORT_CMD --
*
* - currently does not support specifying the LUN.
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc {
u32 _pad[2];
} __packed;
+struct PVSCSIConfigPageHeader {
+ u32 pageNum;
+ u16 numDwords;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+ struct PVSCSIConfigPageHeader header;
+ u64 nodeWWN; /* Device name as defined in the SAS spec. */
+ u16 manufacturer[64];
+ u16 serialNumber[64];
+ u16 opromVersion[32];
+ u16 hwVersion[32];
+ u16 firmwareVersion[32];
+ u32 numPhys;
+ u8 useConsecutivePhyWWNs;
+ u8 reserved[3];
+} __packed;
+
/*
* Interrupt status / IRQ bits.
*/
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index b3b70b0..babd947 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1581,7 +1581,7 @@ static int __init sunzilog_init(void)
if (err)
goto out_unregister_uart;
- if (!zilog_irq) {
+ if (zilog_irq) {
struct uart_sunzilog_port *up = sunzilog_irq_chain;
err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
"zs", sunzilog_irq_chain);
@@ -1622,7 +1622,7 @@ static void __exit sunzilog_exit(void)
{
platform_driver_unregister(&zs_driver);
- if (!zilog_irq) {
+ if (zilog_irq) {
struct uart_sunzilog_port *up = sunzilog_irq_chain;
/* Disable Interrupts */
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 85a5ceb..965a629 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -345,7 +345,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- skb->len <= 1, req->actual, req->actual);
+ skb->len <= 1, req->actual, PAGE_SIZE);
page = NULL;
if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 958e512..05f0a80 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -398,21 +398,8 @@ static int restore_common(struct virtio_device *vdev)
return 0;
}
-static int virtballoon_thaw(struct virtio_device *vdev)
-{
- return restore_common(vdev);
-}
-
static int virtballoon_restore(struct virtio_device *vdev)
{
- struct virtio_balloon *vb = vdev->priv;
-
- /*
- * If a request wasn't complete at the time of freezing, this
- * could have been set.
- */
- vb->need_stats_update = 0;
-
return restore_common(vdev);
}
#endif
@@ -434,7 +421,6 @@ static struct virtio_driver virtio_balloon_driver = {
#ifdef CONFIG_PM
.freeze = virtballoon_freeze,
.restore = virtballoon_restore,
- .thaw = virtballoon_thaw,
#endif
};
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 635e1ef..2e03d41 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -720,24 +720,6 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-static int virtio_pci_suspend(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
-
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, PCI_D3hot);
- return 0;
-}
-
-static int virtio_pci_resume(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
-
- pci_restore_state(pci_dev);
- pci_set_power_state(pci_dev, PCI_D0);
- return 0;
-}
-
static int virtio_pci_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -758,59 +740,24 @@ static int virtio_pci_freeze(struct device *dev)
return ret;
}
-static int restore_common(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
- int ret;
-
- ret = pci_enable_device(pci_dev);
- if (ret)
- return ret;
- pci_set_master(pci_dev);
- vp_finalize_features(&vp_dev->vdev);
-
- return ret;
-}
-
-static int virtio_pci_thaw(struct device *dev)
+static int virtio_pci_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct virtio_driver *drv;
int ret;
- ret = restore_common(dev);
- if (ret)
- return ret;
-
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);
- if (drv && drv->thaw)
- ret = drv->thaw(&vp_dev->vdev);
- else if (drv && drv->restore)
- ret = drv->restore(&vp_dev->vdev);
-
- /* Finally, tell the device we're all set */
- if (!ret)
- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
-
- return ret;
-}
-
-static int virtio_pci_restore(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
- struct virtio_driver *drv;
- int ret;
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
- drv = container_of(vp_dev->vdev.dev.driver,
- struct virtio_driver, driver);
+ pci_set_master(pci_dev);
+ vp_finalize_features(&vp_dev->vdev);
- ret = restore_common(dev);
- if (!ret && drv && drv->restore)
+ if (drv && drv->restore)
ret = drv->restore(&vp_dev->vdev);
/* Finally, tell the device we're all set */
@@ -821,12 +768,7 @@ static int virtio_pci_restore(struct device *dev)
}
static const struct dev_pm_ops virtio_pci_pm_ops = {
- .suspend = virtio_pci_suspend,
- .resume = virtio_pci_resume,
- .freeze = virtio_pci_freeze,
- .thaw = virtio_pci_thaw,
- .restore = virtio_pci_restore,
- .poweroff = virtio_pci_suspend,
+ SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 19e6a20..1afb4fb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -204,7 +204,8 @@ error:
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *ret;
int order = get_order(size);
@@ -253,7 +254,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr)
+ dma_addr_t dev_addr, struct dma_attrs *attrs)
{
int order = get_order(size);
phys_addr_t phys;
diff --git a/fs/aio.c b/fs/aio.c
index 4f71627..da88760 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@ out_freectx:
return ERR_PTR(err);
}
-/* aio_cancel_all
+/* kill_ctx
* Cancels all outstanding aio requests on an aio context. Used
* when the processes owning a context have all exited to encourage
* the rapid destruction of the kioctx.
*/
-static void aio_cancel_all(struct kioctx *ctx)
+static void kill_ctx(struct kioctx *ctx)
{
int (*cancel)(struct kiocb *, struct io_event *);
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
struct io_event res;
+
spin_lock_irq(&ctx->ctx_lock);
ctx->dead = 1;
while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@ static void aio_cancel_all(struct kioctx *ctx)
spin_lock_irq(&ctx->ctx_lock);
}
}
- spin_unlock_irq(&ctx->ctx_lock);
-}
-
-static void wait_for_all_aios(struct kioctx *ctx)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- spin_lock_irq(&ctx->ctx_lock);
if (!ctx->reqs_active)
goto out;
@@ -387,9 +382,7 @@ void exit_aio(struct mm_struct *mm)
ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
hlist_del_rcu(&ctx->list);
- aio_cancel_all(ctx);
-
- wait_for_all_aios(ctx);
+ kill_ctx(ctx);
if (1 != atomic_read(&ctx->users))
printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@ static void io_destroy(struct kioctx *ioctx)
if (likely(!was_dead))
put_ioctx(ioctx); /* twice for the list */
- aio_cancel_all(ioctx);
- wait_for_all_aios(ioctx);
+ kill_ctx(ioctx);
/*
* Wake up any waiters. The setting of ctx->dead must be seen
@@ -1278,7 +1270,6 @@ static void io_destroy(struct kioctx *ioctx)
* locking done by the above calls to ensure this consistency.
*/
wake_up_all(&ioctx->wait);
- put_ioctx(ioctx); /* once for the lookup */
}
/* sys_io_setup:
@@ -1315,11 +1306,9 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
ret = PTR_ERR(ioctx);
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
- if (!ret) {
- put_ioctx(ioctx);
- return 0;
- }
- io_destroy(ioctx);
+ if (ret)
+ io_destroy(ioctx);
+ put_ioctx(ioctx);
}
out:
@@ -1337,6 +1326,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
io_destroy(ioctx);
+ put_ioctx(ioctx);
return 0;
}
pr_debug("EINVAL: io_destroy: invalid context id\n");
diff --git a/fs/exec.c b/fs/exec.c
index 9a1d9f0..b1fd202 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1371,7 +1371,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
unsigned int depth = bprm->recursion_depth;
int try,retval;
struct linux_binfmt *fmt;
- pid_t old_pid;
+ pid_t old_pid, old_vpid;
retval = security_bprm_check(bprm);
if (retval)
@@ -1382,8 +1382,9 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
return retval;
/* Need to fetch pid before load_binary changes it */
+ old_pid = current->pid;
rcu_read_lock();
- old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+ old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
retval = -ENOENT;
@@ -1406,7 +1407,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
if (retval >= 0) {
if (depth == 0) {
trace_sched_process_exec(current, old_pid, bprm);
- ptrace_event(PTRACE_EVENT_EXEC, old_pid);
+ ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
}
put_binfmt(fmt);
allow_write_access(bprm->file);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 75ad433..0b2b4db 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -1,5 +1,636 @@
+/*
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/include/linux/minix_fs.h
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
#include <linux/fs.h>
#include <linux/ext2_fs.h>
+#include <linux/blockgroup_lock.h>
+#include <linux/percpu_counter.h>
+#include <linux/rbtree.h>
+
+/* XXX Here for now... not interested in restructuring headers JUST now */
+
+/* data type for block offset of block group */
+typedef int ext2_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext2_fsblk_t;
+
+#define E2FSBLK "%lu"
+
+struct ext2_reserve_window {
+ ext2_fsblk_t _rsv_start; /* First byte reserved */
+ ext2_fsblk_t _rsv_end; /* Last byte reserved or 0 */
+};
+
+struct ext2_reserve_window_node {
+ struct rb_node rsv_node;
+ __u32 rsv_goal_size;
+ __u32 rsv_alloc_hit;
+ struct ext2_reserve_window rsv_window;
+};
+
+struct ext2_block_alloc_info {
+ /* information about reservation window */
+ struct ext2_reserve_window_node rsv_window_node;
+ /*
+ * was i_next_alloc_block in ext2_inode_info
+ * is the logical (file-relative) number of the
+ * most-recently-allocated block in this file.
+ * We use this for detecting linearly ascending allocation requests.
+ */
+ __u32 last_alloc_logical_block;
+ /*
+ * Was i_next_alloc_goal in ext2_inode_info
+ * is the *physical* companion to i_next_alloc_block.
+ * it is the physical block number of the block which was most-recently
+ * allocated to this file. This gives us the goal (target) for the next
+ * allocation when we detect linearly ascending requests.
+ */
+ ext2_fsblk_t last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * second extended-fs super-block data in memory
+ */
+struct ext2_sb_info {
+ unsigned long s_frag_size; /* Size of a fragment in bytes */
+ unsigned long s_frags_per_block;/* Number of fragments per block */
+ unsigned long s_inodes_per_block;/* Number of inodes per block */
+ unsigned long s_frags_per_group;/* Number of fragments in a group */
+ unsigned long s_blocks_per_group;/* Number of blocks in a group */
+ unsigned long s_inodes_per_group;/* Number of inodes in a group */
+ unsigned long s_itb_per_group; /* Number of inode table blocks per group */
+ unsigned long s_gdb_count; /* Number of group descriptor blocks */
+ unsigned long s_desc_per_block; /* Number of group descriptors per block */
+ unsigned long s_groups_count; /* Number of groups in the fs */
+ unsigned long s_overhead_last; /* Last calculated overhead */
+ unsigned long s_blocks_last; /* Last seen block count */
+ struct buffer_head * s_sbh; /* Buffer containing the super block */
+ struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
+ struct buffer_head ** s_group_desc;
+ unsigned long s_mount_opt;
+ unsigned long s_sb_block;
+ uid_t s_resuid;
+ gid_t s_resgid;
+ unsigned short s_mount_state;
+ unsigned short s_pad;
+ int s_addr_per_block_bits;
+ int s_desc_per_block_bits;
+ int s_inode_size;
+ int s_first_ino;
+ spinlock_t s_next_gen_lock;
+ u32 s_next_generation;
+ unsigned long s_dir_count;
+ u8 *s_debts;
+ struct percpu_counter s_freeblocks_counter;
+ struct percpu_counter s_freeinodes_counter;
+ struct percpu_counter s_dirs_counter;
+ struct blockgroup_lock *s_blockgroup_lock;
+ /* root of the per fs reservation window tree */
+ spinlock_t s_rsv_window_lock;
+ struct rb_root s_rsv_window_root;
+ struct ext2_reserve_window_node s_rsv_window_head;
+ /*
+ * s_lock protects against concurrent modifications of s_mount_state,
+ * s_blocks_last, s_overhead_last and the content of superblock's
+ * buffer pointed to by sbi->s_es.
+ *
+ * Note: It is used in ext2_show_options() to provide a consistent view
+ * of the mount options.
+ */
+ spinlock_t s_lock;
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
+{
+ return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
+/*
+ * Define EXT2FS_DEBUG to produce debug messages
+ */
+#undef EXT2FS_DEBUG
+
+/*
+ * Define EXT2_RESERVATION to reserve data blocks for expanding files
+ */
+#define EXT2_DEFAULT_RESERVE_BLOCKS 8
+/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
+#define EXT2_MAX_RESERVE_BLOCKS 1027
+#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
+/*
+ * The second extended file system version
+ */
+#define EXT2FS_DATE "95/08/09"
+#define EXT2FS_VERSION "0.5b"
+
+/*
+ * Debug code
+ */
+#ifdef EXT2FS_DEBUG
+# define ext2_debug(f, a...) { \
+ printk ("EXT2-fs DEBUG (%s, %d): %s:", \
+ __FILE__, __LINE__, __func__); \
+ printk (f, ## a); \
+ }
+#else
+# define ext2_debug(f, a...) /**/
+#endif
+
+/*
+ * Special inode numbers
+ */
+#define EXT2_BAD_INO 1 /* Bad blocks inode */
+#define EXT2_ROOT_INO 2 /* Root inode */
+#define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
+#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
+
+/* First non-reserved inode for old ext2 filesystems */
+#define EXT2_GOOD_OLD_FIRST_INO 11
+
+static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/*
+ * Macro-instructions used to manage several block sizes
+ */
+#define EXT2_MIN_BLOCK_SIZE 1024
+#define EXT2_MAX_BLOCK_SIZE 4096
+#define EXT2_MIN_BLOCK_LOG_SIZE 10
+#define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
+#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+#define EXT2_ADDR_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_addr_per_block_bits)
+#define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
+#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
+
+/*
+ * Macro-instructions used to manage fragments
+ */
+#define EXT2_MIN_FRAG_SIZE 1024
+#define EXT2_MAX_FRAG_SIZE 4096
+#define EXT2_MIN_FRAG_LOG_SIZE 10
+#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
+#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
+
+/*
+ * Structure of a blocks group descriptor
+ */
+struct ext2_group_desc
+{
+ __le32 bg_block_bitmap; /* Blocks bitmap block */
+ __le32 bg_inode_bitmap; /* Inodes bitmap block */
+ __le32 bg_inode_table; /* Inodes table block */
+ __le16 bg_free_blocks_count; /* Free blocks count */
+ __le16 bg_free_inodes_count; /* Free inodes count */
+ __le16 bg_used_dirs_count; /* Directories count */
+ __le16 bg_pad;
+ __le32 bg_reserved[3];
+};
+
+/*
+ * Macro-instructions used to manage group descriptors
+ */
+#define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group)
+#define EXT2_DESC_PER_BLOCK(s) (EXT2_SB(s)->s_desc_per_block)
+#define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group)
+#define EXT2_DESC_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_desc_per_block_bits)
+
+/*
+ * Constants relative to the data blocks
+ */
+#define EXT2_NDIR_BLOCKS 12
+#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS
+#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1)
+#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1)
+#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1)
+
+/*
+ * Inode flags (GETFLAGS/SETFLAGS)
+ */
+#define EXT2_SECRM_FL FS_SECRM_FL /* Secure deletion */
+#define EXT2_UNRM_FL FS_UNRM_FL /* Undelete */
+#define EXT2_COMPR_FL FS_COMPR_FL /* Compress file */
+#define EXT2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
+#define EXT2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
+#define EXT2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
+#define EXT2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
+#define EXT2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
+/* Reserved for compression usage... */
+#define EXT2_DIRTY_FL FS_DIRTY_FL
+#define EXT2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
+#define EXT2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
+#define EXT2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
+/* End compression flags --- maybe not all used */
+#define EXT2_BTREE_FL FS_BTREE_FL /* btree format dir */
+#define EXT2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
+#define EXT2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
+#define EXT2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
+#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
+#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
+#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
+
+#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
+#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
+
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+ EXT2_SYNC_FL | EXT2_NODUMP_FL |\
+ EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
+ EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+ EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & EXT2_REG_FLMASK;
+ else
+ return flags & EXT2_OTHER_FLMASK;
+}
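A minimal sketch of how these masks compose at inode-creation time, assuming the usual EXT2_I() accessor and a parent directory inode "dir" (both outside this excerpt):

	/* inherit parent flags, then drop the ones that make no sense
	 * for the new inode's type; "dir", "inode", "mode" are assumed */
	EXT2_I(inode)->i_flags =
		ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);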
+
+/*
+ * ioctl commands
+ */
+#define EXT2_IOC_GETFLAGS FS_IOC_GETFLAGS
+#define EXT2_IOC_SETFLAGS FS_IOC_SETFLAGS
+#define EXT2_IOC_GETVERSION FS_IOC_GETVERSION
+#define EXT2_IOC_SETVERSION FS_IOC_SETVERSION
+#define EXT2_IOC_GETRSVSZ _IOR('f', 5, long)
+#define EXT2_IOC_SETRSVSZ _IOW('f', 6, long)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define EXT2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define EXT2_IOC32_GETVERSION FS_IOC32_GETVERSION
+#define EXT2_IOC32_SETVERSION FS_IOC32_SETVERSION
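By way of illustration, a hedged userspace sketch of the reservation-window ioctls; "fd" is an assumed open descriptor on an ext2 file, <sys/ioctl.h> is needed, and error handling is trimmed:

	long rsv = 0;

	if (ioctl(fd, EXT2_IOC_GETRSVSZ, &rsv) == 0) {
		rsv = 16;	/* ask for a larger preallocation window */
		ioctl(fd, EXT2_IOC_SETRSVSZ, &rsv);
	}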
+
+/*
+ * Structure of an inode on the disk
+ */
+struct ext2_inode {
+ __le16 i_mode; /* File mode */
+ __le16 i_uid; /* Low 16 bits of Owner Uid */
+ __le32 i_size; /* Size in bytes */
+ __le32 i_atime; /* Access time */
+ __le32 i_ctime; /* Creation time */
+ __le32 i_mtime; /* Modification time */
+ __le32 i_dtime; /* Deletion Time */
+ __le16 i_gid; /* Low 16 bits of Group Id */
+ __le16 i_links_count; /* Links count */
+ __le32 i_blocks; /* Blocks count */
+ __le32 i_flags; /* File flags */
+ union {
+ struct {
+ __le32 l_i_reserved1;
+ } linux1;
+ struct {
+ __le32 h_i_translator;
+ } hurd1;
+ struct {
+ __le32 m_i_reserved1;
+ } masix1;
+ } osd1; /* OS dependent 1 */
+ __le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
+ __le32 i_generation; /* File version (for NFS) */
+ __le32 i_file_acl; /* File ACL */
+ __le32 i_dir_acl; /* Directory ACL */
+ __le32 i_faddr; /* Fragment address */
+ union {
+ struct {
+ __u8 l_i_frag; /* Fragment number */
+ __u8 l_i_fsize; /* Fragment size */
+ __u16 i_pad1;
+ __le16 l_i_uid_high; /* these 2 fields */
+ __le16 l_i_gid_high; /* were reserved2[0] */
+ __u32 l_i_reserved2;
+ } linux2;
+ struct {
+ __u8 h_i_frag; /* Fragment number */
+ __u8 h_i_fsize; /* Fragment size */
+ __le16 h_i_mode_high;
+ __le16 h_i_uid_high;
+ __le16 h_i_gid_high;
+ __le32 h_i_author;
+ } hurd2;
+ struct {
+ __u8 m_i_frag; /* Fragment number */
+ __u8 m_i_fsize; /* Fragment size */
+ __u16 m_pad1;
+ __u32 m_i_reserved2[2];
+ } masix2;
+ } osd2; /* OS dependent 2 */
+};
+
+#define i_size_high i_dir_acl
+
+#define i_reserved1 osd1.linux1.l_i_reserved1
+#define i_frag osd2.linux2.l_i_frag
+#define i_fsize osd2.linux2.l_i_fsize
+#define i_uid_low i_uid
+#define i_gid_low i_gid
+#define i_uid_high osd2.linux2.l_i_uid_high
+#define i_gid_high osd2.linux2.l_i_gid_high
+#define i_reserved2 osd2.linux2.l_i_reserved2
+
+/*
+ * File system states
+ */
+#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
+#define EXT2_ERROR_FS 0x0002 /* Errors detected */
+
+/*
+ * Mount flags
+ */
+#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
+#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
+#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
+#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
+#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
+#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
+#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
+#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
+#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
+#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
+#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
+#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
+#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
+#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
+#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
+#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
+
+
+#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
+#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
+#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \
+ EXT2_MOUNT_##opt)
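A minimal sketch of how these helpers are used while parsing mount options; the flow is illustrative rather than copied from super.c, and enable_user_xattrs() is a hypothetical helper:

	unsigned long opts = 0;

	set_opt(opts, XATTR_USER);		/* default this option on */
	clear_opt(opts, DEBUG);
	EXT2_SB(sb)->s_mount_opt = opts;
	if (test_opt(sb, XATTR_USER))
		enable_user_xattrs(sb);		/* hypothetical */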
+/*
+ * Maximal mount counts between two filesystem checks
+ */
+#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
+#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */
+
+/*
+ * Behaviour when detecting errors
+ */
+#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */
+#define EXT2_ERRORS_RO 2 /* Remount fs read-only */
+#define EXT2_ERRORS_PANIC 3 /* Panic */
+#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
+
+/*
+ * Structure of the super block
+ */
+struct ext2_super_block {
+ __le32 s_inodes_count; /* Inodes count */
+ __le32 s_blocks_count; /* Blocks count */
+ __le32 s_r_blocks_count; /* Reserved blocks count */
+ __le32 s_free_blocks_count; /* Free blocks count */
+ __le32 s_free_inodes_count; /* Free inodes count */
+ __le32 s_first_data_block; /* First Data Block */
+ __le32 s_log_block_size; /* Block size */
+ __le32 s_log_frag_size; /* Fragment size */
+ __le32 s_blocks_per_group; /* # Blocks per group */
+ __le32 s_frags_per_group; /* # Fragments per group */
+ __le32 s_inodes_per_group; /* # Inodes per group */
+ __le32 s_mtime; /* Mount time */
+ __le32 s_wtime; /* Write time */
+ __le16 s_mnt_count; /* Mount count */
+ __le16 s_max_mnt_count; /* Maximal mount count */
+ __le16 s_magic; /* Magic signature */
+ __le16 s_state; /* File system state */
+ __le16 s_errors; /* Behaviour when detecting errors */
+ __le16 s_minor_rev_level; /* minor revision level */
+ __le32 s_lastcheck; /* time of last check */
+ __le32 s_checkinterval; /* max. time between checks */
+ __le32 s_creator_os; /* OS */
+ __le32 s_rev_level; /* Revision level */
+ __le16 s_def_resuid; /* Default uid for reserved blocks */
+ __le16 s_def_resgid; /* Default gid for reserved blocks */
+ /*
+ * These fields are for EXT2_DYNAMIC_REV superblocks only.
+ *
+ * Note: the difference between the compatible feature set and
+ * the incompatible feature set is that if there is a bit set
+ * in the incompatible feature set that the kernel doesn't
+ * know about, it should refuse to mount the filesystem.
+ *
+ * e2fsck's requirements are more strict; if it doesn't know
+ * about a feature in either the compatible or incompatible
+ * feature set, it must abort and not try to meddle with
+ * things it doesn't understand...
+ */
+ __le32 s_first_ino; /* First non-reserved inode */
+ __le16 s_inode_size; /* size of inode structure */
+ __le16 s_block_group_nr; /* block group # of this superblock */
+ __le32 s_feature_compat; /* compatible feature set */
+ __le32 s_feature_incompat; /* incompatible feature set */
+ __le32 s_feature_ro_compat; /* readonly-compatible feature set */
+ __u8 s_uuid[16]; /* 128-bit uuid for volume */
+ char s_volume_name[16]; /* volume name */
+ char s_last_mounted[64]; /* directory where last mounted */
+ __le32 s_algorithm_usage_bitmap; /* For compression */
+ /*
+ * Performance hints. Directory preallocation should only
+ * happen if the EXT2_COMPAT_PREALLOC flag is on.
+ */
+ __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
+ __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
+ __u16 s_padding1;
+ /*
+ * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
+ */
+ __u8 s_journal_uuid[16]; /* uuid of journal superblock */
+ __u32 s_journal_inum; /* inode number of journal file */
+ __u32 s_journal_dev; /* device number of journal file */
+ __u32 s_last_orphan; /* start of list of inodes to delete */
+ __u32 s_hash_seed[4]; /* HTREE hash seed */
+ __u8 s_def_hash_version; /* Default hash version to use */
+ __u8 s_reserved_char_pad;
+ __u16 s_reserved_word_pad;
+ __le32 s_default_mount_opts;
+ __le32 s_first_meta_bg; /* First metablock block group */
+ __u32 s_reserved[190]; /* Padding to the end of the block */
+};
+
+/*
+ * Codes for operating systems
+ */
+#define EXT2_OS_LINUX 0
+#define EXT2_OS_HURD 1
+#define EXT2_OS_MASIX 2
+#define EXT2_OS_FREEBSD 3
+#define EXT2_OS_LITES 4
+
+/*
+ * Revision levels
+ */
+#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */
+#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
+
+#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV
+#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV
+
+#define EXT2_GOOD_OLD_INODE_SIZE 128
+
+/*
+ * Feature set definitions
+ */
+
+#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \
+ ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \
+ ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \
+ ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
+#define EXT2_SET_COMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
+#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
+#define EXT2_SET_INCOMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
+#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
+ EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
+
+#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001
+#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002
+#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
+#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008
+#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010
+#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020
+#define EXT2_FEATURE_COMPAT_ANY 0xffffffff
+
+#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
+#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
+#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
+#define EXT2_FEATURE_RO_COMPAT_ANY 0xffffffff
+
+#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001
+#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
+#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
+#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
+#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
+#define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
+
+#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
+#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
+ EXT2_FEATURE_INCOMPAT_META_BG)
+#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
+ EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
+#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED ~EXT2_FEATURE_RO_COMPAT_SUPP
+#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
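To make the compat/incompat split concrete, a minimal sketch of the mount-time gate, modelled on (not copied from) the usual super.c check; ext2_msg() is assumed from the rest of the driver:

	__le32 bad = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);

	if (bad) {
		ext2_msg(sb, KERN_ERR,
			 "couldn't mount: unsupported incompat features (%x)",
			 le32_to_cpu(bad));
		return -EINVAL;
	}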
+
+/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define EXT2_DEF_RESUID 0
+#define EXT2_DEF_RESGID 0
+
+/*
+ * Default mount options
+ */
+#define EXT2_DEFM_DEBUG 0x0001
+#define EXT2_DEFM_BSDGROUPS 0x0002
+#define EXT2_DEFM_XATTR_USER 0x0004
+#define EXT2_DEFM_ACL 0x0008
+#define EXT2_DEFM_UID16 0x0010
+ /* Not used by ext2, but reserved for use by ext3 */
+#define EXT3_DEFM_JMODE 0x0060
+#define EXT3_DEFM_JMODE_DATA 0x0020
+#define EXT3_DEFM_JMODE_ORDERED 0x0040
+#define EXT3_DEFM_JMODE_WBACK 0x0060
+
+/*
+ * Structure of a directory entry
+ */
+
+struct ext2_dir_entry {
+ __le32 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
+ __le16 name_len; /* Name length */
+ char name[]; /* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * The new version of the directory entry. Since EXT2 structures are
+ * stored in intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct ext2_dir_entry_2 {
+ __le32 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
+ __u8 name_len; /* Name length */
+ __u8 file_type;
+ char name[]; /* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * Ext2 directory file types. Only the low 3 bits are used. The
+ * other bits are reserved for now.
+ */
+enum {
+ EXT2_FT_UNKNOWN = 0,
+ EXT2_FT_REG_FILE = 1,
+ EXT2_FT_DIR = 2,
+ EXT2_FT_CHRDEV = 3,
+ EXT2_FT_BLKDEV = 4,
+ EXT2_FT_FIFO = 5,
+ EXT2_FT_SOCK = 6,
+ EXT2_FT_SYMLINK = 7,
+ EXT2_FT_MAX
+};
+
+/*
+ * EXT2_DIR_PAD defines the directory entry boundaries
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define EXT2_DIR_PAD 4
+#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
+#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
+ ~EXT2_DIR_ROUND)
+#define EXT2_MAX_REC_LEN ((1<<16)-1)
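As an illustration of how rec_len chains entries within a block, a hedged sketch of walking one directory block; "kaddr" (a mapped block of EXT2_BLOCK_SIZE(sb) bytes) and "sb" are assumed, and real code also validates each rec_len:

	struct ext2_dir_entry_2 *de = (struct ext2_dir_entry_2 *)kaddr;
	char *limit = kaddr + EXT2_BLOCK_SIZE(sb);

	while ((char *)de < limit &&
	       le16_to_cpu(de->rec_len) >= EXT2_DIR_REC_LEN(1)) {
		if (de->inode)		/* inode == 0 marks an unused slot */
			pr_debug("entry: %.*s\n", de->name_len, de->name);
		de = (struct ext2_dir_entry_2 *)((char *)de +
						 le16_to_cpu(de->rec_len));
	}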
+
+static inline void verify_offsets(void)
+{
+#define A(x,y) BUILD_BUG_ON(x != offsetof(struct ext2_super_block, y));
+ A(EXT2_SB_MAGIC_OFFSET, s_magic);
+ A(EXT2_SB_BLOCKS_OFFSET, s_blocks_count);
+ A(EXT2_SB_BSIZE_OFFSET, s_log_block_size);
+#undef A
+}
/*
* ext2 mount options
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index be7a8d0..cfedb2c 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -3,10 +3,7 @@
* Handler for storing security labels as extended attributes.
*/
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
#include <linux/security.h>
#include "xattr.h"
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 2989467..7e19257 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -5,10 +5,7 @@
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
#include "xattr.h"
static size_t
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 322a56b..1c33128 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -9,8 +9,6 @@
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/buffer_head.h>
-#include <linux/ext2_fs_sb.h>
-#include <linux/ext2_fs.h>
#include <linux/blkdev.h>
#include "ext2.h"
#include "xip.h"
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 3091f62..c76832c 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -4,13 +4,7 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 1e036b7..baac1b1 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -11,17 +11,9 @@
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
-#include <linux/time.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
/*
* balloc.c contains the blocks allocation and deallocation routines
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 6afc39d..909d13e 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -7,9 +7,7 @@
* Universite Pierre et Marie Curie (Paris VI)
*/
-#include <linux/buffer_head.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#ifdef EXT3FS_DEBUG
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 34f0a07..cc761ad8 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,12 +21,7 @@
*
*/
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/buffer_head.h>
-#include <linux/slab.h>
-#include <linux/rbtree.h>
+#include "ext3.h"
static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
diff --git a/include/linux/ext3_fs.h b/fs/ext3/ext3.h
index f5a84eef..b6515fd 100644
--- a/include/linux/ext3_fs.h
+++ b/fs/ext3/ext3.h
@@ -1,5 +1,11 @@
/*
- * linux/include/linux/ext3_fs.h
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
@@ -13,12 +19,11 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#ifndef _LINUX_EXT3_FS_H
-#define _LINUX_EXT3_FS_H
-
-#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
#include <linux/magic.h>
#include <linux/bug.h>
+#include <linux/blockgroup_lock.h>
/*
* The second extended filesystem constants/structures
@@ -75,29 +80,12 @@
#define EXT3_MIN_BLOCK_SIZE 1024
#define EXT3_MAX_BLOCK_SIZE 65536
#define EXT3_MIN_BLOCK_LOG_SIZE 10
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
-#else
-# define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
+#define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
-#else
-# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
+#define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
-#else
-#define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
- EXT3_GOOD_OLD_INODE_SIZE : \
- (s)->s_inode_size)
-#define EXT3_FIRST_INO(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
- EXT3_GOOD_OLD_FIRST_INO : \
- (s)->s_first_ino)
-#endif
/*
* Macro-instructions used to manage fragments
@@ -105,13 +93,8 @@
#define EXT3_MIN_FRAG_SIZE 1024
#define EXT3_MAX_FRAG_SIZE 4096
#define EXT3_MIN_FRAG_LOG_SIZE 10
-#ifdef __KERNEL__
-# define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
-#else
-# define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
-#endif
+#define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
+#define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
/*
* Structure of a blocks group descriptor
@@ -131,16 +114,10 @@ struct ext3_group_desc
/*
* Macro-instructions used to manage group descriptors
*/
-#ifdef __KERNEL__
-# define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
-# define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
-# define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
-# define EXT3_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
-#endif
+#define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
+#define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
+#define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
+#define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
/*
* Constants relative to the data blocks
@@ -336,7 +313,6 @@ struct ext3_inode {
#define i_size_high i_dir_acl
-#if defined(__KERNEL__) || defined(__linux__)
#define i_reserved1 osd1.linux1.l_i_reserved1
#define i_frag osd2.linux2.l_i_frag
#define i_fsize osd2.linux2.l_i_fsize
@@ -346,24 +322,6 @@ struct ext3_inode {
#define i_gid_high osd2.linux2.l_i_gid_high
#define i_reserved2 osd2.linux2.l_i_reserved2
-#elif defined(__GNU__)
-
-#define i_translator osd1.hurd1.h_i_translator
-#define i_frag osd2.hurd2.h_i_frag;
-#define i_fsize osd2.hurd2.h_i_fsize;
-#define i_uid_high osd2.hurd2.h_i_uid_high
-#define i_gid_high osd2.hurd2.h_i_gid_high
-#define i_author osd2.hurd2.h_i_author
-
-#elif defined(__masix__)
-
-#define i_reserved1 osd1.masix1.m_i_reserved1
-#define i_frag osd2.masix2.m_i_frag
-#define i_fsize osd2.masix2.m_i_fsize
-#define i_reserved2 osd2.masix2.m_i_reserved2
-
-#endif /* defined(__KERNEL__) || defined(__linux__) */
-
/*
* File system states
*/
@@ -531,9 +489,197 @@ struct ext3_super_block {
__u32 s_reserved[162]; /* Padding to the end of the block */
};
-#ifdef __KERNEL__
-#include <linux/ext3_fs_i.h>
-#include <linux/ext3_fs_sb.h>
+/* data type for block offset of block group */
+typedef int ext3_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext3_fsblk_t;
+
+#define E3FSBLK "%lu"
+
+struct ext3_reserve_window {
+ ext3_fsblk_t _rsv_start; /* First byte reserved */
+ ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
+};
+
+struct ext3_reserve_window_node {
+ struct rb_node rsv_node;
+ __u32 rsv_goal_size;
+ __u32 rsv_alloc_hit;
+ struct ext3_reserve_window rsv_window;
+};
+
+struct ext3_block_alloc_info {
+ /* information about reservation window */
+ struct ext3_reserve_window_node rsv_window_node;
+ /*
+ * Was i_next_alloc_block in ext3_inode_info; it is the logical
+ * (file-relative) number of the most-recently-allocated block in
+ * this file.
+ * We use this for detecting linearly ascending allocation requests.
+ */
+ __u32 last_alloc_logical_block;
+ /*
+ * Was i_next_alloc_goal in ext3_inode_info; it is the *physical*
+ * companion to i_next_alloc_block: the physical block number of the
+ * block which was most recently allocated to this file. This gives
+ * us the goal (target) for the next allocation when we detect
+ * linearly ascending requests.
+ */
+ ext3_fsblk_t last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * third extended file system inode data in memory
+ */
+struct ext3_inode_info {
+ __le32 i_data[15]; /* unconverted */
+ __u32 i_flags;
+#ifdef EXT3_FRAGMENTS
+ __u32 i_faddr;
+ __u8 i_frag_no;
+ __u8 i_frag_size;
+#endif
+ ext3_fsblk_t i_file_acl;
+ __u32 i_dir_acl;
+ __u32 i_dtime;
+
+ /*
+ * i_block_group is the number of the block group which contains
+ * this file's inode. Constant across the lifetime of the inode,
+ * it is used for making block allocation decisions - we try to
+ * place a file's data blocks near its inode block, and new inodes
+ * near to their parent directory's inode.
+ */
+ __u32 i_block_group;
+ unsigned long i_state_flags; /* Dynamic state flags for ext3 */
+
+ /* block reservation info */
+ struct ext3_block_alloc_info *i_block_alloc_info;
+
+ __u32 i_dir_start_lookup;
+#ifdef CONFIG_EXT3_FS_XATTR
+ /*
+ * Extended attributes can be read independently of the main file
+ * data. Taking i_mutex even when reading would cause contention
+ * between readers of EAs and writers of regular file data, so
+ * instead we synchronize on xattr_sem when reading or changing
+ * EAs.
+ */
+ struct rw_semaphore xattr_sem;
+#endif
+
+ struct list_head i_orphan; /* unlinked but open inodes */
+
+ /*
+ * i_disksize keeps track of what the inode size is ON DISK, not
+ * in memory. During truncate, i_size is set to the new size by
+ * the VFS prior to calling ext3_truncate(), but the filesystem won't
+ * set i_disksize to 0 until the truncate is actually under way.
+ *
+ * The intent is that i_disksize always represents the blocks which
+ * are used by this file. This allows recovery to restart truncate
+ * on orphans if we crash during truncate. We actually write i_disksize
+ * into the on-disk inode when writing inodes out, instead of i_size.
+ *
+ * The only time when i_disksize and i_size may be different is when
+ * a truncate is in progress. The only things which change i_disksize
+ * are ext3_get_block (growth) and ext3_truncate (shrinkth).
+ */
+ loff_t i_disksize;
+
+ /* on-disk additional length */
+ __u16 i_extra_isize;
+
+ /*
+ * truncate_mutex is for serialising ext3_truncate() against
+ * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
+ * data tree are chopped off during truncate. We can't do that in
+ * ext3 because whenever we perform intermediate commits during
+ * truncate, the inode and all the metadata blocks *must* be in a
+ * consistent state which allows truncation of the orphans to restart
+ * during recovery. Hence we must fix the get_block-vs-truncate race
+ * by other means, so we have truncate_mutex.
+ */
+ struct mutex truncate_mutex;
+
+ /*
+ * Transactions that contain inode's metadata needed to complete
+ * fsync and fdatasync, respectively.
+ */
+ atomic_t i_sync_tid;
+ atomic_t i_datasync_tid;
+
+ struct inode vfs_inode;
+};
+
+/*
+ * third extended-fs super-block data in memory
+ */
+struct ext3_sb_info {
+ unsigned long s_frag_size; /* Size of a fragment in bytes */
+ unsigned long s_frags_per_block;/* Number of fragments per block */
+ unsigned long s_inodes_per_block;/* Number of inodes per block */
+ unsigned long s_frags_per_group;/* Number of fragments in a group */
+ unsigned long s_blocks_per_group;/* Number of blocks in a group */
+ unsigned long s_inodes_per_group;/* Number of inodes in a group */
+ unsigned long s_itb_per_group; /* Number of inode table blocks per group */
+ unsigned long s_gdb_count; /* Number of group descriptor blocks */
+ unsigned long s_desc_per_block; /* Number of group descriptors per block */
+ unsigned long s_groups_count; /* Number of groups in the fs */
+ unsigned long s_overhead_last; /* Last calculated overhead */
+ unsigned long s_blocks_last; /* Last seen block count */
+ struct buffer_head * s_sbh; /* Buffer containing the super block */
+ struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
+ struct buffer_head ** s_group_desc;
+ unsigned long s_mount_opt;
+ ext3_fsblk_t s_sb_block;
+ uid_t s_resuid;
+ gid_t s_resgid;
+ unsigned short s_mount_state;
+ unsigned short s_pad;
+ int s_addr_per_block_bits;
+ int s_desc_per_block_bits;
+ int s_inode_size;
+ int s_first_ino;
+ spinlock_t s_next_gen_lock;
+ u32 s_next_generation;
+ u32 s_hash_seed[4];
+ int s_def_hash_version;
+ int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */
+ struct percpu_counter s_freeblocks_counter;
+ struct percpu_counter s_freeinodes_counter;
+ struct percpu_counter s_dirs_counter;
+ struct blockgroup_lock *s_blockgroup_lock;
+
+ /* root of the per fs reservation window tree */
+ spinlock_t s_rsv_window_lock;
+ struct rb_root s_rsv_window_root;
+ struct ext3_reserve_window_node s_rsv_window_head;
+
+ /* Journaling */
+ struct inode * s_journal_inode;
+ struct journal_s * s_journal;
+ struct list_head s_orphan;
+ struct mutex s_orphan_lock;
+ struct mutex s_resize_lock;
+ unsigned long s_commit_interval;
+ struct block_device *journal_bdev;
+#ifdef CONFIG_QUOTA
+ char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
+ int s_jquota_fmt; /* Format of quota to use */
+#endif
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
+{
+ return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
{
return sb->s_fs_info;
@@ -576,12 +722,6 @@ static inline void ext3_clear_inode_state(struct inode *inode, int bit)
{
clear_bit(bit, &EXT3_I(inode)->i_state_flags);
}
-#else
-/* Assume that user mode programs are passing in an ext3fs superblock, not
- * a kernel struct super_block. This will allow us to call the feature-test
- * macros from user land. */
-#define EXT3_SB(sb) (sb)
-#endif
#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
@@ -771,8 +911,6 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len)
#define DX_HASH_HALF_MD4_UNSIGNED 4
#define DX_HASH_TEA_UNSIGNED 5
-#ifdef __KERNEL__
-
/* hash info structure used by the directory hash */
struct dx_hash_info
{
@@ -974,7 +1112,211 @@ extern const struct inode_operations ext3_special_inode_operations;
extern const struct inode_operations ext3_symlink_inode_operations;
extern const struct inode_operations ext3_fast_symlink_inode_operations;
+#define EXT3_JOURNAL(inode) (EXT3_SB((inode)->i_sb)->s_journal)
+
+/* Define the number of blocks we need to account to a transaction to
+ * modify one block of data.
+ *
+ * We may have to touch one inode, one bitmap buffer, up to three
+ * indirection blocks, the group and superblock summaries, and the data
+ * block to complete the transaction. */
+
+#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
+
+/* Extended attribute operations touch at most two data buffers,
+ * two bitmap buffers, and two group summaries, in addition to the inode
+ * and the superblock, which are already accounted for. */
+
+#define EXT3_XATTR_TRANS_BLOCKS 6U
+
+/* Define the minimum size for a transaction which modifies data. This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota). The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
+ EXT3_XATTR_TRANS_BLOCKS - 2 + \
+ EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
+
+/* Delete operations potentially hit one directory's namespace plus an
+ * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
+ * generous. We can grow the delete transaction later if necessary. */
+
+#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
+
+/* Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction. For unbounded transactions such as
+ * write(2) and truncate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go. */
+
+#define EXT3_MAX_TRANS_DATA 64U
+
+/* We break up a large truncate or write transaction once the handle's
+ * buffer credits get this low; we then need either to extend the
+ * transaction or to start a new one. Reserve enough space here for
+ * inode, bitmap, superblock, group and indirection updates for at least
+ * one block, plus two quota updates. Quota allocations are not
+ * needed. */
+
+#define EXT3_RESERVE_TRANS_BLOCKS 12U
+
+#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
+
+#ifdef CONFIG_QUOTA
+/* Amount of blocks needed for quota update - we know that the structure was
+ * allocated so we need to update only inode+data */
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+/* Amount of blocks needed for quota insert/delete - we do some block writes
+ * but inode, sb and group updates are done only once */
+#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
+ (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
+#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
+ (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
+#else
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
+#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
+#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
+#endif
+#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
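For orientation, a minimal sketch of how a writer sizes its journal handle with the credit macros above; it mirrors the common pattern in fs/ext3 rather than any single call site:

	handle_t *handle;

	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... get write access to buffers and dirty metadata under the handle ... */
	return ext3_journal_stop(handle);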
+
+int
+ext3_mark_iloc_dirty(handle_t *handle,
+ struct inode *inode,
+ struct ext3_iloc *iloc);
+
+/*
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh. This _must_ be cleaned up later.
+ */
+
+int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
+ struct ext3_iloc *iloc);
+
+int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
+
+/*
+ * Wrapper functions with which ext3 calls into JBD. The intent here is
+ * to allow these to be turned into appropriate stubs so ext3 can control
+ * ext2 filesystems, so ext2+ext3 systems only need one fs. This work hasn't
+ * been done yet.
+ */
+
+static inline void ext3_journal_release_buffer(handle_t *handle,
+ struct buffer_head *bh)
+{
+ journal_release_buffer(handle, bh);
+}
+
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+ struct buffer_head *bh, handle_t *handle, int err);
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-#endif /* __KERNEL__ */
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+ unsigned long blocknr, struct buffer_head *bh);
+
+int __ext3_journal_get_create_access(const char *where,
+ handle_t *handle, struct buffer_head *bh);
+
+int __ext3_journal_dirty_metadata(const char *where,
+ handle_t *handle, struct buffer_head *bh);
+
+#define ext3_journal_get_undo_access(handle, bh) \
+ __ext3_journal_get_undo_access(__func__, (handle), (bh))
+#define ext3_journal_get_write_access(handle, bh) \
+ __ext3_journal_get_write_access(__func__, (handle), (bh))
+#define ext3_journal_revoke(handle, blocknr, bh) \
+ __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
+#define ext3_journal_get_create_access(handle, bh) \
+ __ext3_journal_get_create_access(__func__, (handle), (bh))
+#define ext3_journal_dirty_metadata(handle, bh) \
+ __ext3_journal_dirty_metadata(__func__, (handle), (bh))
+#define ext3_journal_forget(handle, bh) \
+ __ext3_journal_forget(__func__, (handle), (bh))
+
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
+
+handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
+int __ext3_journal_stop(const char *where, handle_t *handle);
+
+static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
+{
+ return ext3_journal_start_sb(inode->i_sb, nblocks);
+}
+
+#define ext3_journal_stop(handle) \
+ __ext3_journal_stop(__func__, (handle))
+
+static inline handle_t *ext3_journal_current_handle(void)
+{
+ return journal_current_handle();
+}
+
+static inline int ext3_journal_extend(handle_t *handle, int nblocks)
+{
+ return journal_extend(handle, nblocks);
+}
+
+static inline int ext3_journal_restart(handle_t *handle, int nblocks)
+{
+ return journal_restart(handle, nblocks);
+}
+
+static inline int ext3_journal_blocks_per_page(struct inode *inode)
+{
+ return journal_blocks_per_page(inode);
+}
+
+static inline int ext3_journal_force_commit(journal_t *journal)
+{
+ return journal_force_commit(journal);
+}
+
+/* super.c */
+int ext3_force_commit(struct super_block *sb);
+
+static inline int ext3_should_journal_data(struct inode *inode)
+{
+ if (!S_ISREG(inode->i_mode))
+ return 1;
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
+ return 1;
+ if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+ return 1;
+ return 0;
+}
+
+static inline int ext3_should_order_data(struct inode *inode)
+{
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+ if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+ return 0;
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
+ return 1;
+ return 0;
+}
+
+static inline int ext3_should_writeback_data(struct inode *inode)
+{
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+ if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+ return 0;
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
+ return 1;
+ return 0;
+}
-#endif /* _LINUX_EXT3_FS_H */
+#include <trace/events/ext3.h>
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
index d401f14..785a326 100644
--- a/fs/ext3/ext3_jbd.c
+++ b/fs/ext3/ext3_jbd.c
@@ -2,7 +2,7 @@
* Interface between ext3 and JBD
*/
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
struct buffer_head *bh)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 724df69..25cb413 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -18,12 +18,8 @@
* (jj@sunsite.ms.mff.cuni.cz)
*/
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
#include <linux/quotaops.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 1860ed3..d4dff27 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -22,15 +22,9 @@
* we can depend on generic_block_fdatasync() to sync the data blocks.
*/
-#include <linux/time.h>
#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
#include <linux/writeback.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
/*
* akpm: A new design for ext3_sync_file().
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 7d215b4..d10231d 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -9,9 +9,7 @@
* License.
*/
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#include <linux/cryptohash.h>
#define DELTA 0x9E3779B9
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1cde284..e3c39e4 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -12,21 +12,10 @@
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/stat.h>
-#include <linux/string.h>
#include <linux/quotaops.h>
-#include <linux/buffer_head.h>
#include <linux/random.h>
-#include <linux/bitops.h>
-#include <trace/events/ext3.h>
-
-#include <asm/byteorder.h>
+#include "ext3.h"
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 6d34186..10d7812 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -22,22 +22,12 @@
* Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
*/
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/ext3_jbd.h>
-#include <linux/jbd.h>
#include <linux/highuid.h>
-#include <linux/pagemap.h>
#include <linux/quotaops.h>
-#include <linux/string.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
-#include <linux/uio.h>
-#include <linux/bio.h>
-#include <linux/fiemap.h>
#include <linux/namei.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 4af574c..677a5c2 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -7,15 +7,10 @@
* Universite Pierre et Marie Curie (Paris VI)
*/
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/capability.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
#include <linux/mount.h>
-#include <linux/time.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
+#include "ext3.h"
long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index e8e2117..d7940b2 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -24,20 +24,8 @@
* Theodore Ts'o, 2002
*/
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/jbd.h>
-#include <linux/time.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/string.h>
#include <linux/quotaops.h>
-#include <linux/buffer_head.h>
-#include <linux/bio.h>
-#include <trace/events/ext3.h>
-
+#include "ext3.h"
#include "namei.h"
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 7916e4ce..0f814f3 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -11,10 +11,7 @@
#define EXT3FS_DEBUG
-#include <linux/ext3_jbd.h>
-
-#include <linux/errno.h>
-#include <linux/slab.h>
+#include "ext3.h"
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e0b45b9..cf0b592 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -17,22 +17,12 @@
*/
#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
-#include <linux/buffer_head.h>
#include <linux/exportfs.h>
-#include <linux/vfs.h>
+#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/mount.h>
-#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/log2.h>
@@ -40,13 +30,13 @@
#include <asm/uaccess.h>
+#define CREATE_TRACE_POINTS
+
+#include "ext3.h"
#include "xattr.h"
#include "acl.h"
#include "namei.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/ext3.h>
-
#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
#define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
#else
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index 7c48982..6b01c3e 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -17,10 +17,8 @@
* ext3 symlink handling code
*/
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
#include <linux/namei.h>
+#include "ext3.h"
#include "xattr.h"
static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index d565759..d22ebb7 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -50,14 +50,9 @@
* by the buffer lock.
*/
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#include <linux/mbcache.h>
#include <linux/quotaops.h>
-#include <linux/rwsem.h>
#include "xattr.h"
#include "acl.h"
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index ea26f2a..3387664 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -3,12 +3,8 @@
* Handler for storing security labels as extended attributes.
*/
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
#include <linux/security.h>
+#include "ext3.h"
#include "xattr.h"
static size_t
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index 2526a88..d75727c 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -5,11 +5,7 @@
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#include "xattr.h"
static size_t
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index b32e473..5612af3 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -5,10 +5,7 @@
* Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
#include "xattr.h"
static size_t
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7683458..a3d2c9e 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -18,7 +18,6 @@
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
diff --git a/fs/namei.c b/fs/namei.c
index e615ff3..1898198 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1054,53 +1054,65 @@ static void follow_dotdot(struct nameidata *nd)
}
/*
- * Allocate a dentry with name and parent, and perform a parent
- * directory ->lookup on it. Returns the new dentry, or ERR_PTR
- * on error. parent->d_inode->i_mutex must be held. d_lookup must
- * have verified that no child exists while under i_mutex.
+ * This looks up the name in dcache, possibly revalidates the old dentry and
+ * allocates a new one if not found or not valid. The need_lookup argument
+ * returns whether i_op->lookup is necessary.
+ *
+ * dir->d_inode->i_mutex must be held
*/
-static struct dentry *d_alloc_and_lookup(struct dentry *parent,
- struct qstr *name, struct nameidata *nd)
+static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
+ struct nameidata *nd, bool *need_lookup)
{
- struct inode *inode = parent->d_inode;
struct dentry *dentry;
- struct dentry *old;
+ int error;
- /* Don't create child dentry for a dead directory. */
- if (unlikely(IS_DEADDIR(inode)))
- return ERR_PTR(-ENOENT);
+ *need_lookup = false;
+ dentry = d_lookup(dir, name);
+ if (dentry) {
+ if (d_need_lookup(dentry)) {
+ *need_lookup = true;
+ } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ error = d_revalidate(dentry, nd);
+ if (unlikely(error <= 0)) {
+ if (error < 0) {
+ dput(dentry);
+ return ERR_PTR(error);
+ } else if (!d_invalidate(dentry)) {
+ dput(dentry);
+ dentry = NULL;
+ }
+ }
+ }
+ }
- dentry = d_alloc(parent, name);
- if (unlikely(!dentry))
- return ERR_PTR(-ENOMEM);
+ if (!dentry) {
+ dentry = d_alloc(dir, name);
+ if (unlikely(!dentry))
+ return ERR_PTR(-ENOMEM);
- old = inode->i_op->lookup(inode, dentry, nd);
- if (unlikely(old)) {
- dput(dentry);
- dentry = old;
+ *need_lookup = true;
}
return dentry;
}
/*
- * We already have a dentry, but require a lookup to be performed on the parent
- * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error.
- * parent->d_inode->i_mutex must be held. d_lookup must have verified that no
- * child exists while under i_mutex.
+ * Call i_op->lookup on the dentry. The dentry must be negative but may be
+ * hashed if it was populated with DCACHE_NEED_LOOKUP.
+ *
+ * dir->d_inode->i_mutex must be held
*/
-static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry,
- struct nameidata *nd)
+static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
{
- struct inode *inode = parent->d_inode;
struct dentry *old;
/* Don't create child dentry for a dead directory. */
- if (unlikely(IS_DEADDIR(inode))) {
+ if (unlikely(IS_DEADDIR(dir))) {
dput(dentry);
return ERR_PTR(-ENOENT);
}
- old = inode->i_op->lookup(inode, dentry, nd);
+ old = dir->i_op->lookup(dir, dentry, nd);
if (unlikely(old)) {
dput(dentry);
dentry = old;
@@ -1108,6 +1120,19 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr
return dentry;
}
+static struct dentry *__lookup_hash(struct qstr *name,
+ struct dentry *base, struct nameidata *nd)
+{
+ bool need_lookup;
+ struct dentry *dentry;
+
+ dentry = lookup_dcache(name, base, nd, &need_lookup);
+ if (!need_lookup)
+ return dentry;
+
+ return lookup_real(base->d_inode, dentry, nd);
+}
+
/*
* It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible.
@@ -1139,6 +1164,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
return -ECHILD;
nd->seq = seq;
+ if (unlikely(d_need_lookup(dentry)))
+ goto unlazy;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
status = d_revalidate(dentry, nd);
if (unlikely(status <= 0)) {
@@ -1147,8 +1174,6 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
goto unlazy;
}
}
- if (unlikely(d_need_lookup(dentry)))
- goto unlazy;
path->mnt = mnt;
path->dentry = dentry;
if (unlikely(!__follow_mount_rcu(nd, path, inode)))
@@ -1163,38 +1188,14 @@ unlazy:
dentry = __d_lookup(parent, name);
}
- if (dentry && unlikely(d_need_lookup(dentry))) {
+ if (unlikely(!dentry))
+ goto need_lookup;
+
+ if (unlikely(d_need_lookup(dentry))) {
dput(dentry);
- dentry = NULL;
- }
-retry:
- if (unlikely(!dentry)) {
- struct inode *dir = parent->d_inode;
- BUG_ON(nd->inode != dir);
-
- mutex_lock(&dir->i_mutex);
- dentry = d_lookup(parent, name);
- if (likely(!dentry)) {
- dentry = d_alloc_and_lookup(parent, name, nd);
- if (IS_ERR(dentry)) {
- mutex_unlock(&dir->i_mutex);
- return PTR_ERR(dentry);
- }
- /* known good */
- need_reval = 0;
- status = 1;
- } else if (unlikely(d_need_lookup(dentry))) {
- dentry = d_inode_lookup(parent, dentry, nd);
- if (IS_ERR(dentry)) {
- mutex_unlock(&dir->i_mutex);
- return PTR_ERR(dentry);
- }
- /* known good */
- need_reval = 0;
- status = 1;
- }
- mutex_unlock(&dir->i_mutex);
+ goto need_lookup;
}
+
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
status = d_revalidate(dentry, nd);
if (unlikely(status <= 0)) {
@@ -1204,12 +1205,10 @@ retry:
}
if (!d_invalidate(dentry)) {
dput(dentry);
- dentry = NULL;
- need_reval = 1;
- goto retry;
+ goto need_lookup;
}
}
-
+done:
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd->flags);
@@ -1221,6 +1220,16 @@ retry:
nd->flags |= LOOKUP_JUMPED;
*inode = path->dentry->d_inode;
return 0;
+
+need_lookup:
+ BUG_ON(nd->inode != parent->d_inode);
+
+ mutex_lock(&parent->d_inode->i_mutex);
+ dentry = __lookup_hash(name, parent, nd);
+ mutex_unlock(&parent->d_inode->i_mutex);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ goto done;
}
static inline int may_lookup(struct nameidata *nd)
@@ -1846,59 +1855,6 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
return err;
}
-static struct dentry *__lookup_hash(struct qstr *name,
- struct dentry *base, struct nameidata *nd)
-{
- struct inode *inode = base->d_inode;
- struct dentry *dentry;
- int err;
-
- err = inode_permission(inode, MAY_EXEC);
- if (err)
- return ERR_PTR(err);
-
- /*
- * Don't bother with __d_lookup: callers are for creat as
- * well as unlink, so a lot of the time it would cost
- * a double lookup.
- */
- dentry = d_lookup(base, name);
-
- if (dentry && d_need_lookup(dentry)) {
- /*
- * __lookup_hash is called with the parent dir's i_mutex already
- * held, so we are good to go here.
- */
- dentry = d_inode_lookup(base, dentry, nd);
- if (IS_ERR(dentry))
- return dentry;
- }
-
- if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) {
- int status = d_revalidate(dentry, nd);
- if (unlikely(status <= 0)) {
- /*
- * The dentry failed validation.
- * If d_revalidate returned 0 attempt to invalidate
- * the dentry otherwise d_revalidate is asking us
- * to return a fail status.
- */
- if (status < 0) {
- dput(dentry);
- return ERR_PTR(status);
- } else if (!d_invalidate(dentry)) {
- dput(dentry);
- dentry = NULL;
- }
- }
- }
-
- if (!dentry)
- dentry = d_alloc_and_lookup(base, name, nd);
-
- return dentry;
-}
-
/*
* Restricted form of lookup. Doesn't follow links, single-component only,
* needs parent already locked. Doesn't follow mounts.
@@ -1924,6 +1880,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
unsigned int c;
+ int err;
WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
@@ -1948,6 +1905,10 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
return ERR_PTR(err);
}
+ err = inode_permission(base->d_inode, MAY_EXEC);
+ if (err)
+ return ERR_PTR(err);
+
return __lookup_hash(&this, base, NULL);
}
@@ -2749,7 +2710,7 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
/*
* The dentry_unhash() helper will try to drop the dentry early: we
- * should have a usage count of 2 if we're the only user of this
+ * should have a usage count of 1 if we're the only user of this
* dentry, and if that is true (possibly after pruning the dcache),
* then we drop the dentry now.
*
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index a6fda3c..a1a1bfd 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -28,8 +28,6 @@
#include "suballoc.h"
#include "move_extents.h"
-#include <linux/ext2_fs.h>
-
#define o2info_from_user(a, b) \
copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b) \
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index f37c32b..50952c9 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -105,26 +105,12 @@ static const struct inode_operations pstore_dir_inode_operations = {
.unlink = pstore_unlink,
};
-static struct inode *pstore_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev)
+static struct inode *pstore_get_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
-
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_uid = inode->i_gid = 0;
- inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- switch (mode & S_IFMT) {
- case S_IFREG:
- inode->i_fop = &pstore_file_operations;
- break;
- case S_IFDIR:
- inode->i_op = &pstore_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- inc_nlink(inode);
- break;
- }
}
return inode;
}
@@ -216,9 +202,11 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
return rc;
rc = -ENOMEM;
- inode = pstore_get_inode(pstore_sb, root->d_inode, S_IFREG | 0444, 0);
+ inode = pstore_get_inode(pstore_sb);
if (!inode)
goto fail;
+ inode->i_mode = S_IFREG | 0444;
+ inode->i_fop = &pstore_file_operations;
private = kmalloc(sizeof *private + size, GFP_KERNEL);
if (!private)
goto fail_alloc;
@@ -293,10 +281,12 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent)
parse_options(data);
- inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ inode = pstore_get_inode(sb);
if (inode) {
- /* override ramfs "dir" options so we catch unlink(2) */
+ inode->i_mode = S_IFDIR | 0755;
inode->i_op = &pstore_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
}
sb->s_root = d_make_root(inode);
if (!sb->s_root)
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 8a36183..1488302 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -10,6 +10,7 @@
#error "Cannot use generic cmpxchg on SMP"
#endif
+#include <linux/types.h>
#include <linux/irqflags.h>
#ifndef xchg
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index d838c94..2eba340 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -31,6 +31,8 @@ static inline void crypto_set_aead_spawn(
crypto_set_spawn(&spawn->base, inst);
}
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 3a748a6..06e8b32 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -34,6 +34,8 @@ static inline void crypto_set_skcipher_spawn(
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask);
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 34a7b89..64ff02d 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -617,6 +617,17 @@ struct drm_get_cap {
__u64 value;
};
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+ /** Flags.. only applicable for handle->fd */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
+
#include "drm_mode.h"
#define DRM_IOCTL_BASE 'd'
@@ -673,7 +684,8 @@ struct drm_get_cap {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
-#define DRM_IOCTL_GEM_PRIME_OPEN DRM_IOWR(0x2e, struct drm_gem_open)
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
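
A hedged user-space sketch of driving the new PRIME export ioctl; only struct drm_prime_handle, DRM_CLOEXEC and the ioctl number come from this header, the surrounding file handling and error policy are assumptions:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf file descriptor; returns the fd or -1. */
static int example_prime_handle_to_fd(int drm_fd, unsigned int gem_handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = gem_handle;
	args.flags = DRM_CLOEXEC;	/* flags only apply to handle->fd */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;
	return args.fd;			/* filled in by the kernel */
}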
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 574bd1c..dd73104 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -91,6 +91,7 @@ struct drm_device;
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
+#define DRM_UT_PRIME 0x08
/*
* Three debug levels are defined.
* drm_core, drm_driver, drm_kms
@@ -150,6 +151,7 @@ int drm_err(const char *func, const char *format, ...);
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
@@ -215,6 +217,11 @@ int drm_err(const char *func, const char *format, ...);
drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
__func__, fmt, ##args); \
} while (0)
+#define DRM_DEBUG_PRIME(fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME, \
+ __func__, fmt, ##args); \
+ } while (0)
#define DRM_LOG(fmt, args...) \
do { \
drm_ut_debug_printk(DRM_UT_CORE, NULL, \
@@ -238,6 +245,7 @@ int drm_err(const char *func, const char *format, ...);
#else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
+#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_LOG(fmt, arg...) do { } while (0)
#define DRM_LOG_KMS(fmt, args...) do { } while (0)
@@ -410,6 +418,12 @@ struct drm_pending_event {
void (*destroy)(struct drm_pending_event *event);
};
+/* initial implementation using a linked list - todo: hashtab */
+struct drm_prime_file_private {
+ struct list_head head;
+ struct mutex lock;
+};
+
/** File private data */
struct drm_file {
int authenticated;
@@ -437,6 +451,8 @@ struct drm_file {
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
+
+ struct drm_prime_file_private prime;
};
/** Wait queue */
@@ -652,6 +668,12 @@ struct drm_gem_object {
uint32_t pending_write_domain;
void *driver_private;
+
+ /* dma buf exported from this GEM object */
+ struct dma_buf *export_dma_buf;
+
+ /* dma buf attachment backing this object */
+ struct dma_buf_attachment *import_attach;
};
#include "drm_crtc.h"
@@ -890,6 +912,20 @@ struct drm_driver {
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1509,6 +1545,32 @@ extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);
+
+extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+
+extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+
+int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
+ struct drm_gem_object **obj);
+
#if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
#endif
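
As a sketch, a driver could opt into PRIME by setting DRIVER_PRIME and wiring the handle<->fd hooks to the generic helpers declared above; the example_* import/export callbacks are hypothetical driver code, not part of this patch:

static struct dma_buf *example_gem_prime_export(struct drm_device *dev,
						struct drm_gem_object *obj,
						int flags);
static struct drm_gem_object *example_gem_prime_import(struct drm_device *dev,
							struct dma_buf *dma_buf);

static struct drm_driver example_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = example_gem_prime_export,
	.gem_prime_import   = example_gem_prime_import,
};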
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 0a0001b..923afb5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -44,4 +44,8 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+#ifdef CONFIG_INTEL_IOMMU
+extern int intel_iommu_gfx_mapped;
+#endif
+
#endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a4b5da2..3c9b616 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -3,6 +3,7 @@ header-y += can/
header-y += caif/
header-y += dvb/
header-y += hdlc/
+header-y += hsi/
header-y += isdn/
header-y += mmc/
header-y += nfsd/
@@ -120,7 +121,6 @@ header-y += errno.h
header-y += errqueue.h
header-y += ethtool.h
header-y += eventpoll.h
-header-y += ext2_fs.h
header-y += fadvise.h
header-y += falloc.h
header-y += fanotify.h
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1ffdb98..a2c819d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -764,12 +764,6 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
-
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
index 532fb58..4abf2ea 100644
--- a/include/linux/cryptouser.h
+++ b/include/linux/cryptouser.h
@@ -100,3 +100,6 @@ struct crypto_report_rng {
char type[CRYPTO_MAX_NAME];
unsigned int seedsize;
};
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+ sizeof(struct crypto_report_blkcipher))
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 71ad34e..547ab56 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -13,6 +13,8 @@
enum dma_attr {
DMA_ATTR_WRITE_BARRIER,
DMA_ATTR_WEAK_ORDERING,
+ DMA_ATTR_WRITE_COMBINE,
+ DMA_ATTR_NON_CONSISTENT,
DMA_ATTR_MAX,
};
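
A sketch of the consumer side of the two new attributes; it assumes the dma_alloc_attrs() entry point that the dma_map_ops change below feeds into:

static void *example_wc_alloc(struct device *dev, size_t size,
			      dma_addr_t *dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/* ask for write-combined, non-consistent memory where supported */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}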
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 5a736af..dfc099e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,10 +9,15 @@
#include <linux/scatterlist.h>
struct dma_map_ops {
- void* (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void* (*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+ void (*free)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
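
On the provider side, an architecture that does not care about attributes can adapt an existing coherent allocator to the new signatures roughly as follows; the example_* names are hypothetical:

static void *example_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       struct dma_attrs *attrs)
{
	/* attrs may carry DMA_ATTR_WRITE_COMBINE and friends; a simple
	 * implementation is free to ignore them and stay coherent */
	return example_legacy_alloc_coherent(dev, size, dma_handle, gfp);
}

static void example_dma_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	example_legacy_free_coherent(dev, size, vaddr, dma_handle);
}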
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index ce1b719..2723e71 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -18,574 +18,25 @@
#include <linux/types.h>
#include <linux/magic.h>
-#include <linux/fs.h>
-/*
- * The second extended filesystem constants/structures
- */
-
-/*
- * Define EXT2FS_DEBUG to produce debug messages
- */
-#undef EXT2FS_DEBUG
-
-/*
- * Define EXT2_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT2_DEFAULT_RESERVE_BLOCKS 8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT2_MAX_RESERVE_BLOCKS 1027
-#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
-/*
- * The second extended file system version
- */
-#define EXT2FS_DATE "95/08/09"
-#define EXT2FS_VERSION "0.5b"
-
-/*
- * Debug code
- */
-#ifdef EXT2FS_DEBUG
-# define ext2_debug(f, a...) { \
- printk ("EXT2-fs DEBUG (%s, %d): %s:", \
- __FILE__, __LINE__, __func__); \
- printk (f, ## a); \
- }
-#else
-# define ext2_debug(f, a...) /**/
-#endif
-
-/*
- * Special inode numbers
- */
-#define EXT2_BAD_INO 1 /* Bad blocks inode */
-#define EXT2_ROOT_INO 2 /* Root inode */
-#define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
-#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
-
-/* First non-reserved inode for old ext2 filesystems */
-#define EXT2_GOOD_OLD_FIRST_INO 11
-
-#ifdef __KERNEL__
-#include <linux/ext2_fs_sb.h>
-static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-#else
-/* Assume that user mode programs are passing in an ext2fs superblock, not
- * a kernel struct super_block. This will allow us to call the feature-test
- * macros from user land. */
-#define EXT2_SB(sb) (sb)
-#endif
+#define EXT2_NAME_LEN 255
/*
* Maximal count of links to a file
*/
#define EXT2_LINK_MAX 32000
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT2_MIN_BLOCK_SIZE 1024
-#define EXT2_MAX_BLOCK_SIZE 4096
-#define EXT2_MIN_BLOCK_LOG_SIZE 10
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
-#else
-# define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
-#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
-#else
-# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
-#define EXT2_ADDR_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_addr_per_block_bits)
-#define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
-#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
-#else
-#define EXT2_INODE_SIZE(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
- EXT2_GOOD_OLD_INODE_SIZE : \
- (s)->s_inode_size)
-#define EXT2_FIRST_INO(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
- EXT2_GOOD_OLD_FIRST_INO : \
- (s)->s_first_ino)
-#endif
+#define EXT2_SB_MAGIC_OFFSET 0x38
+#define EXT2_SB_BLOCKS_OFFSET 0x04
+#define EXT2_SB_BSIZE_OFFSET 0x18
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT2_MIN_FRAG_SIZE 1024
-#define EXT2_MAX_FRAG_SIZE 4096
-#define EXT2_MIN_FRAG_LOG_SIZE 10
-#ifdef __KERNEL__
-# define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
-#else
-# define EXT2_FRAG_SIZE(s) (EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s))
-#endif
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext2_group_desc
+static inline u64 ext2_image_size(void *ext2_sb)
{
- __le32 bg_block_bitmap; /* Blocks bitmap block */
- __le32 bg_inode_bitmap; /* Inodes bitmap block */
- __le32 bg_inode_table; /* Inodes table block */
- __le16 bg_free_blocks_count; /* Free blocks count */
- __le16 bg_free_inodes_count; /* Free inodes count */
- __le16 bg_used_dirs_count; /* Directories count */
- __le16 bg_pad;
- __le32 bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#ifdef __KERNEL__
-# define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s) (EXT2_SB(s)->s_desc_per_block)
-# define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group)
-# define EXT2_DESC_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT2_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc))
-# define EXT2_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
-#endif
-
-/*
- * Constants relative to the data blocks
- */
-#define EXT2_NDIR_BLOCKS 12
-#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS
-#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1)
-#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1)
-#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1)
-
-/*
- * Inode flags (GETFLAGS/SETFLAGS)
- */
-#define EXT2_SECRM_FL FS_SECRM_FL /* Secure deletion */
-#define EXT2_UNRM_FL FS_UNRM_FL /* Undelete */
-#define EXT2_COMPR_FL FS_COMPR_FL /* Compress file */
-#define EXT2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
-#define EXT2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
-#define EXT2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
-#define EXT2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
-#define EXT2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT2_DIRTY_FL FS_DIRTY_FL
-#define EXT2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
-#define EXT2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
-#define EXT2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
-/* End compression flags --- maybe not all used */
-#define EXT2_BTREE_FL FS_BTREE_FL /* btree format dir */
-#define EXT2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
-#define EXT2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
-#define EXT2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
-#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
-#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
-#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
-#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
-
-#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
-#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
- EXT2_SYNC_FL | EXT2_NODUMP_FL |\
- EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
- EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
- EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
-{
- if (S_ISDIR(mode))
- return flags;
- else if (S_ISREG(mode))
- return flags & EXT2_REG_FLMASK;
- else
- return flags & EXT2_OTHER_FLMASK;
+ __u8 *p = ext2_sb;
+ if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
+ return 0;
+ return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
+ le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
}
-/*
- * ioctl commands
- */
-#define EXT2_IOC_GETFLAGS FS_IOC_GETFLAGS
-#define EXT2_IOC_SETFLAGS FS_IOC_SETFLAGS
-#define EXT2_IOC_GETVERSION FS_IOC_GETVERSION
-#define EXT2_IOC_SETVERSION FS_IOC_SETVERSION
-#define EXT2_IOC_GETRSVSZ _IOR('f', 5, long)
-#define EXT2_IOC_SETRSVSZ _IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define EXT2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
-#define EXT2_IOC32_GETVERSION FS_IOC32_GETVERSION
-#define EXT2_IOC32_SETVERSION FS_IOC32_SETVERSION
-
-/*
- * Structure of an inode on the disk
- */
-struct ext2_inode {
- __le16 i_mode; /* File mode */
- __le16 i_uid; /* Low 16 bits of Owner Uid */
- __le32 i_size; /* Size in bytes */
- __le32 i_atime; /* Access time */
- __le32 i_ctime; /* Creation time */
- __le32 i_mtime; /* Modification time */
- __le32 i_dtime; /* Deletion Time */
- __le16 i_gid; /* Low 16 bits of Group Id */
- __le16 i_links_count; /* Links count */
- __le32 i_blocks; /* Blocks count */
- __le32 i_flags; /* File flags */
- union {
- struct {
- __le32 l_i_reserved1;
- } linux1;
- struct {
- __le32 h_i_translator;
- } hurd1;
- struct {
- __le32 m_i_reserved1;
- } masix1;
- } osd1; /* OS dependent 1 */
- __le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
- __le32 i_generation; /* File version (for NFS) */
- __le32 i_file_acl; /* File ACL */
- __le32 i_dir_acl; /* Directory ACL */
- __le32 i_faddr; /* Fragment address */
- union {
- struct {
- __u8 l_i_frag; /* Fragment number */
- __u8 l_i_fsize; /* Fragment size */
- __u16 i_pad1;
- __le16 l_i_uid_high; /* these 2 fields */
- __le16 l_i_gid_high; /* were reserved2[0] */
- __u32 l_i_reserved2;
- } linux2;
- struct {
- __u8 h_i_frag; /* Fragment number */
- __u8 h_i_fsize; /* Fragment size */
- __le16 h_i_mode_high;
- __le16 h_i_uid_high;
- __le16 h_i_gid_high;
- __le32 h_i_author;
- } hurd2;
- struct {
- __u8 m_i_frag; /* Fragment number */
- __u8 m_i_fsize; /* Fragment size */
- __u16 m_pad1;
- __u32 m_i_reserved2[2];
- } masix2;
- } osd2; /* OS dependent 2 */
-};
-
-#define i_size_high i_dir_acl
-
-#if defined(__KERNEL__) || defined(__linux__)
-#define i_reserved1 osd1.linux1.l_i_reserved1
-#define i_frag osd2.linux2.l_i_frag
-#define i_fsize osd2.linux2.l_i_fsize
-#define i_uid_low i_uid
-#define i_gid_low i_gid
-#define i_uid_high osd2.linux2.l_i_uid_high
-#define i_gid_high osd2.linux2.l_i_gid_high
-#define i_reserved2 osd2.linux2.l_i_reserved2
-#endif
-
-#ifdef __hurd__
-#define i_translator osd1.hurd1.h_i_translator
-#define i_frag osd2.hurd2.h_i_frag
-#define i_fsize osd2.hurd2.h_i_fsize
-#define i_uid_high osd2.hurd2.h_i_uid_high
-#define i_gid_high osd2.hurd2.h_i_gid_high
-#define i_author osd2.hurd2.h_i_author
-#endif
-
-#ifdef __masix__
-#define i_reserved1 osd1.masix1.m_i_reserved1
-#define i_frag osd2.masix2.m_i_frag
-#define i_fsize osd2.masix2.m_i_fsize
-#define i_reserved2 osd2.masix2.m_i_reserved2
-#endif
-
-/*
- * File system states
- */
-#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
-#define EXT2_ERROR_FS 0x0002 /* Errors detected */
-
-/*
- * Mount flags
- */
-#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
-#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
-#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
-#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
-#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
-#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
-#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
-#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
-#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
-#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
-#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
-#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
-#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
-#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
-#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
-#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
-
-
-#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
-#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
-#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \
- EXT2_MOUNT_##opt)
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
-#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */
-#define EXT2_ERRORS_RO 2 /* Remount fs read-only */
-#define EXT2_ERRORS_PANIC 3 /* Panic */
-#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext2_super_block {
- __le32 s_inodes_count; /* Inodes count */
- __le32 s_blocks_count; /* Blocks count */
- __le32 s_r_blocks_count; /* Reserved blocks count */
- __le32 s_free_blocks_count; /* Free blocks count */
- __le32 s_free_inodes_count; /* Free inodes count */
- __le32 s_first_data_block; /* First Data Block */
- __le32 s_log_block_size; /* Block size */
- __le32 s_log_frag_size; /* Fragment size */
- __le32 s_blocks_per_group; /* # Blocks per group */
- __le32 s_frags_per_group; /* # Fragments per group */
- __le32 s_inodes_per_group; /* # Inodes per group */
- __le32 s_mtime; /* Mount time */
- __le32 s_wtime; /* Write time */
- __le16 s_mnt_count; /* Mount count */
- __le16 s_max_mnt_count; /* Maximal mount count */
- __le16 s_magic; /* Magic signature */
- __le16 s_state; /* File system state */
- __le16 s_errors; /* Behaviour when detecting errors */
- __le16 s_minor_rev_level; /* minor revision level */
- __le32 s_lastcheck; /* time of last check */
- __le32 s_checkinterval; /* max. time between checks */
- __le32 s_creator_os; /* OS */
- __le32 s_rev_level; /* Revision level */
- __le16 s_def_resuid; /* Default uid for reserved blocks */
- __le16 s_def_resgid; /* Default gid for reserved blocks */
- /*
- * These fields are for EXT2_DYNAMIC_REV superblocks only.
- *
- * Note: the difference between the compatible feature set and
- * the incompatible feature set is that if there is a bit set
- * in the incompatible feature set that the kernel doesn't
- * know about, it should refuse to mount the filesystem.
- *
- * e2fsck's requirements are more strict; if it doesn't know
- * about a feature in either the compatible or incompatible
- * feature set, it must abort and not try to meddle with
- * things it doesn't understand...
- */
- __le32 s_first_ino; /* First non-reserved inode */
- __le16 s_inode_size; /* size of inode structure */
- __le16 s_block_group_nr; /* block group # of this superblock */
- __le32 s_feature_compat; /* compatible feature set */
- __le32 s_feature_incompat; /* incompatible feature set */
- __le32 s_feature_ro_compat; /* readonly-compatible feature set */
- __u8 s_uuid[16]; /* 128-bit uuid for volume */
- char s_volume_name[16]; /* volume name */
- char s_last_mounted[64]; /* directory where last mounted */
- __le32 s_algorithm_usage_bitmap; /* For compression */
- /*
- * Performance hints. Directory preallocation should only
- * happen if the EXT2_COMPAT_PREALLOC flag is on.
- */
- __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
- __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
- __u16 s_padding1;
- /*
- * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
- */
- __u8 s_journal_uuid[16]; /* uuid of journal superblock */
- __u32 s_journal_inum; /* inode number of journal file */
- __u32 s_journal_dev; /* device number of journal file */
- __u32 s_last_orphan; /* start of list of inodes to delete */
- __u32 s_hash_seed[4]; /* HTREE hash seed */
- __u8 s_def_hash_version; /* Default hash version to use */
- __u8 s_reserved_char_pad;
- __u16 s_reserved_word_pad;
- __le32 s_default_mount_opts;
- __le32 s_first_meta_bg; /* First metablock block group */
- __u32 s_reserved[190]; /* Padding to the end of the block */
-};
-
-/*
- * Codes for operating systems
- */
-#define EXT2_OS_LINUX 0
-#define EXT2_OS_HURD 1
-#define EXT2_OS_MASIX 2
-#define EXT2_OS_FREEBSD 3
-#define EXT2_OS_LITES 4
-
-/*
- * Revision levels
- */
-#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */
-#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
-
-#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV
-#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV
-
-#define EXT2_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \
- ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \
- ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \
- ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT2_SET_COMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT2_SET_INCOMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
- EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001
-#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
-#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008
-#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010
-#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020
-#define EXT2_FEATURE_COMPAT_ANY 0xffffffff
-
-#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
-#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
-#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
-#define EXT2_FEATURE_RO_COMPAT_ANY 0xffffffff
-
-#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001
-#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
-#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
-#define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
-
-#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
- EXT2_FEATURE_INCOMPAT_META_BG)
-#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
-#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED ~EXT2_FEATURE_RO_COMPAT_SUPP
-#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define EXT2_DEF_RESUID 0
-#define EXT2_DEF_RESGID 0
-
-/*
- * Default mount options
- */
-#define EXT2_DEFM_DEBUG 0x0001
-#define EXT2_DEFM_BSDGROUPS 0x0002
-#define EXT2_DEFM_XATTR_USER 0x0004
-#define EXT2_DEFM_ACL 0x0008
-#define EXT2_DEFM_UID16 0x0010
- /* Not used by ext2, but reserved for use by ext3 */
-#define EXT3_DEFM_JMODE 0x0060
-#define EXT3_DEFM_JMODE_DATA 0x0020
-#define EXT3_DEFM_JMODE_ORDERED 0x0040
-#define EXT3_DEFM_JMODE_WBACK 0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT2_NAME_LEN 255
-
-struct ext2_dir_entry {
- __le32 inode; /* Inode number */
- __le16 rec_len; /* Directory entry length */
- __le16 name_len; /* Name length */
- char name[EXT2_NAME_LEN]; /* File name */
-};
-
-/*
- * The new version of the directory entry. Since EXT2 structures are
- * stored in intel byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext2_dir_entry_2 {
- __le32 inode; /* Inode number */
- __le16 rec_len; /* Directory entry length */
- __u8 name_len; /* Name length */
- __u8 file_type;
- char name[EXT2_NAME_LEN]; /* File name */
-};
-
-/*
- * Ext2 directory file types. Only the low 3 bits are used. The
- * other bits are reserved for now.
- */
-enum {
- EXT2_FT_UNKNOWN = 0,
- EXT2_FT_REG_FILE = 1,
- EXT2_FT_DIR = 2,
- EXT2_FT_CHRDEV = 3,
- EXT2_FT_BLKDEV = 4,
- EXT2_FT_FIFO = 5,
- EXT2_FT_SOCK = 6,
- EXT2_FT_SYMLINK = 7,
- EXT2_FT_MAX
-};
-
-/*
- * EXT2_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT2_DIR_PAD 4
-#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
-#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
- ~EXT2_DIR_ROUND)
-#define EXT2_MAX_REC_LEN ((1<<16)-1)
-
#endif /* _LINUX_EXT2_FS_H */
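
What is left of the header is just enough to size an ext2 image from its on-disk superblock. A small usage sketch; the 1024-byte superblock offset is standard ext2 layout, the buffer handling is an assumption:

static u64 example_ext2_size(void *first_blocks)
{
	/* the ext2 superblock sits 1024 bytes into the device;
	 * ext2_image_size() returns 0 if the magic does not match */
	return ext2_image_size((u8 *)first_blocks + 1024);
}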
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
deleted file mode 100644
index db4d9f5..0000000
--- a/include/linux/ext2_fs_sb.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * linux/include/linux/ext2_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- * from
- *
- * linux/include/linux/minix_fs_sb.h
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-#ifndef _LINUX_EXT2_FS_SB
-#define _LINUX_EXT2_FS_SB
-
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#include <linux/rbtree.h>
-
-/* XXX Here for now... not interested in restructing headers JUST now */
-
-/* data type for block offset of block group */
-typedef int ext2_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext2_fsblk_t;
-
-#define E2FSBLK "%lu"
-
-struct ext2_reserve_window {
- ext2_fsblk_t _rsv_start; /* First byte reserved */
- ext2_fsblk_t _rsv_end; /* Last byte reserved or 0 */
-};
-
-struct ext2_reserve_window_node {
- struct rb_node rsv_node;
- __u32 rsv_goal_size;
- __u32 rsv_alloc_hit;
- struct ext2_reserve_window rsv_window;
-};
-
-struct ext2_block_alloc_info {
- /* information about reservation window */
- struct ext2_reserve_window_node rsv_window_node;
- /*
- * was i_next_alloc_block in ext2_inode_info
- * is the logical (file-relative) number of the
- * most-recently-allocated block in this file.
- * We use this for detecting linearly ascending allocation requests.
- */
- __u32 last_alloc_logical_block;
- /*
- * Was i_next_alloc_goal in ext2_inode_info
- * is the *physical* companion to i_next_alloc_block.
- * it the the physical block number of the block which was most-recentl
- * allocated to this file. This give us the goal (target) for the next
- * allocation when we detect linearly ascending requests.
- */
- ext2_fsblk_t last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * second extended-fs super-block data in memory
- */
-struct ext2_sb_info {
- unsigned long s_frag_size; /* Size of a fragment in bytes */
- unsigned long s_frags_per_block;/* Number of fragments per block */
- unsigned long s_inodes_per_block;/* Number of inodes per block */
- unsigned long s_frags_per_group;/* Number of fragments in a group */
- unsigned long s_blocks_per_group;/* Number of blocks in a group */
- unsigned long s_inodes_per_group;/* Number of inodes in a group */
- unsigned long s_itb_per_group; /* Number of inode table blocks per group */
- unsigned long s_gdb_count; /* Number of group descriptor blocks */
- unsigned long s_desc_per_block; /* Number of group descriptors per block */
- unsigned long s_groups_count; /* Number of groups in the fs */
- unsigned long s_overhead_last; /* Last calculated overhead */
- unsigned long s_blocks_last; /* Last seen block count */
- struct buffer_head * s_sbh; /* Buffer containing the super block */
- struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
- struct buffer_head ** s_group_desc;
- unsigned long s_mount_opt;
- unsigned long s_sb_block;
- uid_t s_resuid;
- gid_t s_resgid;
- unsigned short s_mount_state;
- unsigned short s_pad;
- int s_addr_per_block_bits;
- int s_desc_per_block_bits;
- int s_inode_size;
- int s_first_ino;
- spinlock_t s_next_gen_lock;
- u32 s_next_generation;
- unsigned long s_dir_count;
- u8 *s_debts;
- struct percpu_counter s_freeblocks_counter;
- struct percpu_counter s_freeinodes_counter;
- struct percpu_counter s_dirs_counter;
- struct blockgroup_lock *s_blockgroup_lock;
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
- struct ext2_reserve_window_node s_rsv_window_head;
- /*
- * s_lock protects against concurrent modifications of s_mount_state,
- * s_blocks_last, s_overhead_last and the content of superblock's
- * buffer pointed to by sbi->s_es.
- *
- * Note: It is used in ext2_show_options() to provide a consistent view
- * of the mount options.
- */
- spinlock_t s_lock;
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
-{
- return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-#endif /* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
deleted file mode 100644
index f42c098..0000000
--- a/include/linux/ext3_fs_i.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * linux/include/linux/ext3_fs_i.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- * from
- *
- * linux/include/linux/minix_fs_i.h
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_I
-#define _LINUX_EXT3_FS_I
-
-#include <linux/rwsem.h>
-#include <linux/rbtree.h>
-#include <linux/seqlock.h>
-#include <linux/mutex.h>
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
- ext3_fsblk_t _rsv_start; /* First byte reserved */
- ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
- struct rb_node rsv_node;
- __u32 rsv_goal_size;
- __u32 rsv_alloc_hit;
- struct ext3_reserve_window rsv_window;
-};
-
-struct ext3_block_alloc_info {
- /* information about reservation window */
- struct ext3_reserve_window_node rsv_window_node;
- /*
- * was i_next_alloc_block in ext3_inode_info
- * is the logical (file-relative) number of the
- * most-recently-allocated block in this file.
- * We use this for detecting linearly ascending allocation requests.
- */
- __u32 last_alloc_logical_block;
- /*
- * Was i_next_alloc_goal in ext3_inode_info
- * is the *physical* companion to i_next_alloc_block.
- * it the physical block number of the block which was most-recentl
- * allocated to this file. This give us the goal (target) for the next
- * allocation when we detect linearly ascending requests.
- */
- ext3_fsblk_t last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
- __le32 i_data[15]; /* unconverted */
- __u32 i_flags;
-#ifdef EXT3_FRAGMENTS
- __u32 i_faddr;
- __u8 i_frag_no;
- __u8 i_frag_size;
-#endif
- ext3_fsblk_t i_file_acl;
- __u32 i_dir_acl;
- __u32 i_dtime;
-
- /*
- * i_block_group is the number of the block group which contains
- * this file's inode. Constant across the lifetime of the inode,
- * it is ued for making block allocation decisions - we try to
- * place a file's data blocks near its inode block, and new inodes
- * near to their parent directory's inode.
- */
- __u32 i_block_group;
- unsigned long i_state_flags; /* Dynamic state flags for ext3 */
-
- /* block reservation info */
- struct ext3_block_alloc_info *i_block_alloc_info;
-
- __u32 i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
- /*
- * Extended attributes can be read independently of the main file
- * data. Taking i_mutex even when reading would cause contention
- * between readers of EAs and writers of regular file data, so
- * instead we synchronize on xattr_sem when reading or changing
- * EAs.
- */
- struct rw_semaphore xattr_sem;
-#endif
-
- struct list_head i_orphan; /* unlinked but open inodes */
-
- /*
- * i_disksize keeps track of what the inode size is ON DISK, not
- * in memory. During truncate, i_size is set to the new size by
- * the VFS prior to calling ext3_truncate(), but the filesystem won't
- * set i_disksize to 0 until the truncate is actually under way.
- *
- * The intent is that i_disksize always represents the blocks which
- * are used by this file. This allows recovery to restart truncate
- * on orphans if we crash during truncate. We actually write i_disksize
- * into the on-disk inode when writing inodes out, instead of i_size.
- *
- * The only time when i_disksize and i_size may be different is when
- * a truncate is in progress. The only things which change i_disksize
- * are ext3_get_block (growth) and ext3_truncate (shrinkth).
- */
- loff_t i_disksize;
-
- /* on-disk additional length */
- __u16 i_extra_isize;
-
- /*
- * truncate_mutex is for serialising ext3_truncate() against
- * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
- * data tree are chopped off during truncate. We can't do that in
- * ext3 because whenever we perform intermediate commits during
- * truncate, the inode and all the metadata blocks *must* be in a
- * consistent state which allows truncation of the orphans to restart
- * during recovery. Hence we must fix the get_block-vs-truncate race
- * by other means, so we have truncate_mutex.
- */
- struct mutex truncate_mutex;
-
- /*
- * Transactions that contain inode's metadata needed to complete
- * fsync and fdatasync, respectively.
- */
- atomic_t i_sync_tid;
- atomic_t i_datasync_tid;
-
- struct inode vfs_inode;
-};
-
-#endif /* _LINUX_EXT3_FS_I */
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
deleted file mode 100644
index 6436525..0000000
--- a/include/linux/ext3_fs_sb.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * linux/include/linux/ext3_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- * from
- *
- * linux/include/linux/minix_fs_sb.h
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_SB
-#define _LINUX_EXT3_FS_SB
-
-#ifdef __KERNEL__
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#endif
-#include <linux/rbtree.h>
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
- unsigned long s_frag_size; /* Size of a fragment in bytes */
- unsigned long s_frags_per_block;/* Number of fragments per block */
- unsigned long s_inodes_per_block;/* Number of inodes per block */
- unsigned long s_frags_per_group;/* Number of fragments in a group */
- unsigned long s_blocks_per_group;/* Number of blocks in a group */
- unsigned long s_inodes_per_group;/* Number of inodes in a group */
- unsigned long s_itb_per_group; /* Number of inode table blocks per group */
- unsigned long s_gdb_count; /* Number of group descriptor blocks */
- unsigned long s_desc_per_block; /* Number of group descriptors per block */
- unsigned long s_groups_count; /* Number of groups in the fs */
- unsigned long s_overhead_last; /* Last calculated overhead */
- unsigned long s_blocks_last; /* Last seen block count */
- struct buffer_head * s_sbh; /* Buffer containing the super block */
- struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
- struct buffer_head ** s_group_desc;
- unsigned long s_mount_opt;
- ext3_fsblk_t s_sb_block;
- uid_t s_resuid;
- gid_t s_resgid;
- unsigned short s_mount_state;
- unsigned short s_pad;
- int s_addr_per_block_bits;
- int s_desc_per_block_bits;
- int s_inode_size;
- int s_first_ino;
- spinlock_t s_next_gen_lock;
- u32 s_next_generation;
- u32 s_hash_seed[4];
- int s_def_hash_version;
- int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */
- struct percpu_counter s_freeblocks_counter;
- struct percpu_counter s_freeinodes_counter;
- struct percpu_counter s_dirs_counter;
- struct blockgroup_lock *s_blockgroup_lock;
-
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
- struct ext3_reserve_window_node s_rsv_window_head;
-
- /* Journaling */
- struct inode * s_journal_inode;
- struct journal_s * s_journal;
- struct list_head s_orphan;
- struct mutex s_orphan_lock;
- struct mutex s_resize_lock;
- unsigned long s_commit_interval;
- struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
- return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-#endif /* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
deleted file mode 100644
index d7b5ddc..0000000
--- a/include/linux/ext3_jbd.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * linux/include/linux/ext3_jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Ext3-specific journaling extensions.
- */
-
-#ifndef _LINUX_EXT3_JBD_H
-#define _LINUX_EXT3_JBD_H
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-
-#define EXT3_JOURNAL(inode) (EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction. */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS 6U
-
-/* Define the minimum size for a transaction which modifies data. This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota). The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
- EXT3_XATTR_TRANS_BLOCKS - 2 + \
- EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
- * generous. We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction. For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA 64U
-
-/* We break up a large truncate or write transaction once the handle's
- * buffer credits gets this low, we need either to extend the
- * transaction or to start a new one. Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates. Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS 12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
-
-#ifdef CONFIG_QUOTA
-/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
- (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
- (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
-
-int
-ext3_mark_iloc_dirty(handle_t *handle,
- struct inode *inode,
- struct ext3_iloc *iloc);
-
-/*
- * On success, We end up with an outstanding reference count against
- * iloc->bh. This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
- struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD. The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems only nee one fs. This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
- struct buffer_head *bh)
-{
- journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
- struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
- struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
- struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
- struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
- unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
- handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
- handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
- __ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
- __ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
- __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
- __ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
- __ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
- __ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
- return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
- __ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
- return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
- return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
- return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
- return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
- return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
- if (!S_ISREG(inode->i_mode))
- return 1;
- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
- return 1;
- if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
- return 1;
- return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
- if (!S_ISREG(inode->i_mode))
- return 0;
- if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
- return 0;
- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
- return 1;
- return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
- if (!S_ISREG(inode->i_mode))
- return 0;
- if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
- return 0;
- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
- return 1;
- return 0;
-}
-
-#endif /* _LINUX_EXT3_JBD_H */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 4db7b68..cdc9b71 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -2,6 +2,7 @@
#define _LINUX_FIREWIRE_H
#include <linux/completion.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -64,8 +65,6 @@
#define CSR_MODEL 0x17
#define CSR_DIRECTORY_ID 0x20
-struct device;
-
struct fw_csr_iterator {
const u32 *p;
const u32 *end;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index dd478fc..5f3f3be 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -144,12 +144,14 @@ struct event_filter;
enum trace_reg {
TRACE_REG_REGISTER,
TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
TRACE_REG_PERF_REGISTER,
TRACE_REG_PERF_UNREGISTER,
TRACE_REG_PERF_OPEN,
TRACE_REG_PERF_CLOSE,
TRACE_REG_PERF_ADD,
TRACE_REG_PERF_DEL,
+#endif
};
struct ftrace_event_call;
diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild
new file mode 100644
index 0000000..271a770
--- /dev/null
+++ b/include/linux/hsi/Kbuild
@@ -0,0 +1 @@
+header-y += hsi_char.h
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
new file mode 100644
index 0000000..4b17806
--- /dev/null
+++ b/include/linux/hsi/hsi.h
@@ -0,0 +1,410 @@
+/*
+ * HSI core header file.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_H__
+#define __LINUX_HSI_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+/* HSI message ttype */
+#define HSI_MSG_READ 0
+#define HSI_MSG_WRITE 1
+
+/* HSI configuration values */
+enum {
+ HSI_MODE_STREAM = 1,
+ HSI_MODE_FRAME,
+};
+
+enum {
+ HSI_FLOW_SYNC, /* Synchronized flow */
+ HSI_FLOW_PIPE, /* Pipelined flow */
+};
+
+enum {
+ HSI_ARB_RR, /* Round-robin arbitration */
+ HSI_ARB_PRIO, /* Channel priority arbitration */
+};
+
+#define HSI_MAX_CHANNELS 16
+
+/* HSI message status codes */
+enum {
+ HSI_STATUS_COMPLETED, /* Message transfer is completed */
+ HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */
+ HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */
+ HSI_STATUS_QUEUED, /* Message waiting to be served */
+ HSI_STATUS_ERROR, /* Error when message transfer was ongoing */
+};
+
+/* HSI port event codes */
+enum {
+ HSI_EVENT_START_RX,
+ HSI_EVENT_STOP_RX,
+};
+
+/**
+ * struct hsi_config - Configuration for RX/TX HSI modules
+ * @mode: Bit transmission mode (STREAM or FRAME)
+ * @channels: Number of channels to use [1..16]
+ * @speed: Max bit transmission speed (Kbit/s)
+ * @flow: RX flow type (SYNCHRONIZED or PIPELINE)
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct hsi_config {
+ unsigned int mode;
+ unsigned int channels;
+ unsigned int speed;
+ union {
+ unsigned int flow; /* RX only */
+ unsigned int arb_mode; /* TX only */
+ };
+};
+
+/**
+ * struct hsi_board_info - HSI client board info
+ * @name: Name for the HSI device
+ * @hsi_id: HSI controller id where the client sits
+ * @port: Port number in the controller where the client sits
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @platform_data: Platform related data
+ * @archdata: Architecture-dependent device data
+ */
+struct hsi_board_info {
+ const char *name;
+ unsigned int hsi_id;
+ unsigned int port;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ void *platform_data;
+ struct dev_archdata *archdata;
+};
+
+#ifdef CONFIG_HSI_BOARDINFO
+extern int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len);
+#else
+static inline int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len)
+{
+ return 0;
+}
+#endif /* CONFIG_HSI_BOARDINFO */
+
+/**
+ * struct hsi_client - HSI client attached to an HSI port
+ * @device: Driver model representation of the device
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @hsi_start_rx: Called after incoming wake line goes high
+ * @hsi_stop_rx: Called after incoming wake line goes low
+ */
+struct hsi_client {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ void (*hsi_start_rx)(struct hsi_client *cl);
+ void (*hsi_stop_rx)(struct hsi_client *cl);
+ /* private: */
+ unsigned int pclaimed:1;
+ struct list_head link;
+};
+
+#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
+
+static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
+{
+ dev_set_drvdata(&cl->device, data);
+}
+
+static inline void *hsi_client_drvdata(struct hsi_client *cl)
+{
+ return dev_get_drvdata(&cl->device);
+}
+
+/**
+ * struct hsi_client_driver - Driver associated to an HSI client
+ * @driver: Driver model representation of the driver
+ */
+struct hsi_client_driver {
+ struct device_driver driver;
+};
+
+#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
+ driver)
+
+int hsi_register_client_driver(struct hsi_client_driver *drv);
+
+static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+
+/**
+ * struct hsi_msg - HSI message descriptor
+ * @link: Free to use by the current descriptor owner
+ * @cl: HSI device client that issues the transfer
+ * @sgt: Head of the scatterlist array
+ * @context: Client context data associated to the transfer
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel where to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame. Data buffers are
+ * ignored in the request.
+ */
+struct hsi_msg {
+ struct list_head link;
+ struct hsi_client *cl;
+ struct sg_table sgt;
+ void *context;
+
+ void (*complete)(struct hsi_msg *msg);
+ void (*destructor)(struct hsi_msg *msg);
+
+ int status;
+ unsigned int actual_len;
+ unsigned int channel;
+ unsigned int ttype:1;
+ unsigned int break_frame:1;
+};
+
+struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
+void hsi_free_msg(struct hsi_msg *msg);
+
+/**
+ * struct hsi_port - HSI port device
+ * @device: Driver model representation of the device
+ * @tx_cfg: Current TX path configuration
+ * @rx_cfg: Current RX path configuration
+ * @num: Port number
+ * @shared: Set when port can be shared by different clients
+ * @claimed: Reference count of clients which claimed the port
+ * @lock: Serialize port claim
+ * @async: Asynchronous transfer callback
+ * @setup: Callback to set the HSI client configuration
+ * @flush: Callback to clean the HW state and destroy all pending transfers
+ * @start_tx: Callback to inform that a client wants to TX data
+ * @stop_tx: Callback to inform that a client no longer wishes to TX data
+ * @release: Callback to inform that a client no longer uses the port
+ * @clients: List of hsi_clients using the port.
+ * @clock: Lock to serialize access to the clients list.
+ */
+struct hsi_port {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ unsigned int num;
+ unsigned int shared:1;
+ int claimed;
+ struct mutex lock;
+ int (*async)(struct hsi_msg *msg);
+ int (*setup)(struct hsi_client *cl);
+ int (*flush)(struct hsi_client *cl);
+ int (*start_tx)(struct hsi_client *cl);
+ int (*stop_tx)(struct hsi_client *cl);
+ int (*release)(struct hsi_client *cl);
+ struct list_head clients;
+ spinlock_t clock;
+};
+
+#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
+#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
+
+void hsi_event(struct hsi_port *port, unsigned int event);
+int hsi_claim_port(struct hsi_client *cl, unsigned int share);
+void hsi_release_port(struct hsi_client *cl);
+
+static inline int hsi_port_claimed(struct hsi_client *cl)
+{
+ return cl->pclaimed;
+}
+
+static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
+{
+ dev_set_drvdata(&port->device, data);
+}
+
+static inline void *hsi_port_drvdata(struct hsi_port *port)
+{
+ return dev_get_drvdata(&port->device);
+}
+
+/**
+ * struct hsi_controller - HSI controller device
+ * @device: Driver model representation of the device
+ * @owner: Pointer to the module owning the controller
+ * @id: HSI controller ID
+ * @num_ports: Number of ports in the HSI controller
+ * @port: Array of HSI ports
+ */
+struct hsi_controller {
+ struct device device;
+ struct module *owner;
+ unsigned int id;
+ unsigned int num_ports;
+ struct hsi_port *port;
+};
+
+#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
+
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
+void hsi_free_controller(struct hsi_controller *hsi);
+int hsi_register_controller(struct hsi_controller *hsi);
+void hsi_unregister_controller(struct hsi_controller *hsi);
+
+static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
+ void *data)
+{
+ dev_set_drvdata(&hsi->device, data);
+}
+
+static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
+{
+ return dev_get_drvdata(&hsi->device);
+}
+
+static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
+ unsigned int num)
+{
+ return (num < hsi->num_ports) ? &hsi->port[num] : NULL;
+}
+
+/*
+ * API for HSI clients
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
+
+/**
+ * hsi_id - Get the HSI controller ID associated with a client
+ * @cl: Pointer to an HSI client
+ *
+ * Return the ID of the controller the client is attached to
+ */
+static inline unsigned int hsi_id(struct hsi_client *cl)
+{
+ return to_hsi_controller(cl->device.parent->parent)->id;
+}
+
+/**
+ * hsi_port_id - Gets the port number a client is attached to
+ * @cl: Pointer to HSI client
+ *
+ * Return the port number the client is attached to
+ */
+static inline unsigned int hsi_port_id(struct hsi_client *cl)
+{
+ return to_hsi_port(cl->device.parent)->num;
+}
+
+/**
+ * hsi_setup - Configure the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * When sharing ports, clients should either rely on a single
+ * client's setup or use the same setup for all of them.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_setup(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->setup(cl);
+}
+
+/**
+ * hsi_flush - Flush all pending transactions on the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * This function will destroy all pending hsi_msg in the port and reset
+ * the HW port so it is ready to receive and transmit from a clean state.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_flush(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->flush(cl);
+}
+
+/**
+ * hsi_async_read - Submit a read transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_READ;
+ return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_async_write - Submit a write transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_WRITE;
+ return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_start_tx - Signal the port that the client wants to start a TX
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_start_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->start_tx(cl);
+}
+
+/**
+ * hsi_stop_tx - Signal the port that the client no longer wants to transmit
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_stop_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->stop_tx(cl);
+}
+#endif /* __LINUX_HSI_H__ */
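As a rough, hedged illustration of the client API above (all names such as example_hsi_probe, example_complete and the channel number are hypothetical, not part of this patch), a minimal client driver could claim its port, apply its configuration and queue a read like this:

/* Hypothetical sketch of an HSI client built on the API above. */
static void example_complete(struct hsi_msg *msg)
{
	/* msg->status and msg->actual_len describe the finished transfer */
	hsi_free_msg(msg);
}

static int example_hsi_probe(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct hsi_msg *msg;
	int err;

	err = hsi_claim_port(cl, 0);		/* exclusive claim */
	if (err < 0)
		return err;

	err = hsi_setup(cl);			/* apply cl->tx_cfg / cl->rx_cfg */
	if (err < 0)
		goto release;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one scatterlist fragment */
	if (!msg) {
		err = -ENOMEM;
		goto release;
	}
	/* sg_set_buf() on msg->sgt.sgl to attach a data buffer is omitted */
	msg->cl = cl;
	msg->channel = 0;			/* hypothetical channel */
	msg->complete = example_complete;
	msg->destructor = example_complete;

	err = hsi_async_read(cl, msg);		/* queue an RX transfer */
	if (err < 0) {
		hsi_free_msg(msg);
		goto release;
	}
	return 0;

release:
	hsi_release_port(cl);
	return err;
}

static struct hsi_client_driver example_driver = {
	.driver = {
		.name	= "example-hsi-client",
		.owner	= THIS_MODULE,
		.probe	= example_hsi_probe,
	},
};
/* registered from module init with hsi_register_client_driver(&example_driver) */
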
diff --git a/include/linux/hsi/hsi_char.h b/include/linux/hsi/hsi_char.h
new file mode 100644
index 0000000..76160b4
--- /dev/null
+++ b/include/linux/hsi/hsi_char.h
@@ -0,0 +1,63 @@
+/*
+ * Part of the HSI character device driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos at nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+
+#ifndef __HSI_CHAR_H
+#define __HSI_CHAR_H
+
+#define HSI_CHAR_MAGIC 'k'
+#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num)
+
+#define HSC_RESET HSC_IO(16)
+#define HSC_SET_PM HSC_IO(17)
+#define HSC_SEND_BREAK HSC_IO(18)
+#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config)
+#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config)
+#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config)
+#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config)
+
+#define HSC_PM_DISABLE 0
+#define HSC_PM_ENABLE 1
+
+#define HSC_MODE_STREAM 1
+#define HSC_MODE_FRAME 2
+#define HSC_FLOW_SYNC 0
+#define HSC_ARB_RR 0
+#define HSC_ARB_PRIO 1
+
+struct hsc_rx_config {
+ uint32_t mode;
+ uint32_t flow;
+ uint32_t channels;
+};
+
+struct hsc_tx_config {
+ uint32_t mode;
+ uint32_t channels;
+ uint32_t speed;
+ uint32_t arb_mode;
+};
+
+#endif /* __HSI_CHAR_H */
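Purely for illustration, a user-space program would drive the character device with the HSC_* ioctls roughly as follows; the device node name below is an assumption, it is not defined by this header:

/* Hypothetical user-space sketch using the HSC_* ioctls above. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/hsi/hsi_char.h>

int main(void)
{
	struct hsc_rx_config rx = {
		.mode		= HSC_MODE_FRAME,
		.flow		= HSC_FLOW_SYNC,
		.channels	= 1,
	};
	int fd = open("/dev/hsi_char0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, HSC_RESET) < 0)
		perror("HSC_RESET");
	if (ioctl(fd, HSC_SET_RX, &rx) < 0)
		perror("HSC_SET_RX");
	return 0;
}
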
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index 79c4f26..18a5d02 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -22,7 +22,7 @@
#define EQL_DEFAULT_SLAVE_PRIORITY 28800
#define EQL_DEFAULT_MAX_SLAVES 4
#define EQL_DEFAULT_MTU 576
-#define EQL_DEFAULT_RESCHED_IVAL 100
+#define EQL_DEFAULT_RESCHED_IVAL HZ
#define EQL_ENSLAVE (SIOCDEVPRIVATE)
#define EQL_EMANCIPATE (SIOCDEVPRIVATE + 1)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a5375e7..645231c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -430,16 +430,10 @@ extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
* Most likely, you want to use tracing_on/tracing_off.
*/
#ifdef CONFIG_RING_BUFFER
-void tracing_on(void);
-void tracing_off(void);
/* trace_off_permanent stops recording with no way to bring it back */
void tracing_off_permanent(void);
-int tracing_is_on(void);
#else
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
static inline void tracing_off_permanent(void) { }
-static inline int tracing_is_on(void) { return 0; }
#endif
enum ftrace_dump_mode {
@@ -449,6 +443,10 @@ enum ftrace_dump_mode {
};
#ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+
extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);
@@ -533,6 +531,11 @@ static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline void trace_dump_stack(void) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+
static inline int
trace_printk(const char *fmt, ...)
{
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fa39183..c4d2fc1 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -63,7 +63,8 @@ enum kgdb_bptype {
BP_HARDWARE_BREAKPOINT,
BP_WRITE_WATCHPOINT,
BP_READ_WATCHPOINT,
- BP_ACCESS_WATCHPOINT
+ BP_ACCESS_WATCHPOINT,
+ BP_POKE_BREAKPOINT,
};
enum kgdb_bpstate {
@@ -207,8 +208,8 @@ extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
/* Optional functions. */
extern int kgdb_validate_break_address(unsigned long addr);
-extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
-extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
+extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
+extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
* kgdb_arch_late - Perform any architecture specific initalization.
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 9efeae6..dd99c329 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -110,12 +110,29 @@ call_usermodehelper(char *path, char **argv, char **envp, int wait)
extern struct ctl_table usermodehelper_table[];
+enum umh_disable_depth {
+ UMH_ENABLED = 0,
+ UMH_FREEZING,
+ UMH_DISABLED,
+};
+
extern void usermodehelper_init(void);
-extern int usermodehelper_disable(void);
-extern void usermodehelper_enable(void);
-extern bool usermodehelper_is_disabled(void);
-extern void read_lock_usermodehelper(void);
-extern void read_unlock_usermodehelper(void);
+extern int __usermodehelper_disable(enum umh_disable_depth depth);
+extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
+
+static inline int usermodehelper_disable(void)
+{
+ return __usermodehelper_disable(UMH_DISABLED);
+}
+
+static inline void usermodehelper_enable(void)
+{
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
+}
+
+extern int usermodehelper_read_trylock(void);
+extern long usermodehelper_read_lock_wait(long timeout);
+extern void usermodehelper_read_unlock(void);
#endif /* __LINUX_KMOD_H__ */
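The read-side calls replace the old read_lock_usermodehelper()/read_unlock_usermodehelper() pair; a hedged sketch of a caller that must not race with helper disabling (the helper path and the surrounding function are hypothetical):

/* Hypothetical caller serialized against UMH disabling. */
static int example_request_helper(void)
{
	char *argv[] = { "/sbin/example-helper", NULL };	/* hypothetical path */
	char *envp[] = { "HOME=/", NULL };
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret)				/* -EAGAIN while helpers are disabled */
		return ret;

	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	usermodehelper_read_unlock();
	return ret;
}
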
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index eab507f..fad48aa 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -22,6 +22,23 @@
#include <linux/key.h>
#include <linux/skbuff.h>
+struct lsm_network_audit {
+ int netif;
+ struct sock *sk;
+ u16 family;
+ __be16 dport;
+ __be16 sport;
+ union {
+ struct {
+ __be32 daddr;
+ __be32 saddr;
+ } v4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } v6;
+ } fam;
+};
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
@@ -41,23 +58,7 @@ struct common_audit_data {
struct path path;
struct dentry *dentry;
struct inode *inode;
- struct {
- int netif;
- struct sock *sk;
- u16 family;
- __be16 dport;
- __be16 sport;
- union {
- struct {
- __be32 daddr;
- __be32 saddr;
- } v4;
- struct {
- struct in6_addr daddr;
- struct in6_addr saddr;
- } v6;
- } fam;
- } net;
+ struct lsm_network_audit *net;
int cap;
int ipc_id;
struct task_struct *tsk;
@@ -72,64 +73,15 @@ struct common_audit_data {
/* this union contains LSM specific data */
union {
#ifdef CONFIG_SECURITY_SMACK
- /* SMACK data */
- struct smack_audit_data {
- const char *function;
- char *subject;
- char *object;
- char *request;
- int result;
- } smack_audit_data;
+ struct smack_audit_data *smack_audit_data;
#endif
#ifdef CONFIG_SECURITY_SELINUX
- /* SELinux data */
- struct {
- u32 ssid;
- u32 tsid;
- u16 tclass;
- u32 requested;
- u32 audited;
- u32 denied;
- /*
- * auditdeny is a bit tricky and unintuitive. See the
- * comments in avc.c for it's meaning and usage.
- */
- u32 auditdeny;
- struct av_decision *avd;
- int result;
- } selinux_audit_data;
+ struct selinux_audit_data *selinux_audit_data;
#endif
#ifdef CONFIG_SECURITY_APPARMOR
- struct {
- int error;
- int op;
- int type;
- void *profile;
- const char *name;
- const char *info;
- union {
- void *target;
- struct {
- long pos;
- void *target;
- } iface;
- struct {
- int rlim;
- unsigned long max;
- } rlim;
- struct {
- const char *target;
- u32 request;
- u32 denied;
- uid_t ouid;
- } fs;
- };
- } apparmor_audit_data;
+ struct apparmor_audit_data *apparmor_audit_data;
#endif
- };
- /* these callback will be implemented by a specific LSM */
- void (*lsm_pre_audit)(struct audit_buffer *, void *);
- void (*lsm_post_audit)(struct audit_buffer *, void *);
+ }; /* per LSM data pointer union */
};
#define v4info fam.v4
@@ -146,6 +98,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
{ memset((_d), 0, sizeof(struct common_audit_data)); \
(_d)->type = LSM_AUDIT_DATA_##_t; }
-void common_lsm_audit(struct common_audit_data *a);
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *));
#endif
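With the per-LSM blobs turned into pointers and the callbacks dropped from the struct, an LSM now passes its pre/post routines straight to common_lsm_audit(). A minimal sketch, with hypothetical function names:

/* Hypothetical LSM call site after this change. */
static void example_pre_audit(struct audit_buffer *ab, void *a)
{
	struct common_audit_data *ad = a;

	audit_log_format(ab, "example_lsm pid=%d", ad->tsk ? ad->tsk->pid : 0);
}

static void example_post_audit(struct audit_buffer *ab, void *a)
{
	/* append LSM specific decision data here */
}

static void example_audit(struct common_audit_data *ad)
{
	common_lsm_audit(ad, example_pre_audit, example_post_audit);
}
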
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
index 8f82575..18543e2 100644
--- a/include/linux/mtio.h
+++ b/include/linux/mtio.h
@@ -194,6 +194,7 @@ struct mtpos {
#define MT_ST_SYSV 0x1000
#define MT_ST_NOWAIT 0x2000
#define MT_ST_SILI 0x4000
+#define MT_ST_NOWAIT_EOF 0x8000
/* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
#define MT_ST_CLEAR_DEFAULT 0xfffff
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bd9f55a..ddbb6a9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -299,18 +299,31 @@ struct perf_event_mmap_page {
/*
* Bits needed to read the hw events in user-space.
*
- * u32 seq;
- * s64 count;
+ * u32 seq, time_mult, time_shift, idx, width;
+ * u64 count, enabled, running;
+ * u64 cyc, time_offset;
+ * s64 pmc = 0;
*
* do {
* seq = pc->lock;
- *
* barrier()
- * if (pc->index) {
- * count = pmc_read(pc->index - 1);
- * count += pc->offset;
- * } else
- * goto regular_read;
+ *
+ * enabled = pc->time_enabled;
+ * running = pc->time_running;
+ *
+ * if (pc->cap_usr_time && enabled != running) {
+ * cyc = rdtsc();
+ * time_offset = pc->time_offset;
+ * time_mult = pc->time_mult;
+ * time_shift = pc->time_shift;
+ * }
+ *
+ * idx = pc->index;
+ * count = pc->offset;
+ * if (pc->cap_usr_rdpmc && idx) {
+ * width = pc->pmc_width;
+ * pmc = rdpmc(idx - 1);
+ * }
*
* barrier();
* } while (pc->lock != seq);
@@ -323,14 +336,57 @@ struct perf_event_mmap_page {
__s64 offset; /* add to hardware event value */
__u64 time_enabled; /* time event active */
__u64 time_running; /* time event on cpu */
- __u32 time_mult, time_shift;
+ union {
+ __u64 capabilities;
+ __u64 cap_usr_time : 1,
+ cap_usr_rdpmc : 1,
+ cap_____res : 62;
+ };
+
+ /*
+ * If cap_usr_rdpmc this field provides the bit-width of the value
+ * read using the rdpmc() or equivalent instruction. This can be used
+ * to sign extend the result like:
+ *
+ * pmc <<= 64 - width;
+ * pmc >>= 64 - width; // signed shift right
+ * count += pmc;
+ */
+ __u16 pmc_width;
+
+ /*
+ * If cap_usr_time the below fields can be used to compute the time
+ * delta since time_enabled (in ns) using rdtsc or similar.
+ *
+ * u64 quot, rem;
+ * u64 delta;
+ *
+ * quot = (cyc >> time_shift);
+ * rem = cyc & ((1 << time_shift) - 1);
+ * delta = time_offset + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ *
+ * Where time_offset, time_mult, time_shift and cyc are read in the
+ * seqcount loop described above. This delta can then be added to
+ * enabled and possibly running (if idx), improving the scaling:
+ *
+ * enabled += delta;
+ * if (idx)
+ * running += delta;
+ *
+ * quot = count / running;
+ * rem = count % running;
+ * count = quot * enabled + (rem * enabled) / running;
+ */
+ __u16 time_shift;
+ __u32 time_mult;
__u64 time_offset;
/*
* Hole for extension of the self monitor capabilities
*/
- __u64 __reserved[121]; /* align to 1k */
+ __u64 __reserved[120]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -550,6 +606,7 @@ struct perf_guest_info_callbacks {
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
+#include <linux/sysfs.h>
#include <asm/local.h>
#define PERF_MAX_STACK_DEPTH 255
@@ -1291,5 +1348,18 @@ do { \
register_cpu_notifier(&fn##_nb); \
} while (0)
+
+#define PMU_FORMAT_ATTR(_name, _format) \
+static ssize_t \
+_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+ \
+static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
+
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */
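PMU_FORMAT_ATTR() trims the sysfs boilerplate in PMU drivers; a hypothetical driver would publish its config bit layout roughly like this (all names are illustrative, and wiring the group into the pmu is an assumption):

/* Hypothetical PMU "format" group built with the new helper. */
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *example_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group example_format_group = {
	.name	= "format",
	.attrs	= example_format_attrs,
};
/* typically exposed through the pmu's attr_groups array */
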
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index d056263..b0f2c56 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -4,8 +4,8 @@
* GPL v2 Only
*/
-#ifndef __ATMEL_NAND_H__
-#define __ATMEL_NAND_H__
+#ifndef __ATMEL_H__
+#define __ATMEL_H__
#include <linux/mtd/nand.h>
@@ -24,4 +24,4 @@ struct atmel_nand_data {
unsigned int num_parts;
};
-#endif /* __ATMEL_NAND_H__ */
+#endif /* __ATMEL_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 2e9191a..233149c 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -8,6 +8,7 @@
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
+#include <linux/workqueue.h>
enum {
PM_QOS_RESERVED = 0,
@@ -29,6 +30,7 @@ enum {
struct pm_qos_request {
struct plist_node node;
int pm_qos_class;
+ struct delayed_work work; /* for pm_qos_update_request_timeout */
};
struct dev_pm_qos_request {
@@ -73,6 +75,8 @@ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value);
+void pm_qos_update_request_timeout(struct pm_qos_request *req,
+ s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);
int pm_qos_request(int pm_qos_class);
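A driver would use the timed variant when a latency cap is only needed briefly; once timeout_us expires, the request falls back to PM_QOS_DEFAULT_VALUE on its own. A hedged sketch with illustrative values:

/* Hypothetical latency request that only needs to hold for 10 ms. */
static struct pm_qos_request example_qos_req;

static void example_start_burst(void)
{
	if (!pm_qos_request_active(&example_qos_req))
		pm_qos_add_request(&example_qos_req, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);

	/* cap CPU/DMA latency at 100 us for the next 10 ms only */
	pm_qos_update_request_timeout(&example_qos_req, 100, 10 * USEC_PER_MSEC);
}
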
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 7abb160..b021084 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -71,7 +71,7 @@ struct regulator_state {
* @uV_offset: Offset applied to voltages from consumer to compensate for
* voltage drops.
*
- * @min_uA: Smallest consumers consumers may set.
+ * @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
*
* @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -134,10 +134,8 @@ struct regulation_constraints {
/**
* struct regulator_consumer_supply - supply -> device mapping
*
- * This maps a supply name to a device. Only one of dev or dev_name
- * can be specified. Use of dev_name allows support for buses which
- * make struct device available late such as I2C and is the preferred
- * form.
+ * This maps a supply name to a device. Use of dev_name allows support for
+ * buses which make struct device available late such as I2C.
*
* @dev_name: Result of dev_name() for the consumer.
* @supply: Name for the supply.
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 67be037..7be2e88 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
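Unlike the nesting disable()/enable() counter, the new off/on calls behave as a latch: one record_on() undoes a record_off() regardless of how often it was called. A small hedged sketch:

/* Hypothetical debug hook: freeze a ring buffer when a condition trips. */
static void example_freeze_buffer(struct ring_buffer *buffer, bool bad)
{
	if (bad && ring_buffer_record_is_on(buffer))
		ring_buffer_record_off(buffer);	/* latch writes off */
	else if (!bad)
		ring_buffer_record_on(buffer);	/* a single call re-enables */
}
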
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d0018d2..8efd28a 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -96,7 +96,6 @@ struct virtio_driver {
void (*config_changed)(struct virtio_device *dev);
#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
- int (*thaw)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
#endif
};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 248fb05..83d800c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -620,8 +620,10 @@ struct sta_bss_parameters {
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
- * @signal: signal strength of last received packet in dBm
- * @signal_avg: signal strength average in dBm
+ * @signal: the signal strength, type depends on the wiphy's signal_type
+ NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: avg signal strength, type depends on the wiphy's signal_type
+ NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @txrate: current unicast bitrate from this station
* @rxrate: current unicast bitrate to this station
* @rx_packets: packets received from this station
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 9c23ee8..917741b 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -261,7 +261,8 @@ struct iscsi_uevent {
} host_event;
struct msg_ping_comp {
uint32_t host_no;
- uint32_t status;
+ uint32_t status; /* enum
+ * iscsi_ping_status_code */
uint32_t pid; /* unique ping id associated
with each ping request */
uint32_t data_size;
@@ -483,6 +484,20 @@ enum iscsi_port_state {
ISCSI_PORT_STATE_UP = 0x2,
};
+/* iSCSI PING status/error code */
+enum iscsi_ping_status_code {
+ ISCSI_PING_SUCCESS = 0,
+ ISCSI_PING_FW_DISABLED = 0x1,
+ ISCSI_PING_IPADDR_INVALID = 0x2,
+ ISCSI_PING_LINKLOCAL_IPV6_ADDR_INVALID = 0x3,
+ ISCSI_PING_TIMEOUT = 0x4,
+ ISCSI_PING_INVALID_DEST_ADDR = 0x5,
+ ISCSI_PING_OVERSIZE_PACKET = 0x6,
+ ISCSI_PING_ICMP_ERROR = 0x7,
+ ISCSI_PING_MAX_REQ_EXCEEDED = 0x8,
+ ISCSI_PING_NO_ARP_RECEIVED = 0x9,
+};
+
#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
@@ -578,6 +593,6 @@ struct iscsi_chap_rec {
char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN];
uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN];
uint8_t password_length;
-} __packed;
+};
#endif
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 5a35a2a..cfdb55f 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -165,7 +165,8 @@ struct fcoe_ctlr {
* @switch_name: WWN of switch from advertisement
* @fabric_name: WWN of fabric from advertisement
* @fc_map: FC_MAP value from advertisement
- * @fcf_mac: Ethernet address of the FCF
+ * @fcf_mac: Ethernet address of the FCF for FIP traffic
+ * @fcoe_mac: Ethernet address of the FCF for FCoE traffic
* @vfid: virtual fabric ID
* @pri: selection priority, smaller values are better
* @flogi_sent: current FLOGI sent to this FCF
@@ -188,6 +189,7 @@ struct fcoe_fcf {
u32 fc_map;
u16 vfid;
u8 fcf_mac[ETH_ALEN];
+ u8 fcoe_mac[ETH_ALEN];
u8 pri;
u8 flogi_sent;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fbc7b1a..ea7a203 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -295,7 +295,7 @@ TRACE_EVENT(sched_process_exec,
TP_fast_assign(
__assign_str(filename, bprm->filename);
__entry->pid = p->pid;
- __entry->old_pid = p->pid;
+ __entry->old_pid = old_pid;
),
TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 2ea2fdc..4f4d449 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -7,11 +7,13 @@ extern void xen_swiotlb_init(int verbose);
extern void
*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs);
extern void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
diff --git a/init/Kconfig b/init/Kconfig
index 72f33fa..6cfd71d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1414,8 +1414,8 @@ endif # MODULES
config INIT_ALL_POSSIBLE
bool
help
- Back when each arch used to define their own cpu_online_map and
- cpu_possible_map, some of them chose to initialize cpu_possible_map
+ Back when each arch used to define their own cpu_online_mask and
+ cpu_possible_mask, some of them chose to initialize cpu_possible_mask
with all 1s, and others with all 0s. When they were centralised,
it was better to provide this option than to break all the archs
and have several arch maintainers pursuing me down dark alleys.
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 3098a38..9047330 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -2,7 +2,6 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
-#include <linux/ext2_fs.h>
#include <linux/romfs_fs.h>
#include <linux/initrd.h>
#include <linux/sched.h>
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 01f1306..6212586 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -54,20 +54,19 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
{
const int size = 512;
struct minix_super_block *minixsb;
- struct ext2_super_block *ext2sb;
struct romfs_super_block *romfsb;
struct cramfs_super *cramfsb;
struct squashfs_super_block *squashfsb;
int nblocks = -1;
unsigned char *buf;
const char *compress_name;
+ unsigned long n;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
minixsb = (struct minix_super_block *) buf;
- ext2sb = (struct ext2_super_block *) buf;
romfsb = (struct romfs_super_block *) buf;
cramfsb = (struct cramfs_super *) buf;
squashfsb = (struct squashfs_super_block *) buf;
@@ -150,12 +149,12 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
}
/* Try ext2 */
- if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) {
+ n = ext2_image_size(buf);
+ if (n) {
printk(KERN_NOTICE
"RAMDISK: ext2 filesystem found at block %d\n",
start_block);
- nblocks = le32_to_cpu(ext2sb->s_blocks_count) <<
- le32_to_cpu(ext2sb->s_log_block_size);
+ nblocks = n;
goto done;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b96ad75..14f7070 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -270,11 +270,11 @@ static struct file_system_type cpuset_fs_type = {
* are online. If none are online, walk up the cpuset hierarchy
* until we find one that does have some online cpus. If we get
* all the way to the top and still haven't found any online cpus,
- * return cpu_online_map. Or if passed a NULL cs from an exit'ing
- * task, return cpu_online_map.
+ * return cpu_online_mask. Or if passed a NULL cs from an exit'ing
+ * task, return cpu_online_mask.
*
* One way or another, we guarantee to return some non-empty subset
- * of cpu_online_map.
+ * of cpu_online_mask.
*
* Call with callback_mutex held.
*/
@@ -867,7 +867,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
int retval;
int is_load_balanced;
- /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
+ /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
if (cs == &top_cpuset)
return -EACCES;
@@ -2149,7 +2149,7 @@ void __init cpuset_init_smp(void)
*
* Description: Returns the cpumask_var_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of cpu_online_map, even if this means going outside the
+ * subset of cpu_online_mask, even if this means going outside the
* tasks cpuset.
**/
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1dc53ba..0557f24 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -160,37 +160,39 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
* Weak aliases for breakpoint management,
* can be overriden by architectures when needed:
*/
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
- err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
if (err)
return err;
-
- return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
+ err = probe_kernel_write((char *)bpt->bpt_addr,
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ return err;
}
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
+ return probe_kernel_write((char *)bpt->bpt_addr,
+ (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
int __weak kgdb_validate_break_address(unsigned long addr)
{
- char tmp_variable[BREAK_INSTR_SIZE];
+ struct kgdb_bkpt tmp;
int err;
- /* Validate setting the breakpoint and then removing it. In the
+ /* Validate setting the breakpoint and then removing it. If the
* remove fails, the kernel needs to emit a bad message because we
* are deep trouble not being able to put things back the way we
* found them.
*/
- err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+ tmp.bpt_addr = addr;
+ err = kgdb_arch_set_breakpoint(&tmp);
if (err)
return err;
- err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+ err = kgdb_arch_remove_breakpoint(&tmp);
if (err)
printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
"memory destroyed at: %lx", addr);
@@ -234,7 +236,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
*/
int dbg_activate_sw_breakpoints(void)
{
- unsigned long addr;
int error;
int ret = 0;
int i;
@@ -243,16 +244,15 @@ int dbg_activate_sw_breakpoints(void)
if (kgdb_break[i].state != BP_SET)
continue;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_set_breakpoint(addr,
- kgdb_break[i].saved_instr);
+ error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
if (error) {
ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+ printk(KERN_INFO "KGDB: BP install failed: %lx",
+ kgdb_break[i].bpt_addr);
continue;
}
- kgdb_flush_swbreak_addr(addr);
+ kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
kgdb_break[i].state = BP_ACTIVE;
}
return ret;
@@ -301,7 +301,6 @@ int dbg_set_sw_break(unsigned long addr)
int dbg_deactivate_sw_breakpoints(void)
{
- unsigned long addr;
int error;
int ret = 0;
int i;
@@ -309,15 +308,14 @@ int dbg_deactivate_sw_breakpoints(void)
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
if (kgdb_break[i].state != BP_ACTIVE)
continue;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
+ error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+ printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
+ kgdb_break[i].bpt_addr);
ret = error;
}
- kgdb_flush_swbreak_addr(addr);
+ kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
kgdb_break[i].state = BP_SET;
}
return ret;
@@ -351,7 +349,6 @@ int kgdb_isremovedbreak(unsigned long addr)
int dbg_remove_all_break(void)
{
- unsigned long addr;
int error;
int i;
@@ -359,12 +356,10 @@ int dbg_remove_all_break(void)
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
if (kgdb_break[i].state != BP_ACTIVE)
goto setundefined;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
+ error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error)
printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
- addr);
+ kgdb_break[i].bpt_addr);
setundefined:
kgdb_break[i].state = BP_UNDEFINED;
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 9b5f17d..bb9520f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -743,7 +743,7 @@ kdb_printit:
kdb_input_flush();
c = console_drivers;
- if (!dbg_io_ops->is_console) {
+ if (dbg_io_ops && !dbg_io_ops->is_console) {
len = strlen(moreprompt);
cp = moreprompt;
while (len--) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b50357..a6a9ec4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3348,7 +3348,7 @@ static void calc_timer_values(struct perf_event *event,
*running = ctx_time - event->tstamp_running;
}
-void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
@@ -3398,7 +3398,7 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
- perf_update_user_clock(userpg, now);
+ arch_perf_update_userpage(userpg, now);
barrier();
++userpg->lock;
@@ -7116,6 +7116,13 @@ void __init perf_event_init(void)
/* do not patch jump label more than once per second */
jump_label_rate_limit(&perf_sched_events, HZ);
+
+ /*
+ * Build time assertion that we keep the data_head at the intended
+ * location. IOW, validation we got the __reserved[] size right.
+ */
+ BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
+ != 1024);
}
static int __init perf_event_sysfs_init(void)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c3c46c7..0c56d44 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -5,6 +5,7 @@
* context. The enqueueing is NMI-safe.
*/
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 957a7aa..05698a7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -322,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work)
* land has been frozen during a system-wide hibernation or suspend operation).
* Should always be manipulated under umhelper_sem acquired for write.
*/
-static int usermodehelper_disabled = 1;
+static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -334,32 +334,110 @@ static atomic_t running_helpers = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
/*
+ * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
+ * to become 'false'.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
+
+/*
* Time to wait for running_helpers to become zero before the setting of
* usermodehelper_disabled in usermodehelper_disable() fails
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
-void read_lock_usermodehelper(void)
+int usermodehelper_read_trylock(void)
{
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
down_read(&umhelper_sem);
+ for (;;) {
+ prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+ TASK_INTERRUPTIBLE);
+ if (!usermodehelper_disabled)
+ break;
+
+ if (usermodehelper_disabled == UMH_DISABLED)
+ ret = -EAGAIN;
+
+ up_read(&umhelper_sem);
+
+ if (ret)
+ break;
+
+ schedule();
+ try_to_freeze();
+
+ down_read(&umhelper_sem);
+ }
+ finish_wait(&usermodehelper_disabled_waitq, &wait);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
+
+long usermodehelper_read_lock_wait(long timeout)
+{
+ DEFINE_WAIT(wait);
+
+ if (timeout < 0)
+ return -EINVAL;
+
+ down_read(&umhelper_sem);
+ for (;;) {
+ prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!usermodehelper_disabled)
+ break;
+
+ up_read(&umhelper_sem);
+
+ timeout = schedule_timeout(timeout);
+ if (!timeout)
+ break;
+
+ down_read(&umhelper_sem);
+ }
+ finish_wait(&usermodehelper_disabled_waitq, &wait);
+ return timeout;
}
-EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
-void read_unlock_usermodehelper(void)
+void usermodehelper_read_unlock(void)
{
up_read(&umhelper_sem);
}
-EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
/**
- * usermodehelper_disable - prevent new helpers from being started
+ * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Change the value of usermodehelper_disabled (under umhelper_sem locked for
+ * writing) and wakeup tasks waiting for it to change.
*/
-int usermodehelper_disable(void)
+void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
+{
+ down_write(&umhelper_sem);
+ usermodehelper_disabled = depth;
+ wake_up(&usermodehelper_disabled_waitq);
+ up_write(&umhelper_sem);
+}
+
+/**
+ * __usermodehelper_disable - Prevent new helpers from being started.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
+ */
+int __usermodehelper_disable(enum umh_disable_depth depth)
{
long retval;
+ if (!depth)
+ return -EINVAL;
+
down_write(&umhelper_sem);
- usermodehelper_disabled = 1;
+ usermodehelper_disabled = depth;
up_write(&umhelper_sem);
/*
@@ -374,31 +452,10 @@ int usermodehelper_disable(void)
if (retval)
return 0;
- down_write(&umhelper_sem);
- usermodehelper_disabled = 0;
- up_write(&umhelper_sem);
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
return -EAGAIN;
}
-/**
- * usermodehelper_enable - allow new helpers to be started again
- */
-void usermodehelper_enable(void)
-{
- down_write(&umhelper_sem);
- usermodehelper_disabled = 0;
- up_write(&umhelper_sem);
-}
-
-/**
- * usermodehelper_is_disabled - check if new helpers are allowed to be started
- */
-bool usermodehelper_is_disabled(void)
-{
- return usermodehelper_disabled;
-}
-EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
-
static void helper_lock(void)
{
atomic_inc(&running_helpers);
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f10eb2..89fe3d1 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
/*
* padata.c - generic interface to process data streams in parallel
*
+ * See Documentation/padata.txt for API documentation.
+ *
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
@@ -354,13 +356,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
return -ENOMEM;
- cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pd->cpumask.cbcpu);
return -ENOMEM;
}
- cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
return 0;
}
@@ -564,7 +566,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
static bool padata_validate_cpumask(struct padata_instance *pinst,
const struct cpumask *cpumask)
{
- if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+ if (!cpumask_intersects(cpumask, cpu_online_mask)) {
pinst->flags |= PADATA_INVALID;
return false;
}
@@ -678,7 +680,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
struct parallel_data *pd;
- if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask)) {
pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
pinst->cpumask.cbcpu);
if (!pd)
@@ -746,6 +748,9 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
return -ENOMEM;
padata_replace(pinst, pd);
+
+ cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+ cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
}
return 0;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 0a186cf..e09dfbf 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/async.h>
-#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -611,14 +610,10 @@ int hibernate(void)
if (error)
goto Exit;
- error = usermodehelper_disable();
- if (error)
- goto Exit;
-
/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
if (error)
- goto Enable_umh;
+ goto Exit;
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
@@ -661,8 +656,6 @@ int hibernate(void)
Free_bitmaps:
free_basic_memory_bitmaps();
- Enable_umh:
- usermodehelper_enable();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
pm_restore_console();
@@ -777,15 +770,9 @@ static int software_resume(void)
if (error)
goto close_finish;
- error = usermodehelper_disable();
- if (error)
- goto close_finish;
-
error = create_basic_memory_bitmaps();
- if (error) {
- usermodehelper_enable();
+ if (error)
goto close_finish;
- }
pr_debug("PM: Preparing processes for restore.\n");
error = freeze_processes();
@@ -806,7 +793,6 @@ static int software_resume(void)
thaw_processes();
Done:
free_basic_memory_bitmaps();
- usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0d2aeb2..19db29f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/kmod.h>
/*
* Timeout for stopping processes
@@ -122,6 +123,10 @@ int freeze_processes(void)
{
int error;
+ error = __usermodehelper_disable(UMH_FREEZING);
+ if (error)
+ return error;
+
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
@@ -130,6 +135,7 @@ int freeze_processes(void)
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
+ __usermodehelper_set_disable_depth(UMH_DISABLED);
oom_killer_disable();
}
printk("\n");
@@ -187,6 +193,8 @@ void thaw_processes(void)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ usermodehelper_enable();
+
schedule();
printk("done.\n");
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index d6d6dbd..6a031e6 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -230,6 +230,21 @@ int pm_qos_request_active(struct pm_qos_request *req)
EXPORT_SYMBOL_GPL(pm_qos_request_active);
/**
+ * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
+ * @work: work struct for the delayed work (timeout)
+ *
+ * This cancels the timeout request by falling back to the default at timeout.
+ */
+static void pm_qos_work_fn(struct work_struct *work)
+{
+ struct pm_qos_request *req = container_of(to_delayed_work(work),
+ struct pm_qos_request,
+ work);
+
+ pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+}
+
+/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
return;
}
req->pm_qos_class = pm_qos_class;
+ INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
return;
}
+ if (delayed_work_pending(&req->work))
+ cancel_delayed_work_sync(&req->work);
+
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
@@ -287,6 +306,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
+ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
+ * @req : handle to list element holding a pm_qos request to use
+ * @new_value: defines the temporary qos request
+ * @timeout_us: the effective duration of this qos request in usecs.
+ *
+ * After timeout_us, this qos request is cancelled automatically.
+ */
+void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
+ unsigned long timeout_us)
+{
+ if (!req)
+ return;
+ if (WARN(!pm_qos_request_active(req),
+ "%s called for unknown object.", __func__))
+ return;
+
+ if (delayed_work_pending(&req->work))
+ cancel_delayed_work_sync(&req->work);
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+
+ schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+}
+
+/**
* pm_qos_remove_request - modifies an existing qos request
* @req: handle to request list element
*
@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
return;
}
+ if (delayed_work_pending(&req->work))
+ cancel_delayed_work_sync(&req->work);
+
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 88e5c967..396d262 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
@@ -102,17 +101,12 @@ static int suspend_prepare(void)
if (error)
goto Finish;
- error = usermodehelper_disable();
- if (error)
- goto Finish;
-
error = suspend_freeze_processes();
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
- usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
@@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state)
static void suspend_finish(void)
{
suspend_thaw_processes();
- usermodehelper_enable();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 33c4329..91b0fd0 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -12,7 +12,6 @@
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
-#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
@@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
sys_sync();
printk("done.\n");
- error = usermodehelper_disable();
- if (error)
- break;
-
error = freeze_processes();
- if (error)
- usermodehelper_enable();
- else
+ if (!error)
data->frozen = 1;
break;
@@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
pm_restore_gfp_mask();
thaw_processes();
- usermodehelper_enable();
data->frozen = 0;
break;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3ed0ec..4603b9d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
int dest_cpu;
/* Look for allowed, online CPU in same node. */
- for_each_cpu_mask(dest_cpu, *nodemask) {
+ for_each_cpu(dest_cpu, nodemask) {
if (!cpu_online(dest_cpu))
continue;
if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for (;;) {
/* Any allowed, online CPU? */
- for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+ for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
if (!cpu_online(dest_cpu))
continue;
if (!cpu_active(dest_cpu))
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
+ finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
- struct pt_regs *regs = get_irq_regs();
-
if (oops_in_progress)
return;
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
-
- if (regs)
- show_regs(regs);
- else
- dump_stack();
+ dump_stack();
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b1f30..fb3acba 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch() do { } while (0)
+#endif
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
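finish_arch_post_lock_switch() is a new hook, a no-op by default, that an architecture can override to run code after the rq lock has been released on a context switch. A sketch of what an arch header could provide (the deferred-MM helper is hypothetical):

/* Hypothetical arch override, e.g. in the arch's mmu_context.h. */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/* the rq lock is no longer held here, so heavier work is allowed */
	example_arch_finish_mm_switch();	/* hypothetical helper */
}
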
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd31345..a1d2849 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
- select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
select KALLSYMS
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 867bd1d..0fa92f6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
#else
__ftrace_trace_function = func;
#endif
- ftrace_trace_function = ftrace_test_stop_func;
+ ftrace_trace_function =
+ (func == ftrace_stub) ? func : ftrace_test_stop_func;
#endif
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5b7b5c..cf8d11e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,33 +154,10 @@ enum {
static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
- set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF (1 << 20)
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
- clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
* tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
- return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2619,6 +2587,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different from ring_buffer_record_disable() as
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+ unsigned int rd;
+ unsigned int new_rd;
+
+ do {
+ rd = atomic_read(&buffer->record_disabled);
+ new_rd = rd | RB_BUFFER_OFF;
+ } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different from ring_buffer_record_enable() as
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+ unsigned int rd;
+ unsigned int new_rd;
+
+ do {
+ rd = atomic_read(&buffer->record_disabled);
+ new_rd = rd & ~RB_BUFFER_OFF;
+ } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state that it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+ return !atomic_read(&buffer->record_disabled);
+}
+
+/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to.
* @cpu: The CPU buffer to stop
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- unsigned long *p = filp->private_data;
- char buf[64];
- int r;
-
- if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
- r = sprintf(buf, "permanently disabled\n");
- else
- r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- unsigned long *p = filp->private_data;
- unsigned long val;
- int ret;
-
- ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
- if (ret)
- return ret;
-
- if (val)
- set_bit(RB_BUFFERS_ON_BIT, p);
- else
- clear_bit(RB_BUFFERS_ON_BIT, p);
-
- (*ppos)++;
-
- return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
- .open = tracing_open_generic,
- .read = rb_simple_read,
- .write = rb_simple_write,
- .llseek = default_llseek,
-};
-
-
-static __init int rb_init_debugfs(void)
-{
- struct dentry *d_tracer;
-
- d_tracer = tracing_init_dentry();
-
- trace_create_file("tracing_on", 0644, d_tracer,
- &ring_buffer_flags, &rb_simple_fops);
-
- return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503..ed7b5d1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -36,6 +36,7 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
+#include <linux/nmi.h>
#include <linux/fs.h>
#include "trace.h"
@@ -352,6 +353,59 @@ static void wakeup_work_handler(struct work_struct *work)
static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
/**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+ if (global_trace.buffer)
+ ring_buffer_record_on(global_trace.buffer);
+ /*
+ * This flag is only looked at when buffers haven't been
+ * allocated yet. We don't really care about the race
+ * between setting this flag and actually turning
+ * on the buffer.
+ */
+ global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+ if (global_trace.buffer)
+ ring_buffer_record_off(global_trace.buffer);
+ /*
+ * This flag is only looked at when buffers haven't been
+ * allocated yet. We don't really care about the race
+ * between setting this flag and actually turning
+ * on the buffer.
+ */
+ global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+ if (global_trace.buffer)
+ return ring_buffer_record_is_on(global_trace.buffer);
+ return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
+/**
* trace_wake_up - wake up tasks waiting for trace input
*
* Schedules a delayed work to wake up any task that is blocked on the
@@ -1644,6 +1698,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
+ int next_size = 0;
int cpu;
/*
@@ -1675,9 +1730,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
next_cpu = cpu;
next_ts = ts;
next_lost = lost_events;
+ next_size = iter->ent_size;
}
}
+ iter->ent_size = next_size;
+
if (ent_cpu)
*ent_cpu = next_cpu;
@@ -4567,6 +4625,55 @@ static __init void create_trace_options_dir(void)
create_trace_option_core_file(trace_options[i], i);
}
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct ring_buffer *buffer = filp->private_data;
+ char buf[64];
+ int r;
+
+ if (buffer)
+ r = ring_buffer_record_is_on(buffer);
+ else
+ r = 0;
+
+ r = sprintf(buf, "%d\n", r);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct ring_buffer *buffer = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ if (buffer) {
+ if (val)
+ ring_buffer_record_on(buffer);
+ else
+ ring_buffer_record_off(buffer);
+ }
+
+ (*ppos)++;
+
+ return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+ .open = tracing_open_generic,
+ .read = rb_simple_read,
+ .write = rb_simple_write,
+ .llseek = default_llseek,
+};
+
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
@@ -4626,6 +4733,9 @@ static __init int tracer_init_debugfs(void)
trace_create_file("trace_clock", 0644, d_tracer, NULL,
&trace_clock_fops);
+ trace_create_file("tracing_on", 0644, d_tracer,
+ global_trace.buffer, &rb_simple_fops);
+
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4908,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(&iter);
}
+ touch_nmi_watchdog();
trace_printk_seq(&iter.seq);
}
@@ -4863,6 +4974,8 @@ __init static int tracer_alloc_buffers(void)
goto out_free_cpumask;
}
global_trace.entries = ring_buffer_size(global_trace.buffer);
+ if (global_trace.buffer_disabled)
+ tracing_off();
#ifdef CONFIG_TRACER_MAX_TRACE
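A hedged user-space sketch of the control file wired up above: writes to the per-buffer "tracing_on" debugfs file now go through rb_simple_write() and toggle the ring buffer directly. The path assumes debugfs is mounted at /sys/kernel/debug.

	#include <stdio.h>

	int main(void)
	{
		/* Assumes the usual debugfs mount point. */
		FILE *f = fopen("/sys/kernel/debug/tracing/tracing_on", "w");

		if (!f)
			return 1;
		fputs("0\n", f);	/* stop recording; write "1\n" to resume */
		return fclose(f) ? 1 : 0;
	}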
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54faec7..95059f0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -154,6 +154,7 @@ struct trace_array {
struct ring_buffer *buffer;
unsigned long entries;
int cpu;
+ int buffer_disabled;
cycle_t time_start;
struct task_struct *waiter;
struct trace_array_cpu *data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
filter)
#include "trace_entries.h"
-#ifdef CONFIG_PERF_EVENTS
#ifdef CONFIG_FUNCTION_TRACER
int perf_ftrace_event_register(struct ftrace_event_call *call,
enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_PERF_EVENTS */
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index d91eb05..4108e12 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -166,6 +166,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
#define FTRACE_STACK_ENTRIES 8
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
FTRACE_ENTRY(kernel_stack, stack_entry,
TRACE_STACK,
@@ -175,8 +181,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
__dynamic_array(unsigned long, caller )
),
- F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
- "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+ F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
__entry->caller[0], __entry->caller[1], __entry->caller[2],
__entry->caller[3], __entry->caller[4], __entry->caller[5],
__entry->caller[6], __entry->caller[7]),
@@ -193,8 +200,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
__array( unsigned long, caller, FTRACE_STACK_ENTRIES )
),
- F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
- "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+ F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
__entry->caller[0], __entry->caller[1], __entry->caller[2],
__entry->caller[3], __entry->caller[4], __entry->caller[5],
__entry->caller[6], __entry->caller[7]),
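For illustration only (user-space, not from the patch): IP_FMT expands to a plain string literal, so adjacent-literal concatenation builds the final format at compile time and stack addresses print at the native word width.

	#include <stdio.h>

	#ifndef CONFIG_64BIT
	# define IP_FMT "%08lx"
	#else
	# define IP_FMT "%016lx"
	#endif

	int main(void)
	{
		unsigned long ip = 0xc1000000UL;

		/* Adjacent string literals concatenate into one format,
		 * so the width is chosen entirely at build time. */
		printf("\t=> (" IP_FMT ")\n", ip);
		return 0;
	}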
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7b46c9b..3dd15e8 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -162,7 +162,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
#define __dynamic_array(type, item)
#undef F_printk
-#define F_printk(fmt, args...) #fmt ", " __stringify(args)
+#define F_printk(fmt, args...) __stringify(fmt) ", " __stringify(args)
#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
diff --git a/net/802/garp.c b/net/802/garp.c
index 8e21b6d..a5c2248 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -167,7 +167,8 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
return NULL;
}
-static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
+static struct garp_attr *garp_attr_create(struct garp_applicant *app,
+ const void *data, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->gid.rb_node;
struct garp_attr *attr;
@@ -176,21 +177,16 @@ static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
while (*p) {
parent = *p;
attr = rb_entry(parent, struct garp_attr, node);
- d = garp_attr_cmp(attr, new->data, new->dlen, new->type);
+ d = garp_attr_cmp(attr, data, len, type);
if (d < 0)
p = &parent->rb_left;
else if (d > 0)
p = &parent->rb_right;
+ else {
+ /* The attribute already exists; re-use it. */
+ return attr;
+ }
}
- rb_link_node(&new->node, parent, p);
- rb_insert_color(&new->node, &app->gid);
-}
-
-static struct garp_attr *garp_attr_create(struct garp_applicant *app,
- const void *data, u8 len, u8 type)
-{
- struct garp_attr *attr;
-
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
@@ -198,7 +194,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app,
attr->type = type;
attr->dlen = len;
memcpy(attr->data, data, len);
- garp_attr_insert(app, attr);
+
+ rb_link_node(&attr->node, parent, p);
+ rb_insert_color(&attr->node, &app->gid);
return attr;
}
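A generic sketch of the lookup-or-create shape the rewritten garp_attr_create() now has: a single rbtree descent either finds the existing node (which is reused) or the link point for a new one. The names below are illustrative, not from the patch.

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct item {
		struct rb_node node;
		int key;
	};

	static struct item *item_lookup_or_create(struct rb_root *root, int key)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;
		struct item *it;

		while (*p) {
			parent = *p;
			it = rb_entry(parent, struct item, node);
			if (key < it->key)
				p = &parent->rb_left;
			else if (key > it->key)
				p = &parent->rb_right;
			else
				return it;	/* already present: reuse it */
		}

		it = kmalloc(sizeof(*it), GFP_ATOMIC);
		if (!it)
			return NULL;
		it->key = key;
		rb_link_node(&it->node, parent, p);	/* link at the slot found above */
		rb_insert_color(&it->node, root);
		return it;
	}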
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d59155..6c7dc9d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1596,6 +1596,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
+ skb->skb_iif = 0;
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4dc1c10..167ea10 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2041,7 +2041,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (err < 0)
goto e_err;
}
- rth = rt_dst_alloc(init_net.loopback_dev,
+ rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 496b627..3992e26 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -881,6 +881,16 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}
+static struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags)
+{
+ if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
+ flags |= RT6_LOOKUP_F_IFACE;
+
+ return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
+}
+
void ip6_route_input(struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -895,10 +905,7 @@ void ip6_route_input(struct sk_buff *skb)
.flowi6_proto = iph->nexthdr,
};
- if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
- flags |= RT6_LOOKUP_F_IFACE;
-
- skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
+ skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
@@ -2537,7 +2544,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6;
- int err, iif = 0;
+ int err, iif = 0, oif = 0;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
if (err < 0)
@@ -2564,15 +2571,29 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
iif = nla_get_u32(tb[RTA_IIF]);
if (tb[RTA_OIF])
- fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
+ oif = nla_get_u32(tb[RTA_OIF]);
if (iif) {
struct net_device *dev;
+ int flags = 0;
+
dev = __dev_get_by_index(net, iif);
if (!dev) {
err = -ENODEV;
goto errout;
}
+
+ fl6.flowi6_iif = iif;
+
+ if (!ipv6_addr_any(&fl6.saddr))
+ flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+ rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
+ flags);
+ } else {
+ fl6.flowi6_oif = oif;
+
+ rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -2587,7 +2608,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
skb_reset_mac_header(skb);
skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
- rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 1068f66..64d3ce5 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,6 +49,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
+ del_timer_sync(&tid_rx->reorder_timer);
+
for (i = 0; i < tid_rx->buf_size; i++)
dev_kfree_skb(tid_rx->reorder_buf[i]);
kfree(tid_rx->reorder_buf);
@@ -91,7 +93,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
tid, WLAN_BACK_RECIPIENT, reason);
del_timer_sync(&tid_rx->session_timer);
- del_timer_sync(&tid_rx->reorder_timer);
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b581a24..1633648 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -102,9 +102,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- /* If this off-channel logic ever changes, ieee80211_on_oper_channel
- * may need to change as well.
- */
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (local->scan_channel) {
chan = local->scan_channel;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 33cd169..c70e176 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -370,7 +370,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
- local->leave_oper_channel_time = 0;
+ local->leave_oper_channel_time = jiffies;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 3eb348b..d98c868 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
+#include <linux/atomic.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/slab.h>
@@ -17,7 +18,6 @@
#include <linux/errno.h>
#include <net/netlink.h>
#include <net/sock.h>
-#include <asm/atomic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 1ab8689..906cc05 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -95,11 +95,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = addr;
int err;
- if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
+ if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
return 0;
if (dev->flags & IFF_UP) {
- err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+ err = rose_add_loopback_node((rose_address *)sa->sa_data);
if (err)
return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4c1eb94..e49da27 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2386,7 +2386,9 @@ nla_put_failure:
}
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
- int flags, struct net_device *dev,
+ int flags,
+ struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
const u8 *mac_addr, struct station_info *sinfo)
{
void *hdr;
@@ -2425,12 +2427,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (sinfo->filled & STATION_INFO_PLINK_STATE)
NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
sinfo->plink_state);
- if (sinfo->filled & STATION_INFO_SIGNAL)
- NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
- sinfo->signal);
- if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
- NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
- sinfo->signal_avg);
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ if (sinfo->filled & STATION_INFO_SIGNAL)
+ NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
+ sinfo->signal);
+ if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
+ NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+ sinfo->signal_avg);
+ break;
+ default:
+ break;
+ }
if (sinfo->filled & STATION_INFO_TX_BITRATE) {
if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
NL80211_STA_INFO_TX_BITRATE))
@@ -2523,7 +2531,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
if (nl80211_send_station(skb,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- netdev, mac_addr,
+ dev, netdev, mac_addr,
&sinfo) < 0)
goto out;
@@ -2568,7 +2576,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
- dev, mac_addr, &sinfo) < 0) {
+ rdev, dev, mac_addr, &sinfo) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -7596,7 +7604,8 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
+ if (nl80211_send_station(msg, 0, 0, 0,
+ rdev, dev, mac_addr, sinfo) < 0) {
nlmsg_free(msg);
return;
}
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 0d6004e..cf7b12f 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -254,6 +254,6 @@ case "$1" in
esac
# Remove structure forward declarations.
-if [ -n $remove_structs ]; then
+if [ -n "$remove_structs" ]; then
LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' $1
fi
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 5ff6777..cc3520d 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -115,23 +115,23 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=");
- audit_log_string(ab, aa_audit_type[sa->aad.type]);
+ audit_log_string(ab, aa_audit_type[sa->aad->type]);
}
- if (sa->aad.op) {
+ if (sa->aad->op) {
audit_log_format(ab, " operation=");
- audit_log_string(ab, op_table[sa->aad.op]);
+ audit_log_string(ab, op_table[sa->aad->op]);
}
- if (sa->aad.info) {
+ if (sa->aad->info) {
audit_log_format(ab, " info=");
- audit_log_string(ab, sa->aad.info);
- if (sa->aad.error)
- audit_log_format(ab, " error=%d", sa->aad.error);
+ audit_log_string(ab, sa->aad->info);
+ if (sa->aad->error)
+ audit_log_format(ab, " error=%d", sa->aad->error);
}
- if (sa->aad.profile) {
- struct aa_profile *profile = sa->aad.profile;
+ if (sa->aad->profile) {
+ struct aa_profile *profile = sa->aad->profile;
pid_t pid;
rcu_read_lock();
pid = rcu_dereference(tsk->real_parent)->pid;
@@ -145,9 +145,9 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
audit_log_untrustedstring(ab, profile->base.hname);
}
- if (sa->aad.name) {
+ if (sa->aad->name) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, sa->aad.name);
+ audit_log_untrustedstring(ab, sa->aad->name);
}
}
@@ -159,10 +159,8 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
void aa_audit_msg(int type, struct common_audit_data *sa,
void (*cb) (struct audit_buffer *, void *))
{
- sa->aad.type = type;
- sa->lsm_pre_audit = audit_pre;
- sa->lsm_post_audit = cb;
- common_lsm_audit(sa);
+ sa->aad->type = type;
+ common_lsm_audit(sa, audit_pre, cb);
}
/**
@@ -184,7 +182,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
BUG_ON(!profile);
if (type == AUDIT_APPARMOR_AUTO) {
- if (likely(!sa->aad.error)) {
+ if (likely(!sa->aad->error)) {
if (AUDIT_MODE(profile) != AUDIT_ALL)
return 0;
type = AUDIT_APPARMOR_AUDIT;
@@ -196,21 +194,21 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
AUDIT_MODE(profile) == AUDIT_QUIET))
- return sa->aad.error;
+ return sa->aad->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
type = AUDIT_APPARMOR_KILL;
if (!unconfined(profile))
- sa->aad.profile = profile;
+ sa->aad->profile = profile;
aa_audit_msg(type, sa, cb);
- if (sa->aad.type == AUDIT_APPARMOR_KILL)
+ if (sa->aad->type == AUDIT_APPARMOR_KILL)
(void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current);
- if (sa->aad.type == AUDIT_APPARMOR_ALLOWED)
- return complain_error(sa->aad.error);
+ if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+ return complain_error(sa->aad->error);
- return sa->aad.error;
+ return sa->aad->error;
}
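The caller-side pattern this file now expects, condensed from the aa_info_message() hunk later in this patch (a sketch, with str standing in for the message string): the AppArmor-specific data lives in its own on-stack struct and is attached to common_audit_data by pointer rather than being embedded in it.

	struct common_audit_data sa;
	struct apparmor_audit_data aad = {0,};

	COMMON_AUDIT_DATA_INIT(&sa, NONE);
	sa.aad = &aad;		/* attach the LSM-specific data by pointer */
	aad.info = str;
	aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);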
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 9982c48..088dba3 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -64,11 +64,13 @@ static int audit_caps(struct aa_profile *profile, struct task_struct *task,
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, CAP);
+ sa.aad = &aad;
sa.tsk = task;
sa.u.cap = cap;
- sa.aad.op = OP_CAPABLE;
- sa.aad.error = error;
+ sa.aad->op = OP_CAPABLE;
+ sa.aad->error = error;
if (likely(!error)) {
/* test if auditing is being forced */
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 5d176f2..2f8fcba 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -67,22 +67,22 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
struct common_audit_data *sa = va;
uid_t fsuid = current_fsuid();
- if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " requested_mask=");
- audit_file_mask(ab, sa->aad.fs.request);
+ audit_file_mask(ab, sa->aad->fs.request);
}
- if (sa->aad.fs.denied & AA_AUDIT_FILE_MASK) {
+ if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " denied_mask=");
- audit_file_mask(ab, sa->aad.fs.denied);
+ audit_file_mask(ab, sa->aad->fs.denied);
}
- if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " fsuid=%d", fsuid);
- audit_log_format(ab, " ouid=%d", sa->aad.fs.ouid);
+ audit_log_format(ab, " ouid=%d", sa->aad->fs.ouid);
}
- if (sa->aad.fs.target) {
+ if (sa->aad->fs.target) {
audit_log_format(ab, " target=");
- audit_log_untrustedstring(ab, sa->aad.fs.target);
+ audit_log_untrustedstring(ab, sa->aad->fs.target);
}
}
@@ -107,45 +107,47 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
{
int type = AUDIT_APPARMOR_AUTO;
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = op,
- sa.aad.fs.request = request;
- sa.aad.name = name;
- sa.aad.fs.target = target;
- sa.aad.fs.ouid = ouid;
- sa.aad.info = info;
- sa.aad.error = error;
-
- if (likely(!sa.aad.error)) {
+ sa.aad = &aad;
+ aad.op = op,
+ aad.fs.request = request;
+ aad.name = name;
+ aad.fs.target = target;
+ aad.fs.ouid = ouid;
+ aad.info = info;
+ aad.error = error;
+
+ if (likely(!sa.aad->error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
- sa.aad.fs.request &= mask;
+ sa.aad->fs.request &= mask;
- if (likely(!sa.aad.fs.request))
+ if (likely(!sa.aad->fs.request))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
- sa.aad.fs.request = sa.aad.fs.request & ~perms->allow;
+ sa.aad->fs.request = sa.aad->fs.request & ~perms->allow;
- if (sa.aad.fs.request & perms->kill)
+ if (sa.aad->fs.request & perms->kill)
type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
- if ((sa.aad.fs.request & perms->quiet) &&
+ if ((sa.aad->fs.request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
- sa.aad.fs.request &= ~perms->quiet;
+ sa.aad->fs.request &= ~perms->quiet;
- if (!sa.aad.fs.request)
- return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
+ if (!sa.aad->fs.request)
+ return COMPLAIN_MODE(profile) ? 0 : sa.aad->error;
}
- sa.aad.fs.denied = sa.aad.fs.request & ~perms->allow;
+ sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow;
return aa_audit(type, profile, gfp, &sa, file_audit_cb);
}
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 4ba78c2..3868b1e 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -103,7 +103,33 @@ enum aa_ops {
};
-/* define a short hand for apparmor_audit_data portion of common_audit_data */
+struct apparmor_audit_data {
+ int error;
+ int op;
+ int type;
+ void *profile;
+ const char *name;
+ const char *info;
+ union {
+ void *target;
+ struct {
+ long pos;
+ void *target;
+ } iface;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
+ struct {
+ const char *target;
+ u32 request;
+ u32 denied;
+ uid_t ouid;
+ } fs;
+ };
+};
+
+/* define a short hand for apparmor_audit_data structure */
#define aad apparmor_audit_data
void aa_audit_msg(int type, struct common_audit_data *sa,
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ee05c6..c3da93a 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -26,7 +26,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
audit_log_format(ab, " target=");
- audit_log_untrustedstring(ab, sa->aad.target);
+ audit_log_untrustedstring(ab, sa->aad->target);
}
/**
@@ -41,10 +41,12 @@ static int aa_audit_ptrace(struct aa_profile *profile,
struct aa_profile *target, int error)
{
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_PTRACE;
- sa.aad.target = target;
- sa.aad.error = error;
+ sa.aad = &aad;
+ aad.op = OP_PTRACE;
+ aad.target = target;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa,
audit_cb);
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 9516948..e75829b 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -65,8 +65,10 @@ void aa_info_message(const char *str)
{
if (audit_enabled) {
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.info = str;
+ sa.aad = &aad;
+ aad.info = str;
aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
}
printk(KERN_INFO "AppArmor: %s\n", str);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 97ce8fa..ad05d39 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -588,10 +588,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_SETPROCATTR;
- sa.aad.info = name;
- sa.aad.error = -EINVAL;
+ sa.aad = &aad;
+ aad.op = OP_SETPROCATTR;
+ aad.info = name;
+ aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED,
__aa_current_profile(), GFP_KERNEL,
&sa, NULL);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 9064143..f1f7506 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -964,11 +964,13 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
int error)
{
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = op;
- sa.aad.name = name;
- sa.aad.info = info;
- sa.aad.error = error;
+ sa.aad = &aad;
+ aad.op = op;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp,
&sa, NULL);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 25fd51e..deab7c7 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -70,13 +70,13 @@ struct aa_ext {
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
- if (sa->aad.iface.target) {
- struct aa_profile *name = sa->aad.iface.target;
+ if (sa->aad->iface.target) {
+ struct aa_profile *name = sa->aad->iface.target;
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, name->base.hname);
}
- if (sa->aad.iface.pos)
- audit_log_format(ab, " offset=%ld", sa->aad.iface.pos);
+ if (sa->aad->iface.pos)
+ audit_log_format(ab, " offset=%ld", sa->aad->iface.pos);
}
/**
@@ -94,13 +94,15 @@ static int audit_iface(struct aa_profile *new, const char *name,
{
struct aa_profile *profile = __aa_current_profile();
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
+ sa.aad = &aad;
if (e)
- sa.aad.iface.pos = e->pos - e->start;
- sa.aad.iface.target = new;
- sa.aad.name = name;
- sa.aad.info = info;
- sa.aad.error = error;
+ aad.iface.pos = e->pos - e->start;
+ aad.iface.target = new;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa,
audit_cb);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 72c25a4f..2fe8613 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -34,7 +34,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
struct common_audit_data *sa = va;
audit_log_format(ab, " rlimit=%s value=%lu",
- rlim_names[sa->aad.rlim.rlim], sa->aad.rlim.max);
+ rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max);
}
/**
@@ -50,12 +50,14 @@ static int audit_resource(struct aa_profile *profile, unsigned int resource,
unsigned long value, int error)
{
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_SETRLIMIT,
- sa.aad.rlim.rlim = resource;
- sa.aad.rlim.max = value;
- sa.aad.error = error;
+ sa.aad = &aad;
+ aad.op = OP_SETRLIMIT,
+ aad.rlim.rlim = resource;
+ aad.rlim.max = value;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa,
audit_cb);
}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 8b8f090..90c129b 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -49,8 +49,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (ih == NULL)
return -EINVAL;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
if (proto)
*proto = ih->protocol;
@@ -64,8 +64,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -73,8 +73,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -82,16 +82,16 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -119,8 +119,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
ip6 = ipv6_hdr(skb);
if (ip6 == NULL)
return -EINVAL;
- ad->u.net.v6info.saddr = ip6->saddr;
- ad->u.net.v6info.daddr = ip6->daddr;
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
/* IPv6 can have several extension header before the Transport header
* skip them */
@@ -140,8 +140,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -151,8 +151,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -162,8 +162,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
@@ -172,8 +172,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -281,8 +281,8 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
break;
case LSM_AUDIT_DATA_NET:
- if (a->u.net.sk) {
- struct sock *sk = a->u.net.sk;
+ if (a->u.net->sk) {
+ struct sock *sk = a->u.net->sk;
struct unix_sock *u;
int len = 0;
char *p = NULL;
@@ -330,29 +330,29 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
}
- switch (a->u.net.family) {
+ switch (a->u.net->family) {
case AF_INET:
- print_ipv4_addr(ab, a->u.net.v4info.saddr,
- a->u.net.sport,
+ print_ipv4_addr(ab, a->u.net->v4info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv4_addr(ab, a->u.net.v4info.daddr,
- a->u.net.dport,
+ print_ipv4_addr(ab, a->u.net->v4info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
case AF_INET6:
- print_ipv6_addr(ab, &a->u.net.v6info.saddr,
- a->u.net.sport,
+ print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv6_addr(ab, &a->u.net.v6info.daddr,
- a->u.net.dport,
+ print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
}
- if (a->u.net.netif > 0) {
+ if (a->u.net->netif > 0) {
struct net_device *dev;
/* NOTE: we always use init's namespace */
- dev = dev_get_by_index(&init_net, a->u.net.netif);
+ dev = dev_get_by_index(&init_net, a->u.net->netif);
if (dev) {
audit_log_format(ab, " netif=%s", dev->name);
dev_put(dev);
@@ -378,11 +378,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
*
* setup the audit buffer for common security information
* uses callback to print LSM specific information
*/
-void common_lsm_audit(struct common_audit_data *a)
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
{
struct audit_buffer *ab;
@@ -394,13 +398,13 @@ void common_lsm_audit(struct common_audit_data *a)
if (ab == NULL)
return;
- if (a->lsm_pre_audit)
- a->lsm_pre_audit(ab, a);
+ if (pre_audit)
+ pre_audit(ab, a);
dump_common_audit_data(ab, a);
- if (a->lsm_post_audit)
- a->lsm_post_audit(ab, a);
+ if (post_audit)
+ post_audit(ab, a);
audit_log_end(ab);
}
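With this signature change the callbacks travel as arguments instead of living in common_audit_data. A condensed sketch of the new call shape (mylsm_audit_pre is hypothetical; the real callers are the AppArmor and SELinux hunks in this patch):

	/* Hypothetical LSM pre-audit callback following the new signature. */
	static void mylsm_audit_pre(struct audit_buffer *ab, void *va)
	{
		audit_log_format(ab, " mylsm=\"example\"");
	}

	/* ... at the audit site: */
	common_lsm_audit(&sa, mylsm_audit_pre, NULL);	/* post callback may be NULL */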
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 6989472..8ee42b2 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -436,9 +436,9 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, "avc: %s ",
- ad->selinux_audit_data.denied ? "denied" : "granted");
- avc_dump_av(ab, ad->selinux_audit_data.tclass,
- ad->selinux_audit_data.audited);
+ ad->selinux_audit_data->slad->denied ? "denied" : "granted");
+ avc_dump_av(ab, ad->selinux_audit_data->slad->tclass,
+ ad->selinux_audit_data->slad->audited);
audit_log_format(ab, " for ");
}
@@ -452,22 +452,25 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, " ");
- avc_dump_query(ab, ad->selinux_audit_data.ssid,
- ad->selinux_audit_data.tsid,
- ad->selinux_audit_data.tclass);
+ avc_dump_query(ab, ad->selinux_audit_data->slad->ssid,
+ ad->selinux_audit_data->slad->tsid,
+ ad->selinux_audit_data->slad->tclass);
}
/* This is the slow part of avc audit with big stack footprint */
static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied,
- struct av_decision *avd, struct common_audit_data *a,
+ struct common_audit_data *a,
unsigned flags)
{
struct common_audit_data stack_data;
+ struct selinux_audit_data sad = {0,};
+ struct selinux_late_audit_data slad;
if (!a) {
a = &stack_data;
COMMON_AUDIT_DATA_INIT(a, NONE);
+ a->selinux_audit_data = &sad;
}
/*
@@ -481,15 +484,15 @@ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
(flags & MAY_NOT_BLOCK))
return -ECHILD;
- a->selinux_audit_data.tclass = tclass;
- a->selinux_audit_data.requested = requested;
- a->selinux_audit_data.ssid = ssid;
- a->selinux_audit_data.tsid = tsid;
- a->selinux_audit_data.audited = audited;
- a->selinux_audit_data.denied = denied;
- a->lsm_pre_audit = avc_audit_pre_callback;
- a->lsm_post_audit = avc_audit_post_callback;
- common_lsm_audit(a);
+ slad.tclass = tclass;
+ slad.requested = requested;
+ slad.ssid = ssid;
+ slad.tsid = tsid;
+ slad.audited = audited;
+ slad.denied = denied;
+
+ a->selinux_audit_data->slad = &slad;
+ common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
return 0;
}
@@ -513,7 +516,7 @@ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
-int avc_audit(u32 ssid, u32 tsid,
+inline int avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd, int result, struct common_audit_data *a,
unsigned flags)
@@ -523,7 +526,7 @@ int avc_audit(u32 ssid, u32 tsid,
if (unlikely(denied)) {
audited = denied & avd->auditdeny;
/*
- * a->selinux_audit_data.auditdeny is TRICKY! Setting a bit in
+ * a->selinux_audit_data->auditdeny is TRICKY! Setting a bit in
* this field means that ANY denials should NOT be audited if
* the policy contains an explicit dontaudit rule for that
* permission. Take notice that this is unrelated to the
@@ -532,15 +535,15 @@ int avc_audit(u32 ssid, u32 tsid,
*
* denied == READ
* avd.auditdeny & ACCESS == 0 (not set means explicit rule)
- * selinux_audit_data.auditdeny & ACCESS == 1
+ * selinux_audit_data->auditdeny & ACCESS == 1
*
* We will NOT audit the denial even though the denied
* permission was READ and the auditdeny checks were for
* ACCESS
*/
if (a &&
- a->selinux_audit_data.auditdeny &&
- !(a->selinux_audit_data.auditdeny & avd->auditdeny))
+ a->selinux_audit_data->auditdeny &&
+ !(a->selinux_audit_data->auditdeny & avd->auditdeny))
audited = 0;
} else if (result)
audited = denied = requested;
@@ -551,7 +554,7 @@ int avc_audit(u32 ssid, u32 tsid,
return slow_avc_audit(ssid, tsid, tclass,
requested, audited, denied,
- avd, a, flags);
+ a, flags);
}
/**
@@ -741,6 +744,41 @@ int avc_ss_reset(u32 seqno)
return rc;
}
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security compute.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd)
+{
+ rcu_read_unlock();
+ security_compute_av(ssid, tsid, tclass, avd);
+ rcu_read_lock();
+ return avc_insert(ssid, tsid, tclass, avd);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned flags,
+ struct av_decision *avd)
+{
+ if (flags & AVC_STRICT)
+ return -EACCES;
+
+ if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+ return -EACCES;
+
+ avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+ tsid, tclass, avd->seqno);
+ return 0;
+}
+
+
/**
* avc_has_perm_noaudit - Check permissions but perform no auditing.
* @ssid: source security identifier
@@ -761,7 +799,7 @@ int avc_ss_reset(u32 seqno)
* auditing, e.g. in cases where a lock must be held for the check but
* should be released for the auditing.
*/
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *avd)
@@ -776,26 +814,15 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
node = avc_lookup(ssid, tsid, tclass);
if (unlikely(!node)) {
- rcu_read_unlock();
- security_compute_av(ssid, tsid, tclass, avd);
- rcu_read_lock();
- node = avc_insert(ssid, tsid, tclass, avd);
+ node = avc_compute_av(ssid, tsid, tclass, avd);
} else {
memcpy(avd, &node->ae.avd, sizeof(*avd));
avd = &node->ae.avd;
}
denied = requested & ~(avd->allowed);
-
- if (denied) {
- if (flags & AVC_STRICT)
- rc = -EACCES;
- else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
- avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
- tsid, tclass, avd->seqno);
- else
- rc = -EACCES;
- }
+ if (unlikely(denied))
+ rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
rcu_read_unlock();
return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 15c6c56..d85b793 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
-#include <linux/ext2_fs.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/xattr.h>
@@ -1421,6 +1420,7 @@ static int cred_has_capability(const struct cred *cred,
int cap, int audit)
{
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
struct av_decision avd;
u16 sclass;
u32 sid = cred_sid(cred);
@@ -1428,6 +1428,7 @@ static int cred_has_capability(const struct cred *cred,
int rc;
COMMON_AUDIT_DATA_INIT(&ad, CAP);
+ ad.selinux_audit_data = &sad;
ad.tsk = current;
ad.u.cap = cap;
@@ -1493,9 +1494,11 @@ static int inode_has_perm_noadp(const struct cred *cred,
unsigned flags)
{
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
COMMON_AUDIT_DATA_INIT(&ad, INODE);
ad.u.inode = inode;
+ ad.selinux_audit_data = &sad;
return inode_has_perm(cred, inode, perms, &ad, flags);
}
@@ -1508,9 +1511,11 @@ static inline int dentry_has_perm(const struct cred *cred,
{
struct inode *inode = dentry->d_inode;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
+ ad.selinux_audit_data = &sad;
return inode_has_perm(cred, inode, av, &ad, 0);
}
@@ -1523,9 +1528,11 @@ static inline int path_has_perm(const struct cred *cred,
{
struct inode *inode = path->dentry->d_inode;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = *path;
+ ad.selinux_audit_data = &sad;
return inode_has_perm(cred, inode, av, &ad, 0);
}
@@ -1544,11 +1551,13 @@ static int file_has_perm(const struct cred *cred,
struct file_security_struct *fsec = file->f_security;
struct inode *inode = file->f_path.dentry->d_inode;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = cred_sid(cred);
int rc;
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = file->f_path;
+ ad.selinux_audit_data = &sad;
if (sid != fsec->sid) {
rc = avc_has_perm(sid, fsec->sid,
@@ -1578,6 +1587,7 @@ static int may_create(struct inode *dir,
struct superblock_security_struct *sbsec;
u32 sid, newsid;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
int rc;
dsec = dir->i_security;
@@ -1588,6 +1598,7 @@ static int may_create(struct inode *dir,
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
+ ad.selinux_audit_data = &sad;
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
DIR__ADD_NAME | DIR__SEARCH,
@@ -1632,6 +1643,7 @@ static int may_link(struct inode *dir,
{
struct inode_security_struct *dsec, *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
u32 av;
int rc;
@@ -1641,6 +1653,7 @@ static int may_link(struct inode *dir,
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
+ ad.selinux_audit_data = &sad;
av = DIR__SEARCH;
av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
@@ -1675,6 +1688,7 @@ static inline int may_rename(struct inode *old_dir,
{
struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
u32 av;
int old_is_dir, new_is_dir;
@@ -1686,6 +1700,7 @@ static inline int may_rename(struct inode *old_dir,
new_dsec = new_dir->i_security;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.selinux_audit_data = &sad;
ad.u.dentry = old_dentry;
rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
@@ -1971,6 +1986,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
struct inode *inode = bprm->file->f_path.dentry->d_inode;
int rc;
@@ -2010,6 +2026,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
}
COMMON_AUDIT_DATA_INIT(&ad, PATH);
+ ad.selinux_audit_data = &sad;
ad.u.path = bprm->file->f_path;
if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
@@ -2099,6 +2116,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
{
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
struct file *file, *devnull = NULL;
struct tty_struct *tty;
struct fdtable *fdt;
@@ -2136,6 +2154,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
/* Revalidate access to inherited open files. */
COMMON_AUDIT_DATA_INIT(&ad, INODE);
+ ad.selinux_audit_data = &sad;
spin_lock(&files->file_lock);
for (;;) {
@@ -2473,6 +2492,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
int rc;
rc = superblock_doinit(sb, data);
@@ -2484,6 +2504,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.selinux_audit_data = &sad;
ad.u.dentry = sb->s_root;
return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
}
@@ -2492,8 +2513,10 @@ static int selinux_sb_statfs(struct dentry *dentry)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.selinux_audit_data = &sad;
ad.u.dentry = dentry->d_sb->s_root;
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
@@ -2657,6 +2680,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 perms;
bool from_access;
unsigned flags = mask & MAY_NOT_BLOCK;
@@ -2669,10 +2693,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, INODE);
+ ad.selinux_audit_data = &sad;
ad.u.inode = inode;
if (from_access)
- ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS;
+ ad.selinux_audit_data->auditdeny |= FILE__AUDIT_ACCESS;
perms = file_mask_to_av(inode->i_mode, mask);
@@ -2738,6 +2763,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
struct inode_security_struct *isec = inode->i_security;
struct superblock_security_struct *sbsec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 newsid, sid = current_sid();
int rc = 0;
@@ -2752,6 +2778,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return -EPERM;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.selinux_audit_data = &sad;
ad.u.dentry = dentry;
rc = avc_has_perm(sid, isec->sid, isec->sclass,
@@ -2971,15 +2998,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
/* fall through */
case FIGETBSZ:
/* fall through */
- case EXT2_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
/* fall through */
- case EXT2_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
error = file_has_perm(cred, file, FILE__GETATTR);
break;
- case EXT2_IOC_SETFLAGS:
+ case FS_IOC_SETFLAGS:
/* fall through */
- case EXT2_IOC_SETVERSION:
+ case FS_IOC_SETVERSION:
error = file_has_perm(cred, file, FILE__SETATTR);
break;
@@ -3346,10 +3373,12 @@ static int selinux_kernel_module_request(char *kmod_name)
{
u32 sid;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
sid = task_sid(current);
COMMON_AUDIT_DATA_INIT(&ad, KMOD);
+ ad.selinux_audit_data = &sad;
ad.u.kmod_name = kmod_name;
return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
@@ -3488,8 +3517,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (ihlen < sizeof(_iph))
goto out;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
ret = 0;
if (proto)
@@ -3507,8 +3536,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3523,8 +3552,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3539,8 +3568,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3567,8 +3596,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (ip6 == NULL)
goto out;
- ad->u.net.v6info.saddr = ip6->saddr;
- ad->u.net.v6info.daddr = ip6->daddr;
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
nexthdr = ip6->nexthdr;
@@ -3588,8 +3617,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3600,8 +3629,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3612,8 +3641,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3633,13 +3662,13 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
char *addrp;
int ret;
- switch (ad->u.net.family) {
+ switch (ad->u.net->family) {
case PF_INET:
ret = selinux_parse_skb_ipv4(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v4info.saddr :
- &ad->u.net.v4info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+ &ad->u.net->v4info.daddr);
goto okay;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -3647,8 +3676,8 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
ret = selinux_parse_skb_ipv6(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v6info.saddr :
- &ad->u.net.v6info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+ &ad->u.net->v6info.daddr);
goto okay;
#endif /* IPV6 */
default:
@@ -3722,13 +3751,17 @@ static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
{
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
u32 tsid = task_sid(task);
if (sksec->sid == SECINITSID_KERNEL)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = sk;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->sk = sk;
return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
}
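The same stack-allocation pattern recurs in every networking hook below; condensed from the sock_has_perm() hunk above (sk is the socket being checked): the SELinux audit data and the network audit data are now separate on-stack structs referenced by pointer from common_audit_data.

	struct common_audit_data ad;
	struct selinux_audit_data sad = {0,};
	struct lsm_network_audit net = {0,};

	COMMON_AUDIT_DATA_INIT(&ad, NET);
	ad.selinux_audit_data = &sad;
	ad.u.net = &net;
	ad.u.net->sk = sk;
	return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);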
@@ -3806,6 +3839,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
char *addrp;
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3832,8 +3867,10 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (err)
goto out;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass,
SOCKET__NAME_BIND, &ad);
@@ -3865,13 +3902,15 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
goto out;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
if (family == PF_INET)
- ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
+ ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
else
- ad.u.net.v6info.saddr = addr6->sin6_addr;
+ ad.u.net->v6info.saddr = addr6->sin6_addr;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass, node_perm, &ad);
@@ -3898,6 +3937,8 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
if (sksec->sclass == SECCLASS_TCP_SOCKET ||
sksec->sclass == SECCLASS_DCCP_SOCKET) {
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3923,8 +3964,10 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.dport = htons(snum);
- ad.u.net.family = sk->sk_family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->dport = htons(snum);
+ ad.u.net->family = sk->sk_family;
err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
if (err)
goto out;
@@ -4013,10 +4056,14 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
struct sk_security_struct *sksec_other = other->sk_security;
struct sk_security_struct *sksec_new = newsk->sk_security;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
int err;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->sk = other;
err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
sksec_other->sclass,
@@ -4043,9 +4090,13 @@ static int selinux_socket_unix_may_send(struct socket *sock,
struct sk_security_struct *ssec = sock->sk->sk_security;
struct sk_security_struct *osec = other->sk->sk_security;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other->sk;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->sk = other->sk;
return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
&ad);
@@ -4081,11 +4132,15 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
char *addrp;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4112,6 +4167,8 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
u16 family = sk->sk_family;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4136,8 +4193,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4472,6 +4531,8 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
char *addrp;
u32 peer_sid;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
u8 secmark_active;
u8 netlbl_active;
u8 peerlbl_active;
@@ -4489,8 +4550,10 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
return NF_DROP;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
return NF_DROP;
@@ -4577,6 +4640,8 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
struct sock *sk = skb->sk;
struct sk_security_struct *sksec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 proto;
@@ -4585,8 +4650,10 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
sksec = sk->sk_security;
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
return NF_DROP;
@@ -4608,6 +4675,8 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u32 peer_sid;
struct sock *sk;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4654,8 +4723,10 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
}
COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.selinux_audit_data = &sad;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
return NF_DROP;
@@ -4770,11 +4841,13 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
isec = ipc_perms->security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = ipc_perms->key;
return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4795,6 +4868,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
int rc;
@@ -4805,6 +4879,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
isec = msq->q_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4825,11 +4900,13 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
isec = msq->q_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = msq->q_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4869,6 +4946,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
int rc;
@@ -4890,6 +4968,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
}
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
@@ -4914,6 +4993,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = task_sid(target);
int rc;
@@ -4921,6 +5001,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
msec = msg->security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid,
@@ -4936,6 +5017,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
int rc;
@@ -4946,6 +5028,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
isec = shp->shm_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = shp->shm_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4966,11 +5049,13 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
isec = shp->shm_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = shp->shm_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -5028,6 +5113,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
int rc;
@@ -5038,6 +5124,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
isec = sma->sem_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = sma->sem_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5058,11 +5145,13 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
+ struct selinux_audit_data sad = {0,};
u32 sid = current_sid();
isec = sma->sem_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.selinux_audit_data = &sad;
ad.u.ipc_id = sma->sem_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 005a91b..1931370 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -47,6 +47,31 @@ struct avc_cache_stats {
};
/*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_late_audit_data {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ u32 denied;
+ int result;
+};
+
+/*
+ * We collect this at the beginning of or during an SELinux security operation
+ */
+struct selinux_audit_data {
+ /*
+ * auditdeny is a bit tricky and unintuitive. See the
+ * comments in avc.c for its meaning and usage.
+ */
+ u32 auditdeny;
+ struct selinux_late_audit_data *slad;
+};
+
+/*
* AVC operations
*/
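
The hunks above convert common_audit_data to hold pointers to LSM-private and network audit data that each hook now allocates on its own stack. A minimal sketch of the resulting calling pattern, using the same helpers as the hooks above (the wrapping function and its arguments are hypothetical):

	static int example_rcv_skb_check(struct sk_buff *skb, u16 family)	/* hypothetical hook */
	{
		struct common_audit_data ad;
		struct selinux_audit_data sad = {0,};
		struct lsm_network_audit net = {0,};
		char *addrp;
		int err;

		COMMON_AUDIT_DATA_INIT(&ad, NET);
		ad.selinux_audit_data = &sad;	/* on-stack; only filled in when an audit record is emitted */
		ad.u.net = &net;		/* network fields now live behind a pointer */
		ad.u.net->netif = skb->skb_iif;
		ad.u.net->family = family;

		err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
		if (err)
			return err;
		/* ... then avc_has_perm(..., &ad) as in the hooks above ... */
		return 0;
	}
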
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 48a7d00..d7018bf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -344,7 +344,7 @@ static int sel_make_classes(void);
static int sel_make_policycap(void);
/* declaration for sel_make_class_dirs */
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino);
static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -1678,13 +1678,9 @@ static int sel_make_class_dir_entries(char *classname, int index,
inode->i_ino = sel_class_to_ino(index);
d_add(dentry, inode);
- dentry = d_alloc_name(dir, "perms");
- if (!dentry)
- return -ENOMEM;
-
- rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
- if (rc)
- return rc;
+ dentry = sel_make_dir(dir, "perms", &last_class_ino);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
rc = sel_make_perm_files(classname, index, dentry);
@@ -1733,15 +1729,12 @@ static int sel_make_classes(void)
for (i = 0; i < nclasses; i++) {
struct dentry *class_name_dir;
- rc = -ENOMEM;
- class_name_dir = d_alloc_name(class_dir, classes[i]);
- if (!class_name_dir)
- goto out;
-
- rc = sel_make_dir(class_dir->d_inode, class_name_dir,
+ class_name_dir = sel_make_dir(class_dir, classes[i],
&last_class_ino);
- if (rc)
+ if (IS_ERR(class_name_dir)) {
+ rc = PTR_ERR(class_name_dir);
goto out;
+ }
/* i+1 since class values are 1-indexed */
rc = sel_make_class_dir_entries(classes[i], i + 1,
@@ -1787,14 +1780,20 @@ static int sel_make_policycap(void)
return 0;
}
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino)
{
+ struct dentry *dentry = d_alloc_name(dir, name);
struct inode *inode;
- inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
- if (!inode)
- return -ENOMEM;
+ if (!dentry)
+ return ERR_PTR(-ENOMEM);
+
+ inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -1803,16 +1802,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
inc_nlink(inode);
d_add(dentry, inode);
/* bump link count on parent directory, too */
- inc_nlink(dir);
+ inc_nlink(dir->d_inode);
- return 0;
+ return dentry;
}
static int sel_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct dentry *dentry;
- struct inode *inode, *root_inode;
+ struct inode *inode;
struct inode_security_struct *isec;
static struct tree_descr selinux_files[] = {
@@ -1839,18 +1838,12 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
if (ret)
goto err;
- root_inode = sb->s_root->d_inode;
-
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
- if (!dentry)
+ bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+ if (IS_ERR(bool_dir)) {
+ ret = PTR_ERR(bool_dir);
+ bool_dir = NULL;
goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- bool_dir = dentry;
+ }
ret = -ENOMEM;
dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
@@ -1872,54 +1865,39 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
d_add(dentry, inode);
selinux_null = dentry;
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "avc");
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
+ }
ret = sel_make_avc_files(dentry);
if (ret)
goto err;
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "initial_contexts");
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
+ }
ret = sel_make_initcon_files(dentry);
if (ret)
goto err;
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "class");
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- class_dir = dentry;
-
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "policy_capabilities");
- if (!dentry)
+ class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+ if (IS_ERR(class_dir)) {
+ ret = PTR_ERR(class_dir);
+ class_dir = NULL;
goto err;
+ }
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+ if (IS_ERR(policycap_dir)) {
+ ret = PTR_ERR(policycap_dir);
+ policycap_dir = NULL;
goto err;
-
- policycap_dir = dentry;
-
+ }
return 0;
err:
printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
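
sel_make_dir() now allocates the dentry itself and reports failure with ERR_PTR(), so callers drop the separate d_alloc_name()/error-code dance. A short caller sketch under the new convention (the directory name is chosen only for illustration):

	struct dentry *dir;

	dir = sel_make_dir(sb->s_root, "example_dir", &sel_last_ino);	/* "example_dir" is illustrative */
	if (IS_ERR(dir))
		return PTR_ERR(dir);	/* sel_make_dir() already dput()s the dentry on inode failure */
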
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2ad0065..4ede719 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -185,6 +185,15 @@ struct smack_known {
*/
#define SMK_NUM_ACCESS_TYPE 5
+/* SMACK data */
+struct smack_audit_data {
+ const char *function;
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+};
+
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
* to save some stack
@@ -192,6 +201,7 @@ struct smack_known {
struct smk_audit_info {
#ifdef CONFIG_AUDIT
struct common_audit_data a;
+ struct smack_audit_data sad;
#endif
};
/*
@@ -311,7 +321,16 @@ static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
{
memset(a, 0, sizeof(*a));
a->a.type = type;
- a->a.smack_audit_data.function = func;
+ a->a.smack_audit_data = &a->sad;
+ a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+ char type, struct lsm_network_audit *net)
+{
+ smk_ad_init(a, func, type);
+ memset(net, 0, sizeof(*net));
+ a->a.u.net = net;
}
static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
@@ -337,7 +356,7 @@ static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
struct sock *sk)
{
- a->a.u.net.sk = sk;
+ a->a.u.net->sk = sk;
}
#else /* no AUDIT */
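
Because smack_audit_data is now embedded in smk_audit_info and the lsm_network_audit buffer comes from the caller, network hooks switch from smk_ad_init() to smk_ad_init_net(). A sketch of the intended sequence, matching the smack_lsm.c hunks further below:

	struct smk_audit_info ad;
	struct lsm_network_audit net;

	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	ad.a.u.net->family = sk->sk_family;	/* fields are reached through the pointer now */
	ad.a.u.net->netif = skb->skb_iif;
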
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index cc7cb6e..c8115f7 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -275,9 +275,9 @@ static inline void smack_str_from_perm(char *string, int access)
static void smack_log_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- struct smack_audit_data *sad = &ad->smack_audit_data;
+ struct smack_audit_data *sad = ad->smack_audit_data;
audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
- ad->smack_audit_data.function,
+ ad->smack_audit_data->function,
sad->result ? "denied" : "granted");
audit_log_format(ab, " subject=");
audit_log_untrustedstring(ab, sad->subject);
@@ -310,19 +310,19 @@ void smack_log(char *subject_label, char *object_label, int request,
if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
return;
- if (a->smack_audit_data.function == NULL)
- a->smack_audit_data.function = "unknown";
+ sad = a->smack_audit_data;
+
+ if (sad->function == NULL)
+ sad->function = "unknown";
/* end preparing the audit data */
- sad = &a->smack_audit_data;
smack_str_from_perm(request_buffer, request);
sad->subject = subject_label;
sad->object = object_label;
sad->request = request_buffer;
sad->result = result;
- a->lsm_pre_audit = smack_log_callback;
- common_lsm_audit(a);
+ common_lsm_audit(a, smack_log_callback, NULL);
}
#else /* #ifdef CONFIG_AUDIT */
void smack_log(char *subject_label, char *object_label, int request,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index cd667b4..81c03a5 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1939,16 +1939,17 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
char *hostsp;
struct socket_smack *ssp = sk->sk_security;
struct smk_audit_info ad;
+ struct lsm_network_audit net;
rcu_read_lock();
hostsp = smack_host_label(sap);
if (hostsp != NULL) {
sk_lbl = SMACK_UNLABELED_SOCKET;
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sap->sin_family;
- ad.a.u.net.dport = sap->sin_port;
- ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr;
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sap->sin_family;
+ ad.a.u.net->dport = sap->sin_port;
+ ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
#endif
rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
} else {
@@ -2808,9 +2809,10 @@ static int smack_unix_stream_connect(struct sock *sock,
struct socket_smack *osp = other->sk_security;
struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
+ struct lsm_network_audit net;
int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other);
if (!capable(CAP_MAC_OVERRIDE))
@@ -2840,9 +2842,10 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
struct socket_smack *ssp = sock->sk->sk_security;
struct socket_smack *osp = other->sk->sk_security;
struct smk_audit_info ad;
+ struct lsm_network_audit net;
int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other->sk);
if (!capable(CAP_MAC_OVERRIDE))
@@ -2990,6 +2993,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
char *csp;
int rc;
struct smk_audit_info ad;
+ struct lsm_network_audit net;
if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
return 0;
@@ -3007,9 +3011,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sk->sk_family;
- ad.a.u.net.netif = skb->skb_iif;
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->netif = skb->skb_iif;
ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
/*
@@ -3152,6 +3156,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
char *sp;
int rc;
struct smk_audit_info ad;
+ struct lsm_network_audit net;
/* handle mapped IPv4 packets arriving via IPv6 sockets */
if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
@@ -3166,9 +3171,9 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = family;
- ad.a.u.net.netif = skb->skb_iif;
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
/*
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index afbabf4..3fea5a1 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -58,9 +58,9 @@ static int mpc8610_hpcd_machine_probe(struct snd_soc_card *card)
{
struct mpc8610_hpcd_data *machine_data =
container_of(card, struct mpc8610_hpcd_data, card);
- struct ccsr_guts_86xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
- guts = ioremap(guts_phys, sizeof(struct ccsr_guts_86xx));
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
if (!guts) {
dev_err(card->dev, "could not map global utilities\n");
return -ENOMEM;
@@ -142,9 +142,9 @@ static int mpc8610_hpcd_machine_remove(struct snd_soc_card *card)
{
struct mpc8610_hpcd_data *machine_data =
container_of(card, struct mpc8610_hpcd_data, card);
- struct ccsr_guts_86xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
- guts = ioremap(guts_phys, sizeof(struct ccsr_guts_86xx));
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
if (!guts) {
dev_err(card->dev, "could not map global utilities\n");
return -ENOMEM;
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 4662340..982a1c9 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -46,7 +46,7 @@
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* device: The device to set as the target (CCSR_GUTS_DMUXCR_xxx)
*/
-static inline void guts_set_dmuxcr(struct ccsr_guts_85xx __iomem *guts,
+static inline void guts_set_dmuxcr(struct ccsr_guts __iomem *guts,
unsigned int co, unsigned int ch, unsigned int device)
{
unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -90,9 +90,9 @@ static int p1022_ds_machine_probe(struct snd_soc_card *card)
{
struct machine_data *mdata =
container_of(card, struct machine_data, card);
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
- guts = ioremap(guts_phys, sizeof(struct ccsr_guts_85xx));
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
if (!guts) {
dev_err(card->dev, "could not map global utilities\n");
return -ENOMEM;
@@ -164,9 +164,9 @@ static int p1022_ds_machine_remove(struct snd_soc_card *card)
{
struct machine_data *mdata =
container_of(card, struct machine_data, card);
- struct ccsr_guts_85xx __iomem *guts;
+ struct ccsr_guts __iomem *guts;
- guts = ioremap(guts_phys, sizeof(struct ccsr_guts_85xx));
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
if (!guts) {
dev_err(card->dev, "could not map global utilities\n");
return -ENOMEM;
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 87feeee..2d89f02 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -48,6 +48,9 @@ OPTIONS
Only consider these symbols. CSV that understands
file://filename entries.
+--symbol-filter=::
+ Only show symbols that match (partially) with this filter.
+
-U::
--hide-unresolved::
Only display entries resolved to a symbol.
@@ -110,6 +113,8 @@ OPTIONS
requires a tty, if one is not present, as when piping to other
commands, the stdio interface is used.
+--gtk:: Use the GTK2 interface.
+
-k::
--vmlinux=<file>::
vmlinux pathname
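
The two new perf-report options documented above can be combined on one command line; an illustrative invocation (the symbol name is only an example):

	perf report --gtk --symbol-filter=kmalloc
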
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 74fd7f8..820371f 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -182,7 +182,7 @@ endif
### --- END CONFIGURATION SECTION ---
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS =
# Guard against environment variables
@@ -234,6 +234,25 @@ endif
export PERL_PATH
+FLEX = $(CROSS_COMPILE)flex
+BISON= $(CROSS_COMPILE)bison
+
+event-parser:
+ $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+ $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
+
+$(OUTPUT)util/parse-events-flex.c: event-parser
+$(OUTPUT)util/parse-events-bison.c: event-parser
+
+pmu-parser:
+ $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+ $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
+
+$(OUTPUT)util/pmu-flex.c: pmu-parser
+$(OUTPUT)util/pmu-bison.c: pmu-parser
+
+$(OUTPUT)util/parse-events.o: event-parser pmu-parser
+
LIB_FILE=$(OUTPUT)libperf.a
LIB_H += ../../include/linux/perf_event.h
@@ -249,7 +268,7 @@ LIB_H += util/include/linux/const.h
LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
-LIB_H += util/include/linux/module.h
+LIB_H += util/include/linux/export.h
LIB_H += util/include/linux/poison.h
LIB_H += util/include/linux/prefetch.h
LIB_H += util/include/linux/rbtree.h
@@ -276,6 +295,7 @@ LIB_H += util/build-id.h
LIB_H += util/debug.h
LIB_H += util/debugfs.h
LIB_H += util/sysfs.h
+LIB_H += util/pmu.h
LIB_H += util/event.h
LIB_H += util/evsel.h
LIB_H += util/evlist.h
@@ -323,6 +343,7 @@ LIB_OBJS += $(OUTPUT)util/config.o
LIB_OBJS += $(OUTPUT)util/ctype.o
LIB_OBJS += $(OUTPUT)util/debugfs.o
LIB_OBJS += $(OUTPUT)util/sysfs.o
+LIB_OBJS += $(OUTPUT)util/pmu.o
LIB_OBJS += $(OUTPUT)util/environment.o
LIB_OBJS += $(OUTPUT)util/event.o
LIB_OBJS += $(OUTPUT)util/evlist.o
@@ -359,6 +380,10 @@ LIB_OBJS += $(OUTPUT)util/session.o
LIB_OBJS += $(OUTPUT)util/thread.o
LIB_OBJS += $(OUTPUT)util/thread_map.o
LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
+LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
+LIB_OBJS += $(OUTPUT)util/pmu-flex.o
+LIB_OBJS += $(OUTPUT)util/pmu-bison.o
LIB_OBJS += $(OUTPUT)util/trace-event-read.o
LIB_OBJS += $(OUTPUT)util/trace-event-info.o
LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
@@ -501,6 +526,20 @@ else
endif
endif
+ifdef NO_GTK2
+ BASIC_CFLAGS += -DNO_GTK2
+else
+ FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
+ ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
+ msg := $(warning GTK2 not found, disabling GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
+ BASIC_CFLAGS += -DNO_GTK2_SUPPORT
+ else
+ BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
+ EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
+ LIB_OBJS += $(OUTPUT)util/gtk/browser.o
+ endif
+endif
+
ifdef NO_LIBPERL
BASIC_CFLAGS += -DNO_LIBPERL
else
@@ -647,6 +686,8 @@ ifndef V
QUIET_LINK = @echo ' ' LINK $@;
QUIET_MKDIR = @echo ' ' MKDIR $@;
QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_FLEX = @echo ' ' FLEX $@;
+ QUIET_BISON = @echo ' ' BISON $@;
endif
endif
@@ -727,12 +768,28 @@ $(OUTPUT)perf.o perf.spec \
$(SCRIPTS) \
: $(OUTPUT)PERF-VERSION-FILE
+.SUFFIXES:
+.SUFFIXES: .o .c .S .s
+
+# These two need to be here so that when O= is not used they take precedence
+# over the general rule for .o
+
+$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
+$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
+ $(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $<
$(OUTPUT)%.o: %.S
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.s: %.S
+ $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -795,6 +852,8 @@ help:
@echo ' html - make html documentation'
@echo ' info - make GNU info documentation (access with info <foo>)'
@echo ' pdf - make pdf documentation'
+ @echo ' event-parser - make event parser code'
+ @echo ' pmu-parser - make pmu format parser code'
@echo ' TAGS - use etags to make tag information for source browsing'
@echo ' tags - use ctags to make tag information for source browsing'
@echo ' cscope - use cscope to make interactive browsing database'
@@ -931,6 +990,7 @@ clean:
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(MAKE) -C Documentation/ clean
$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
+ $(RM) $(OUTPUT)util/*-{bison,flex}*
$(python-clean)
.PHONY: all install clean strip
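
The Makefile now builds flex/bison-generated event and PMU parsers and, unless disabled, the GTK2 browser. Illustrative invocations from tools/perf (assuming flex and bison are installed):

	make event-parser pmu-parser	# regenerate the parser sources by hand
	make NO_GTK2=1			# build without GTK2 support
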
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 4f19513..d29d350 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -24,6 +24,11 @@ static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
+struct perf_diff {
+ struct perf_tool tool;
+ struct perf_session *session;
+};
+
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
{
@@ -32,12 +37,14 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(struct perf_tool *tool __used,
+static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
struct machine *machine)
{
+ struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
+ struct perf_session *session = _diff->session;
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -49,24 +56,26 @@ static int diff__process_sample_event(struct perf_tool *tool __used,
if (al.filtered || al.sym == NULL)
return 0;
- if (hists__add_entry(&evsel->hists, &al, sample->period)) {
+ if (hists__add_entry(&session->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
- evsel->hists.stats.total_period += sample->period;
+ session->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_tool perf_diff = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .lost = perf_event__process_lost,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
+static struct perf_diff diff = {
+ .tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -107,12 +116,6 @@ static void hists__resort_entries(struct hists *self)
self->entries = tmp;
}
-static void hists__set_positions(struct hists *self)
-{
- hists__output_resort(self);
- hists__resort_entries(self);
-}
-
static struct hist_entry *hists__find_entry(struct hists *self,
struct hist_entry *he)
{
@@ -146,30 +149,37 @@ static void hists__match(struct hists *older, struct hists *newer)
static int __cmd_diff(void)
{
int ret, i;
+#define older (session[0])
+#define newer (session[1])
struct perf_session *session[2];
- session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
- session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
+ older = perf_session__new(input_old, O_RDONLY, force, false,
+ &diff.tool);
+ newer = perf_session__new(input_new, O_RDONLY, force, false,
+ &diff.tool);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
- ret = perf_session__process_events(session[i], &perf_diff);
+ diff.session = session[i];
+ ret = perf_session__process_events(session[i], &diff.tool);
if (ret)
goto out_delete;
+ hists__output_resort(&session[i]->hists);
}
- hists__output_resort(&session[1]->hists);
if (show_displacement)
- hists__set_positions(&session[0]->hists);
+ hists__resort_entries(&older->hists);
- hists__match(&session[0]->hists, &session[1]->hists);
- hists__fprintf(&session[1]->hists, &session[0]->hists,
+ hists__match(&older->hists, &newer->hists);
+ hists__fprintf(&newer->hists, &older->hists,
show_displacement, true, 0, 0, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
return ret;
+#undef older
+#undef newer
}
static const char * const diff_usage[] = {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e91c6e..2e31743 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -40,7 +40,7 @@ struct perf_report {
struct perf_tool tool;
struct perf_session *session;
char const *input_name;
- bool force, use_tui, use_stdio;
+ bool force, use_tui, use_gtk, use_stdio;
bool hide_unresolved;
bool dont_use_callchains;
bool show_full_info;
@@ -50,6 +50,7 @@ struct perf_report {
const char *pretty_printing_style;
symbol_filter_t annotate_init;
const char *cpu_list;
+ const char *symbol_filter_str;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
@@ -400,6 +401,9 @@ static int __cmd_report(struct perf_report *rep)
list_for_each_entry(pos, &session->evlist->entries, node) {
struct hists *hists = &pos->hists;
+ if (pos->idx == 0)
+ hists->symbol_filter_str = rep->symbol_filter_str;
+
hists__collapse_resort(hists);
hists__output_resort(hists);
nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
@@ -411,8 +415,13 @@ static int __cmd_report(struct perf_report *rep)
}
if (use_browser > 0) {
- perf_evlist__tui_browse_hists(session->evlist, help,
- NULL, NULL, 0);
+ if (use_browser == 1) {
+ perf_evlist__tui_browse_hists(session->evlist, help,
+ NULL, NULL, 0);
+ } else if (use_browser == 2) {
+ perf_evlist__gtk_browse_hists(session->evlist, help,
+ NULL, NULL, 0);
+ }
} else
perf_evlist__tty_browse_hists(session->evlist, rep, help);
@@ -569,6 +578,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
"pretty printing style key: normal raw"),
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
"Use the stdio interface"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
@@ -591,6 +601,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
+ OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
+ "only show symbols that (partially) match with this filter"),
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
@@ -624,6 +636,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
use_browser = 0;
else if (report.use_tui)
use_browser = 1;
+ else if (report.use_gtk)
+ use_browser = 2;
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
@@ -660,7 +674,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
}
if (strcmp(report.input_name, "-") != 0) {
- setup_browser(true);
+ if (report.use_gtk)
+ perf_gtk_setup_browser(argc, argv, true);
+ else
+ setup_browser(true);
} else {
use_browser = 0;
}
@@ -709,11 +726,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
} else
symbol_conf.exclude_other = false;
- /*
- * Any (unrecognized) arguments left?
- */
- if (argc)
- usage_with_options(report_usage, options);
+ if (argc) {
+ /*
+ * Special case: if there's an argument left then assume that
+ * it's a symbol filter:
+ */
+ if (argc > 1)
+ usage_with_options(report_usage, options);
+
+ report.symbol_filter_str = argv[0];
+ }
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ea40e4e..c941bb6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -296,7 +296,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
if (system_wide)
return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
group, group_fd);
- if (!target_pid && !target_tid) {
+ if (!target_pid && !target_tid && (!group || evsel == first)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 3e087ce..1c5b980 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -13,6 +13,7 @@
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
+#include "util/pmu.h"
#include "../../include/linux/hw_breakpoint.h"
#include <sys/mman.h>
@@ -650,7 +651,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
- TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config);
return 0;
}
@@ -677,6 +678,24 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong period",
+ 100000 == evsel->attr.sample_period);
+ TEST_ASSERT_VAL("wrong config1",
+ 0 == evsel->attr.config1);
+ TEST_ASSERT_VAL("wrong config2",
+ 1 == evsel->attr.config2);
+ return 0;
+}
+
static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -858,6 +877,115 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
return test__checkevent_genhw(evlist);
}
+static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint(evlist);
+}
+
+static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint_x(evlist);
+}
+
+static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint_r(evlist);
+}
+
+static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint_w(evlist);
+}
+
+static int test__checkevent_pmu(struct perf_evlist *evlist)
+{
+
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 10 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong config1", 1 == evsel->attr.config1);
+ TEST_ASSERT_VAL("wrong config2", 3 == evsel->attr.config2);
+ TEST_ASSERT_VAL("wrong period", 1000 == evsel->attr.sample_period);
+
+ return 0;
+}
+
+static int test__checkevent_list(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+ /* r1 */
+ evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
+ TEST_ASSERT_VAL("wrong config2", 0 == evsel->attr.config2);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ /* syscalls:sys_enter_open:k */
+ evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
+ evsel->attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ /* 1:1:hp */
+ evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+ TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return 0;
+}
+
static struct test__event_st {
const char *name;
__u32 type;
@@ -872,7 +1000,7 @@ static struct test__event_st {
.check = test__checkevent_tracepoint_multi,
},
{
- .name = "r1",
+ .name = "r1a",
.check = test__checkevent_raw,
},
{
@@ -884,6 +1012,10 @@ static struct test__event_st {
.check = test__checkevent_symbolic_name,
},
{
+ .name = "cycles/period=100000,config2/",
+ .check = test__checkevent_symbolic_name_config,
+ },
+ {
.name = "faults",
.check = test__checkevent_symbolic_alias,
},
@@ -916,7 +1048,7 @@ static struct test__event_st {
.check = test__checkevent_tracepoint_multi_modifier,
},
{
- .name = "r1:kp",
+ .name = "r1a:kp",
.check = test__checkevent_raw_modifier,
},
{
@@ -935,6 +1067,30 @@ static struct test__event_st {
.name = "L1-dcache-load-miss:kp",
.check = test__checkevent_genhw_modifier,
},
+ {
+ .name = "mem:0:u",
+ .check = test__checkevent_breakpoint_modifier,
+ },
+ {
+ .name = "mem:0:x:k",
+ .check = test__checkevent_breakpoint_x_modifier,
+ },
+ {
+ .name = "mem:0:r:hp",
+ .check = test__checkevent_breakpoint_r_modifier,
+ },
+ {
+ .name = "mem:0:w:up",
+ .check = test__checkevent_breakpoint_w_modifier,
+ },
+ {
+ .name = "cpu/config=10,config1,config2=3,period=1000/u",
+ .check = test__checkevent_pmu,
+ },
+ {
+ .name = "r1,syscalls:sys_enter_open:k,1:1:hp",
+ .check = test__checkevent_list,
+ },
};
#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -960,10 +1116,9 @@ static int test__parse_events(void)
}
ret = e->check(evlist);
+ perf_evlist__delete(evlist);
if (ret)
break;
-
- perf_evlist__delete(evlist);
}
return ret;
@@ -1462,6 +1617,11 @@ static int test__rdpmc(void)
#endif
+static int test__perf_pmu(void)
+{
+ return perf_pmu__test();
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -1497,6 +1657,10 @@ static struct test {
.func = test__PERF_RECORD,
},
{
+ .desc = "Test perf pmu format parsing",
+ .func = test__perf_pmu,
+ },
+ {
.func = NULL,
},
};
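
Note that the raw-event test names change from "r1" to "r1a" and the expected attr.config becomes 0x1a, i.e. the digits after 'r' are now parsed as a hex config value. The new entries also cover sysfs-style PMU terms and comma-separated event lists, for example:

	cpu/config=10,config1,config2=3,period=1000/u
	r1,syscalls:sys_enter_open:k,1:1:hp
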
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 6170fd2..d9084e0 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -65,6 +65,21 @@ int main(void)
endef
endif
+ifndef NO_GTK2
+define SOURCE_GTK2
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(int argc, char *argv[])
+{
+ gtk_init(&argc, &argv);
+
+ return 0;
+}
+endef
+endif
+
ifndef NO_LIBPERL
define SOURCE_PERL_EMBED
#include <EXTERN.h>
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e5a462f..199f69e 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -28,8 +28,8 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
int symbol__alloc_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
- size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
- (sym->end - sym->start) * sizeof(u64));
+ const size_t size = sym->end - sym->start + 1;
+ size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
if (notes->src == NULL)
@@ -64,7 +64,7 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
- if (addr >= sym->end)
+ if (addr > sym->end)
return 0;
offset = addr - sym->start;
@@ -408,7 +408,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
if (!notes->src->lines)
return -1;
- start = map->unmap_ip(map, sym->start);
+ start = map__rip_2objdump(map, sym->start);
for (i = 0; i < len; i++) {
char *path = NULL;
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index fc5e5a0..8dd224d 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -45,6 +45,18 @@ void setup_browser(bool fallback_to_pager);
void exit_browser(bool wait_for_ok);
#endif
+#ifdef NO_GTK2_SUPPORT
+static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
+{
+ if (fallback_to_pager)
+ setup_pager();
+}
+static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
+#else
+void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
+void perf_gtk_exit_browser(bool wait_for_ok);
+#endif
+
char *alias_lookup(const char *alias);
int split_cmdline(char *cmdline, const char ***argv);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 159263d..1986d80 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -51,13 +51,15 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
void perf_evlist__config_attrs(struct perf_evlist *evlist,
struct perf_record_opts *opts)
{
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *first;
if (evlist->cpus->map[0] < 0)
opts->no_inherit = true;
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
list_for_each_entry(evsel, &evlist->entries, node) {
- perf_evsel__config(evsel, opts);
+ perf_evsel__config(evsel, opts, first);
if (evlist->nr_entries > 1)
evsel->attr.sample_type |= PERF_SAMPLE_ID;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f421f7c..8c13dbc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,7 +34,7 @@ int __perf_evsel__sample_size(u64 sample_type)
return size;
}
-static void hists__init(struct hists *hists)
+void hists__init(struct hists *hists)
{
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -63,7 +63,8 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
-void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
+ struct perf_evsel *first)
{
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
@@ -134,7 +135,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
attr->mmap = track;
attr->comm = track;
- if (!opts->target_pid && !opts->target_tid && !opts->system_wide) {
+ if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
+ (!opts->group || evsel == first)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
@@ -578,6 +580,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
return -EFAULT;
data->raw_data = (void *) pdata;
+
+ array = (void *)array + data->raw_size + sizeof(u32);
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 326b8e4..3d6b3e4 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -80,7 +80,8 @@ void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
void perf_evsel__config(struct perf_evsel *evsel,
- struct perf_record_opts *opts);
+ struct perf_record_opts *opts,
+ struct perf_evsel *first);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -169,4 +170,6 @@ static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
return __perf_evsel__sample_size(evsel->attr.sample_type);
}
+void hists__init(struct hists *hists);
+
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/gtk/browser.c b/tools/perf/util/gtk/browser.c
new file mode 100644
index 0000000..258352a
--- /dev/null
+++ b/tools/perf/util/gtk/browser.c
@@ -0,0 +1,189 @@
+#include "../evlist.h"
+#include "../cache.h"
+#include "../evsel.h"
+#include "../sort.h"
+#include "../hist.h"
+#include "gtk.h"
+
+#include <signal.h>
+
+#define MAX_COLUMNS 32
+
+void perf_gtk_setup_browser(int argc, const char *argv[],
+ bool fallback_to_pager __used)
+{
+ gtk_init(&argc, (char ***)&argv);
+}
+
+void perf_gtk_exit_browser(bool wait_for_ok __used)
+{
+ gtk_main_quit();
+}
+
+static void perf_gtk_signal(int sig)
+{
+ psignal(sig, "perf");
+ gtk_main_quit();
+}
+
+static void perf_gtk_resize_window(GtkWidget *window)
+{
+ GdkRectangle rect;
+ GdkScreen *screen;
+ int monitor;
+ int height;
+ int width;
+
+ screen = gtk_widget_get_screen(window);
+
+ monitor = gdk_screen_get_monitor_at_window(screen, window->window);
+
+ gdk_screen_get_monitor_geometry(screen, monitor, &rect);
+
+ width = rect.width * 3 / 4;
+ height = rect.height * 3 / 4;
+
+ gtk_window_resize(GTK_WINDOW(window), width, height);
+}
+
+static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
+{
+ GType col_types[MAX_COLUMNS];
+ GtkCellRenderer *renderer;
+ struct sort_entry *se;
+ GtkListStore *store;
+ struct rb_node *nd;
+ u64 total_period;
+ GtkWidget *view;
+ int col_idx;
+ int nr_cols;
+
+ nr_cols = 0;
+
+ /* The percentage column */
+ col_types[nr_cols++] = G_TYPE_STRING;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ col_types[nr_cols++] = G_TYPE_STRING;
+ }
+
+ store = gtk_list_store_newv(nr_cols, col_types);
+
+ view = gtk_tree_view_new();
+
+ renderer = gtk_cell_renderer_text_new();
+
+ col_idx = 0;
+
+ /* The percentage column */
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, "Overhead (%)",
+ renderer, "text",
+ col_idx++, NULL);
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, se->se_header,
+ renderer, "text",
+ col_idx++, NULL);
+ }
+
+ gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+
+ g_object_unref(GTK_TREE_MODEL(store));
+
+ total_period = hists->stats.total_period;
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ GtkTreeIter iter;
+ double percent;
+ char s[512];
+
+ if (h->filtered)
+ continue;
+
+ gtk_list_store_append(store, &iter);
+
+ col_idx = 0;
+
+ percent = (h->period * 100.0) / total_period;
+
+ snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ se->se_snprintf(h, s, ARRAY_SIZE(s),
+ hists__col_len(hists, se->se_width_idx));
+
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ }
+ }
+
+ gtk_container_add(GTK_CONTAINER(window), view);
+}
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
+ const char *help __used,
+ void (*timer) (void *arg)__used,
+ void *arg __used, int delay_secs __used)
+{
+ struct perf_evsel *pos;
+ GtkWidget *notebook;
+ GtkWidget *window;
+
+ signal(SIGSEGV, perf_gtk_signal);
+ signal(SIGFPE, perf_gtk_signal);
+ signal(SIGINT, perf_gtk_signal);
+ signal(SIGQUIT, perf_gtk_signal);
+ signal(SIGTERM, perf_gtk_signal);
+
+ window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+
+ gtk_window_set_title(GTK_WINDOW(window), "perf report");
+
+ g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+ notebook = gtk_notebook_new();
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ struct hists *hists = &pos->hists;
+ const char *evname = event_name(pos);
+ GtkWidget *scrolled_window;
+ GtkWidget *tab_label;
+
+ scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+
+ gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+ GTK_POLICY_AUTOMATIC,
+ GTK_POLICY_AUTOMATIC);
+
+ perf_gtk_show_hists(scrolled_window, hists);
+
+ tab_label = gtk_label_new(evname);
+
+ gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
+ }
+
+ gtk_container_add(GTK_CONTAINER(window), notebook);
+
+ gtk_widget_show_all(window);
+
+ perf_gtk_resize_window(window);
+
+ gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+ gtk_main();
+
+ return 0;
+}
diff --git a/tools/perf/util/gtk/gtk.h b/tools/perf/util/gtk/gtk.h
new file mode 100644
index 0000000..75177ee
--- /dev/null
+++ b/tools/perf/util/gtk/gtk.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_GTK_H_
+#define _PERF_GTK_H_ 1
+
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+#endif /* _PERF_GTK_H_ */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fcd9cf3..4c7c2d7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1177,7 +1177,7 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
goto error;
msz = sizeof(attr);
- if (sz < (ssize_t)msz)
+ if (sz < msz)
msz = sz;
for (i = 0 ; i < nre; i++) {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3dc99a9..2ec4b60 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -10,11 +10,14 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he);
enum hist_filter {
HIST_FILTER__DSO,
HIST_FILTER__THREAD,
HIST_FILTER__PARENT,
+ HIST_FILTER__SYMBOL,
};
struct callchain_param callchain_param = {
@@ -420,6 +423,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
hists__filter_entry_by_dso(hists, he);
hists__filter_entry_by_thread(hists, he);
+ hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
@@ -603,7 +607,7 @@ static void init_rem_hits(void)
rem_hits.ms.sym = rem_sq_bracket;
}
-static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
u64 total_samples, int depth,
int depth_mask, int left_margin)
{
@@ -611,21 +615,16 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
struct callchain_node *child;
struct callchain_list *chain;
int new_depth_mask = depth_mask;
- u64 new_total;
u64 remaining;
size_t ret = 0;
int i;
uint entries_printed = 0;
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = self->children_hit;
- else
- new_total = total_samples;
-
- remaining = new_total;
+ remaining = total_samples;
- node = rb_first(&self->rb_root);
+ node = rb_first(root);
while (node) {
+ u64 new_total;
u64 cumul;
child = rb_entry(node, struct callchain_node, rb_node);
@@ -653,11 +652,17 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
list_for_each_entry(chain, &child->val, list) {
ret += ipchain__fprintf_graph(fp, chain, depth,
new_depth_mask, i++,
- new_total,
+ total_samples,
cumul,
left_margin);
}
- ret += __callchain__fprintf_graph(fp, child, new_total,
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = child->children_hit;
+ else
+ new_total = total_samples;
+
+ ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
depth + 1,
new_depth_mask | (1 << depth),
left_margin);
@@ -667,61 +672,75 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
}
if (callchain_param.mode == CHAIN_GRAPH_REL &&
- remaining && remaining != new_total) {
+ remaining && remaining != total_samples) {
if (!rem_sq_bracket)
return ret;
new_depth_mask &= ~(1 << (depth - 1));
-
ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
- new_depth_mask, 0, new_total,
+ new_depth_mask, 0, total_samples,
remaining, left_margin);
}
return ret;
}
-static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
u64 total_samples, int left_margin)
{
+ struct callchain_node *cnode;
struct callchain_list *chain;
+ u32 entries_printed = 0;
bool printed = false;
+ struct rb_node *node;
int i = 0;
- int ret = 0;
- u32 entries_printed = 0;
-
- list_for_each_entry(chain, &self->val, list) {
- if (!i++ && sort__first_dimension == SORT_SYM)
- continue;
-
- if (!printed) {
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "|\n");
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "---");
-
- left_margin += 3;
- printed = true;
- } else
- ret += callchain__fprintf_left_margin(fp, left_margin);
+ int ret = 0;
- if (chain->ms.sym)
- ret += fprintf(fp, " %s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+ /*
+ * If we have a single callchain root, don't bother printing
+ * its percentage (100% in fractal mode, and the same percentage
+ * as the hist in graph mode). This also avoids one level of column.
+ */
+ node = rb_first(root);
+ if (node && !rb_next(node)) {
+ cnode = rb_entry(node, struct callchain_node, rb_node);
+ list_for_each_entry(chain, &cnode->val, list) {
+ /*
+ * If we sort by symbol, the first entry is the same as
+ * the symbol. No need to print it, otherwise it appears
+ * displayed twice.
+ */
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
- if (++entries_printed == callchain_param.print_limit)
- break;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+ root = &cnode->rb_root;
}
- ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
-
- return ret;
+ return __callchain__fprintf_graph(fp, root, total_samples,
+ 1, 1, left_margin);
}
-static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
- u64 total_samples)
+static size_t __callchain__fprintf_flat(FILE *fp,
+ struct callchain_node *self,
+ u64 total_samples)
{
struct callchain_list *chain;
size_t ret = 0;
@@ -729,7 +748,7 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
if (!self)
return 0;
- ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+ ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
list_for_each_entry(chain, &self->val, list) {
@@ -745,44 +764,58 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
return ret;
}
-static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
- u64 total_samples, int left_margin,
- FILE *fp)
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+ u64 total_samples)
{
- struct rb_node *rb_node;
- struct callchain_node *chain;
size_t ret = 0;
u32 entries_printed = 0;
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
- rb_node = rb_first(&he->sorted_chain);
+ rb_node = rb_first(self);
while (rb_node) {
double percent;
chain = rb_entry(rb_node, struct callchain_node, rb_node);
percent = chain->hit * 100.0 / total_samples;
- switch (callchain_param.mode) {
- case CHAIN_FLAT:
- ret += percent_color_fprintf(fp, " %6.2f%%\n",
- percent);
- ret += callchain__fprintf_flat(fp, chain, total_samples);
- break;
- case CHAIN_GRAPH_ABS: /* Falldown */
- case CHAIN_GRAPH_REL:
- ret += callchain__fprintf_graph(fp, chain, total_samples,
- left_margin);
- case CHAIN_NONE:
- default:
- break;
- }
+
+ ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
+ ret += __callchain__fprintf_flat(fp, chain, total_samples);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
+
rb_node = rb_next(rb_node);
}
return ret;
}
+static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
+ u64 total_samples, int left_margin,
+ FILE *fp)
+{
+ switch (callchain_param.mode) {
+ case CHAIN_GRAPH_REL:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
+ left_margin);
+ break;
+ case CHAIN_GRAPH_ABS:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+ left_margin);
+ break;
+ case CHAIN_FLAT:
+ return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
+ break;
+ case CHAIN_NONE:
+ break;
+ default:
+ pr_err("Bad callchain mode\n");
+ }
+
+ return 0;
+}
+
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
struct rb_node *next = rb_first(&hists->entries);
@@ -887,9 +920,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
diff = new_percent - old_percent;
if (fabs(diff) >= 0.01)
- ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+ scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
else
- ret += scnprintf(bf, sizeof(bf), " ");
+ scnprintf(bf, sizeof(bf), " ");
if (sep)
ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -898,9 +931,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
if (show_displacement) {
if (displacement)
- ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
+ scnprintf(bf, sizeof(bf), "%+4ld", displacement);
else
- ret += scnprintf(bf, sizeof(bf), " ");
+ scnprintf(bf, sizeof(bf), " ");
if (sep)
ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -1247,6 +1280,37 @@ void hists__filter_by_thread(struct hists *hists)
}
}
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->symbol_filter_str != NULL &&
+ (!he->ms.sym || strstr(he->ms.sym->name,
+ hists->symbol_filter_str) == NULL)) {
+ he->filtered |= (1 << HIST_FILTER__SYMBOL);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->nr_entries = hists->stats.total_period = 0;
+ hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (hists__filter_entry_by_symbol(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+ }
+}
+
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
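The symbol filter added above is a plain substring match: hists__filter_entry_by_symbol() keeps an entry only when its symbol name contains hists->symbol_filter_str (via strstr). As an illustrative example, a filter string of "malloc" would keep entries such as "malloc" or "malloc_consolidate" and hide "free" as well as entries with no resolved symbol.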
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9413f3e..2cae9df 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@ struct hists {
const struct thread *thread_filter;
const struct dso *dso_filter;
const char *uid_filter_str;
+ const char *symbol_filter_str;
pthread_mutex_t lock;
struct events_stats stats;
u64 event_stream;
@@ -107,6 +108,7 @@ int hist_entry__annotate(struct hist_entry *self, size_t privsize);
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
u16 hists__col_len(struct hists *self, enum hist_column col);
void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
@@ -145,6 +147,23 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
int refresh);
#endif
+#ifdef NO_GTK2_SUPPORT
+static inline
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
+ const char *help __used,
+ void(*timer)(void *arg) __used,
+ void *arg __used,
+ int refresh __used)
+{
+ return 0;
+}
+
+#else
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
+ void(*timer)(void *arg), void *arg,
+ int refresh);
+#endif
+
unsigned int hists__sort_list_width(struct hists *self);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/export.h
index b43e2dc..b43e2dc 100644
--- a/tools/perf/util/include/linux/module.h
+++ b/tools/perf/util/include/linux/export.h
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c7a6f6f..5b3a0ef 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -11,6 +11,10 @@
#include "cache.h"
#include "header.h"
#include "debugfs.h"
+#include "parse-events-flex.h"
+#include "pmu.h"
+
+#define MAX_NAME_LEN 100
struct event_symbol {
u8 type;
@@ -19,11 +23,8 @@ struct event_symbol {
const char *alias;
};
-enum event_result {
- EVT_FAILED,
- EVT_HANDLED,
- EVT_HANDLED_ALL
-};
+int parse_events_parse(struct list_head *list, struct list_head *list_tmp,
+ int *idx);
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -354,7 +355,24 @@ const char *__event_name(int type, u64 config)
return "unknown";
}
-static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
+static int add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name)
+{
+ struct perf_evsel *evsel;
+
+ event_attr_init(attr);
+
+ evsel = perf_evsel__new(attr, (*idx)++);
+ if (!evsel)
+ return -ENOMEM;
+
+ list_add_tail(&evsel->node, list);
+
+ evsel->name = strdup(name);
+ return 0;
+}
+
+static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
@@ -362,58 +380,57 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
for (i = 0; i < size; i++) {
for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
n = strlen(names[i][j]);
- if (n > longest && !strncasecmp(*str, names[i][j], n))
+ if (n > longest && !strncasecmp(str, names[i][j], n))
longest = n;
}
- if (longest > 0) {
- *str += longest;
+ if (longest > 0)
return i;
- }
}
return -1;
}
-static enum event_result
-parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2)
{
- const char *s = *str;
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
int cache_type = -1, cache_op = -1, cache_result = -1;
+ char *op_result[2] = { op_result1, op_result2 };
+ int i, n;
- cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
/*
* No fallback - if we cannot get a clear cache type
* then bail out:
*/
+ cache_type = parse_aliases(type, hw_cache,
+ PERF_COUNT_HW_CACHE_MAX);
if (cache_type == -1)
- return EVT_FAILED;
+ return -EINVAL;
- while ((cache_op == -1 || cache_result == -1) && *s == '-') {
- ++s;
+ n = snprintf(name, MAX_NAME_LEN, "%s", type);
+
+ for (i = 0; (i < 2) && (op_result[i]); i++) {
+ char *str = op_result[i];
+
+ snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
if (cache_op == -1) {
- cache_op = parse_aliases(&s, hw_cache_op,
- PERF_COUNT_HW_CACHE_OP_MAX);
+ cache_op = parse_aliases(str, hw_cache_op,
+ PERF_COUNT_HW_CACHE_OP_MAX);
if (cache_op >= 0) {
if (!is_cache_op_valid(cache_type, cache_op))
- return EVT_FAILED;
+ return -EINVAL;
continue;
}
}
if (cache_result == -1) {
- cache_result = parse_aliases(&s, hw_cache_result,
+ cache_result = parse_aliases(str, hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX);
if (cache_result >= 0)
continue;
}
-
- /*
- * Can't parse this as a cache op or result, so back up
- * to the '-'.
- */
- --s;
- break;
}
/*
@@ -428,20 +445,17 @@ parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
if (cache_result == -1)
cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
- attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
- attr->type = PERF_TYPE_HW_CACHE;
-
- *str = s;
- return EVT_HANDLED;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
+ attr.type = PERF_TYPE_HW_CACHE;
+ return add_event(list, idx, &attr, name);
}
-static enum event_result
-parse_single_tracepoint_event(char *sys_name,
- const char *evt_name,
- unsigned int evt_length,
- struct perf_event_attr *attr,
- const char **strp)
+static int add_tracepoint(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
{
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
char evt_path[MAXPATHLEN];
char id_buf[4];
u64 id;
@@ -452,130 +466,80 @@ parse_single_tracepoint_event(char *sys_name,
fd = open(evt_path, O_RDONLY);
if (fd < 0)
- return EVT_FAILED;
+ return -1;
if (read(fd, id_buf, sizeof(id_buf)) < 0) {
close(fd);
- return EVT_FAILED;
+ return -1;
}
close(fd);
id = atoll(id_buf);
- attr->config = id;
- attr->type = PERF_TYPE_TRACEPOINT;
- *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */
-
- attr->sample_type |= PERF_SAMPLE_RAW;
- attr->sample_type |= PERF_SAMPLE_TIME;
- attr->sample_type |= PERF_SAMPLE_CPU;
-
- attr->sample_period = 1;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = id;
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type |= PERF_SAMPLE_RAW;
+ attr.sample_type |= PERF_SAMPLE_TIME;
+ attr.sample_type |= PERF_SAMPLE_CPU;
+ attr.sample_period = 1;
- return EVT_HANDLED;
+ snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
+ return add_event(list, idx, &attr, name);
}
-/* sys + ':' + event + ':' + flags*/
-#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
-static enum event_result
-parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
- const char *evt_exp, char *flags)
+static int add_tracepoint_multi(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
{
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
DIR *evt_dir;
+ int ret = 0;
snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
-
if (!evt_dir) {
perror("Can't open event dir");
- return EVT_FAILED;
+ return -1;
}
- while ((evt_ent = readdir(evt_dir))) {
- char event_opt[MAX_EVOPT_LEN + 1];
- int len;
-
+ while (!ret && (evt_ent = readdir(evt_dir))) {
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
|| !strcmp(evt_ent->d_name, "enable")
|| !strcmp(evt_ent->d_name, "filter"))
continue;
- if (!strglobmatch(evt_ent->d_name, evt_exp))
+ if (!strglobmatch(evt_ent->d_name, evt_name))
continue;
- len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
- evt_ent->d_name, flags ? ":" : "",
- flags ?: "");
- if (len < 0)
- return EVT_FAILED;
-
- if (parse_events(evlist, event_opt, 0))
- return EVT_FAILED;
+ ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
}
- return EVT_HANDLED_ALL;
+ return ret;
}
-static enum event_result
-parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
- struct perf_event_attr *attr)
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event)
{
- const char *evt_name;
- char *flags = NULL, *comma_loc;
- char sys_name[MAX_EVENT_LENGTH];
- unsigned int sys_length, evt_length;
-
- if (debugfs_valid_mountpoint(tracing_events_path))
- return 0;
-
- evt_name = strchr(*strp, ':');
- if (!evt_name)
- return EVT_FAILED;
-
- sys_length = evt_name - *strp;
- if (sys_length >= MAX_EVENT_LENGTH)
- return 0;
+ int ret;
- strncpy(sys_name, *strp, sys_length);
- sys_name[sys_length] = '\0';
- evt_name = evt_name + 1;
+ ret = debugfs_valid_mountpoint(tracing_events_path);
+ if (ret)
+ return ret;
- comma_loc = strchr(evt_name, ',');
- if (comma_loc) {
- /* take the event name up to the comma */
- evt_name = strndup(evt_name, comma_loc - evt_name);
- }
- flags = strchr(evt_name, ':');
- if (flags) {
- /* split it out: */
- evt_name = strndup(evt_name, flags - evt_name);
- flags++;
- }
-
- evt_length = strlen(evt_name);
- if (evt_length >= MAX_EVENT_LENGTH)
- return EVT_FAILED;
- if (strpbrk(evt_name, "*?")) {
- *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
- return parse_multiple_tracepoint_event(evlist, sys_name,
- evt_name, flags);
- } else {
- return parse_single_tracepoint_event(sys_name, evt_name,
- evt_length, attr, strp);
- }
+ return strpbrk(event, "*?") ?
+ add_tracepoint_multi(list, idx, sys, event) :
+ add_tracepoint(list, idx, sys, event);
}
-static enum event_result
-parse_breakpoint_type(const char *type, const char **strp,
- struct perf_event_attr *attr)
+static int
+parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
int i;
for (i = 0; i < 3; i++) {
- if (!type[i])
+ if (!type || !type[i])
break;
switch (type[i]) {
@@ -589,164 +553,146 @@ parse_breakpoint_type(const char *type, const char **strp,
attr->bp_type |= HW_BREAKPOINT_X;
break;
default:
- return EVT_FAILED;
+ return -EINVAL;
}
}
+
if (!attr->bp_type) /* Default */
attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
- *strp = type + i;
-
- return EVT_HANDLED;
+ return 0;
}
-static enum event_result
-parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type)
{
- const char *target;
- const char *type;
- char *endaddr;
- u64 addr;
- enum event_result err;
-
- target = strchr(*strp, ':');
- if (!target)
- return EVT_FAILED;
-
- if (strncmp(*strp, "mem", target - *strp) != 0)
- return EVT_FAILED;
-
- target++;
-
- addr = strtoull(target, &endaddr, 0);
- if (target == endaddr)
- return EVT_FAILED;
-
- attr->bp_addr = addr;
- *strp = endaddr;
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
- type = strchr(target, ':');
+ memset(&attr, 0, sizeof(attr));
+ attr.bp_addr = (unsigned long) ptr;
- /* If no type is defined, just rw as default */
- if (!type) {
- attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
- } else {
- err = parse_breakpoint_type(++type, strp, attr);
- if (err == EVT_FAILED)
- return EVT_FAILED;
- }
+ if (parse_breakpoint_type(type, &attr))
+ return -EINVAL;
/*
* We should find a nice way to override the access length
* Provide some defaults for now
*/
- if (attr->bp_type == HW_BREAKPOINT_X)
- attr->bp_len = sizeof(long);
+ if (attr.bp_type == HW_BREAKPOINT_X)
+ attr.bp_len = sizeof(long);
else
- attr->bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
- attr->type = PERF_TYPE_BREAKPOINT;
+ attr.type = PERF_TYPE_BREAKPOINT;
- return EVT_HANDLED;
+ snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
+ return add_event(list, idx, &attr, name);
}
-static int check_events(const char *str, unsigned int i)
+static int config_term(struct perf_event_attr *attr,
+ struct parse_events__term *term)
{
- int n;
+ switch (term->type) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ attr->config = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ attr->config1 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ attr->config2 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ attr->sample_period = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ /*
+ * TODO uncomment when the field is available
+ * attr->branch_sample_type = term->val.num;
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
- n = strlen(event_symbols[i].symbol);
- if (!strncasecmp(str, event_symbols[i].symbol, n))
- return n;
+static int config_attr(struct perf_event_attr *attr,
+ struct list_head *head, int fail)
+{
+ struct parse_events__term *term;
- n = strlen(event_symbols[i].alias);
- if (n) {
- if (!strncasecmp(str, event_symbols[i].alias, n))
- return n;
- }
+ list_for_each_entry(term, head, list)
+ if (config_term(attr, term) && fail)
+ return -EINVAL;
return 0;
}
-static enum event_result
-parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ unsigned long type, unsigned long config,
+ struct list_head *head_config)
{
- const char *str = *strp;
- unsigned int i;
- int n;
-
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
- n = check_events(str, i);
- if (n > 0) {
- attr->type = event_symbols[i].type;
- attr->config = event_symbols[i].config;
- *strp = str + n;
- return EVT_HANDLED;
- }
- }
- return EVT_FAILED;
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = type;
+ attr.config = config;
+
+ if (head_config &&
+ config_attr(&attr, head_config, 1))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr,
+ (char *) __event_name(type, config));
}
-static enum event_result
-parse_raw_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *name, struct list_head *head_config)
{
- const char *str = *strp;
- u64 config;
- int n;
-
- if (*str != 'r')
- return EVT_FAILED;
- n = hex2u64(str + 1, &config);
- if (n > 0) {
- const char *end = str + n + 1;
- if (*end != '\0' && *end != ',' && *end != ':')
- return EVT_FAILED;
-
- *strp = end;
- attr->type = PERF_TYPE_RAW;
- attr->config = config;
- return EVT_HANDLED;
- }
- return EVT_FAILED;
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+
+ pmu = perf_pmu__find(name);
+ if (!pmu)
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+
+ /*
+ * Configure hardcoded terms first, no need to check
+ * return value when called with fail == 0 ;)
+ */
+ config_attr(&attr, head_config, 0);
+
+ if (perf_pmu__config(pmu, &attr, head_config))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr, (char *) "pmu");
}
-static enum event_result
-parse_numeric_event(const char **strp, struct perf_event_attr *attr)
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all)
{
- const char *str = *strp;
- char *endp;
- unsigned long type;
- u64 config;
-
- type = strtoul(str, &endp, 0);
- if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
- str = endp + 1;
- config = strtoul(str, &endp, 0);
- if (endp > str) {
- attr->type = type;
- attr->config = config;
- *strp = endp;
- return EVT_HANDLED;
- }
- }
- return EVT_FAILED;
+ /*
+ * Called for a single event definition. Update the
+ * 'all events' list, and reinit the 'single event'
+ * list for the next event definition.
+ */
+ list_splice_tail(list_event, list_all);
+ INIT_LIST_HEAD(list_event);
}
-static int
-parse_event_modifier(const char **strp, struct perf_event_attr *attr)
+int parse_events_modifier(struct list_head *list, char *str)
{
- const char *str = *strp;
+ struct perf_evsel *evsel;
int exclude = 0, exclude_GH = 0;
int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
- if (!*str)
+ if (str == NULL)
return 0;
- if (*str == ',')
- return 0;
-
- if (*str++ != ':')
- return -1;
-
while (*str) {
if (*str == 'u') {
if (!exclude)
@@ -775,111 +721,62 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
++str;
}
- if (str < *strp + 2)
- return -1;
- *strp = str;
+ /*
+ * precise ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ if (precise > 3)
+ return -EINVAL;
- attr->exclude_user = eu;
- attr->exclude_kernel = ek;
- attr->exclude_hv = eh;
- attr->precise_ip = precise;
- attr->exclude_host = eH;
- attr->exclude_guest = eG;
+ list_for_each_entry(evsel, list, node) {
+ evsel->attr.exclude_user = eu;
+ evsel->attr.exclude_kernel = ek;
+ evsel->attr.exclude_hv = eh;
+ evsel->attr.precise_ip = precise;
+ evsel->attr.exclude_host = eH;
+ evsel->attr.exclude_guest = eG;
+ }
return 0;
}
-/*
- * Each event can have multiple symbolic names.
- * Symbolic names are (almost) exactly matched.
- */
-static enum event_result
-parse_event_symbols(struct perf_evlist *evlist, const char **str,
- struct perf_event_attr *attr)
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
{
- enum event_result ret;
-
- ret = parse_tracepoint_event(evlist, str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
-
- ret = parse_raw_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ LIST_HEAD(list);
+ LIST_HEAD(list_tmp);
+ YY_BUFFER_STATE buffer;
+ int ret, idx = evlist->nr_entries;
- ret = parse_numeric_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ buffer = parse_events__scan_string(str);
- ret = parse_symbolic_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ ret = parse_events_parse(&list, &list_tmp, &idx);
- ret = parse_generic_hw_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ parse_events__flush_buffer(buffer);
+ parse_events__delete_buffer(buffer);
- ret = parse_breakpoint_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
-
- fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
- fprintf(stderr, "Run 'perf list' for a list of valid events\n");
- return EVT_FAILED;
-
-modifier:
- if (parse_event_modifier(str, attr) < 0) {
- fprintf(stderr, "invalid event modifier: '%s'\n", *str);
- fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
-
- return EVT_FAILED;
+ if (!ret) {
+ int entries = idx - evlist->nr_entries;
+ perf_evlist__splice_list_tail(evlist, &list, entries);
+ return 0;
}
+ /*
+ * There are 2 users - builtin-record and builtin-test objects.
+ * Both call perf_evlist__delete in case of error, so we don't
+ * need to bother.
+ */
+ fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
return ret;
}
-int parse_events(struct perf_evlist *evlist , const char *str, int unset __used)
-{
- struct perf_event_attr attr;
- enum event_result ret;
- const char *ostr;
-
- for (;;) {
- ostr = str;
- memset(&attr, 0, sizeof(attr));
- event_attr_init(&attr);
- ret = parse_event_symbols(evlist, &str, &attr);
- if (ret == EVT_FAILED)
- return -1;
-
- if (!(*str == 0 || *str == ',' || isspace(*str)))
- return -1;
-
- if (ret != EVT_HANDLED_ALL) {
- struct perf_evsel *evsel;
- evsel = perf_evsel__new(&attr, evlist->nr_entries);
- if (evsel == NULL)
- return -1;
- perf_evlist__add(evlist, evsel);
-
- evsel->name = calloc(str - ostr + 1, 1);
- if (!evsel->name)
- return -1;
- strncpy(evsel->name, ostr, str - ostr);
- }
-
- if (*str == 0)
- break;
- if (*str == ',')
- ++str;
- while (isspace(*str))
- ++str;
- }
-
- return 0;
-}
-
int parse_events_option(const struct option *opt, const char *str,
int unset __used)
{
@@ -1052,8 +949,6 @@ int print_hwcache_events(const char *event_glob)
return printed;
}
-#define MAX_NAME_LEN 100
-
/*
* Print the help text for the event symbols:
*/
@@ -1102,8 +997,12 @@ void print_events(const char *event_glob)
printf("\n");
printf(" %-50s [%s]\n",
- "rNNN (see 'perf list --help' on how to encode it)",
+ "rNNN",
event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" (see 'perf list --help' on how to encode it)\n");
printf("\n");
printf(" %-50s [%s]\n",
@@ -1113,3 +1012,51 @@ void print_events(const char *event_glob)
print_tracepoint_events(NULL, NULL);
}
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term)
+{
+ return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
+}
+
+int parse_events__new_term(struct parse_events__term **_term, int type,
+ char *config, char *str, long num)
+{
+ struct parse_events__term *term;
+
+ term = zalloc(sizeof(*term));
+ if (!term)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&term->list);
+ term->type = type;
+ term->config = config;
+
+ switch (type) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ case PARSE_EVENTS__TERM_TYPE_NUM:
+ term->val.num = num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_STR:
+ term->val.str = str;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *_term = term;
+ return 0;
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+ struct parse_events__term *term, *h;
+
+ list_for_each_entry_safe(term, h, terms, list)
+ free(term);
+
+ free(terms);
+}
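For reference, parse_events_modifier() above applies the trailing modifier characters to every event produced by a single event definition, setting the exclusion and precision fields of perf_event_attr. The mapping below is a rough sketch of the intended behaviour (the per-character branches sit in the elided part of the loop, so the strings are illustrative rather than a verbatim restatement):

    cycles:u     ->  attr.exclude_kernel = 1, attr.exclude_hv = 1
    cycles:k     ->  attr.exclude_user   = 1, attr.exclude_hv = 1
    cycles:upp   ->  attr.exclude_kernel = 1, attr.exclude_hv = 1, attr.precise_ip = 2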
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e0cbe7..ca069f8 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -33,6 +33,55 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
#define EVENTS_HELP_MAX (128*1024)
+enum {
+ PARSE_EVENTS__TERM_TYPE_CONFIG,
+ PARSE_EVENTS__TERM_TYPE_CONFIG1,
+ PARSE_EVENTS__TERM_TYPE_CONFIG2,
+ PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+ PARSE_EVENTS__TERM_TYPE_NUM,
+ PARSE_EVENTS__TERM_TYPE_STR,
+
+ PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events__term {
+ char *config;
+ union {
+ char *str;
+ long num;
+ } val;
+ int type;
+
+ struct list_head list;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term);
+int parse_events__new_term(struct parse_events__term **term, int type,
+ char *config, char *str, long num);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events_modifier(struct list_head *list __used, char *str __used);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event);
+int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
+ unsigned long config1, unsigned long config2,
+ char *mod);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ unsigned long type, unsigned long config,
+ struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *pmu , struct list_head *head_config);
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all);
+void parse_events_error(struct list_head *list_all,
+ struct list_head *list_event,
+ int *idx, char const *msg);
+
void print_events(const char *event_glob);
void print_events_type(u8 type);
void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
new file mode 100644
index 0000000..05d766e
--- /dev/null
+++ b/tools/perf/util/parse-events.l
@@ -0,0 +1,127 @@
+
+%option prefix="parse_events_"
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+static int __value(char *str, int base, int token)
+{
+ long num;
+
+ errno = 0;
+ num = strtoul(str, NULL, base);
+ if (errno)
+ return PE_ERROR;
+
+ parse_events_lval.num = num;
+ return token;
+}
+
+static int value(int base)
+{
+ return __value(parse_events_text, base, PE_VALUE);
+}
+
+static int raw(void)
+{
+ return __value(parse_events_text + 1, 16, PE_RAW);
+}
+
+static int str(int token)
+{
+ parse_events_lval.str = strdup(parse_events_text);
+ return token;
+}
+
+static int sym(int type, int config)
+{
+ parse_events_lval.num = (type << 16) + config;
+ return PE_VALUE_SYM;
+}
+
+static int term(int type)
+{
+ parse_events_lval.num = type;
+ return PE_TERM;
+}
+
+%}
+
+num_dec [0-9]+
+num_hex 0x[a-fA-F0-9]+
+num_raw_hex [a-fA-F0-9]+
+name [a-zA-Z_*?][a-zA-Z0-9_*?]*
+modifier_event [ukhp]{1,5}
+modifier_bp [rwx]
+
+%%
+cpu-cycles|cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+L1-dcache|l1-d|l1d|L1-data |
+L1-icache|l1-i|l1i|L1-instruction |
+LLC|L2 |
+dTLB|d-tlb|Data-TLB |
+iTLB|i-tlb|Instruction-TLB |
+branch|branches|bpu|btb|bpc |
+node { return str(PE_NAME_CACHE_TYPE); }
+
+load|loads|read |
+store|stores|write |
+prefetch|prefetches |
+speculative-read|speculative-load |
+refs|Reference|ops|access |
+misses|miss { return str(PE_NAME_CACHE_OP_RESULT); }
+
+ /*
+ * These are event config hardcoded term names to be specified
+ * within xxx/.../ syntax. So far we don't clash with other names,
+ * so we can put them here directly. In case we have a conflict
+ * in the future, this needs to go into a '//' condition block.
+ */
+config { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+period { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+
+mem: { return PE_PREFIX_MEM; }
+r{num_raw_hex} { return raw(); }
+{num_dec} { return value(10); }
+{num_hex} { return value(16); }
+
+{modifier_event} { return str(PE_MODIFIER_EVENT); }
+{modifier_bp} { return str(PE_MODIFIER_BP); }
+{name} { return str(PE_NAME); }
+"/" { return '/'; }
+- { return '-'; }
+, { return ','; }
+: { return ':'; }
+= { return '='; }
+
+%%
+
+int parse_events_wrap(void)
+{
+ return 1;
+}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
new file mode 100644
index 0000000..d9637da
--- /dev/null
+++ b/tools/perf/util/parse-events.y
@@ -0,0 +1,229 @@
+
+%name-prefix "parse_events_"
+%parse-param {struct list_head *list_all}
+%parse-param {struct list_head *list_event}
+%parse-param {int *idx}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include "types.h"
+#include "util.h"
+#include "parse-events.h"
+
+extern int parse_events_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+%}
+
+%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW
+%token PE_ERROR
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <head> event_config
+%type <term> event_term
+
+%union
+{
+ char *str;
+ unsigned long num;
+ struct list_head *head;
+ struct parse_events__term *term;
+}
+%%
+
+events:
+events ',' event | event
+
+event:
+event_def PE_MODIFIER_EVENT
+{
+ /*
+ * Apply the modifier to all events added by a single event definition
+ * (there could be more events added for multiple tracepoint
+ * definitions via '*?').
+ */
+ ABORT_ON(parse_events_modifier(list_event, $2));
+ parse_events_update_lists(list_event, list_all);
+}
+|
+event_def
+{
+ parse_events_update_lists(list_event, list_all);
+}
+
+event_def: event_pmu |
+ event_legacy_symbol |
+ event_legacy_cache sep_dc |
+ event_legacy_mem |
+ event_legacy_tracepoint sep_dc |
+ event_legacy_numeric sep_dc |
+ event_legacy_raw sep_dc
+
+event_pmu:
+PE_NAME '/' event_config '/'
+{
+ ABORT_ON(parse_events_add_pmu(list_event, idx, $1, $3));
+ parse_events__free_terms($3);
+}
+
+event_legacy_symbol:
+PE_VALUE_SYM '/' event_config '/'
+{
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, $3));
+ parse_events__free_terms($3);
+}
+|
+PE_VALUE_SYM sep_slash_dc
+{
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, NULL));
+}
+
+event_legacy_cache:
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, $5));
+}
+|
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, NULL));
+}
+|
+PE_NAME_CACHE_TYPE
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, NULL, NULL));
+}
+
+event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+ ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, $4));
+}
+|
+PE_PREFIX_MEM PE_VALUE sep_dc
+{
+ ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, NULL));
+}
+
+event_legacy_tracepoint:
+PE_NAME ':' PE_NAME
+{
+ ABORT_ON(parse_events_add_tracepoint(list_event, idx, $1, $3));
+}
+
+event_legacy_numeric:
+PE_VALUE ':' PE_VALUE
+{
+ ABORT_ON(parse_events_add_numeric(list_event, idx, $1, $3, NULL));
+}
+
+event_legacy_raw:
+PE_RAW
+{
+ ABORT_ON(parse_events_add_numeric(list_event, idx, PERF_TYPE_RAW, $1, NULL));
+}
+
+event_config:
+event_config ',' event_term
+{
+ struct list_head *head = $1;
+ struct parse_events__term *term = $3;
+
+ ABORT_ON(!head);
+ list_add_tail(&term->list, head);
+ $$ = $1;
+}
+|
+event_term
+{
+ struct list_head *head = malloc(sizeof(*head));
+ struct parse_events__term *term = $1;
+
+ ABORT_ON(!head);
+ INIT_LIST_HEAD(head);
+ list_add_tail(&term->list, head);
+ $$ = head;
+}
+
+event_term:
+PE_NAME '=' PE_NAME
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
+ $1, $3, 0));
+ $$ = term;
+}
+|
+PE_NAME '=' PE_VALUE
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+ $1, NULL, $3));
+ $$ = term;
+}
+|
+PE_NAME
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+ $1, NULL, 1));
+ $$ = term;
+}
+|
+PE_TERM '=' PE_VALUE
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
+ $$ = term;
+}
+|
+PE_TERM
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
+ $$ = term;
+}
+
+sep_dc: ':' |
+
+sep_slash_dc: '/' | ':' |
+
+%%
+
+void parse_events_error(struct list_head *list_all __used,
+ struct list_head *list_event __used,
+ int *idx __used,
+ char const *msg __used)
+{
+}
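Taken together with the lexer in parse-events.l, the grammar accepts one or more comma-separated event definitions. The strings below are illustrative examples of what each rule matches; they are not taken from the patch:

    cycles:ku                       event_legacy_symbol plus event modifier
    L1-dcache-load-misses           event_legacy_cache
    sched:sched_switch              event_legacy_tracepoint
    mem:0x1000:w                    event_legacy_mem (hardware breakpoint)
    4:0x2a                          event_legacy_numeric (type:config)
    r1a8                            event_legacy_raw
    cpu/config=1,period=100000/u    event_pmu with config terms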
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
new file mode 100644
index 0000000..cb08a11
--- /dev/null
+++ b/tools/perf/util/pmu.c
@@ -0,0 +1,469 @@
+
+#include <linux/list.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <dirent.h>
+#include "sysfs.h"
+#include "util.h"
+#include "pmu.h"
+#include "parse-events.h"
+
+int perf_pmu_parse(struct list_head *list, char *name);
+extern FILE *perf_pmu_in;
+
+static LIST_HEAD(pmus);
+
+/*
+ * Parse & process all the sysfs attributes located under
+ * the directory specified by the 'dir' parameter.
+ */
+static int pmu_format_parse(char *dir, struct list_head *head)
+{
+ struct dirent *evt_ent;
+ DIR *format_dir;
+ int ret = 0;
+
+ format_dir = opendir(dir);
+ if (!format_dir)
+ return -EINVAL;
+
+ while (!ret && (evt_ent = readdir(format_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ ret = -EINVAL;
+ file = fopen(path, "r");
+ if (!file)
+ break;
+
+ perf_pmu_in = file;
+ ret = perf_pmu_parse(head, name);
+ fclose(file);
+ }
+
+ closedir(format_dir);
+ return ret;
+}
+
+/*
+ * Reading/parsing the default pmu format definition, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
+ */
+static int pmu_format(char *name, struct list_head *format)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/format", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ if (pmu_format_parse(path, format))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Reading/parsing the default pmu type value, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
+ */
+static int pmu_type(char *name, __u32 *type)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+ FILE *file;
+ int ret = 0;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/type", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ file = fopen(path, "r");
+ if (!file)
+ return -EINVAL;
+
+ if (1 != fscanf(file, "%u", type))
+ ret = -1;
+
+ fclose(file);
+ return ret;
+}
+
+static struct perf_pmu *pmu_lookup(char *name)
+{
+ struct perf_pmu *pmu;
+ LIST_HEAD(format);
+ __u32 type;
+
+ /*
+ * The pmu data we store & need consists of the pmu
+ * type value and format definitions. Load both right
+ * now.
+ */
+ if (pmu_format(name, &format))
+ return NULL;
+
+ if (pmu_type(name, &type))
+ return NULL;
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return NULL;
+
+ INIT_LIST_HEAD(&pmu->format);
+ list_splice(&format, &pmu->format);
+ pmu->name = strdup(name);
+ pmu->type = type;
+ return pmu;
+}
+
+static struct perf_pmu *pmu_find(char *name)
+{
+ struct perf_pmu *pmu;
+
+ list_for_each_entry(pmu, &pmus, list)
+ if (!strcmp(pmu->name, name))
+ return pmu;
+
+ return NULL;
+}
+
+struct perf_pmu *perf_pmu__find(char *name)
+{
+ struct perf_pmu *pmu;
+
+ /*
+ * Once a PMU is loaded it stays in the list,
+ * so we avoid reading and parsing the pmu
+ * format definitions more than once.
+ */
+ pmu = pmu_find(name);
+ if (pmu)
+ return pmu;
+
+ return pmu_lookup(name);
+}
+
+static struct perf_pmu__format*
+pmu_find_format(struct list_head *formats, char *name)
+{
+ struct perf_pmu__format *format;
+
+ list_for_each_entry(format, formats, list)
+ if (!strcmp(format->name, name))
+ return format;
+
+ return NULL;
+}
+
+/*
+ * Returns value based on the format definition (format parameter)
+ * and unformatted value (value parameter).
+ *
+ * TODO maybe optimize a little ;)
+ */
+static __u64 pmu_format_value(unsigned long *format, __u64 value)
+{
+ unsigned long fbit, vbit;
+ __u64 v = 0;
+
+ for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
+
+ if (!test_bit(fbit, format))
+ continue;
+
+ if (!(value & (1llu << vbit++)))
+ continue;
+
+ v |= (1llu << fbit);
+ }
+
+ return v;
+}
+
+/*
+ * Set up one of the config[12] attr members based on the
+ * user input data - the 'term' parameter.
+ */
+static int pmu_config_term(struct list_head *formats,
+ struct perf_event_attr *attr,
+ struct parse_events__term *term)
+{
+ struct perf_pmu__format *format;
+ __u64 *vp;
+
+ /*
+ * Only hardcoded and numerical terms are supported.
+ * Hardcoded terms should already be in, so nothing
+ * needs to be done for them.
+ */
+ if (parse_events__is_hardcoded_term(term))
+ return 0;
+
+ if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
+ return -EINVAL;
+
+ format = pmu_find_format(formats, term->config);
+ if (!format)
+ return -EINVAL;
+
+ switch (format->value) {
+ case PERF_PMU_FORMAT_VALUE_CONFIG:
+ vp = &attr->config;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG1:
+ vp = &attr->config1;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG2:
+ vp = &attr->config2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *vp |= pmu_format_value(format->bits, term->val.num);
+ return 0;
+}
+
+static int pmu_config(struct list_head *formats, struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ struct parse_events__term *term, *h;
+
+ list_for_each_entry_safe(term, h, head_terms, list)
+ if (pmu_config_term(formats, attr, term))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Configures the event's 'attr' parameter based on:
+ * 1) the user's input - specified in the terms parameter
+ * 2) the pmu format definitions - specified by the pmu parameter
+ */
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ attr->type = pmu->type;
+ return pmu_config(&pmu->format, attr, head_terms);
+}
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits)
+{
+ struct perf_pmu__format *format;
+
+ format = zalloc(sizeof(*format));
+ if (!format)
+ return -ENOMEM;
+
+ format->name = strdup(name);
+ format->value = config;
+ memcpy(format->bits, bits, sizeof(format->bits));
+
+ list_add_tail(&format->list, list);
+ return 0;
+}
+
+void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+ long b;
+
+ if (!to)
+ to = from;
+
+ memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS) * sizeof(unsigned long));
+ for (b = from; b <= to; b++)
+ set_bit(b, bits);
+}
+
+/* Simulated format definitions. */
+static struct test_format {
+ const char *name;
+ const char *value;
+} test_formats[] = {
+ { "krava01", "config:0-1,62-63\n", },
+ { "krava02", "config:10-17\n", },
+ { "krava03", "config:5\n", },
+ { "krava11", "config1:0,2,4,6,8,20-28\n", },
+ { "krava12", "config1:63\n", },
+ { "krava13", "config1:45-47\n", },
+ { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
+ { "krava22", "config2:8,18,48,58\n", },
+ { "krava23", "config2:28-29,38\n", },
+};
+
+#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
+
+/* Simulated users input. */
+static struct parse_events__term test_terms[] = {
+ {
+ .config = (char *) "krava01",
+ .val.num = 15,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava02",
+ .val.num = 170,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava03",
+ .val.num = 1,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava11",
+ .val.num = 27,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava12",
+ .val.num = 1,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava13",
+ .val.num = 2,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava21",
+ .val.num = 119,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava22",
+ .val.num = 11,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava23",
+ .val.num = 2,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+};
+#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
+
+/*
+ * Prepare format directory data, exported by kernel
+ * at /sys/bus/event_source/devices/<dev>/format.
+ */
+static char *test_format_dir_get(void)
+{
+ static char dir[PATH_MAX];
+ unsigned int i;
+
+ snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
+ if (!mkdtemp(dir))
+ return NULL;
+
+ for (i = 0; i < TEST_FORMATS_CNT; i++) {
+ static char name[PATH_MAX];
+ struct test_format *format = &test_formats[i];
+ FILE *file;
+
+ snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+ file = fopen(name, "w");
+ if (!file)
+ return NULL;
+
+ if (1 != fwrite(format->value, strlen(format->value), 1, file))
+ break;
+
+ fclose(file);
+ }
+
+ return dir;
+}
+
+/* Cleanup format directory. */
+static int test_format_dir_put(char *dir)
+{
+ char buf[PATH_MAX];
+ snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
+ if (system(buf))
+ return -1;
+
+ snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
+ return system(buf);
+}
+
+static struct list_head *test_terms_list(void)
+{
+ static LIST_HEAD(terms);
+ unsigned int i;
+
+ for (i = 0; i < TERMS_CNT; i++)
+ list_add_tail(&test_terms[i].list, &terms);
+
+ return &terms;
+}
+
+#undef TERMS_CNT
+
+int perf_pmu__test(void)
+{
+ char *format = test_format_dir_get();
+ LIST_HEAD(formats);
+ struct list_head *terms = test_terms_list();
+ int ret;
+
+ if (!format)
+ return -EINVAL;
+
+ do {
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
+ ret = pmu_format_parse(format, &formats);
+ if (ret)
+ break;
+
+ ret = pmu_config(&formats, &attr, terms);
+ if (ret)
+ break;
+
+ ret = -EINVAL;
+
+ if (attr.config != 0xc00000000002a823)
+ break;
+ if (attr.config1 != 0x8000400000000145)
+ break;
+ if (attr.config2 != 0x0400000020041d07)
+ break;
+
+ ret = 0;
+ } while (0);
+
+ test_format_dir_put(format);
+ return ret;
+}
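The attr values checked in perf_pmu__test() follow directly from pmu_format_value(): each set bit in a format bitmap consumes the next low-order bit of the user-supplied value. Working through the config terms from the test data above:

    krava01  config:0-1,62-63  value 15  (value bits 0-3)      -> 0xc000000000000003
    krava02  config:10-17      value 170 (value bits 1,3,5,7)  -> 0x000000000002a800
    krava03  config:5          value 1   (value bit 0)         -> 0x0000000000000020
    OR of the three                                            -> 0xc00000000002a823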
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
new file mode 100644
index 0000000..68c0db9
--- /dev/null
+++ b/tools/perf/util/pmu.h
@@ -0,0 +1,41 @@
+#ifndef __PMU_H
+#define __PMU_H
+
+#include <linux/bitops.h>
+#include "../../../include/linux/perf_event.h"
+
+enum {
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+};
+
+#define PERF_PMU_FORMAT_BITS 64
+
+struct perf_pmu__format {
+ char *name;
+ int value;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+ struct list_head list;
+};
+
+struct perf_pmu {
+ char *name;
+ __u32 type;
+ struct list_head format;
+ struct list_head list;
+};
+
+struct perf_pmu *perf_pmu__find(char *name);
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms);
+
+int perf_pmu_wrap(void);
+void perf_pmu_error(struct list_head *list, char *name, char const *msg);
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits);
+void perf_pmu__set_format(unsigned long *bits, long from, long to);
+
+int perf_pmu__test(void);
+#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
new file mode 100644
index 0000000..a15d9fb
--- /dev/null
+++ b/tools/perf/util/pmu.l
@@ -0,0 +1,43 @@
+%option prefix="perf_pmu_"
+
+%{
+#include <stdlib.h>
+#include <linux/bitops.h>
+#include "pmu.h"
+#include "pmu-bison.h"
+
+static int value(int base)
+{
+ long num;
+
+ errno = 0;
+ num = strtoul(perf_pmu_text, NULL, base);
+ if (errno)
+ return PP_ERROR;
+
+ perf_pmu_lval.num = num;
+ return PP_VALUE;
+}
+
+%}
+
+num_dec [0-9]+
+
+%%
+
+{num_dec} { return value(10); }
+config { return PP_CONFIG; }
+config1 { return PP_CONFIG1; }
+config2 { return PP_CONFIG2; }
+- { return '-'; }
+: { return ':'; }
+, { return ','; }
+. { ; }
+\n { ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+ return 1;
+}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
new file mode 100644
index 0000000..20ea77e
--- /dev/null
+++ b/tools/perf/util/pmu.y
@@ -0,0 +1,93 @@
+
+%name-prefix "perf_pmu_"
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+ unsigned long num;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+ $3));
+}
+
+bits:
+bits ',' bit_term
+{
+ bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+ memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+ perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+ perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __used,
+ char *name __used,
+ char const *msg __used)
+{
+}
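The grammar above is fed one sysfs format attribute at a time by pmu_format_parse(). Assuming a PMU named 'cpu' that exports a format attribute called 'event' (both names are illustrative), a file such as the following tells perf that the value supplied for 'event' is placed into bits 0-7 of attr.config:

    $ cat /sys/bus/event_source/devices/cpu/format/event
    config:0-7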
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2cc162d..d448984 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -972,10 +972,12 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
struct dwarf_callback_param *param = data;
struct probe_finder *pf = param->data;
struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Attribute attr;
/* Check tag and diename */
if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
- !die_compare_name(sp_die, pp->function))
+ !die_compare_name(sp_die, pp->function) ||
+ dwarf_attr(sp_die, DW_AT_declaration, &attr))
return DWARF_CB_OK;
/* Check declared file */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 002ebbf..9412e3b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -140,6 +140,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
INIT_LIST_HEAD(&self->ordered_samples.to_free);
machine__init(&self->host_machine, "", HOST_KERNEL_ID);
+ hists__init(&self->hists);
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5dd83c3..c0a028c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,6 +1,5 @@
#include <dirent.h>
#include <errno.h>
-#include <libgen.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
@@ -51,6 +50,8 @@ struct symbol_conf symbol_conf = {
int dso__name_len(const struct dso *dso)
{
+ if (!dso)
+ return strlen("[unknown]");
if (verbose)
return dso->long_name_len;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index a4088ce..dfd1bd8 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -722,7 +722,7 @@ static char *event_read_name(void)
static int event_read_id(void)
{
char *token;
- int id;
+ int id = -1;
if (read_expected_item(EVENT_ITEM, "ID") < 0)
return -1;
@@ -731,15 +731,13 @@ static int event_read_id(void)
return -1;
if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
+ goto free;
id = strtoul(token, NULL, 0);
- free_token(token);
- return id;
- fail:
+ free:
free_token(token);
- return -1;
+ return id;
}
static int field_is_string(struct format_field *field)
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index 84d761b..6ee82f6 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -49,6 +49,8 @@ int ui_browser__warning(struct ui_browser *browser, int timeout,
const char *format, ...);
int ui_browser__help_window(struct ui_browser *browser, const char *text);
bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
+int ui_browser__input_window(const char *title, const char *text, char *input,
+ const char *exit_msg, int delay_sec);
void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index fa530fc..d7a1c4a 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -879,6 +879,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
char *options[16];
int nr_options = 0;
int key = -1;
+ char buf[64];
if (browser == NULL)
return -1;
@@ -933,6 +934,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
goto zoom_dso;
case 't':
goto zoom_thread;
+ case 's':
+ if (ui_browser__input_window("Symbol to show",
+ "Please enter the name of symbol you want to see",
+ buf, "ENTER: OK, ESC: Cancel",
+ delay_secs * 2) == K_ENTER) {
+ self->symbol_filter_str = *buf ? buf : NULL;
+ hists__filter_by_symbol(self);
+ hist_browser__reset(browser);
+ }
+ continue;
case K_F1:
case 'h':
case '?':
@@ -950,7 +961,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"C Collapse all callchains\n"
"E Expand all callchains\n"
"d Zoom into current DSO\n"
- "t Zoom into current Thread");
+ "t Zoom into current Thread\n"
+ "s Filter symbol by name");
continue;
case K_ENTER:
case K_RIGHT:
diff --git a/tools/perf/util/ui/keysyms.h b/tools/perf/util/ui/keysyms.h
index 3458b19..809eca5 100644
--- a/tools/perf/util/ui/keysyms.h
+++ b/tools/perf/util/ui/keysyms.h
@@ -16,6 +16,8 @@
#define K_TAB '\t'
#define K_UNTAB SL_KEY_UNTAB
#define K_UP SL_KEY_UP
+#define K_BKSPC 0x7f
+#define K_DEL SL_KEY_DELETE
/* Not really keys */
#define K_TIMER -1
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 45daa7c..ad4374a 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -69,6 +69,88 @@ int ui__popup_menu(int argc, char * const argv[])
return popup_menu__run(&menu);
}
+int ui_browser__input_window(const char *title, const char *text, char *input,
+ const char *exit_msg, int delay_secs)
+{
+ int x, y, len, key;
+ int max_len = 60, nr_lines = 0;
+ static char buf[50];
+ const char *t;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ max_len += 2;
+ nr_lines += 8;
+ y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+ x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, x++, nr_lines, max_len);
+ if (title) {
+ SLsmg_gotorc(y, x + 1);
+ SLsmg_write_string((char *)title);
+ }
+ SLsmg_gotorc(++y, x);
+ nr_lines -= 7;
+ max_len -= 2;
+ SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+ nr_lines, max_len, 1);
+ y += nr_lines;
+ len = 5;
+ while (len--) {
+ SLsmg_gotorc(y + len - 1, x);
+ SLsmg_write_nstring((char *)" ", max_len);
+ }
+ SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+ SLsmg_gotorc(y + 3, x);
+ SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_refresh();
+
+ x += 2;
+ len = 0;
+ key = ui__getch(delay_secs);
+ while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+ if (key == K_BKSPC) {
+ if (len == 0)
+ goto next_key;
+ SLsmg_gotorc(y, x + --len);
+ SLsmg_write_char(' ');
+ } else {
+ buf[len] = key;
+ SLsmg_gotorc(y, x + len++);
+ SLsmg_write_char(key);
+ }
+ SLsmg_refresh();
+
+ /* XXX more graceful overflow handling needed */
+ if (len == sizeof(buf) - 1) {
+ ui_helpline__push("maximum size of symbol name reached!");
+ key = K_ENTER;
+ break;
+ }
+next_key:
+ key = ui__getch(delay_secs);
+ }
+
+ buf[len] = '\0';
+ strncpy(input, buf, len+1);
+ return key;
+}
+
int ui__question_window(const char *title, const char *text,
const char *exit_msg, int delay_secs)
{