Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/acpi/sleep.c | 29
-rw-r--r-- drivers/acpi/video.c | 8
-rw-r--r-- drivers/block/DAC960.c | 2
-rw-r--r-- drivers/block/aoe/aoecmd.c | 17
-rw-r--r-- drivers/block/drbd/drbd_int.h | 2
-rw-r--r-- drivers/block/drbd/drbd_main.c | 5
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 2
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 1
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 2
-rw-r--r-- drivers/block/mg_disk.c | 2
-rw-r--r-- drivers/char/agp/amd64-agp.c | 9
-rw-r--r-- drivers/char/hw_random/core.c | 5
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 4
-rw-r--r-- drivers/char/sonypi.c | 11
-rw-r--r-- drivers/char/toshiba.c | 12
-rw-r--r-- drivers/dma/at_hdmac.c | 4
-rw-r--r-- drivers/dma/coh901318.c | 2
-rw-r--r-- drivers/dma/dw_dmac.c | 2
-rw-r--r-- drivers/dma/ioat/dma.c | 2
-rw-r--r-- drivers/dma/ioat/dma.h | 18
-rw-r--r-- drivers/dma/ioat/dma_v2.c | 69
-rw-r--r-- drivers/dma/ioat/dma_v2.h | 2
-rw-r--r-- drivers/dma/ioat/dma_v3.c | 60
-rw-r--r-- drivers/dma/ioat/registers.h | 1
-rw-r--r-- drivers/dma/shdma.c | 324
-rw-r--r-- drivers/dma/shdma.h | 9
-rw-r--r-- drivers/firewire/Kconfig | 44
-rw-r--r-- drivers/firewire/core-cdev.c | 27
-rw-r--r-- drivers/firewire/core-transaction.c | 118
-rw-r--r-- drivers/firewire/ohci.c | 4
-rw-r--r-- drivers/gpu/drm/ati_pcigart.c | 10
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 4
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 14
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 31
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 174
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 123
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 251
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 46
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 32
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 414
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 83
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 50
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r-- drivers/ieee1394/Kconfig | 59
-rw-r--r-- drivers/input/ff-memless.c | 48
-rw-r--r-- drivers/input/joystick/iforce/iforce-main.c | 29
-rw-r--r-- drivers/input/joystick/iforce/iforce-usb.c | 29
-rw-r--r-- drivers/input/joystick/iforce/iforce.h | 2
-rw-r--r-- drivers/input/keyboard/atkbd.c | 5
-rw-r--r-- drivers/input/keyboard/matrix_keypad.c | 29
-rw-r--r-- drivers/input/keyboard/twl4030_keypad.c | 11
-rw-r--r-- drivers/input/misc/twl4030-pwrbutton.c | 14
-rw-r--r-- drivers/input/misc/wistron_btns.c | 2
-rw-r--r-- drivers/input/mouse/Kconfig | 2
-rw-r--r-- drivers/input/mouse/hgpk.c | 1
-rw-r--r-- drivers/input/mouse/lifebook.c | 2
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 5
-rw-r--r-- drivers/input/serio/serio.c | 11
-rw-r--r-- drivers/lguest/segments.c | 4
-rw-r--r-- drivers/md/md.c | 42
-rw-r--r-- drivers/media/dvb/firewire/firedtv-fw.c | 12
-rw-r--r-- drivers/message/i2o/i2o_config.c | 13
-rw-r--r-- drivers/mfd/twl4030-irq.c | 4
-rw-r--r-- drivers/net/3c507.c | 4
-rw-r--r-- drivers/net/Kconfig | 2
-rw-r--r-- drivers/net/arm/Kconfig | 1
-rw-r--r-- drivers/net/benet/be.h | 1
-rw-r--r-- drivers/net/benet/be_cmds.c | 36
-rw-r--r-- drivers/net/benet/be_cmds.h | 16
-rw-r--r-- drivers/net/benet/be_ethtool.c | 77
-rw-r--r-- drivers/net/bnx2x_main.c | 2
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 2
-rw-r--r-- drivers/net/gianfar.c | 13
-rw-r--r-- drivers/net/ibmlana.c | 3
-rw-r--r-- drivers/net/igb/e1000_82575.c | 4
-rw-r--r-- drivers/net/igb/e1000_phy.c | 9
-rw-r--r-- drivers/net/igb/igb_ethtool.c | 2
-rw-r--r-- drivers/net/igb/igb_main.c | 9
-rw-r--r-- drivers/net/igbvf/netdev.c | 3
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 5
-rw-r--r-- drivers/net/pcnet32.c | 3
-rw-r--r-- drivers/net/sfc/efx.c | 6
-rw-r--r-- drivers/net/sfc/falcon.c | 1
-rw-r--r-- drivers/net/sfc/falcon_xmac.c | 38
-rw-r--r-- drivers/net/sfc/mcdi_phy.c | 93
-rw-r--r-- drivers/net/sfc/net_driver.h | 1
-rw-r--r-- drivers/net/sfc/nic.c | 2
-rw-r--r-- drivers/net/sfc/qt202x_phy.c | 238
-rw-r--r-- drivers/net/sfc/siena.c | 1
-rw-r--r-- drivers/net/sfc/tenxpress.c | 138
-rw-r--r-- drivers/net/sfc/tx.c | 4
-rw-r--r-- drivers/net/tun.c | 6
-rw-r--r-- drivers/net/ucc_geth.c | 42
-rw-r--r-- drivers/net/via-rhine.c | 41
-rw-r--r-- drivers/net/vxge/vxge-main.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 18
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 20
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 42
-rw-r--r-- drivers/net/wireless/b43/dma.c | 197
-rw-r--r-- drivers/net/wireless/b43/dma.h | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945.c | 15
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-csr.h | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-dev.h | 13
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-eeprom.c | 37
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-eeprom.h | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-hcmd.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-rx.c | 56
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-scan.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-sta.c | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-tx.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 68
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/iwm.h | 4
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/netdev.c | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r-- drivers/net/wireless/libertas/mesh.c | 4
-rw-r--r-- drivers/net/wireless/libertas/scan.c | 22
-rw-r--r-- drivers/net/wireless/libertas/wext.c | 2
-rw-r--r-- drivers/net/wireless/libertas_tf/main.c | 1
-rw-r--r-- drivers/net/wireless/orinoco/wext.c | 6
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800.h | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800lib.c | 17
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 5
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8180_dev.c | 1
-rw-r--r-- drivers/net/wireless/wl12xx/wl1251_boot.c | 2
-rw-r--r-- drivers/net/wireless/wl12xx/wl1271_cmd.c | 4
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.c | 140
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.h | 3
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_mac.c | 3
-rw-r--r-- drivers/pci/hotplug/shpchp.h | 2
-rw-r--r-- drivers/pci/intel-iommu.c | 6
-rw-r--r-- drivers/pci/intr_remapping.c | 2
-rw-r--r-- drivers/pci/pci-acpi.c | 10
-rw-r--r-- drivers/pci/pci.c | 15
-rw-r--r-- drivers/pci/pci.h | 8
-rw-r--r-- drivers/pci/pcie/aer/Kconfig.debug | 4
-rw-r--r-- drivers/pci/pcie/aer/aer_inject.c | 6
-rw-r--r-- drivers/pci/pcie/aer/aerdrv.c | 2
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_acpi.c | 2
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_core.c | 6
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_errprint.c | 4
-rw-r--r-- drivers/pci/pcie/aspm.c | 4
-rw-r--r-- drivers/pci/pcie/portdrv_pci.c | 2
-rw-r--r-- drivers/pci/quirks.c | 74
-rw-r--r-- drivers/pci/search.c | 6
-rw-r--r-- drivers/pcmcia/cardbus.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 28
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 18
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 9
-rw-r--r-- drivers/platform/x86/msi-wmi.c | 9
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 59
-rw-r--r-- drivers/platform/x86/wmi.c | 36
-rw-r--r-- drivers/s390/cio/Makefile | 2
-rw-r--r-- drivers/s390/cio/qdio.h | 36
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 114
-rw-r--r-- drivers/s390/cio/qdio_main.c | 71
-rw-r--r-- drivers/s390/cio/qdio_perf.c | 149
-rw-r--r-- drivers/s390/cio/qdio_perf.h | 62
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 8
-rw-r--r-- drivers/sbus/char/bbc_envctrl.c | 64
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_offload.c | 34
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 4
-rwxr-xr-x drivers/scsi/lpfc/lpfc_hbadisc.c | 25
-rwxr-xr-x [-rw-r--r--] drivers/scsi/lpfc/lpfc_hw4.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 48
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r-- drivers/scsi/pmcraid.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 32
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 22
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 64
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 90
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/stex.c | 5
-rw-r--r-- drivers/video/pxafb.c | 4
209 files changed, 3005 insertions, 2242 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8a07363..368ae6d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -28,7 +28,7 @@ source "drivers/md/Kconfig"
source "drivers/message/fusion/Kconfig"
-source "drivers/ieee1394/Kconfig"
+source "drivers/firewire/Kconfig"
source "drivers/message/i2o/Kconfig"
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5f2c379..79d33d9 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
/*
+ * According to the ACPI specification the BIOS should make sure that ACPI is
+ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
+ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+ * on such systems during resume. Unfortunately that doesn't help in
+ * particularly pathological cases in which SCI_EN has to be set directly on
+ * resume, although the specification states very clearly that this flag is
+ * owned by the hardware. The set_sci_en_on_resume variable will be set in such
+ * cases.
+ */
+static bool set_sci_en_on_resume;
+
+void __init acpi_set_sci_en_on_resume(void)
+{
+ set_sci_en_on_resume = true;
+}
+
+/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
@@ -170,18 +187,6 @@ static void acpi_pm_end(void)
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
extern void do_suspend_lowlevel(void);
static u32 acpi_suspend_states[] = {
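The acpi_set_sci_en_on_resume() hook added above lets platform quirk code opt a machine into the direct SCI_EN write. A minimal sketch of how a caller might wire it up through a DMI quirk table during early init (the table entry, ident strings, and the header carrying the prototype are illustrative assumptions, not part of this patch):

#include <linux/dmi.h>
#include <linux/acpi.h>		/* assumed to carry the acpi_set_sci_en_on_resume() prototype */

static int __init example_force_sci_en(const struct dmi_system_id *d)
{
	pr_info("%s detected: forcing SCI_EN on resume\n", d->ident);
	acpi_set_sci_en_on_resume();
	return 0;
}

static const struct dmi_system_id example_sci_en_quirks[] __initconst = {
	{
		.callback = example_force_sci_en,
		.ident = "Example Laptop",		/* hypothetical machine */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }
};

/* called once from platform setup: dmi_check_system(example_sci_en_quirks); */
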
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 05dff63..72e76b4 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -999,8 +999,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
sprintf(name, "acpi_video%d", count++);
device->backlight = backlight_device_register(name,
NULL, device, &acpi_backlight_ops);
- device->backlight->props.max_brightness = device->brightness->count-3;
kfree(name);
+ if (IS_ERR(device->backlight))
+ return;
+ device->backlight->props.max_brightness = device->brightness->count-3;
result = sysfs_create_link(&device->backlight->dev.kobj,
&device->dev->dev.kobj, "device");
@@ -1979,6 +1981,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
+ /* no warning message if acpi_backlight=vendor is used */
+ if (!acpi_video_backlight_support())
+ return 0;
+
if (!device->brightness)
goto out;
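The video.c fix works because backlight_device_register() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the result must be screened with IS_ERR() before its fields are touched. The same pattern in isolation (driver name, ops structure, and brightness value are placeholders, not taken from this patch):

#include <linux/err.h>
#include <linux/backlight.h>

static struct backlight_ops example_bl_ops;	/* assume get/update_status are filled in elsewhere */

static int example_register_backlight(struct device *parent)
{
	struct backlight_device *bd;

	bd = backlight_device_register("example_bl", parent, NULL, &example_bl_ops);
	if (IS_ERR(bd))
		return PTR_ERR(bd);	/* propagate the encoded errno, never dereference */

	bd->props.max_brightness = 7;	/* arbitrary example value */
	return 0;
}
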
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb4fa19..ce1fa92 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = {
static struct DAC960_privdata DAC960_LP_privdata = {
.HardwareType = DAC960_LP_Controller,
- .FirmwareType = DAC960_LP_Controller,
+ .FirmwareType = DAC960_V2_Controller,
.InterruptHandler = DAC960_LP_InterruptHandler,
.MemoryWindowSize = DAC960_LP_RegisterWindowSize,
};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 13bb69d..64a223b 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
part_stat_unlock();
}
-/*
- * Ensure we don't create aliases in VI caches
- */
-static inline void
-killalias(struct bio *bio)
-{
- struct bio_vec *bv;
- int i;
-
- if (bio_data_dir(bio) == READ)
- __bio_for_each_segment(bv, bio, i, 0) {
- flush_dcache_page(bv->bv_page);
- }
-}
-
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
@@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
if (buf->flags & BUFFL_FAIL)
bio_endio(buf->bio, -EIO);
else {
- killalias(buf->bio);
+ bio_flush_dcache_pages(buf->bio);
bio_endio(buf->bio, 0);
}
mempool_free(buf, d->bufpool);
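bio_flush_dcache_pages() used above centralizes the per-segment cache flushing that the removed killalias() helper open-coded. In spirit it reduces to a loop like the following sketch (illustrative only; the real helper lives in block core):

#include <linux/bio.h>
#include <linux/highmem.h>

static void example_flush_bio_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/* flush every page the bio touches so VI caches see fresh data */
	bio_for_each_segment(bvec, bio, i)
		flush_dcache_page(bvec->bv_page);
}
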
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2312d78..c9755876 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1490,7 +1490,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
-extern struct file_operations drbd_proc_fops;
+extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 157d1e4..9348f33 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
@@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait;
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
-static struct block_device_operations drbd_ops = {
+static const struct block_device_operations drbd_ops = {
.owner = THIS_MODULE,
.open = drbd_open,
.release = drbd_release,
@@ -3623,7 +3622,7 @@ _drbd_fault_random(struct fault_random_state *rsp)
{
long refresh;
- if (--rsp->count < 0) {
+ if (!rsp->count--) {
get_random_bytes(&refresh, sizeof(refresh));
rsp->state += refresh;
rsp->count = FAULT_RANDOM_REFRESH;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bdd0b49..df8ad96 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file);
struct proc_dir_entry *drbd_proc;
-struct file_operations drbd_proc_fops = {
+const struct file_operations drbd_proc_fops = {
.owner = THIS_MODULE,
.open = drbd_proc_open,
.read = seq_read,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c548f24..259c135 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -28,7 +28,6 @@
#include <asm/uaccess.h>
#include <net/sock.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ed8796f..b453c2b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
@@ -34,7 +33,6 @@
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e0339aa..02b2583 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev)
err = -EINVAL;
goto probe_err_2;
}
- host->dev_base = ioremap(rsc->start , rsc->end + 1);
+ host->dev_base = ioremap(rsc->start, resource_size(rsc));
if (!host->dev_base) {
printk(KERN_ERR "%s:%d ioremap fail\n",
__func__, __LINE__);
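The mg_disk fix passes resource_size(rsc) as the mapping length; the previous code handed ioremap() an end address plus one, which is only a length when the region starts at zero. For reference, resource_size() in <linux/ioport.h> is essentially:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* inclusive range -> byte count */
}
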
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2fb2e6c..5aa7a58 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -725,9 +725,14 @@ static struct pci_driver agp_amd64_pci_driver = {
int __init agp_amd64_init(void)
{
int err = 0;
+ static int done = 0;
if (agp_off)
return -EINVAL;
+
+ if (done++)
+ return agp_bridges_found ? 0 : -ENODEV;
+
err = pci_register_driver(&agp_amd64_pci_driver);
if (err < 0)
return err;
@@ -771,12 +776,8 @@ static void __exit agp_amd64_cleanup(void)
pci_unregister_driver(&agp_amd64_pci_driver);
}
-/* On AMD64 the PCI driver needs to initialize this driver early
- for the IOMMU, so it has to be called via a backdoor. */
-#ifndef CONFIG_GART_IOMMU
module_init(agp_amd64_init);
module_exit(agp_amd64_cleanup);
-#endif
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e989f67..3d9c61e 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out;
}
}
-out_unlock:
- mutex_unlock(&rng_mutex);
out:
return ret ? : err;
+out_unlock:
+ mutex_unlock(&rng_mutex);
+ goto out;
}
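The hw_random change reorders the exit labels so the path that never took rng_mutex can no longer fall through mutex_unlock(). A generic sketch of the idiom the fix restores, where each exit undoes exactly what was acquired before the failure point (the helper is a placeholder):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static int example_read(void)
{
	int err;

	err = mutex_lock_interruptible(&example_mutex);
	if (err)
		goto out;		/* lock not taken: skip the unlock */

	err = example_do_work();	/* hypothetical body */

	mutex_unlock(&example_mutex);
out:
	return err;
}
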
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 679cd08..176f175 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -3204,7 +3204,7 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_ACPI
spmi_find_bmc();
#endif
-#ifdef CONFIG_PNP
+#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
#endif
@@ -3330,7 +3330,7 @@ static __exit void cleanup_ipmi_si(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
-#ifdef CONFIG_PNP
+#ifdef CONFIG_ACPI
pnp_unregister_driver(&ipmi_pnp_driver);
#endif
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 0798754..bba727c 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -50,7 +50,6 @@
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -905,14 +904,13 @@ static int sonypi_misc_release(struct inode *inode, struct file *file)
static int sonypi_misc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
mutex_lock(&sonypi_device.lock);
/* Flush input queue on first open */
if (!sonypi_device.open_count)
kfifo_reset(&sonypi_device.fifo);
sonypi_device.open_count++;
mutex_unlock(&sonypi_device.lock);
- unlock_kernel();
+
return 0;
}
@@ -955,10 +953,10 @@ static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
return 0;
}
-static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
+static long sonypi_misc_ioctl(struct file *fp,
unsigned int cmd, unsigned long arg)
{
- int ret = 0;
+ long ret = 0;
void __user *argp = (void __user *)arg;
u8 val8;
u16 val16;
@@ -1074,7 +1072,8 @@ static const struct file_operations sonypi_misc_fops = {
.open = sonypi_misc_open,
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
- .ioctl = sonypi_misc_ioctl,
+ .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 663cd15d..f8bc79f 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -68,7 +68,7 @@
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-
+#include <linux/smp_lock.h>
#include <linux/toshiba.h>
#define TOSH_MINOR_DEV 181
@@ -88,13 +88,13 @@ static int tosh_date;
static int tosh_sci;
static int tosh_fan;
-static int tosh_ioctl(struct inode *, struct file *, unsigned int,
+static long tosh_ioctl(struct file *, unsigned int,
unsigned long);
static const struct file_operations tosh_fops = {
.owner = THIS_MODULE,
- .ioctl = tosh_ioctl,
+ .unlocked_ioctl = tosh_ioctl,
};
static struct miscdevice tosh_device = {
@@ -252,8 +252,7 @@ int tosh_smm(SMMRegisters *regs)
EXPORT_SYMBOL(tosh_smm);
-static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
SMMRegisters regs;
SMMRegisters __user *argp = (SMMRegisters __user *)arg;
@@ -275,13 +274,16 @@ static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
return -EINVAL;
/* do we need to emulate the fan ? */
+ lock_kernel();
if (tosh_fan==1) {
if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
err = tosh_emulate_fan(&regs);
+ unlock_kernel();
break;
}
}
err = tosh_smm(&regs);
+ unlock_kernel();
break;
default:
return -EINVAL;
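Both the sonypi and toshiba conversions follow the same recipe for retiring the legacy .ioctl entry point: drop the inode argument, return long, register through .unlocked_ioctl, and take the BKL explicitly only around the code that still needs it. A stripped-down sketch of that pattern (handler body and dispatcher are placeholders):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();			/* preserve the old BKL coverage during the transition */
	ret = example_handle_cmd(file, cmd, arg);	/* hypothetical dispatcher */
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
};
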
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f151125..efc1a61 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
cookie, done ? *done : 0, used ? *used : 0);
- spin_lock_bh(atchan->lock);
+ spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(atchan->lock);
+ spin_unlock_bh(&atchan->lock);
if (done)
*done = last_complete;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4a99cd9..b5f2ee0 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1294,8 +1294,8 @@ static int __exit coh901318_remove(struct platform_device *pdev)
dma_async_device_unregister(&base->dma_slave);
coh901318_pool_destroy(&base->pool);
free_irq(platform_get_irq(pdev, 0), base);
- kfree(base);
iounmap(base->virtbase);
+ kfree(base);
release_mem_region(pdev->resource->start,
resource_size(pdev->resource));
return 0;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 285bed0..d28369f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1270,8 +1270,6 @@ static int __init dw_probe(struct platform_device *pdev)
goto err_kfree;
}
- memset(dw, 0, sizeof *dw);
-
dw->regs = ioremap(io->start, DW_REGLEN);
if (!dw->regs) {
err = -ENOMEM;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36..dcc4ab7 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
dma->dev = &pdev->dev;
if (!dma->chancnt) {
- dev_err(dev, "zero channels detected\n");
+ dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde9..bbc3e78 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
* @dca: direct cache access context
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
* @cleanup_tasklet: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
struct dca_provider *dca;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
+ int (*reset_hw)(struct ioat_chan_common *chan);
void (*cleanup_tasklet)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+
+ writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u8 cmd;
+
+ cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f0..5f7a500 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
__ioat2_start_null_desc(ioat);
}
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ unsigned long end = jiffies + tmo;
+ int err = 0;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
+ if (end && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
status = ioat_chansts(chan);
cpu_relax();
}
+ return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+ unsigned long end = jiffies + tmo;
+ int err = 0;
+
+ ioat_reset(chan);
+ while (ioat_reset_pending(chan)) {
+ if (end && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ cpu_relax();
+ }
+
+ return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it initialized */
+ u32 chanerr;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
/**
* ioat2_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
(unsigned long) ioat);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
+ if (device->reset_hw(&ioat->base)) {
+ i = 0;
+ break;
+ }
}
dma->chancnt = i;
return i;
@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
- u32 chanerr;
int order;
/* have we already been set up? */
@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
/* Setup register to interrupt and write completion status on error */
writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr) {
- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- }
-
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
device->cleanup_tasklet((unsigned long) ioat);
-
- /* Delay 100ms after reset to allow internal DMA logic to quiesce
- * before removing DMA descriptor resources.
- */
- writeb(IOAT_CHANCMD_RESET,
- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
- mdelay(100);
+ device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
descs = ioat2_ring_space(ioat);
@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
int err;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat2_reset_hw;
device->cleanup_tasklet = ioat2_cleanup_tasklet;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
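ioat2_quiesce() and ioat2_reset_sync() above share the same bounded-poll shape: issue the command, then spin on a status read until the hardware reports done or a jiffies deadline passes. The idiom in isolation (register, mask, and function names are placeholders, not IOAT definitions):

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/io.h>

static int example_wait_bit_clear(void __iomem *reg, u8 mask, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;	/* tmo in jiffies, e.g. msecs_to_jiffies(200) */

	while (readb(reg) & mask) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		cpu_relax();		/* let the sibling thread / bus breathe while polling */
	}
	return 0;
}
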
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef..3afad8d 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data);
void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10..9908c9e 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 3
- * sources
+ * sources (we need 1 extra source in the q-only continuation
+ * case and 3 extra sources in the p+q continuation case.
*/
- if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
with_ext = 1;
num_descs *= 2;
} else
@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
return 0;
}
+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it
+ * initialized, with ioat3 specific workarounds
+ */
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ u32 chanerr;
+ u16 dev_id;
+ int err;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ /* -= IOAT ver.3 workarounds =- */
+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3, and clear any
+ * pending errors
+ */
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev, "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u16 dev_id;
u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_prep_dma_xor_val = NULL;
#endif
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
err = ioat_probe(device);
if (err)
return err;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec1..e8ae63b 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
/* MMIO Device Registers */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2e4a54c..d10cc89 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"
/* DMA descriptor control */
-#define DESC_LAST (-1)
-#define DESC_COMP (1)
-#define DESC_NCOMP (0)
+enum sh_dmae_desc_status {
+ DESC_IDLE,
+ DESC_PREPARED,
+ DESC_SUBMITTED,
+ DESC_COMPLETED, /* completed, have to call callback */
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
+};
#define NR_DESCS_PER_CHANNEL 32
/*
@@ -45,6 +48,8 @@
*/
#define RS_DEFAULT (RS_DUAL)
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
- sh_dmae_writel(sh_chan, hw.sar, SAR);
- sh_dmae_writel(sh_chan, hw.dar, DAR);
- sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
+ sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct sh_desc *desc = tx_to_sh_desc(tx);
+ struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+ dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
if (cookie < 0)
cookie = 1;
- /* If desc only in the case of 1 */
- if (desc->async_tx.cookie != -EBUSY)
- desc->async_tx.cookie = cookie;
- sh_chan->common.cookie = desc->async_tx.cookie;
+ sh_chan->common.cookie = cookie;
+ tx->cookie = cookie;
+
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+ /*
+ * All chunks are on the global ld_free, so, we have to find
+ * the end of the chain ourselves
+ */
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
+ chunk->async_tx.cookie > 0 ||
+ chunk->async_tx.cookie == -EBUSY ||
+ &chunk->node == &sh_chan->ld_free))
+ break;
+ chunk->mark = DESC_SUBMITTED;
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ chunk->cookie = cookie;
+ list_move_tail(&chunk->node, &sh_chan->ld_queue);
+ last = chunk;
+ }
+
+ last->async_tx.callback = callback;
+ last->async_tx.callback_param = tx->callback_param;
- list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+ dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+ tx->cookie, &last->async_tx, sh_chan->id,
+ desc->hw.sar, desc->hw.tcr, desc->hw.dar);
spin_unlock_bh(&sh_chan->desc_lock);
return cookie;
}
+/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
- struct sh_desc *desc, *_desc, *ret = NULL;
+ struct sh_desc *desc;
- spin_lock_bh(&sh_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
- if (async_tx_test_ack(&desc->async_tx)) {
+ list_for_each_entry(desc, &sh_chan->ld_free, node)
+ if (desc->mark != DESC_PREPARED) {
+ BUG_ON(desc->mark != DESC_IDLE);
list_del(&desc->node);
- ret = desc;
- break;
+ return desc;
}
- }
- spin_unlock_bh(&sh_chan->desc_lock);
-
- return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
- if (desc) {
- spin_lock_bh(&sh_chan->desc_lock);
-
- list_splice_init(&desc->tx_list, &sh_chan->ld_free);
- list_add(&desc->node, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
- }
+ return NULL;
}
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&desc->async_tx,
&sh_chan->common);
desc->async_tx.tx_submit = sh_dmae_tx_submit;
- desc->async_tx.flags = DMA_CTRL_ACK;
- INIT_LIST_HEAD(&desc->tx_list);
- sh_dmae_put_desc(sh_chan, desc);
+ desc->mark = DESC_IDLE;
spin_lock_bh(&sh_chan->desc_lock);
+ list_add(&desc->node, &sh_chan->ld_free);
sh_chan->descs_allocated++;
}
spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_desc *desc, *_desc;
LIST_HEAD(list);
- BUG_ON(!list_empty(&sh_chan->ld_queue));
+ /* Prepared and not submitted descriptors can still be on the queue */
+ if (!list_empty(&sh_chan->ld_queue))
+ sh_dmae_chan_ld_cleanup(sh_chan, true);
+
spin_lock_bh(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
struct sh_dmae_chan *sh_chan;
struct sh_desc *first = NULL, *prev = NULL, *new;
size_t copy_size;
+ LIST_HEAD(tx_list);
+ int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
if (!chan)
return NULL;
@@ -301,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sh_chan = to_sh_chan(chan);
+ /* Have to lock the whole loop to protect against concurrent release */
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ /*
+ * Chaining:
+ * first descriptor is what user is dealing with in all API calls, its
+ * cookie is at first set to -EBUSY, at tx-submit to a positive
+ * number
+ * if more than one chunk is needed further chunks have cookie = -EINVAL
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC
+ * all chunks are linked onto the tx_list head with their .node heads
+ * only during this function, then they are immediately spliced
+ * back onto the free list in form of a chain
+ */
do {
- /* Allocate the link descriptor from DMA pool */
+ /* Allocate the link descriptor from the free list */
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev,
"No free memory for link descriptor\n");
- goto err_get_desc;
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &sh_chan->ld_free);
+ spin_unlock_bh(&sh_chan->desc_lock);
+ return NULL;
}
- copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+ copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
new->hw.sar = dma_src;
new->hw.dar = dma_dest;
new->hw.tcr = copy_size;
- if (!first)
+ if (!first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
- new->mark = DESC_NCOMP;
- async_tx_ack(&new->async_tx);
+ dev_dbg(sh_chan->dev,
+ "chaining %u of %u with %p, dst %x, cookie %d\n",
+ copy_size, len, &new->async_tx, dma_dest,
+ new->async_tx.cookie);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->chunks = chunks--;
prev = new;
len -= copy_size;
dma_src += copy_size;
dma_dest += copy_size;
/* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->tx_list);
+ list_add_tail(&new->node, &tx_list);
} while (len);
- new->async_tx.flags = flags; /* client is in control of this ack */
- new->async_tx.cookie = -EBUSY; /* Last desc */
+ if (new != first)
+ new->async_tx.cookie = -ENOSPC;
- return &first->async_tx;
+ /* Put them back on the free list, so, they don't get lost */
+ list_splice_tail(&tx_list, &sh_chan->ld_free);
-err_get_desc:
- sh_dmae_put_desc(sh_chan, first);
- return NULL;
+ spin_unlock_bh(&sh_chan->desc_lock);
+ return &first->async_tx;
}
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
struct sh_desc *desc, *_desc;
+ /* Is the "exposed" head of a chain acked? */
+ bool head_acked = false;
+ dma_cookie_t cookie = 0;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
spin_lock_bh(&sh_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
- dma_async_tx_callback callback;
- void *callback_param;
-
- /* non send data */
- if (desc->mark == DESC_NCOMP)
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
+ desc->mark != DESC_COMPLETED &&
+ desc->mark != DESC_WAITING);
+
+ /*
+ * queue is ordered, and we use this loop to (1) clean up all
+ * completed descriptors, and to (2) update descriptor flags of
+ * any chunks in a (partially) completed chain
+ */
+ if (!all && desc->mark == DESC_SUBMITTED &&
+ desc->cookie != cookie)
break;
- /* send data sesc */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
+ if (tx->cookie > 0)
+ cookie = tx->cookie;
- /* Remove from ld_queue list */
- list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+ BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+ sh_chan->completed_cookie = desc->cookie;
+ }
- dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
- desc);
+ /* Call callback on the last chunk */
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
+ desc->mark = DESC_WAITING;
+ callback = tx->callback;
+ param = tx->callback_param;
+ dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
+ tx->cookie, tx, sh_chan->id);
+ BUG_ON(desc->chunks != 1);
+ break;
+ }
- list_move(&desc->node, &sh_chan->ld_free);
- /* Run the link descriptor callback function */
- if (callback) {
- spin_unlock_bh(&sh_chan->desc_lock);
- dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
- desc);
- callback(callback_param);
- spin_lock_bh(&sh_chan->desc_lock);
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+ if (desc->mark == DESC_COMPLETED) {
+ BUG_ON(tx->cookie < 0);
+ desc->mark = DESC_WAITING;
+ }
+ head_acked = async_tx_test_ack(tx);
+ } else {
+ switch (desc->mark) {
+ case DESC_COMPLETED:
+ desc->mark = DESC_WAITING;
+ /* Fall through */
+ case DESC_WAITING:
+ if (head_acked)
+ async_tx_ack(&desc->async_tx);
+ }
+ }
+
+ dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+ tx, tx->cookie);
+
+ if (((desc->mark == DESC_COMPLETED ||
+ desc->mark == DESC_WAITING) &&
+ async_tx_test_ack(&desc->async_tx)) || all) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+ list_move(&desc->node, &sh_chan->ld_free);
}
}
spin_unlock_bh(&sh_chan->desc_lock);
+
+ if (callback)
+ callback(param);
+
+ return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+ while (__ld_cleanup(sh_chan, all))
+ ;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
- struct list_head *ld_node;
- struct sh_dmae_regs hw;
+ struct sh_desc *sd;
+ spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan))
+ if (dmae_is_busy(sh_chan)) {
+ spin_unlock_bh(&sh_chan->desc_lock);
return;
+ }
/* Find the first un-transfer desciptor */
- for (ld_node = sh_chan->ld_queue.next;
- (ld_node != &sh_chan->ld_queue)
- && (to_sh_desc(ld_node)->mark == DESC_COMP);
- ld_node = ld_node->next)
- cpu_relax();
-
- if (ld_node != &sh_chan->ld_queue) {
- /* Get the ld start address from ld_queue */
- hw = to_sh_desc(ld_node)->hw;
- dmae_set_reg(sh_chan, hw);
- dmae_start(sh_chan);
- }
+ list_for_each_entry(sd, &sh_chan->ld_queue, node)
+ if (sd->mark == DESC_SUBMITTED) {
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sd->hw);
+ dmae_start(sh_chan);
+ break;
+ }
+
+ spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
dma_cookie_t last_used;
dma_cookie_t last_complete;
- sh_dmae_chan_ld_cleanup(sh_chan);
+ sh_dmae_chan_ld_cleanup(sh_chan, false);
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
- if (last_complete == -EBUSY)
- last_complete = last_used;
+ BUG_ON(last_complete < 0);
if (done)
*done = last_complete;
@@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
err = sh_dmae_rst(0);
if (err)
return err;
+#ifdef SH_DMAC_BASE1
if (shdev->pdata.mode & SHDMA_DMAOR1) {
err = sh_dmae_rst(1);
if (err)
return err;
}
+#endif
disable_irq(irq);
return IRQ_HANDLED;
}
@@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
static void dmae_do_tasklet(unsigned long data)
{
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
- struct sh_desc *desc, *_desc, *cur_desc = NULL;
+ struct sh_desc *desc;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
- list_for_each_entry_safe(desc, _desc,
- &sh_chan->ld_queue, node) {
- if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
- cur_desc = desc;
+ spin_lock(&sh_chan->desc_lock);
+ list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+ if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+ desc->mark == DESC_SUBMITTED) {
+ dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+ desc->async_tx.cookie, &desc->async_tx,
+ desc->hw.dar);
+ desc->mark = DESC_COMPLETED;
break;
}
}
+ spin_unlock(&sh_chan->desc_lock);
- if (cur_desc) {
- switch (cur_desc->async_tx.cookie) {
- case 0: /* other desc data */
- break;
- case -EBUSY: /* last desc */
- sh_chan->completed_cookie =
- cur_desc->async_tx.cookie;
- break;
- default: /* first desc ( 0 < )*/
- sh_chan->completed_cookie =
- cur_desc->async_tx.cookie - 1;
- break;
- }
- cur_desc->mark = DESC_COMP;
- }
/* Next desc */
sh_chan_xfer_ld_queue(sh_chan);
- sh_dmae_chan_ld_cleanup(sh_chan);
+ sh_dmae_chan_ld_cleanup(sh_chan, false);
}
static unsigned int get_dmae_irq(unsigned int id)
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e5..108f1cf 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
-#include <linux/device.h>
-#include <linux/dmapool.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
@@ -26,13 +26,16 @@ struct sh_dmae_regs {
};
struct sh_desc {
- struct list_head tx_list;
struct sh_dmae_regs hw;
struct list_head node;
struct dma_async_tx_descriptor async_tx;
+ dma_cookie_t cookie;
+ int chunks;
int mark;
};
+struct device;
+
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
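Taken together, the shdma hunks replace the old DESC_COMP/DESC_NCOMP flags with a per-chunk life cycle. Summarizing the transitions implied by the code above (a reading aid reconstructed from the hunks, not a comment from the patch itself):

	DESC_IDLE      -> DESC_PREPARED   in sh_dmae_prep_memcpy()
	DESC_PREPARED  -> DESC_SUBMITTED  in sh_dmae_tx_submit()
	DESC_SUBMITTED -> DESC_COMPLETED  in dmae_do_tasklet()
	DESC_COMPLETED -> DESC_WAITING    in __ld_cleanup() once the callback has run
	DESC_WAITING   -> DESC_IDLE       in __ld_cleanup() after the client acks the descriptor
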
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 13efcd3..a9371b3 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,5 +1,10 @@
+menu "IEEE 1394 (FireWire) support"
+ depends on PCI || BROKEN
+ # firewire-core does not depend on PCI but is
+ # not useful without PCI controller driver
+
comment "You can enable one or both FireWire driver stacks."
-comment "See the help texts for more information."
+comment "The newer stack is recommended."
config FIREWIRE
tristate "FireWire driver stack"
@@ -15,16 +20,6 @@ config FIREWIRE
To compile this driver as a module, say M here: the module will be
called firewire-core.
- This module functionally replaces ieee1394, raw1394, and video1394.
- To access it from application programs, you generally need at least
- libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
- No libraries are required to access storage devices through the
- firewire-sbp2 driver.
-
- NOTE:
- FireWire audio devices currently require the old drivers (ieee1394,
- ohci1394, raw1394).
-
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE
@@ -34,22 +29,7 @@ config FIREWIRE_OHCI
is the only chipset in use, so say Y here.
To compile this driver as a module, say M here: The module will be
- called firewire-ohci. It replaces ohci1394 of the classic IEEE 1394
- stack.
-
- NOTE:
- If you want to install firewire-ohci and ohci1394 together, you
- should configure them only as modules and blacklist the driver(s)
- which you don't want to have auto-loaded. Add either
-
- blacklist firewire-ohci
- or
- blacklist ohci1394
- blacklist video1394
- blacklist dv1394
-
- to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution.
+ called firewire-ohci.
config FIREWIRE_OHCI_DEBUG
bool
@@ -66,8 +46,7 @@ config FIREWIRE_SBP2
like scanners.
To compile this driver as a module, say M here: The module will be
- called firewire-sbp2. It replaces sbp2 of the classic IEEE 1394
- stack.
+ called firewire-sbp2.
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
@@ -83,5 +62,8 @@ config FIREWIRE_NET
NOTE, this driver is not stable yet!
To compile this driver as a module, say M here: The module will be
- called firewire-net. It replaces eth1394 of the classic IEEE 1394
- stack.
+ called firewire-net.
+
+source "drivers/ieee1394/Kconfig"
+
+endmenu
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 231e6ee..e6d63849 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -601,8 +601,9 @@ static void release_request(struct client *client,
struct inbound_transaction_resource *r = container_of(resource,
struct inbound_transaction_resource, resource);
- fw_send_response(client->device->card, r->request,
- RCODE_CONFLICT_ERROR);
+ if (r->request)
+ fw_send_response(client->device->card, r->request,
+ RCODE_CONFLICT_ERROR);
kfree(r);
}
@@ -645,7 +646,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
failed:
kfree(r);
kfree(e);
- fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+ if (request)
+ fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}
static void release_address_handler(struct client *client,
@@ -715,15 +717,18 @@ static int ioctl_send_response(struct client *client, void *buffer)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (request->length < r->length)
- r->length = request->length;
-
- if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
- ret = -EFAULT;
- goto out;
+ if (r->request) {
+ if (request->length < r->length)
+ r->length = request->length;
+ if (copy_from_user(r->data, u64_to_uptr(request->data),
+ r->length)) {
+ ret = -EFAULT;
+ kfree(r->request);
+ goto out;
+ }
+ fw_send_response(client->device->card, r->request,
+ request->rcode);
}
-
- fw_send_response(client->device->card, r->request, request->rcode);
out:
kfree(r);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 842739d..495849e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -432,14 +432,20 @@ static struct fw_address_handler *lookup_overlapping_address_handler(
return NULL;
}
+static bool is_enclosing_handler(struct fw_address_handler *handler,
+ unsigned long long offset, size_t length)
+{
+ return handler->offset <= offset &&
+ offset + length <= handler->offset + handler->length;
+}
+
static struct fw_address_handler *lookup_enclosing_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
list_for_each_entry(handler, list, link) {
- if (handler->offset <= offset &&
- offset + length <= handler->offset + handler->length)
+ if (is_enclosing_handler(handler, offset, length))
return handler;
}
@@ -465,6 +471,12 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static bool is_in_fcp_region(u64 offset, size_t length)
+{
+ return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
+}
+
/**
* fw_core_add_address_handler - register for incoming requests
* @handler: callback
@@ -477,8 +489,11 @@ const struct fw_address_region fw_unit_space_region =
* give the details of the particular request.
*
* Return value: 0 on success, non-zero otherwise.
+ *
* The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset.
+ *
+ * Address allocations are exclusive, except for the FCP registers.
*/
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
@@ -498,10 +513,12 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
- other =
- lookup_overlapping_address_handler(&address_handler_list,
- handler->offset,
- handler->length);
+ if (is_in_fcp_region(handler->offset, handler->length))
+ other = NULL;
+ else
+ other = lookup_overlapping_address_handler
+ (&address_handler_list,
+ handler->offset, handler->length);
if (other != NULL) {
handler->offset += other->length;
} else {
@@ -668,6 +685,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
+ if (WARN_ONCE(!request, "invalid for FCP address handlers"))
+ return;
+
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
@@ -686,26 +706,15 @@ void fw_send_response(struct fw_card *card,
}
EXPORT_SYMBOL(fw_send_response);
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+static void handle_exclusive_region_request(struct fw_card *card,
+ struct fw_packet *p,
+ struct fw_request *request,
+ unsigned long long offset)
{
struct fw_address_handler *handler;
- struct fw_request *request;
- unsigned long long offset;
unsigned long flags;
int tcode, destination, source;
- if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
- return;
-
- request = allocate_request(p);
- if (request == NULL) {
- /* FIXME: send statically allocated busy packet. */
- return;
- }
-
- offset =
- ((unsigned long long)
- HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
@@ -732,6 +741,73 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
request->data, request->length,
handler->callback_data);
}
+
+static void handle_fcp_region_request(struct fw_card *card,
+ struct fw_packet *p,
+ struct fw_request *request,
+ unsigned long long offset)
+{
+ struct fw_address_handler *handler;
+ unsigned long flags;
+ int tcode, destination, source;
+
+ if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
+ request->length > 0x200) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+
+ return;
+ }
+
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ destination = HEADER_GET_DESTINATION(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[1]);
+
+ if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
+ tcode != TCODE_WRITE_BLOCK_REQUEST) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+
+ return;
+ }
+
+ spin_lock_irqsave(&address_handler_lock, flags);
+ list_for_each_entry(handler, &address_handler_list, link) {
+ if (is_enclosing_handler(handler, offset, request->length))
+ handler->address_callback(card, NULL, tcode,
+ destination, source,
+ p->generation, p->speed,
+ offset, request->data,
+ request->length,
+ handler->callback_data);
+ }
+ spin_unlock_irqrestore(&address_handler_lock, flags);
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+ struct fw_request *request;
+ unsigned long long offset;
+
+ if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+ return;
+
+ request = allocate_request(p);
+ if (request == NULL) {
+ /* FIXME: send statically allocated busy packet. */
+ return;
+ }
+
+ offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
+ p->header[2];
+
+ if (!is_in_fcp_region(offset, request->length))
+ handle_exclusive_region_request(card, p, request, offset);
+ else
+ handle_fcp_region_request(card, p, request, offset);
+
+}
EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
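With the exclusive/FCP split above, several drivers can listen on the FCP registers at the same time. Below is a minimal sketch of such a listener (illustration only, not part of this patch; the handler and function names are made up). The callback arguments mirror the handle_fcp_region_request() call above; because the core passes request == NULL and completes the write itself, the listener must not call fw_send_response().

#include <linux/firewire.h>

static const struct fw_address_region my_fcp_response_region = {
	.start = CSR_REGISTER_BASE | CSR_FCP_RESPONSE,
	.end   = CSR_REGISTER_BASE | CSR_FCP_END,
};

static void my_fcp_listener(struct fw_card *card, struct fw_request *request,
			    int tcode, int destination, int source,
			    int generation, int speed,
			    unsigned long long offset,
			    void *payload, size_t length, void *callback_data)
{
	/* request is NULL for FCP addresses; the core already answers with
	 * RCODE_COMPLETE, so only parse the AV/C frame in payload here. */
}

static struct fw_address_handler my_fcp_handler = {
	.length           = CSR_FCP_END - CSR_FCP_RESPONSE,
	.address_callback = my_fcp_listener,
};

static int my_fcp_register(void)
{
	/* May now succeed even if another driver covers the same range. */
	return fw_core_add_address_handler(&my_fcp_handler,
					   &my_fcp_response_region);
}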
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 96768e1..a61571c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2226,7 +2226,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
if (rest == 0)
return -EINVAL;
- /* FIXME: make packet-per-buffer/dual-buffer a context option */
while (rest > 0) {
d = context_get_descriptors(&ctx->context,
z + header_z, &d_bus);
@@ -2470,7 +2469,10 @@ static int __devinit pci_probe(struct pci_dev *dev,
}
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+#if 0
+ /* FIXME: make it a context option or remove dual-buffer mode */
ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+#endif
/* dual-buffer mode is broken if more than one IR context is active */
if (dev->vendor == PCI_VENDOR_ID_AGERE &&
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3..a1fce68 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
+ PAGE_SIZE);
if (gart_info->table_handle == NULL)
return -ENOMEM;
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+ if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+ gart_info->table_mask);
+ ret = 1;
+ goto done;
+ }
+
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e30..8417cc4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+ dmah = drm_pci_alloc(dev, map->size, map->size);
if (!dmah) {
kfree(map);
return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
while (entry->buf_count < count) {
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5c9f798..defcaf1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -911,23 +911,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct cvt_timing *cvt;
const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
int uninitialized_var(width), height;
cvt = &(timing->data.other_data.data.cvt[i]);
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
case 0x00:
width = height * 4 / 3;
break;
- case 0x40:
+ case 0x04:
width = height * 16 / 9;
break;
- case 0x80:
+ case 0x08:
width = height * 16 / 10;
break;
- case 0xc0:
+ case 0x0c:
width = height * 15 / 9;
break;
}
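A quick worked example of the corrected arithmetic (illustration only; the bytes are hypothetical): for the CVT 3-byte code {0x57, 0x28, 0x00}, the high nibble of code[1] supplies bits 11-8 of (lines/2 - 1) and bits 3-2 select the aspect ratio, yielding 1920x1200.

#include <stdio.h>

int main(void)
{
	unsigned char code[3] = { 0x57, 0x28, 0x00 };
	int height, width = 0;

	/* 0x57 + (0x20 << 4) = 0x257 = 599; (599 + 1) * 2 = 1200 lines */
	height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;

	switch (code[1] & 0x0c) {		/* aspect ratio in bits 3-2 */
	case 0x00: width = height * 4 / 3;   break;
	case 0x04: width = height * 16 / 9;  break;
	case 0x08: width = height * 16 / 10; break;	/* 0x28 -> 16:10 */
	case 0x0c: width = height * 15 / 9;  break;
	}

	printf("%dx%d\n", width, height);	/* prints 1920x1200 */
	return 0;
}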
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa0..100ee48 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
force = DRM_FORCE_ON;
break;
case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
else
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094f..e68ebf9 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
if (align > size)
return NULL;
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf..9c9998c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(pages[page], KM_USER0);
+ kunmap_atomic(mem, KM_USER0);
}
}
@@ -386,34 +386,6 @@ out:
return 0;
}
-static int i915_registers_info(struct seq_file *m, void *data) {
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
- for (reg=start; reg < end; reg += 4) \
- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
-
- return 0;
-}
-
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfea..bbe4781 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = dev_priv->has_gem;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
+ param->param);
return -EINVAL;
}
@@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base, ll_base;
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
- if (IS_MOBILE(dev) || IS_I9XX(dev))
- dev_priv->cursor_needs_physical = true;
- else
- dev_priv->cursor_needs_physical = false;
-
- if (IS_I965G(dev) || IS_G33(dev))
- dev_priv->cursor_needs_physical = false;
-
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ intel_modeset_init(dev);
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
@@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_modeset_init(dev);
-
drm_helper_initial_config(dev);
return 0;
@@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
@@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
+ dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_I9XX(dev) ? 0 : 1;
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
@@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca..2ffffd7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400);
static struct drm_driver driver;
-static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) { \
+ .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class_mask = 0xffff00, \
+ .vendor = 0x8086, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+ .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+ .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+ .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+ .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+ .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+ .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+ .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ {0, 0, 0}
};
#if defined(CONFIG_DRM_I915_KMS)
@@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev)
return i915_resume(dev);
}
+static int
+i915_pm_suspend(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+}
+
+static int
+i915_pm_resume(struct device *dev)
+{
+ return i915_pci_resume(to_pci_dev(dev));
+}
+
+static int
+i915_pm_freeze(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+}
+
+static int
+i915_pm_thaw(struct device *dev)
+{
+ /* thaw during hibernate, do nothing! */
+ return 0;
+}
+
+static int
+i915_pm_poweroff(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+}
+
+static int
+i915_pm_restore(struct device *dev)
+{
+ return i915_pci_resume(to_pci_dev(dev));
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_restore,
+};
+
static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
@@ -303,8 +464,6 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
- .suspend = i915_suspend,
- .resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
@@ -344,10 +503,7 @@ static struct drm_driver driver = {
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
-#ifdef CONFIG_PM
- .resume = i915_pci_resume,
- .suspend = i915_pci_suspend,
-#endif
+ .driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac7..29dd676 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
struct intel_overlay;
+struct intel_device_info {
+ u8 is_mobile : 1;
+ u8 is_i8xx : 1;
+ u8 is_i915g : 1;
+ u8 is_i9xx : 1;
+ u8 is_i945gm : 1;
+ u8 is_i965g : 1;
+ u8 is_i965gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_ironlake : 1;
+ u8 has_fbc : 1;
+ u8 has_rc6 : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ const struct intel_device_info *info;
+
int has_gem;
void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
int hangcheck_count;
uint32_t last_acthd;
- bool cursor_needs_physical;
-
struct drm_mm vram;
unsigned long cfb_size;
@@ -287,8 +307,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveRENDERSTANDBY;
- u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -561,6 +579,7 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+ int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
- (dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- (dev)->pci_device == 0x0042 || \
- (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
- (dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2 || \
- (IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
- (IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_PINEVIEW(dev) && \
- !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf..2748609 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
bool retry_alloc = false;
int ret;
- if (dev_priv->mm.suspended)
- return -EBUSY;
-
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
@@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
+ bool need_fence;
+
+ need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence && !i915_obj_fenceable(dev, obj))
+ i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
entry->offset = obj_priv->gtt_offset;
/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
+ struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
@@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{
@@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL)
+ if (*relocs == NULL) {
+ DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
+ }
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{
@@ -3536,7 +3559,7 @@ err:
}
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
- int ret, ret2, i, pinned = 0;
+ int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
@@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
+ if (object_list == NULL) {
+ DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3895,6 @@ err:
mutex_unlock(&dev->struct_mutex);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
-
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3911,158 @@ err:
pre_mutex_err:
drm_free_large(object_list);
- drm_free_large(exec_list);
kfree(cliprects);
return ret;
}
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (!IS_I965G(dev))
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = 0;
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ } else {
+ DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
@@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- return ret;
- }
- }
+
obj_priv->pin_count++;
/* If the object is not active and not pending a flush,
@@ -4708,7 +4839,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c..df278b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
/**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable. If not, it may need
+ * to be unbound and have its pitch adjusted.
*/
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
- int i;
- int start;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
- return ALIGN(size, 4096);
+ if (obj->size & 4095)
+ return false;
+ return true;
+ } else if (IS_I9XX(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
} else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
- for (i = start; i < size; i <<= 1)
- ;
+ /* Power of two sized... */
+ if (obj->size & (obj->size - 1))
+ return false;
- return i;
- }
+ /* Objects must be size aligned as well */
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+ return true;
}
/* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride & (stride - 1))
return false;
- /* We don't handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
- return false;
-
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5d..7cd8110 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
if (de_iir & DE_GSE)
@@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
(void) I915_READ(IER);
}
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- /* Leave other bits alone */
- hotplug_en |= HOTPLUG_EN_MASK;
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ /* Ignore TV since it's buggy */
+
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
- TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
- SDVOB_HOTPLUG_INT_STATUS;
- if (IS_G4X(dev)) {
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS |
- HDMIC_HOTPLUG_INT_STATUS |
- HDMID_HOTPLUG_INT_STATUS;
- }
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf..149d360 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -879,13 +879,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
-#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
- HDMIC_HOTPLUG_INT_EN | \
- HDMID_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +975,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1751,6 +1746,8 @@
/* Display & cursor control */
+/* dithering flag on Ironlake */
+#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00..a3b90c9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
- dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
- }
-
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
- I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
- }
-
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e5..ddefc87 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b0..002612f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -262,6 +262,14 @@ struct intel_limit {
#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
+#define IRONLAKE_P_DISPLAY_PORT_MIN 10
+#define IRONLAKE_P_DISPLAY_PORT_MAX 20
+#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
+#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
+#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
+#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
+#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_lvds = {
@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_LVDS_SLOW,
.p2_fast = IRONLAKE_P2_LVDS_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = IRONLAKE_DOT_MIN,
+ .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN,
+ .max = IRONLAKE_VCO_MAX},
+ .n = { .min = IRONLAKE_N_MIN,
+ .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN,
+ .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN,
+ .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN,
+ .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
+ .max = IRONLAKE_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
+ .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_ironlake_lvds;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ HAS_eDP)
+ limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_sdvo;
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ int lvds_reg;
+
+ if (IS_IRONLAKE(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
+
+ /* return directly when it is eDP */
+ if (HAS_eDP)
+ return true;
+
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t clock;
- int err_most = 47;
- int err_min = 10000;
-
- /* eDP has only 2 clock choice, no n/m/p setting */
- if (HAS_eDP)
- return true;
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_ironlake_dp(limit, crtc, target,
- refclk, best_clock);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
- this_err = abs((10000 - (target*10000/clock.dot)));
- if (this_err < err_most) {
- *best_clock = clock;
- /* found on first matching */
- goto out;
- } else if (this_err < err_min) {
- *best_clock = clock;
- err_min = this_err;
- }
- }
- }
- }
- }
-out:
- return true;
-}
-
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
+ u32 pipe_bpc;
+
+ temp = I915_READ(pipeconf_reg);
+ pipe_bpc = temp & PIPE_BPC_MASK;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
+ /*
+ * make the BPC in FDI Rx be consistent with that in
+ * pipeconf reg.
+ */
+ temp &= ~(0x7 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
@@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
+ /* BPC in FDI rx is consistent with that in pipeconf */
+ temp &= ~(0x07 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);
@@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
-
+ temp = I915_READ(transconf_reg);
+ /* BPC in transcoder is consistent with that in pipeconf */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
+ I915_WRITE(transconf_reg, temp);
+ I915_READ(transconf_reg);
udelay(100);
/* disable PCH DPLL */
@@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
@@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (HAS_FW_BLC(dev) && sr_hdisplay &&
(!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 6000;
+ static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
+ temp &= ~PIPE_BPC_MASK;
+ if (is_lvds) {
+ int lvds_reg = I915_READ(PCH_LVDS);
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+ if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+ } else
+ temp |= PIPE_8BPC;
+ I915_WRITE(pipeconf_reg, temp);
+ I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
-
+ /* set the dithering flag */
+ if (IS_I965G(dev)) {
+ if (dev_priv->lvds_dither) {
+ if (IS_IRONLAKE(dev))
+ pipeconf |= PIPE_ENABLE_DITHER;
+ else
+ lvds |= LVDS_ENABLE_DITHER;
+ } else {
+ if (IS_IRONLAKE(dev))
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ else
+ lvds &= ~LVDS_ENABLE_DITHER;
+ }
+ }
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
@@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->cursor_needs_physical) {
+ if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
I915_WRITE(base, addr);
if (intel_crtc->cursor_bo) {
- if (dev_priv->cursor_needs_physical) {
+ if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- /* Restore render clock frequency to original value */
- if (IS_G4X(dev) || IS_I9XX(dev))
- pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
- else if (IS_I85X(dev))
- pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
- /* Schedule downclock */
- if (schedule)
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- if (IS_G4X(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
- gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I965G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
- gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
- gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I915G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
- gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I85X(dev)) {
- u16 hpllcc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
- /* Up to maximum... */
- hpllcc &= ~GC_CLOCK_CONTROL_MASK;
- hpllcc |= GC_CLOCK_133_200;
-
- pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
- }
- DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
- if (IS_IRONLAKE(dev))
- return;
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
- IS_I915GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~0xf0;
- gcfgc |= 0x80;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- }
-}
-
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- /* GPU isn't processing, downclock it. */
- if (!dev_priv->busy) {
- intel_decrease_renderclock(dev);
- intel_decrease_displayclock(dev);
- }
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
+ if (!dev_priv->busy)
dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
- } else {
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev)
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
+ }
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
+ }
}
/* Before G4X, SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED)
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
+ }
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev))
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
- if (SUPPORTS_INTEGRATED_DP(dev))
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
+ }
}
- if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
+ }
} else if (IS_I8XX(dev))
intel_dvo_init(dev);
@@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_changed = intelfb_probe,
};
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+ struct drm_gem_object *pwrctx;
+ int ret;
+
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return pwrctx;
+
+err_unpin:
+ i915_gem_object_unpin(pwrctx);
+err_unref:
+ drm_gem_object_unreference(pwrctx);
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (I915_HAS_RC6(dev)) {
- struct drm_gem_object *pwrctx;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
obj_priv = dev_priv->pwrctx->driver_private;
} else {
- pwrctx = drm_gem_object_alloc(dev, 4096);
- if (!pwrctx) {
- DRM_DEBUG("failed to alloc power context, "
- "RC6 disabled\n");
- goto out;
- }
+ struct drm_gem_object *pwrctx;
- ret = i915_gem_object_pin(pwrctx, 4096);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n",
- ret);
- drm_gem_object_unreference(pwrctx);
- goto out;
+ pwrctx = intel_alloc_power_context(dev);
+ if (pwrctx) {
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
}
-
- i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
- dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
}
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ if (obj_priv) {
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
}
-
-out:
- return;
}
/* Set up chip specific display functions */
@@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
del_timer_sync(&intel_crtc->idle_timer);
}
- intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
if (dev_priv->display.disable_fbc)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b..1349d9f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
case DP_B:
case PCH_DP_B:
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
+ dev_priv->hotplug_supported_mask |=
+ HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
+ dev_priv->hotplug_supported_mask |=
+ HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe..0643194 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (sdvox_reg == SDVOB) {
intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce2..f4b4aa2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
+ {
+ .ident = "PC-81005",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
{ }
};
@@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
+ /*
+ * check and update the status of the LVDS connector after receiving
+ * the LID notification event.
+ */
+ if (connector)
+ connector->status = connector->funcs->detect(connector);
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
@@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-#ifdef CONFIG_ACPI
-/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
- */
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
- void **return_value)
-{
- struct acpi_device *acpi_dev;
- int *lid_present = context;
-
- acpi_dev = NULL;
- /* Get the acpi device for device handle */
- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
- /* If there is no ACPI device for handle, return */
- return AE_OK;
- }
-
- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
- *lid_present = 1;
-
- return AE_OK;
-}
-
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
- int lid_present = 0;
-
- if (acpi_disabled) {
- /* If ACPI is disabled, there is no ACPI device tree to
- * check, so assume the LID device would have been present.
- */
- return 1;
- }
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- check_lid_device, NULL, &lid_present, NULL);
-
- return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
- /* In the absence of ACPI built in, assume that the LID device would
- * have been present.
- */
- return 1;
-}
-#endif
-
/**
* intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
@@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /*
- * Assume LVDS is present if there's an ACPI lid device or if the
- * device is present in the VBT.
- */
- if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
- DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
+ if (!lvds_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -1180,6 +1131,8 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc9..de5144c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
if (intel_output->ddc_bus == NULL)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec11..1d5b9b7 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
+
+ dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
drm_sysfs_connector_add(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 321044b..41dd8eb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
i2c.valid = true;
+ break;
}
}
@@ -1026,6 +1027,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ break;
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index fd94dbc..58f3426 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,34 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
return false;
}
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+ 0x00000808, /* r100 */
+ 0x00000808, /* rv100 */
+ 0x00000808, /* rs100 */
+ 0x00000808, /* rv200 */
+ 0x00000808, /* rs200 */
+ 0x00000808, /* r200 */
+ 0x00000808, /* rv250 */
+ 0x00000000, /* rs300 */
+ 0x00000808, /* rv280 */
+ 0x00000808, /* r300 */
+ 0x00000808, /* r350 */
+ 0x00000808, /* rv350 */
+ 0x00000808, /* rv380 */
+ 0x00000808, /* r420 */
+ 0x00000808, /* r423 */
+ 0x00000808, /* rv410 */
+ 0x00000000, /* rs400 */
+ 0x00000000, /* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_primary_dac *p_dac)
+{
+ p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+ return;
+}
+
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
radeon_encoder
*encoder)
@@ -604,20 +632,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
+ int found = 0;
- if (rdev->bios == NULL)
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
return NULL;
+ if (rdev->bios == NULL)
+ goto out;
+
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
- p_dac =
- kzalloc(sizeof(struct radeon_encoder_primary_dac),
- GFP_KERNEL);
-
- if (!p_dac)
- return NULL;
-
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,9 +656,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
-
+ found = 1;
}
+out:
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
return p_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2016156..b82ae61 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -615,7 +615,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
ret = connector_status_connected;
}
} else {
- if (radeon_connector->dac_load_detect) {
+ if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2..06123ba 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
+ kfree(master_priv);
return ret;
}
master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c68480..0c51f8e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -733,16 +733,18 @@ void radeon_device_fini(struct radeon_device *rdev)
*/
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
- struct radeon_device *rdev = dev->dev_private;
+ struct radeon_device *rdev;
struct drm_crtc *crtc;
int r;
- if (dev == NULL || rdev == NULL) {
+ if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW) {
return 0;
}
+ rdev = dev->dev_private;
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91d72b7..1fb2f02 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -329,8 +329,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- } else
+ } else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
+ if (ret == false)
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4cdd8b4..8495d4e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
bool radeon_fence_signaled(struct radeon_fence *fence)
{
- struct radeon_device *rdev = fence->rdev;
unsigned long irq_flags;
bool signaled = false;
- if (rdev->gpu_lockup) {
+ if (!fence)
return true;
- }
- if (fence == NULL) {
+
+ if (fence->rdev->gpu_lockup)
return true;
- }
+
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4..2f349a3 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_emit_t *emit = data;
int result;
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
- return -EINVAL;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
result = radeon_emit_irq(dev);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea42..4245218 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
while (status || r500_disp_int) {
/* SW interrupt */
- if (G_000040_SW_INT_EN(status))
+ if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index f102fcc..e02096c 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,8 +1,3 @@
-menu "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
-
-source "drivers/firewire/Kconfig"
-
config IEEE1394
tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
@@ -16,8 +11,13 @@ config IEEE1394
is the core support only, you will also need to select a driver for
your IEEE 1394 adapter.
- To compile this driver as a module, say M here: the
- module will be called ieee1394.
+ To compile this driver as a module, say M here: the module will be
+ called ieee1394.
+
+ NOTE:
+ ieee1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
config IEEE1394_OHCI1394
tristate "OHCI-1394 controllers"
@@ -29,19 +29,23 @@ config IEEE1394_OHCI1394
use one of these chipsets. It should work with any OHCI-1394
compliant card, however.
- To compile this driver as a module, say M here: the
- module will be called ohci1394.
+ To compile this driver as a module, say M here: the module will be
+ called ohci1394.
NOTE:
+ ohci1394 is superseded by the newer firewire-ohci driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
If you want to install firewire-ohci and ohci1394 together, you
should configure them only as modules and blacklist the driver(s)
which you don't want to have auto-loaded. Add either
- blacklist firewire-ohci
- or
blacklist ohci1394
blacklist video1394
blacklist dv1394
+ or
+ blacklist firewire-ohci
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
depending on your distribution.
@@ -58,8 +62,8 @@ config IEEE1394_PCILYNX
Instruments PCILynx chip. Note: this driver is written for revision
2 of this chip and may not work with revision 0.
- To compile this driver as a module, say M here: the
- module will be called pcilynx.
+ To compile this driver as a module, say M here: the module will be
+ called pcilynx.
Only some old and now very rare PCI and CardBus cards and
PowerMacs G3 B&W contain the PCILynx controller. Therefore
@@ -79,6 +83,14 @@ config IEEE1394_SBP2
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+ To compile this driver as a module, say M here: the module will be
+ called sbp2.
+
+ NOTE:
+ sbp2 is superseded by the newer firewire-sbp2 driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_SBP2_PHYS_DMA
bool "Enable replacement for physical DMA in SBP2"
depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL
@@ -111,6 +123,11 @@ config IEEE1394_ETH1394
The module is called eth1394 although it does not emulate Ethernet.
+ NOTE:
+ eth1394 is superseded by the newer firewire-net driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_RAWIO
tristate "raw1394 userspace interface"
depends on IEEE1394
@@ -123,6 +140,11 @@ config IEEE1394_RAWIO
To compile this driver as a module, say M here: the module will be
called raw1394.
+ NOTE:
+ raw1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_VIDEO1394
tristate "video1394 userspace interface"
depends on IEEE1394 && IEEE1394_OHCI1394
@@ -136,13 +158,18 @@ config IEEE1394_VIDEO1394
To compile this driver as a module, say M here: the module will be
called video1394.
+ NOTE:
+ video1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_DV1394
tristate "dv1394 userspace interface (deprecated)"
depends on IEEE1394 && IEEE1394_OHCI1394
help
The dv1394 driver is unsupported and may be removed from Linux in a
- future release. Its functionality is now provided by raw1394 together
- with libraries such as libiec61883.
+ future release. Its functionality is now provided by either
+ raw1394 or firewire-core together with libraries such as libiec61883.
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
@@ -153,5 +180,3 @@ config IEEE1394_VERBOSEDEBUG
will quickly result in large amounts of data sent to the system log.
Say Y if you really need the debugging output. Everyone else says N.
-
-endmenu
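For illustration only — the file name below is an assumption and the exact location differs between distributions — the blacklist lines recommended in the ohci1394 help text above would typically be collected in a single modprobe configuration file:

# /etc/modprobe.d/blacklist-firewire.conf  (hypothetical example)
blacklist ohci1394
blacklist video1394
blacklist dv1394

Swapping these entries for "blacklist firewire-ohci" keeps the legacy stack active instead, as the help text describes.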
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index b483b29..f967008 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -221,11 +221,27 @@ static int get_compatible_type(struct ff_device *ff, int effect_type)
}
/*
+ * Only left/right direction should be used (under/over 0x8000) for
+ * forward/reverse motor direction (to keep calculation fast & simple).
+ */
+static u16 ml_calculate_direction(u16 direction, u16 force,
+ u16 new_direction, u16 new_force)
+{
+ if (!force)
+ return new_direction;
+ if (!new_force)
+ return direction;
+ return (((u32)(direction >> 1) * force +
+ (new_direction >> 1) * new_force) /
+ (force + new_force)) << 1;
+}
+
+/*
* Combine two effects and apply gain.
*/
static void ml_combine_effects(struct ff_effect *effect,
struct ml_effect_state *state,
- unsigned int gain)
+ int gain)
{
struct ff_effect *new = state->effect;
unsigned int strong, weak, i;
@@ -252,8 +268,21 @@ static void ml_combine_effects(struct ff_effect *effect,
break;
case FF_RUMBLE:
- strong = new->u.rumble.strong_magnitude * gain / 0xffff;
- weak = new->u.rumble.weak_magnitude * gain / 0xffff;
+ strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff;
+ weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff;
+
+ if (effect->u.rumble.strong_magnitude + strong)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.strong_magnitude,
+ new->direction, strong);
+ else if (effect->u.rumble.weak_magnitude + weak)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.weak_magnitude,
+ new->direction, weak);
+ else
+ effect->direction = 0;
effect->u.rumble.strong_magnitude =
min(strong + effect->u.rumble.strong_magnitude,
0xffffU);
@@ -268,6 +297,13 @@ static void ml_combine_effects(struct ff_effect *effect,
/* here we also scale it 0x7fff => 0xffff */
i = i * gain / 0x7fff;
+ if (effect->u.rumble.strong_magnitude + i)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.strong_magnitude,
+ new->direction, i);
+ else
+ effect->direction = 0;
effect->u.rumble.strong_magnitude =
min(i + effect->u.rumble.strong_magnitude, 0xffffU);
effect->u.rumble.weak_magnitude =
@@ -411,8 +447,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
msecs_to_jiffies(state->effect->replay.length);
state->adj_at = state->play_at;
- ml_schedule_timer(ml);
-
} else {
debug("initiated stop");
@@ -420,10 +454,10 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
__set_bit(FF_EFFECT_ABORTING, &state->flags);
else
__clear_bit(FF_EFFECT_STARTED, &state->flags);
-
- ml_play_effects(ml);
}
+ ml_play_effects(ml);
+
return 0;
}
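A minimal standalone sketch (plain user-space C, not part of the patch) of the force-weighted averaging that the new ml_calculate_direction() helper above performs; the input values in main() are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

static uint16_t calc_direction(uint16_t dir, uint16_t force,
			       uint16_t new_dir, uint16_t new_force)
{
	if (!force)
		return new_dir;
	if (!new_force)
		return dir;
	/* halve both directions so the weighted sum stays on the
	 * left/right axis, then scale the average back up */
	return (uint16_t)((((uint32_t)(dir >> 1) * force +
			    (uint32_t)(new_dir >> 1) * new_force) /
			   (force + new_force)) << 1);
}

int main(void)
{
	/* 0x4000 at strength 0x6000 combined with 0xc000 at strength
	 * 0x2000 lands three quarters of the way toward 0x4000 */
	printf("0x%x\n", calc_direction(0x4000, 0x6000, 0xc000, 0x2000)); /* prints 0x6000 */
	return 0;
}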
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index f6c688c..b1edd77 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -210,7 +210,7 @@ static int iforce_open(struct input_dev *dev)
return 0;
}
-static void iforce_release(struct input_dev *dev)
+static void iforce_close(struct input_dev *dev)
{
struct iforce *iforce = input_get_drvdata(dev);
int i;
@@ -228,30 +228,17 @@ static void iforce_release(struct input_dev *dev)
/* Disable force feedback playback */
iforce_send_packet(iforce, FF_CMD_ENABLE, "\001");
+ /* Wait for the command to complete */
+ wait_event_interruptible(iforce->wait,
+ !test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));
}
switch (iforce->bus) {
#ifdef CONFIG_JOYSTICK_IFORCE_USB
- case IFORCE_USB:
- usb_kill_urb(iforce->irq);
-
- /* The device was unplugged before the file
- * was released */
- if (iforce->usbdev == NULL) {
- iforce_delete_device(iforce);
- kfree(iforce);
- }
- break;
-#endif
- }
-}
-
-void iforce_delete_device(struct iforce *iforce)
-{
- switch (iforce->bus) {
-#ifdef CONFIG_JOYSTICK_IFORCE_USB
case IFORCE_USB:
- iforce_usb_delete(iforce);
+ usb_kill_urb(iforce->irq);
+ usb_kill_urb(iforce->out);
+ usb_kill_urb(iforce->ctrl);
break;
#endif
#ifdef CONFIG_JOYSTICK_IFORCE_232
@@ -303,7 +290,7 @@ int iforce_init_device(struct iforce *iforce)
input_dev->name = "Unknown I-Force device";
input_dev->open = iforce_open;
- input_dev->close = iforce_release;
+ input_dev->close = iforce_close;
/*
* On-device memory allocation.
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 9f289d8..b41303d 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -109,6 +109,7 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = urb->context;
if (urb->status) {
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dbg("urb->status %d, exiting", urb->status);
return;
}
@@ -186,33 +187,19 @@ fail:
return err;
}
-/* Called by iforce_delete() */
-void iforce_usb_delete(struct iforce* iforce)
-{
- usb_kill_urb(iforce->irq);
- usb_kill_urb(iforce->out);
- usb_kill_urb(iforce->ctrl);
-
- usb_free_urb(iforce->irq);
- usb_free_urb(iforce->out);
- usb_free_urb(iforce->ctrl);
-}
-
static void iforce_usb_disconnect(struct usb_interface *intf)
{
struct iforce *iforce = usb_get_intfdata(intf);
- int open = 0; /* FIXME! iforce->dev.handle->open; */
usb_set_intfdata(intf, NULL);
- if (iforce) {
- iforce->usbdev = NULL;
- input_unregister_device(iforce->dev);
- if (!open) {
- iforce_delete_device(iforce);
- kfree(iforce);
- }
- }
+ input_unregister_device(iforce->dev);
+
+ usb_free_urb(iforce->irq);
+ usb_free_urb(iforce->out);
+ usb_free_urb(iforce->ctrl);
+
+ kfree(iforce);
}
static struct usb_device_id iforce_usb_ids [] = {
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index f2d91f4..9f494b7 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -150,11 +150,9 @@ void iforce_serial_xmit(struct iforce *iforce);
/* iforce-usb.c */
void iforce_usb_xmit(struct iforce *iforce);
-void iforce_usb_delete(struct iforce *iforce);
/* iforce-main.c */
int iforce_init_device(struct iforce *iforce);
-void iforce_delete_device(struct iforce *iforce);
/* iforce-packets.c */
int iforce_control_playback(struct iforce*, u16 id, unsigned int);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index a357357..1f5e2ce 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -134,7 +134,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
#define ATKBD_CMD_GETID 0x02f2
#define ATKBD_CMD_SETREP 0x10f3
#define ATKBD_CMD_ENABLE 0x00f4
-#define ATKBD_CMD_RESET_DIS 0x00f5
+#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
+#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
#define ATKBD_CMD_SETALL_MBR 0x00fa
#define ATKBD_CMD_RESET_BAT 0x02ff
#define ATKBD_CMD_RESEND 0x00fe
@@ -836,7 +837,7 @@ static void atkbd_cleanup(struct serio *serio)
struct atkbd *atkbd = serio_get_drvdata(serio);
atkbd_disable(atkbd);
- ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_BAT);
+ ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_DEF);
}
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 34f4a29..d3c8b61 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -29,11 +29,13 @@ struct matrix_keypad {
unsigned short *keycodes;
unsigned int row_shift;
+ DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
+
uint32_t last_key_state[MATRIX_MAX_COLS];
struct delayed_work work;
+ spinlock_t lock;
bool scan_pending;
bool stopped;
- spinlock_t lock;
};
/*
@@ -222,9 +224,16 @@ static int matrix_keypad_suspend(struct device *dev)
matrix_keypad_stop(keypad->input_dev);
- if (device_may_wakeup(&pdev->dev))
- for (i = 0; i < pdata->num_row_gpios; i++)
- enable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+ if (device_may_wakeup(&pdev->dev)) {
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ if (!test_bit(i, keypad->disabled_gpios)) {
+ unsigned int gpio = pdata->row_gpios[i];
+
+ if (enable_irq_wake(gpio_to_irq(gpio)) == 0)
+ __set_bit(i, keypad->disabled_gpios);
+ }
+ }
+ }
return 0;
}
@@ -236,9 +245,15 @@ static int matrix_keypad_resume(struct device *dev)
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i;
- if (device_may_wakeup(&pdev->dev))
- for (i = 0; i < pdata->num_row_gpios; i++)
- disable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+ if (device_may_wakeup(&pdev->dev)) {
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ if (test_and_clear_bit(i, keypad->disabled_gpios)) {
+ unsigned int gpio = pdata->row_gpios[i];
+
+ disable_irq_wake(gpio_to_irq(gpio));
+ }
+ }
+ }
matrix_keypad_start(keypad->input_dev);
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index eeaa7ac..21d6184 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -253,14 +253,6 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
u8 reg;
int ret;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
/* Read & Clear TWL4030 pending interrupt */
ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
@@ -403,7 +395,8 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
*
* NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ...
*/
- error = request_irq(kp->irq, do_kp_irq, 0, pdev->name, kp);
+ error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
+ 0, pdev->name, kp);
if (error) {
dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
kp->irq);
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index bdde5c8..e9069b8 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,18 +39,8 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
int err;
u8 value;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate since this is a threaded
- * IRQ and can sleep due to the i2c reads it has to issue.
- * Although it might be friendlier not to borrow this thread
- * context...
- */
- local_irq_enable();
-#endif
-
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
- STS_HW_CONDITIONS);
+ STS_HW_CONDITIONS);
if (!err) {
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
input_sync(pwr);
@@ -80,7 +70,7 @@ static int __devinit twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = request_irq(irq, powerbutton_irq,
+ err = request_threaded_irq(irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_pwrbutton", pwr);
if (err < 0) {
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 38da6ab..c0afb71 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -1328,7 +1328,7 @@ static struct platform_driver wistron_driver = {
.driver = {
.name = "wistron-bios",
.owner = THIS_MODULE,
-#if CONFIG_PM
+#ifdef CONFIG_PM
.pm = &wistron_pm_ops,
#endif
},
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 3feeb3a..c714ca2 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
default y
- depends on MOUSE_PS2 && X86
+ depends on MOUSE_PS2 && X86 && DMI
help
Say Y here if you have a Fujitsu B-series Lifebook PS/2
TouchScreen connected to your system.
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index b146237..90be30e 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -427,7 +427,6 @@ static void hgpk_recalib_work(struct work_struct *work)
static int hgpk_register(struct psmouse *psmouse)
{
- struct input_dev *dev = psmouse->dev;
int err;
/* register handlers */
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 2e6bdfe..6d7aa10 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -44,7 +44,6 @@ static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
}
static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
/* FLORA-ie 55mi */
.matches = {
@@ -118,7 +117,6 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
},
},
{ }
-#endif
};
void __init lifebook_module_init(void)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fd0bc09..401ac6b 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1137,7 +1137,10 @@ static void psmouse_cleanup(struct serio *serio)
if (psmouse->cleanup)
psmouse->cleanup(psmouse);
- psmouse_reset(psmouse);
+/*
+ * Reset the mouse to defaults (bare PS/2 protocol).
+ */
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
/*
* Some boxes, such as HP nx7400, get terribly confused if mouse
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 0236f0d..e0f3018 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -284,13 +284,7 @@ static void serio_handle_event(void)
mutex_lock(&serio_mutex);
- /*
- * Note that we handle only one event here to give swsusp
- * a chance to freeze kseriod thread. Serio events should
- * be pretty rare so we are not concerned about taking
- * performance hit.
- */
- if ((event = serio_get_event())) {
+ while ((event = serio_get_event())) {
switch (event->type) {
case SERIO_REGISTER_PORT:
@@ -380,10 +374,9 @@ static struct serio *serio_get_pending_child(struct serio *parent)
static int serio_thread(void *nothing)
{
- set_freezable();
do {
serio_handle_event();
- wait_event_freezable(serio_wait,
+ wait_event_interruptible(serio_wait,
kthread_should_stop() || !list_empty(&serio_event_list));
} while (!kthread_should_stop());
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 951c57b..ede4658 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT.
*/
- if (num >= ARRAY_SIZE(cpu->arch.gdt))
+ if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
kill_guest(cpu, "too many gdt entries %i", num);
+ return;
+ }
/* Set it up, then fix it. */
cpu->arch.gdt[num].a = lo;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f4f5f82..dd3dfe4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
- !mddev->hold_active) {
+ mddev->ctime == 0 && !mddev->hold_active) {
+ /* Array is not configured at all, and not held active,
+ * so destroy it */
list_del(&mddev->all_mddevs);
if (mddev->gendisk) {
/* we did a probe so need to clean up.
@@ -4355,7 +4357,7 @@ static int do_md_run(mddev_t * mddev)
mddev->barriers_work = 1;
mddev->ok_start_degraded = start_dirty_degraded;
- if (start_readonly)
+ if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
@@ -4419,33 +4421,6 @@ static int do_md_run(mddev_t * mddev)
set_capacity(disk, mddev->array_sectors);
- /* If there is a partially-recovered drive we need to
- * start recovery here. If we leave it to md_check_recovery,
- * it will remove the drives and not do the right thing
- */
- if (mddev->degraded && !mddev->sync_thread) {
- int spares = 0;
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags))
- /* complete an interrupted recovery */
- spares++;
- if (spares && mddev->pers->sync_request) {
- mddev->recovery = 0;
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync,
- mddev,
- "resync");
- if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync"
- " thread...\n",
- mdname(mddev));
- /* leave the spares where they are, it shouldn't hurt */
- mddev->recovery = 0;
- }
- }
- }
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5262,6 +5237,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
+ /* ensure mddev_put doesn't delete this now that there
+ * is some minimal configuration.
+ */
+ mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
@@ -6494,10 +6473,11 @@ void md_do_sync(mddev_t *mddev)
mddev->curr_resync = 2;
try_again:
- if (kthread_should_stop()) {
+ if (kthread_should_stop())
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
- }
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index fe44789..6223bf0 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -202,14 +202,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
unsigned long flags;
int su;
- if ((tcode != TCODE_WRITE_QUADLET_REQUEST &&
- tcode != TCODE_WRITE_BLOCK_REQUEST) ||
- offset != CSR_REGISTER_BASE + CSR_FCP_RESPONSE ||
- length == 0 ||
- (((u8 *)payload)[0] & 0xf0) != 0) {
- fw_send_response(card, request, RCODE_TYPE_ERROR);
+ if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0)
return;
- }
su = ((u8 *)payload)[1] & 0x7;
@@ -230,10 +224,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
}
spin_unlock_irqrestore(&node_list_lock, flags);
- if (fdtv) {
+ if (fdtv)
avc_recv(fdtv, payload, length);
- fw_send_response(card, request, RCODE_COMPLETE);
- }
}
static struct fw_address_handler fcp_handler = {
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index efba702..3d5f40c 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -40,8 +40,7 @@
#define SG_TABLESIZE 30
-static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
static spinlock_t i2o_config_lock;
@@ -751,7 +750,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
lock_kernel();
switch (cmd) {
case I2OGETIOPS:
- ret = i2o_cfg_ioctl(NULL, file, cmd, arg);
+ ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
ret = i2o_cfg_passthru32(file, cmd, arg);
@@ -984,11 +983,11 @@ out:
/*
* IOCTL Handler
*/
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int ret;
+ lock_kernel();
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_getiops(arg);
@@ -1044,7 +1043,7 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
osm_debug("unknown ioctl called!\n");
ret = -EINVAL;
}
-
+ unlock_kernel();
return ret;
}
@@ -1118,7 +1117,7 @@ static int cfg_release(struct inode *inode, struct file *file)
static const struct file_operations config_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .ioctl = i2o_cfg_ioctl,
+ .unlocked_ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 20d29ba..9df9a5a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -568,12 +568,12 @@ static void twl4030_sih_do_edge(struct work_struct *work)
bytes[byte] &= ~(0x03 << off);
- spin_lock_irq(&d->lock);
+ raw_spin_lock_irq(&d->lock);
if (d->status & IRQ_TYPE_EDGE_RISING)
bytes[byte] |= BIT(off + 1);
if (d->status & IRQ_TYPE_EDGE_FALLING)
bytes[byte] |= BIT(off + 0);
- spin_unlock_irq(&d->lock);
+ raw_spin_unlock_irq(&d->lock);
edge_change &= ~BIT(i);
}
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc2311..77cf090 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,6 +56,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
/* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
- sizeof(dev->dev_addr));
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
/* The Tx-block list is written as needed. We just set up the values. */
lp->tx_cmd_link = IDLELOOP + 4;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e58a653..dd9a09c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2346,6 +2346,7 @@ config GELIC_NET
config GELIC_WIRELESS
bool "PS3 Wireless support"
+ depends on WLAN
depends on GELIC_NET
select WIRELESS_EXT
help
@@ -2358,6 +2359,7 @@ config GELIC_WIRELESS
config GELIC_WIRELESS_OLD_PSK_INTERFACE
bool "PS3 Wireless private PSK interface (OBSOLETE)"
depends on GELIC_WIRELESS
+ select WEXT_PRIV
help
This option retains the obsolete private interface to pass
the PSK from user space programs to the driver. The PSK
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e..39e1c0d 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
tristate "Nuvoton w90p910 Ethernet support"
depends on ARM && ARCH_W90X900
select PHYLIB
+ select MII
help
Say Y here if you want to use built-in Ethernet ports
on w90p910 processor.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014..9fd8e5e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -275,6 +275,7 @@ struct be_adapter {
u32 tx_fc; /* Tx flow control */
int link_speed;
u8 port_type;
+ u8 transceiver;
};
extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd9..102ade1 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1479,6 +1479,41 @@ err:
return status;
}
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
@@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = 4;
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef..c002b83 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -155,6 +155,7 @@ struct be_mcc_mailbox {
#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
@@ -821,6 +822,19 @@ struct be_cmd_resp_loopback_test {
u32 ticks_compl;
};
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
/********************** DDR DMA test *********************/
struct be_cmd_req_ddrdma_test {
struct be_cmd_req_hdr hdr;
@@ -912,3 +926,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92c..5d001c4 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_read_port_type(adapter, adapter->port_num,
&connector);
- switch (connector) {
- case 7:
- ecmd->port = PORT_FIBRE;
- break;
- default:
- ecmd->port = PORT_TP;
- break;
+ if (!status) {
+ switch (connector) {
+ case 7:
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 0:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_INTERNAL;
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
ecmd->phy_address = adapter->port_num;
- ecmd->transceiver = XCVR_INTERNAL;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
return 0;
}
@@ -489,6 +512,19 @@ err:
return ret;
}
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
@@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_MAC_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[0] != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_PHY_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[1] != 0)
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_ONE_PORT_EXT_LOOPBACK,
- 1500, 2, 0xabc);
- if (data[2] != 0)
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
+ }
data[3] = be_test_ddr_dma(adapter);
if (data[3] != 0)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba135..306c2b8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+ bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+ CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0fb7a49..822f586 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replys
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave->dev->master->name);
+ best->slave ? best->slave->dev->master->name : "NULL");
}
best->is_active = 1;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e0620d0..8bd3c9f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_select_queue = gfar_select_queue,
.ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
return priv->vlgrp || priv->rx_csum_enable;
}
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
- skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
- if (amount_pull)
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
+ }
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
-
+ skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3..052c740 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -87,6 +87,7 @@ History:
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
/* copy out MAC address */
- for (z = 0; z < sizeof(dev->dev_addr); z++)
+ for (z = 0; z < ETH_ALEN; z++)
dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
/* print config */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e91..c505b50d 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
- E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e..3670a66 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
- if (ret_val)
- goto out;
-
- /* Set number of link attempts before downshift */
- ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
- if (ret_val)
- goto out;
- phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
- ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
out:
return ret_val;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d527..f771a6c 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0..933c64f 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1306,13 +1306,8 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- if (mac->type < e1000_82576) {
- fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
- } else {
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
- fc->low_water = fc->high_water - 16;
- }
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
fc->current_mode = fc->requested_mode;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index e9dd95f..0dbd032 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2763,7 +2763,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
random_ether_addr(hw->mac.addr);
} else {
err = hw->mac.ops.read_mac_addr(hw);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd64387..1a2ea62 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4373,6 +4373,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
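The comment added to ixgbe_resume() above records a subtle PCI PM detail: pci_restore_state() clears dev->state_saved, so the driver immediately re-saves the state. A minimal sketch of that ordering in a hypothetical resume handler (foo_resume() and its error handling are illustrative only, not part of this patch):

#include <linux/pci.h>

static int foo_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved; save again so a
	 * later suspend (or error path) still has valid saved state. */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	return 0;
}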
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a3..e154677 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -45,6 +45,7 @@ static const char *const version =
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+ memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b..103e8b0 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
EFX_LOG(efx, "create port\n");
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
goto err;
- if (phy_flash_cfg)
- efx->phy_mode = PHY_MODE_SPECIAL;
-
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd2..9d009c4 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
static void falcon_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f..8ccab2c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
}
-/* Get status of XAUI link */
-static bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
- if (LOOPBACK_INTERNAL(efx))
- return true;
-
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
- /* If the link is up, then check the phy side of the xaui link */
- if (efx->link_state.up && link_ok)
- if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
- link_ok = efx_mdio_phyxgxs_lane_sync(efx);
-
return link_ok;
}
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
- bool mac_up = falcon_xaui_link_ok(efx);
+ bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
falcon_reset_xaui(efx);
udelay(200);
- mac_up = falcon_xaui_link_ok(efx);
+ mac_up = falcon_xmac_link_ok(efx);
--tries;
}
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
- return !falcon_check_xaui_link_up(efx, 5);
+ return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_mask_status_intr(efx, true);
return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
return;
falcon_mask_status_intr(efx, false);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_mask_status_intr(efx, true);
}
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5..eb694af 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg;
+ struct efx_mcdi_phy_cfg *phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ u32 caps;
int rc;
- /* TODO: Move phy_data initialisation to
- * phy_op->probe/remove, rather than init/fini */
- phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
- if (phy_cfg == NULL) {
- rc = -ENOMEM;
- goto fail_alloc;
- }
- rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
- efx->phy_type = phy_cfg->type;
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_cfg->channel;
- efx->mdio.prtad = phy_cfg->port;
- efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
- if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
- kfree(phy_cfg);
-
- return 0;
-
-fail:
- kfree(phy_cfg);
-fail_alloc:
- return rc;
-}
-
-static int efx_mcdi_phy_init(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_cfg *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- u32 caps;
- int rc;
-
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- efx->phy_data = phy_data;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
- else
- phy_data->forced_cap = caps;
-
return 0;
fail:
@@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void efx_mcdi_phy_fini(struct efx_nic *efx)
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
- .init = efx_mcdi_phy_init,
+ .init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
- .fini = efx_mcdi_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.run_tests = NULL,
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f..d5aab5b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -524,6 +524,7 @@ struct efx_phy_operations {
int (*probe) (struct efx_nic *efx);
int (*init) (struct efx_nic *efx);
void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
void (*get_settings) (struct efx_nic *efx,
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be2..db44224 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc7..ff8f0a4 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -33,6 +33,9 @@
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
-static int qt2025c_wait_reset(struct efx_nic *efx)
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
- unsigned long timeout = jiffies + 10 * HZ;
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
old_counter = counter;
else if (counter != old_counter)
break;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ EFX_ERR(efx, "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
- msleep(10);
+ }
+ msleep(QT2025C_HEARTB_WAIT);
}
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ EFX_ERR(efx, "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
}
return 0;
@@ -137,6 +335,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_phy_probe(struct efx_nic *efx)
{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +353,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_init(struct efx_nic *efx)
{
- struct qt202x_phy_data *phy_data;
u32 devid;
int rc;
@@ -155,17 +362,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
- phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
-
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
- phy_data->phy_mode = efx->phy_mode;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
return 0;
}
@@ -183,6 +387,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
return efx->link_state.up != was_up;
}
@@ -191,6 +398,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
@@ -224,7 +435,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-static void qt202x_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
@@ -236,7 +447,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
- .fini = qt202x_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f..f8c6771 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
void siena_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572..3009c29 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
int rc;
rtnl_lock();
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
- MDIO_PMA_10GBT_TXPWR_SHORT,
- count != 0 && *buf != '0');
- rc = efx_reconfigure_port(efx);
+ if (efx->state != STATE_RUNNING) {
+ rc = -EBUSY;
+ } else {
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+ MDIO_PMA_10GBT_TXPWR_SHORT,
+ count != 0 && *buf != '0');
+ rc = efx_reconfigure_port(efx);
+ }
rtnl_unlock();
return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
return 0;
}
-static int sfx7101_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct efx_nic *efx)
{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
- return 0;
-}
+ struct tenxpress_phy_data *phy_data;
+ int rc;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ /* Create any special files */
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_phy_short_reach);
+ if (rc)
+ goto fail;
+ }
+
+ if (efx->phy_type == PHY_TYPE_SFX7101) {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+ } else {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = (SFT9001_LOOPBACKS |
+ FALCON_XMAC_LOOPBACKS |
+ FALCON_GMAC_LOOPBACKS);
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full);
+ }
-static int sft9001_phy_probe(struct efx_nic *efx)
-{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
- FALCON_GMAC_LOOPBACKS);
return 0;
+
+fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
- struct tenxpress_phy_data *phy_data;
- int rc = 0;
+ int rc;
falcon_board(efx)->type->init_phy(efx);
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
- phy_data->phy_mode = efx->phy_mode;
-
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- goto fail;
+ return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
- goto fail;
+ return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
- goto fail;
+ return rc;
- /* Initialise advertising flags */
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full);
- if (efx->phy_type != PHY_TYPE_SFX7101)
- efx->link_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full);
+ /* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- rc = device_create_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
- if (rc)
- goto fail;
- }
-
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
-
- fail:
- kfree(efx->phy_data);
- efx->phy_data = NULL;
- return rc;
}
/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void tenxpress_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- /* Power down the LNPGA */
- reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
-
- /* Waiting here ensures that the board fini, which can turn
- * off the power to the PHY, won't get run until the LNPGA
- * powerdown has been given long enough to complete. */
- schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
- }
-
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
- .probe = sfx7101_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
@@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
};
struct efx_phy_operations falcon_sft9001_phy_ops = {
- .probe = sft9001_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94..a8b70ef 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
EFX_TXQ_MASK];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
- buffer->len = 0;
- buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
+ buffer->len = 0;
+ buffer->continuation = true;
}
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f2..2834a01 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
wake_up_interruptible_sync(sk->sk_sleep);
- tun = container_of(sk, struct tun_sock, sk)->tun;
+ tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
static void tun_sock_destruct(struct sock *sk)
{
- free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+ free_netdev(tun_sk(sk)->tun->dev);
}
static struct proto tun_proto = {
@@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
- container_of(sk, struct tun_sock, sk)->tun = tun;
+ tun_sk(sk)->tun = tun;
security_tun_dev_post_create(sk);
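The tun.c hunk above replaces the open-coded container_of() with a tun_sk() accessor. The helper itself is outside this excerpt; its presumed shape is a thin inline wrapper over container_of(), roughly:

#include <net/sock.h>

struct tun_struct;			/* opaque for this sketch */

struct tun_sock {
	struct sock		sk;
	struct tun_struct	*tun;
};

static inline struct tun_sock *tun_sk(struct sock *sk)
{
	/* Recover the containing tun_sock from its embedded sock. */
	return container_of(sk, struct tun_sock, sk);
}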
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088..41ad2f3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Wait for and prevent any further xmits. */
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_tx_wake_all_queues(ugeth->ndev);
+ netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
ugeth->oldspeed = phydev->speed;
}
- /*
- * To change the MAC configuration we need to disable the
- * controller. To do so, we have to either grab ugeth->lock,
- * which is a bad idea since 'graceful stop' commands might
- * take quite a while, or we can quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
-
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
@@ -3273,7 +3279,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ if (bd == ugeth->txBd[txQ]) /* queue empty? */
break;
dev->stats.tx_packets++;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f..611b804 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
return 0;
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
-
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
- spin_lock(&rp->lock);
+ spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
- spin_unlock(&rp->lock);
+ spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- spin_lock_irq(&rp->lock);
-
- netif_stop_queue(dev);
napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
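The via-rhine change above follows the usual deferral pattern: ndo_tx_timeout() is called in atomic context, so the heavyweight reset is pushed to a work_struct that may sleep and take *_bh locks. Stripped to its skeleton (the foo_* names are illustrative, not taken from the patch; INIT_WORK() would be done once at probe time, as the patch does for reset_task):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	struct work_struct reset_task;
};

static void foo_reset_task(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv,
					     reset_task);
	struct net_device *dev = priv->dev;

	/* Process context: free/rebuild rings, reset the chip, then
	 * let the stack transmit again. */
	netif_wake_queue(dev);
}

static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Atomic context: just record the event and defer the reset. */
	schedule_work(&priv->reset_task);
}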
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a..0fdfd58 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
high_dma = 1;
if (pci_set_consistent_dma_mask(pdev,
- 0xffffffffffffffffULL)) {
+ DMA_BIT_MASK(64))) {
vxge_debug_init(VXGE_ERR,
"%s : unable to obtain 64bit DMA for "
"consistent allocations", __func__);
ret = -ENOMEM;
goto _exit1;
}
- } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 32bit DMA", __func__);
} else {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4c086f..e63b7c4 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1903,17 +1903,6 @@ accept:
rxs->noise = sc->ah->ah_noise_floor;
rxs->signal = rxs->noise + rs.rs_rssi;
- /* An rssi of 35 indicates you should be able use
- * 54 Mbps reliably. A more elaborate scheme can be used
- * here but it requires a map of SNR/throughput for each
- * possible mode used */
- rxs->qual = rs.rs_rssi * 100 / 35;
-
- /* rssi can be more than 35 though, anything above that
- * should be considered at 100% */
- if (rxs->qual > 100)
- rxs->qual = 100;
-
rxs->antenna = rs.rs_antenna;
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
+ /* Set PHY calibration interval */
+ ah->ah_cal_intval = ath5k_calinterval;
+
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
/* Set ack to be sent at low bit-rates */
ath5k_hw_set_ack_bitrate_high(ah, false);
-
- /* Set PHY calibration inteval */
- ah->ah_cal_intval = ath5k_calinterval;
-
ret = 0;
done:
mmiowb();
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 71b84d9..efc420c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
wait = wait_time;
while (ath9k_hw_numtxpending(ah, q)) {
if ((--wait) == 0) {
- ath_print(common, ATH_DBG_QUEUE,
+ ath_print(common, ATH_DBG_FATAL,
"Failed to stop TX DMA in 100 "
"msec after killing last frame\n");
break;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0c87771..e185479 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -77,6 +77,9 @@
#define ATH9K_TXERR_XTXOP 0x08
#define ATH9K_TXERR_TIMER_EXPIRED 0x10
#define ATH9K_TX_ACKED 0x20
+#define ATH9K_TXERR_MASK \
+ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
+ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
#define ATH9K_TX_BA 0x01
#define ATH9K_TX_PWRMGMT 0x02
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c487434..996eb90 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ /* Stop ANI */
+ del_timer_sync(&common->ani.timer);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ /* Start ANI */
+ ath_start_ani(common);
+
return r;
}
@@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
return; /* another wiphy still in use */
}
+ /* Ensure HW is awake when we try to shut it down. */
+ ath9k_ps_wakeup(sc);
+
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_ps_restore(sc);
+
+ /* Finally, put the chip in FULL SLEEP mode */
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
@@ -2641,8 +2653,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ ath9k_ps_wakeup(sc);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
ath_beacon_return(sc, avp);
+ ath9k_ps_restore(sc);
}
sc->sc_flags &= ~SC_OP_BEACONS;
@@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_start(sc, sta, tid, ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_resume(sc, sta, tid);
+ ath9k_ps_restore(sc);
break;
default:
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 5321f73..f7af5ea 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
-const static struct ath_bus_ops ath_pci_bus_ops = {
+static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
.cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2a11cc5..fa12b90 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (npend) {
int r;
- ath_print(common, ATH_DBG_XMIT,
+ ath_print(common, ATH_DBG_FATAL,
"Unable to stop TxDMA. Reset HAL!\n");
spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
if (r)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d\n",
@@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
- *
- * If fragmentation is on, the sequence number is
- * not overridden, since it has been
- * incremented by the fragmentation routine.
- *
- * FIXME: check if the fragmentation threshold exceeds
- * IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
- hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
- IEEE80211_SEQ_SEQ_SHIFT);
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
@@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
}
- if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
+ if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
+ (sc->sc_flags & SC_OP_TXAGGR))
assign_aggr_tid_seqno(skb, bf);
bf->bf_mpdu = skb;
@@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int padpos, padsize;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_tx_control txctl;
@@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
* BSSes.
*/
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
sc->tx.seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* Add the padding after the header if this is not already done */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- if (hdrlen & 3) {
- padsize = hdrlen % 4;
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len>padpos) {
if (skb_headroom(skb) < padsize) {
ath_print(common, ATH_DBG_XMIT,
"TX CABQ padding failed\n");
@@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
skb_push(skb, padsize);
- memmove(skb->data, skb->data + padsize, hdrlen);
+ memmove(skb->data, skb->data + padsize, padpos);
}
txctl.txq = sc->beacon.cabq;
@@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ int padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = hdrlen & 3;
- if (padsize && hdrlen >= 24) {
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len>padpos+padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
- memmove(skb->data + padsize, skb->data, hdrlen);
+ memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
@@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
+ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be27..88d1fd0 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,160 +383,44 @@ static inline
}
}
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
- dma_addr_t addr, size_t size)
-{
- switch (ring->type) {
- case B43_DMA_30BIT:
- if ((u64)addr + size > (1ULL << 30))
- return 0;
- break;
- case B43_DMA_32BIT:
- if ((u64)addr + size > (1ULL << 32))
- return 0;
- break;
- case B43_DMA_64BIT:
- /* Currently we can't have addresses beyond
- * 64bit in the kernel. */
- break;
- }
- return 1;
-}
-
-#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
- dma_addr_t dmaaddr, size_t size)
-{
- ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
- free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size,
- gfp_t gfp_flags)
-{
- void *base;
-
- base = (void *)__get_free_pages(gfp_flags, get_order(size));
- if (!base)
- return NULL;
- memset(base, 0, size);
- *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
- DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
- free_pages((unsigned long)base, get_order(size));
- return NULL;
- }
-
- return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size)
-{
- void *base;
-
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- /* The memory does not fit our device constraints.
- * Retry with GFP_DMA set to get lower memory. */
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL | GFP_DMA);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "in the GFP_DMA region for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- b43err(ring->dev->wl, "Failed to allocate DMA "
- "ringmemory that fits device constraints\n");
- return NULL;
- }
- }
- /* We expect the memory to be 4k aligned, at least. */
- if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- return NULL;
- }
-
- return base;
-}
-
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- unsigned int required;
- void *base;
- dma_addr_t dmaaddr;
-
- /* There are several requirements to the descriptor ring memory:
- * - The memory region needs to fit the address constraints for the
- * device (same as for frame buffers).
- * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
- * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+ gfp_t flags = GFP_KERNEL;
+
+ /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+ * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
+ * has shown that 4K is sufficient for the latter as long as the buffer
+ * does not cross an 8K boundary.
+ *
+ * For unknown reasons - possibly a hardware error - the BCM4311 rev
+ * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
+ * which accounts for the GFP_DMA flag below.
+ *
+ * The flags here must match the flags in free_ringmemory below!
*/
-
if (ring->type == B43_DMA_64BIT)
- required = ring->nr_slots * sizeof(struct b43_dmadesc64);
- else
- required = ring->nr_slots * sizeof(struct b43_dmadesc32);
- if (B43_WARN_ON(required > 0x1000))
+ flags |= GFP_DMA;
+ ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+ B43_DMA_RINGMEMSIZE,
+ &(ring->dmabase), flags);
+ if (!ring->descbase) {
+ b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
-
- ring->alloc_descsize = 0x1000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
- return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
- /* We're on <=32bit DMA, or we already got 8k aligned memory.
- * That's all we need, so we're fine. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
- }
- b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
- /* Ok, we failed at the 8k alignment requirement.
- * Try to force-align the memory region now. */
- ring->alloc_descsize = 0x2000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
- return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if (is_8k_aligned(dmaaddr)) {
- /* We're already 8k aligned. That Ok, too. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
}
- /* Force-align it to 8k */
- ring->descbase = (void *)((u8 *)base + 0x1000);
- ring->dmabase = dmaaddr + 0x1000;
- B43_WARN_ON(!is_8k_aligned(ring->dmabase));
+ memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
- b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
- ring->alloc_dmabase, ring->alloc_descsize);
+ gfp_t flags = GFP_KERNEL;
+
+ if (ring->type == B43_DMA_64BIT)
+ flags |= GFP_DMA;
+
+ ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase, flags);
}
/* Reset the RX DMA channel */
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
return 1;
- if (!b43_dma_address_ok(ring, addr, buffersize)) {
- /* We can't support this address. Unmap it again. */
- unmap_descbuffer(ring, addr, buffersize, dma_to_device);
- return 1;
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + buffersize > (1ULL << 30))
+ goto address_error;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + buffersize > (1ULL << 32))
+ goto address_error;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
}
/* The address is OK. */
return 0;
+
+address_error:
+ /* We can't support this address. Unmap it again. */
+ unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+ return 1;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
return 0;
}
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
}
/* Now transfer the whole frame. */
wmb();
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
ops->poke_tx(ring, next_slot(ring, slot));
return 0;
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e607b39..f7ab37c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
} __attribute__ ((__packed__));
/* Misc DMA constants */
+#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
@@ -246,12 +247,6 @@ struct b43_dmaring {
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
- /* Pointers and size of the originally allocated and mapped memory
- * region for the descriptor ring. */
- void *alloc_descbase;
- dma_addr_t alloc_dmabase;
- unsigned int alloc_descsize;
- /* Pointer to our wireless device. */
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7da1dab..234891d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
snr = rx_stats_sig_avg / rx_stats_noise_diff;
rx_status.noise = rx_status.signal -
iwl3945_calc_db_from_ratio(snr);
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
- rx_status.noise);
-
- /* If noise info not available, calculate signal quality indicator (%)
- * using just the dBm signal level. */
} else {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
}
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_status.noise,
rx_stats_sig_avg, rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
rc = -EIO;
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
};
struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ecc23ec..531fa12 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
*
*****************************************************************************/
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 386513b..484c5fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
/* calculate tx gain adjustment based on power supply voltage */
- voltage = priv->calib_info->voltage;
+ voltage = le16_to_cpu(priv->calib_info->voltage);
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
voltage_compensation =
iwl4965_get_voltage_compensation(voltage, init_voltage);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 4ef6804..bc056e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -92,11 +92,15 @@
static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_TEMPERATURE);
- /* offset = temperature - voltage / coef */
- s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
- return offset;
+ u16 temperature, voltage;
+ __le16 *temp_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
+
+ temperature = le16_to_cpu(temp_calib[0]);
+ voltage = le16_to_cpu(temp_calib[1]);
+
+ /* offset = temp - volt / coeff */
+ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
/* Fixed (non-configurable) rx data from phy */
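
The rewritten iwl_temp_calib_to_offset() above reads two consecutive little-endian 16-bit calibration words (temperature, then voltage) and returns temperature - voltage / coefficient. The userspace sketch below walks through the same byte-order handling and arithmetic; the coefficient value of 9 and the sample EEPROM bytes are made up for the example and are not the driver's IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF.

#include <stdint.h>
#include <stdio.h>

#define VOLT_TO_TEMP_COEFF 9    /* placeholder value for the example only */

/* Mirror of le16_to_cpu() for a raw byte buffer. */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

/* calib points at two little-endian words: temperature, then voltage. */
static int32_t temp_calib_to_offset(const uint8_t *calib)
{
        uint16_t temperature = get_le16(calib);
        uint16_t voltage = get_le16(calib + 2);

        /* offset = temperature - voltage / coefficient */
        return (int32_t)temperature - voltage / VOLT_TO_TEMP_COEFF;
}

int main(void)
{
        uint8_t calib[4] = { 0x2c, 0x01, 0x5a, 0x00 };  /* temp = 300, volt = 90 */
        printf("offset = %d\n", (int)temp_calib_to_offset(calib)); /* 300 - 90/9 = 290 */
        return 0;
}
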
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e2f8615..33a5866 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = (u8)xtal_calib[0];
- cmd.cap_pin2 = (u8)xtal_calib[1];
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
(u8 *)&cmd, sizeof(cmd));
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index fe511cb..b93e491 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
};
/* mbps, mcs */
-const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
{ "2", "QPSK DSSS"},
{"5.5", "BPSK CCK"},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8377ef..1c9866d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(unsigned long long) pci_resource_len(pdev, 0));
IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a7bfae0..1ec8cb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -77,8 +77,7 @@
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
*
- * NOTE: Newer devices using one-time-programmable (OTP) memory
- * require device to be awake in order to read this memory
+ * NOTE: Device does need to be awake in order to read this memory
* via CSR_EEPROM and CSR_OTP registers
*/
#define CSR_BASE (0x000)
@@ -111,9 +110,8 @@
/*
* EEPROM and OTP (one-time-programmable) memory reads
*
- * NOTE: For (newer) devices using OTP, device must be awake, initialized via
- * apm_ops.init() in order to read. Older devices (3945/4965/5000)
- * use EEPROM and do not require this.
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
*/
#define CSR_EEPROM_REG (CSR_BASE+0x02c)
#define CSR_EEPROM_GP (CSR_BASE+0x030)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2673e9a..165d1f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1168,7 +1168,7 @@ struct iwl_priv {
u32 last_beacon_time;
u64 last_tsf;
- /* eeprom */
+ /* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
@@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
+static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
+{
+ __free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+
+static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+ free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3946e5c..4a30969 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
@@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
- *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
+ *eeprom_data = cpu_to_le16(r >> 16);
return 0;
}
@@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
*/
static bool iwl_is_otp_empty(struct iwl_priv *priv)
{
- u16 next_link_addr = 0, link_value;
+ u16 next_link_addr = 0;
+ __le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
@@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
static int iwl_find_otp_image(struct iwl_priv *priv,
u16 *validblockaddr)
{
- u16 next_link_addr = 0, link_value = 0, valid_addr;
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* check for more block on the link list
*/
valid_addr = next_link_addr;
- next_link_addr = link_value * sizeof(u16);
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
int iwl_eeprom_init(struct iwl_priv *priv)
{
- u16 *e;
+ __le16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
int sz;
int ret;
@@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = -ENOMEM;
goto alloc_err;
}
- e = (u16 *)priv->eeprom;
+ e = (__le16 *)priv->eeprom;
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- /* OTP reads require powered-up chip */
- priv->cfg->ops->lib->apm_ops.init(priv);
- }
+ priv->cfg->ops->lib->apm_ops.init(priv);
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
@@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
for (addr = validblockaddr; addr < validblockaddr + sz;
addr += sizeof(u16)) {
- u16 eeprom_data;
+ __le16 eeprom_data;
ret = iwl_read_otp_word(priv, addr, &eeprom_data);
if (ret)
@@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[cache_addr / 2] = eeprom_data;
cache_addr += sizeof(u16);
}
-
- /*
- * Now that OTP reads are complete, reset chip to save
- * power until we load uCode during "up".
- */
- priv->cfg->ops->lib->apm_ops.stop(priv);
-
} else {
/* eeprom is an array of 16bit values */
for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
goto done;
}
r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
+ e[addr / 2] = cpu_to_le16(r >> 16);
}
}
ret = 0;
@@ -603,6 +595,8 @@ done:
err:
if (ret)
iwl_eeprom_free(priv);
+ /* Reset chip to save power until we load uCode during "up". */
+ priv->cfg->ops->lib->apm_ops.stop(priv);
alloc_err:
return ret;
}
@@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
ch_info->ht40_eeprom = *eeprom_ch;
ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
ch_info->ht40_flags = eeprom_ch->flags;
- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
return 0;
}
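
The OTP reading path above now keeps everything that comes from the card — the cached EEPROM image and the intermediate link words — in little-endian form, converting with le16_to_cpu() only where a value is used as a CPU-side quantity (for example, turning a link word into the next byte address). The standalone sketch below shows that traversal pattern over a fake OTP image; the block layout and the zero-link termination rule are simplified assumptions for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One OTP "block": its first 16-bit word is a little-endian link giving the
 * word index of the next block; a link of 0 terminates the list here. */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

static int count_otp_blocks(const uint8_t *otp, size_t otp_len)
{
        uint16_t next = 0;      /* byte address of the current link word */
        int blocks = 0;

        for (;;) {
                uint16_t link;

                if ((size_t)next + 2 > otp_len)
                        return -1;                      /* corrupt image */
                link = get_le16(otp + next);            /* convert only when used */
                if (!link)
                        return blocks;                  /* end of list */
                next = link * sizeof(uint16_t);         /* link counts 16-bit words */
                blocks++;
        }
}

int main(void)
{
        /* link chain: word 0 -> word 3 -> end */
        uint8_t otp[] = { 0x03, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x00, 0x00 };
        printf("blocks: %d\n", count_otp_blocks(otp, sizeof(otp)));
        return 0;
}
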
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 5cd2b66..0cd9c02 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
*
*/
struct iwl_eeprom_enhanced_txpwr {
- u16 common;
+ __le16 common;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
@@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
struct iwl_eeprom_calib_info {
u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
u8 saturation_power52; /* half-dBm */
- s16 voltage; /* signed */
+ __le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a231659..30e9ea6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -234,7 +234,7 @@ cancel:
}
fail:
if (cmd->reply_page) {
- free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}
out:
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6090bc1..6f36b6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
@@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
@@ -1105,11 +1060,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (iwl_is_associated(priv) &&
!test_bit(STATUS_SCANNING, &priv->status)) {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
- rx_status.noise);
} else {
rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
}
/* Reset beacon noise level if not associated. */
@@ -1122,8 +1074,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+ rx_status.signal, rx_status.noise,
(unsigned long long)rx_status.mactime);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a2b2b83..fa1c89b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
clear_bit(STATUS_SCAN_HW, &priv->status);
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cd6a690..cde09a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
break;
}
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
break;
}
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 00da5e1..87ce2bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
int txq_id;
/* Tx queues */
- if (priv->txq)
+ if (priv->txq) {
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
+ }
iwl_free_dma_ptr(priv, &priv->kw);
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 2a28a1f..f8e4e4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
+ if ((iwl_queue_space(q) < q->high_mark))
+ goto drop;
+
spin_lock_irqsave(&priv->lock, flags);
idx = get_cmd_index(q, q->write_ptr, 0);
@@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
@@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/**
* iwl3945_rx_handle - Main entry function for receiving responses from uCode
*
@@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->retry_rate = 1;
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+ if (!priv->cfg->broken_powersave)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
/***********************
* 4. Read EEPROM
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 5a26bb0..8428111 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
- struct list_head rx_packets[IWM_RX_ID_HASH];
+ struct list_head rx_packets[IWM_RX_ID_HASH + 1];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
int iwm_down(struct iwm_priv *iwm);
/* TX API */
-u16 iwm_tid_to_queue(u16 tid);
+int iwm_tid_to_queue(u16 tid);
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
void iwm_tx_worker(struct work_struct *work);
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index e4f0f87..c4c0d23 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
*/
static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-u16 iwm_tid_to_queue(u16 tid)
+int iwm_tid_to_queue(u16 tid)
{
if (tid > IWM_UMAC_TID_NR - 2)
return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 1c57c1f..6d6ed74 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
if (!stop) {
struct iwm_tx_queue *txq;
- u16 queue = iwm_tid_to_queue(bit);
+ int queue = iwm_tid_to_queue(bit);
if (queue < 0)
continue;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 2f91c9b..92b7a35 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
@@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
- sizeof(priv->dev->dev_addr));
+ memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index c6a6c04..b0b1c78 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
chan_count = lbs_scan_create_channel_list(priv, chan_list);
netif_stop_queue(priv->dev);
- netif_carrier_off(priv->dev);
- if (priv->mesh_dev) {
+ if (priv->mesh_dev)
netif_stop_queue(priv->mesh_dev);
- netif_carrier_off(priv->mesh_dev);
- }
/* Prepare to continue an interrupted scan */
lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +632,13 @@ out2:
priv->scan_channel = 0;
out:
- if (priv->connect_status == LBS_CONNECTED) {
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
- }
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
+ if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+ !priv->tx_pending_len)
+ netif_wake_queue(priv->mesh_dev);
+
kfree(chan_list);
lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index a8eb9e1..4b1aab5 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
if (priv->connect_status == LBS_CONNECTED) {
memcpy(extra, priv->curbssparams.ssid,
priv->curbssparams.ssid_len);
- extra[priv->curbssparams.ssid_len] = '\0';
} else {
memset(extra, 0, 32);
- extra[priv->curbssparams.ssid_len] = '\0';
}
/*
* If none, we may want to get the one that was set
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 019431d..26a1abd 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
stats.noise = prxpd->nf;
- stats.qual = prxpd->snr - prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 7698fdd..31ca241 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -23,7 +23,7 @@
#define MAX_RID_LEN 1024
/* Helper routine to record keys
- * Do not call from interrupt context */
+ * It is called under orinoco_lock so it may not sleep */
static int orinoco_set_key(struct orinoco_private *priv, int index,
enum orinoco_alg alg, const u8 *key, int key_len,
const u8 *seq, int seq_len)
@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
kzfree(priv->keys[index].seq);
if (key_len) {
- priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
+ priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
if (!priv->keys[index].key)
goto nomem;
} else
priv->keys[index].key = NULL;
if (seq_len) {
- priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
+ priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
if (!priv->keys[index].seq)
goto free_key;
} else
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c5fe867..1a7eae3 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1323,7 +1323,7 @@
#define PAIRWISE_KEY_ENTRY(__idx) \
( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define MAC_IVEIV_ENTRY(__idx) \
- ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
+ ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index eb1e1d0..27bf887 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
#include <linux/module.h>
#include "rt2x00.h"
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
#include "rt2x00usb.h"
#endif
#include "rt2800lib.h"
@@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_intf_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u16 eeprom;
/*
+ * Disable powersaving as default on PCI devices.
+ */
+ if (rt2x00_intf_is_pci(rt2x00dev))
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_RX_STBC |
- IEEE80211_HT_CAP_PSMP_SUPPORT;
+ IEEE80211_HT_CAP_RX_STBC;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
- memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
- memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
+ memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+ memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
}
static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
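
The preprocessor change above matters because CONFIG_RT2800USB is only defined when the USB driver is built in (=y); a modular build (=m) defines CONFIG_RT2800USB_MODULE instead, so a plain #ifdef silently drops the USB-specific code for module builds. The tiny sketch below demonstrates the same two-symbol guard with a made-up CONFIG_EXAMPLE option; later kernels added the IS_ENABLED() macro to express exactly this test.

#include <stdio.h>

/* Build with -DCONFIG_EXAMPLE to simulate "=y", -DCONFIG_EXAMPLE_MODULE to
 * simulate "=m", or neither to simulate the option being disabled. */
#if defined(CONFIG_EXAMPLE) || defined(CONFIG_EXAMPLE_MODULE)
#define EXAMPLE_ENABLED 1
#else
#define EXAMPLE_ENABLED 0
#endif

int main(void)
{
        printf("example support compiled in: %d\n", EXAMPLE_ENABLED);
        return 0;
}
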
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index af85d18..ab95346 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 687e17d..0ca5893 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index a1a3dd1..8a40a14 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
/* TODO: improve signal/rssi reporting */
- rx_status.qual = flags2 & 0xFF;
rx_status.signal = (flags2 >> 8) & 0x7F;
/* XXX: is this correct? */
rx_status.rate_idx = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2e733e7..28a8086 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
}
}
- if (loop >= INIT_LOOP) {
+ if (loop > INIT_LOOP) {
wl1251_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 886a9bc..c3385b3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -777,7 +777,7 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(char *rates, u8 band)
+static int wl1271_build_basic_rates(u8 *rates, u8 band)
{
u8 index = 0;
@@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
return index;
}
-static int wl1271_build_extended_rates(char *rates, u8 band)
+static int wl1271_build_extended_rates(u8 *rates, u8 band)
{
u8 index = 0;
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index dfa1b9bc..7ca95c4 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
return r;
}
-static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- static const u16 constants[] = {
- 715, 655, 585, 540, 470, 410, 360, 315,
- 270, 235, 205, 175, 150, 125, 105, 85,
- 65, 50, 40, 25, 15
- };
-
- int i;
- u32 x;
-
- /* It seems that their quality parameter is somehow per signal
- * and is now transferred per bit.
- */
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_24M:
- size *= 2;
- break;
- case ZD_OFDM_RATE_9M:
- case ZD_OFDM_RATE_18M:
- case ZD_OFDM_RATE_36M:
- case ZD_OFDM_RATE_54M:
- size *= 4;
- size /= 3;
- break;
- case ZD_OFDM_RATE_48M:
- size *= 3;
- size /= 2;
- break;
- default:
- return -EINVAL;
- }
-
- x = (10000 * status_quality)/size;
- for (i = 0; i < ARRAY_SIZE(constants); i++) {
- if (x > constants[i])
- break;
- }
-
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_9M:
- i += 3;
- break;
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_18M:
- i += 5;
- break;
- case ZD_OFDM_RATE_24M:
- case ZD_OFDM_RATE_36M:
- i += 9;
- break;
- case ZD_OFDM_RATE_48M:
- case ZD_OFDM_RATE_54M:
- i += 15;
- break;
- default:
- return -EINVAL;
- }
-
- return i;
-}
-
-static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- int r;
-
- r = ofdm_qual_db(status_quality, zd_rate, size);
- ZD_ASSERT(r >= 0);
- if (r < 0)
- r = 0;
-
- r = (r * 100)/29;
- return r <= 100 ? r : 100;
-}
-
-static unsigned int log10times100(unsigned int x)
-{
- static const u8 log10[] = {
- 0,
- 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
- 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
- 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
- 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
- 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
- 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
- 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
- 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
- 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
- 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
- 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
- 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
- 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
- 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
- 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
- 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
- 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
- 223, 223, 223, 224, 224, 224, 224,
- };
-
- return x < ARRAY_SIZE(log10) ? log10[x] : 225;
-}
-
-enum {
- MAX_CCK_EVM_DB = 45,
-};
-
-static int cck_evm_db(u8 status_quality)
-{
- return (20 * log10times100(status_quality)) / 100;
-}
-
-static int cck_snr_db(u8 status_quality)
-{
- int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
- ZD_ASSERT(r >= 0);
- return r;
-}
-
-static int cck_qual_percent(u8 status_quality)
-{
- int r;
-
- r = cck_snr_db(status_quality);
- r = (100*r)/17;
- return r <= 100 ? r : 100;
-}
-
static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
{
return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
}
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status)
-{
- return (status->frame_status&ZD_RX_OFDM) ?
- ofdm_qual_percent(status->signal_quality_ofdm,
- zd_rate_from_ofdm_plcp_header(rx_frame),
- size) :
- cck_qual_percent(status->signal_quality_cck);
-}
-
/**
* zd_rx_rate - report zd-rate
* @rx_frame - received frame
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 9fd8f35..f8bbf7d 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
struct rx_status;
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status);
-
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
struct zd_mc_hash {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cf51e8f..8ebf5c3 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = status->signal_strength;
- stats.qual = zd_rx_qual_percent(buffer,
- length - sizeof(struct rx_status),
- status);
rate = zd_rx_rate(buffer, status);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index bd588eb..8e210cd7 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -121,7 +121,7 @@ struct controller {
#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
-/* AMD PCIX bridge registers */
+/* AMD PCI-X bridge registers */
#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C
#define PCIX_MISCII_OFFSET 0x48
#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e56f9be..4173125 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -305,7 +305,7 @@ struct device_domain_info {
int segment; /* PCI domain */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
- struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
return ret;
parent = parent->bus->self;
}
- if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
tmp->subordinate->number, 0,
@@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
parent->devfn);
parent = parent->bus->self;
}
- if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8b65a48..95b8491 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
bridge = pci_find_upstream_pcie_bridge(dev);
if (bridge) {
- if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
+ if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
(bridge->bus->number << 8) | dev->bus->number);
else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index cc617dd..7e28295 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
- struct pci_dev *bridge = bus->self;
- int ret;
-
- ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
- if (!ret || pci_is_pcie(bridge))
+ if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
return;
bus = bus->parent;
}
@@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
if (acpi_pci_can_wakeup(dev))
return acpi_pm_device_sleep_wake(&dev->dev, enable);
- if (!pci_is_pcie(dev))
- acpi_pci_propagate_wakeup_enable(dev->bus, enable);
-
+ acpi_pci_propagate_wakeup_enable(dev->bus, enable);
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0bc27e0..0906599 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1153,11 +1153,11 @@ pci_disable_device(struct pci_dev *dev)
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1168,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
/**
* pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
@@ -2296,6 +2296,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
down(&dev->dev.sem);
}
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2779,6 +2783,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
static int __init pci_setup(char *str)
{
while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 33ed8e0..fbd0e3a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
extern void pci_enable_acs(struct pci_dev *dev);
+struct pci_dev_reset_methods {
+ u16 vendor;
+ u16 device;
+ int (*reset)(struct pci_dev *dev, int probe);
+};
+
+extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
index b8c925c..9142949 100644
--- a/drivers/pci/pcie/aer/Kconfig.debug
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -3,14 +3,14 @@
#
config PCIEAER_INJECT
- tristate "PCIE AER error injector support"
+ tristate "PCIe AER error injector support"
depends on PCIEAER
default n
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
- Debuging PCIE AER code is quite difficult because it is hard
+ Debugging PCIe AER code is quite difficult because it is hard
to trigger various real hardware errors. Software based
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 7fcd5331..797d478 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -1,7 +1,7 @@
/*
- * PCIE AER software error injection support.
+ * PCIe AER software error injection support.
*
- * Debuging PCIE AER code is quite difficult because it is hard to
+ * Debugging PCIe AER code is quite difficult because it is hard to
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
@@ -484,5 +484,5 @@ static void __exit aer_inject_exit(void)
module_init(aer_inject_init);
module_exit(aer_inject_exit);
-MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 97a3459..21f215f 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
- /* Use PCIE bus function to store rpc into PCIE device */
+ /* Use PCIe bus function to store rpc into PCIe device */
set_service_data(dev, rpc);
return rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 8edb2f3..0481408 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -24,7 +24,7 @@
*
* @return: Zero on success. Nonzero otherwise.
*
- * Invoked when PCIE bus loads AER service driver. To avoid conflict with
+ * Invoked when PCIe bus loads AER service driver. To avoid conflict with
* BIOS AER support requires BIOS to yield AER control to OS native driver.
**/
int aer_osc_setup(struct pcie_device *pciedev)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ae672ca..c843a79 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev,
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
- * Invoked when PCIE bus loads AER service driver.
+ * Invoked when PCIe bus loads AER service driver.
*/
void aer_enable_rootport(struct aer_rpc *rpc)
{
@@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
u32 reg32;
pos = pci_pcie_cap(pdev);
- /* Clear PCIE Capability's Device Status */
+ /* Clear PCIe Capability's Device Status */
pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
@@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
* disable_root_aer - disable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
- * Invoked when PCIE bus unloads AER service driver.
+ * Invoked when PCIe bus unloads AER service driver.
*/
static void disable_root_aer(struct aer_rpc *rpc)
{
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 44acde7..9d3e4c8 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
if (info->status == 0) {
AER_PR(info, dev,
- "PCIE Bus Error: severity=%s, type=Unaccessible, "
+ "PCIe Bus Error: severity=%s, type=Unaccessible, "
"id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
} else {
@@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
agent = AER_GET_AGENT(info->severity, info->status);
AER_PR(info, dev,
- "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], id, aer_agent_string[agent]);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a01fc7..be53d98 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,6 +1,6 @@
/*
* File: drivers/pci/pcie/aspm.c
- * Enabling PCIE link L0s/L1 state and Clock Power Management
+ * Enabling PCIe link L0s/L1 state and Clock Power Management
*
* Copyright (C) 2007 Intel
* Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
/*
- * Some functions in a slot might not all be PCIE functions,
+ * Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
*/
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index a49452e..34d6517 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -24,7 +24,7 @@
*/
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "PCIE Port Bus Driver"
+#define DRIVER_DESC "PCIe Port Bus Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7cfa7c3..c746943 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2629,14 +2629,86 @@ static int __init pci_apply_final_quirks(void)
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
- pci_cache_line_size = cls;
+ pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
+
+/*
+ * The following are device-specific reset methods which can be used to
+ * reset a single function if other methods (e.g. FLR, PM D0->D3) are
+ * not available.
+ */
+static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
+{
+ int pos;
+
+ /* only implement PCI_CLASS_SERIAL_USB at present */
+ if (dev->class == PCI_CLASS_SERIAL_USB) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ if (!pos)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_write_config_byte(dev, pos + 0x4, 1);
+ msleep(100);
+
+ return 0;
+ } else {
+ return -ENOTTY;
+ }
+}
+
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+{
+ int pos;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+
+ return 0;
+}
+
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
+
+static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+ reset_intel_82599_sfp_virtfn },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ reset_intel_generic_dev },
+ { 0 }
+};
+
+int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+ const struct pci_dev_reset_methods *i;
+
+ for (i = pci_dev_reset_methods; i->reset; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID))
+ return i->reset(dev, probe);
+ }
+
+ return -ENOTTY;
+}
+
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
+int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
#endif
EXPORT_SYMBOL(pci_fixup_device);
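
pci_dev_specific_reset() introduced above is a table walk: each entry pairs a vendor/device match (where PCI_ANY_ID acts as a wildcard) with a reset callback, and the first matching entry wins, so specific devices must be listed before the catch-all vendor entry. The self-contained sketch below reproduces that lookup with stand-in IDs and stub callbacks; it is an illustration of the pattern, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffff

struct reset_method {
        uint16_t vendor, device;
        int (*reset)(uint16_t vendor, uint16_t device, int probe);
};

static int reset_specific(uint16_t v, uint16_t d, int probe)
{
        if (!probe)
                printf("device-specific reset for %04x:%04x\n", v, d);
        return 0;
}

static int reset_generic(uint16_t v, uint16_t d, int probe)
{
        if (!probe)
                printf("generic vendor reset for %04x:%04x\n", v, d);
        return 0;
}

/* More specific entries first; a {vendor, ANY_ID} entry acts as a fallback. */
static const struct reset_method methods[] = {
        { 0x8086, 0x10ed, reset_specific },
        { 0x8086, ANY_ID, reset_generic },
        { 0 }
};

static int dev_specific_reset(uint16_t vendor, uint16_t device, int probe)
{
        const struct reset_method *m;

        for (m = methods; m->reset; m++)
                if ((m->vendor == vendor || m->vendor == ANY_ID) &&
                    (m->device == device || m->device == ANY_ID))
                        return m->reset(vendor, device, probe);
        return -1;      /* stands in for -ENOTTY: no method for this device */
}

int main(void)
{
        dev_specific_reset(0x8086, 0x10ed, 0);  /* hits the specific entry */
        dev_specific_reset(0x8086, 0x1234, 0);  /* falls back to the generic one */
        return 0;
}
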
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 6dae871..4a471dc 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,9 +15,9 @@
DECLARE_RWSEM(pci_bus_sem);
/*
- * find the upstream PCIE-to-PCI bridge of a PCI device
+ * find the upstream PCIe-to-PCI bridge of a PCI device
* if the device is PCIE, return NULL
- * if the device isn't connected to a PCIE bridge (that is its parent is a
+ * if the device isn't connected to a PCIe bridge (that is its parent is a
* legacy PCI bridge and the bridge is directly connected to bus 0), return its
* parent
*/
@@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
tmp = pdev;
continue;
}
- /* PCI device should connect to a PCIE bridge */
+ /* PCI device should connect to a PCIe bridge */
if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
/* Busted hardware? */
WARN_ON_ONCE(1);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index cdf50f3..d99f846 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
-/* pcibios_fixup_bus(bus); */
+ pci_fixup_cardbus(bus);
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ec4faff..db32c25 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -231,8 +231,36 @@ config THINKPAD_ACPI
This driver was formerly known as ibm-acpi.
+ Extra functionality will be available if the rfkill (CONFIG_RFKILL)
+ and/or ALSA (CONFIG_SND) subsystems are available in the kernel.
+ Note that if you want ThinkPad-ACPI to be built-in instead of
+ modular, ALSA and rfkill will also have to be built-in.
+
If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
+config THINKPAD_ACPI_ALSA_SUPPORT
+ bool "Console audio control ALSA interface"
+ depends on THINKPAD_ACPI
+ depends on SND
+ depends on SND = y || THINKPAD_ACPI = SND
+ default y
+ ---help---
+ Enables monitoring of the built-in console audio output control
+ (headphone and speakers), which is operated by the mute and (in
+ some ThinkPad models) volume hotkeys.
+
+ If this option is enabled, ThinkPad-ACPI will export an ALSA card
+ with a single read-only mixer control, which should be used for
+ on-screen-display feedback purposes by the Desktop Environment.
+
+ Optionally, the driver will also allow software control (the
+ ALSA mixer will be made read-write). Please refer to the driver
+ documentation for details.
+
+ All IBM models have both volume and mute control. Newer Lenovo
+ models only have mute control (the volume hotkeys are just normal
+ keys and volume control is done through the main HDA mixer).
+
config THINKPAD_ACPI_DEBUGFACILITIES
bool "Maintainer debug facilities"
depends on THINKPAD_ACPI
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 916ccb2..1b1dddb 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -202,8 +202,13 @@ static void dell_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
+ acpi_status status;
- wmi_get_event_data(value, &response);
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+ return;
+ }
obj = (union acpi_object *)response.pointer;
@@ -323,8 +328,9 @@ static int __init dell_wmi_input_setup(void)
static int __init dell_wmi_init(void)
{
int err;
+ acpi_status status;
- if (wmi_has_guid(DELL_EVENT_GUID)) {
+ if (!wmi_has_guid(DELL_EVENT_GUID)) {
printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
return -ENODEV;
}
@@ -336,14 +342,14 @@ static int __init dell_wmi_init(void)
if (err)
return err;
- err = wmi_install_notify_handler(DELL_EVENT_GUID,
+ status = wmi_install_notify_handler(DELL_EVENT_GUID,
dell_wmi_notify, NULL);
- if (err) {
+ if (ACPI_FAILURE(status)) {
input_unregister_device(dell_wmi_input_dev);
printk(KERN_ERR
"dell-wmi: Unable to register notify handler - %d\n",
- err);
- return err;
+ status);
+ return -ENODEV;
}
return 0;
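The hp-wmi and msi-wmi hunks below apply the same two fixes, so the converged pattern is worth spelling out once: wmi_get_event_data() and wmi_install_notify_handler() return an acpi_status rather than a negative errno, so their results have to be tested with ACPI_FAILURE()/ACPI_SUCCESS() and translated to an errno before leaving module init. A condensed sketch with placeholder names (example_* and EXAMPLE_EVENT_GUID are not real symbols):

	static void example_wmi_notify(u32 value, void *context)
	{
		struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		status = wmi_get_event_data(value, &response);
		if (ACPI_FAILURE(status))
			return;				/* nothing to parse */

		obj = response.pointer;
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			printk(KERN_INFO "example-wmi: event 0x%llx\n",
			       (unsigned long long)obj->integer.value);
		kfree(obj);	/* response used ACPI_ALLOCATE_BUFFER */
	}

	static int __init example_wmi_init(void)
	{
		acpi_status status;

		if (!wmi_has_guid(EXAMPLE_EVENT_GUID))
			return -ENODEV;

		status = wmi_install_notify_handler(EXAMPLE_EVENT_GUID,
						    example_wmi_notify, NULL);
		return ACPI_FAILURE(status) ? -ENODEV : 0;
	}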
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8781d8fa..5b648f0 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -338,8 +338,13 @@ static void hp_wmi_notify(u32 value, void *context)
static struct key_entry *key;
union acpi_object *obj;
int eventcode;
+ acpi_status status;
- wmi_get_event_data(value, &response);
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
+ return;
+ }
obj = (union acpi_object *)response.pointer;
@@ -581,7 +586,7 @@ static int __init hp_wmi_init(void)
if (wmi_has_guid(HPWMI_EVENT_GUID)) {
err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
hp_wmi_notify, NULL);
- if (!err)
+ if (ACPI_SUCCESS(err))
hp_wmi_input_setup();
}
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 7f77f90..f5f70d4 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -149,8 +149,13 @@ static void msi_wmi_notify(u32 value, void *context)
static struct key_entry *key;
union acpi_object *obj;
ktime_t cur;
+ acpi_status status;
- wmi_get_event_data(value, &response);
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
+ return;
+ }
obj = (union acpi_object *)response.pointer;
@@ -236,7 +241,7 @@ static int __init msi_wmi_init(void)
}
err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
msi_wmi_notify, NULL);
- if (err)
+ if (ACPI_FAILURE(err))
return -EINVAL;
err = msi_wmi_input_setup();
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 448c8ae..e67e4fe 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6384,11 +6384,13 @@ static struct ibm_struct brightness_driver_data = {
* and we leave them unchanged.
*/
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
+
#define TPACPI_ALSA_DRVNAME "ThinkPad EC"
#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
-static int alsa_index = SNDRV_DEFAULT_IDX1;
+static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
static char *alsa_id = "ThinkPadEC";
static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
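The new alsa_index initializer is a slot bitmask, not a card number: snd_card_create() of this era treats a negative idx argument as a mask of acceptable card slots, which is what the "last three slots" comment refers to. Worked out under the usual assumption that SNDRV_CARDS is 32 (the constant is configuration dependent, so treat the numbers as an illustration only):

	/*
	 * 1 << (SNDRV_CARDS - 3)           == 0x20000000
	 * (1 << (SNDRV_CARDS - 3)) - 1     == 0x1fffffff   (slots 0..28)
	 * ~((1 << (SNDRV_CARDS - 3)) - 1)  == 0xe0000000   (slots 29, 30, 31)
	 *
	 * so the EC mixer can only claim one of the last three indices and
	 * never competes with the primary sound card for index 0.
	 */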
@@ -6705,10 +6707,11 @@ static int __init volume_create_alsa_mixer(void)
rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
sizeof(struct tpacpi_alsa_data), &card);
- if (rc < 0)
- return rc;
- if (!card)
- return -ENOMEM;
+ if (rc < 0 || !card) {
+ printk(TPACPI_ERR
+ "Failed to create ALSA card structures: %d\n", rc);
+ return 1;
+ }
BUG_ON(!card->private_data);
data = card->private_data;
@@ -6741,8 +6744,9 @@ static int __init volume_create_alsa_mixer(void)
rc = snd_ctl_add(card, ctl_vol);
if (rc < 0) {
printk(TPACPI_ERR
- "Failed to create ALSA volume control\n");
- goto err_out;
+ "Failed to create ALSA volume control: %d\n",
+ rc);
+ goto err_exit;
}
data->ctl_vol_id = &ctl_vol->id;
}
@@ -6750,22 +6754,25 @@ static int __init volume_create_alsa_mixer(void)
ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
rc = snd_ctl_add(card, ctl_mute);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to create ALSA mute control\n");
- goto err_out;
+ printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
+ rc);
+ goto err_exit;
}
data->ctl_mute_id = &ctl_mute->id;
snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
-
-err_out:
if (rc < 0) {
- snd_card_free(card);
- card = NULL;
+ printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
+ goto err_exit;
}
alsa_card = card;
- return rc;
+ return 0;
+
+err_exit:
+ snd_card_free(card);
+ return 1;
}
#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
@@ -7016,6 +7023,28 @@ static struct ibm_struct volume_driver_data = {
.shutdown = volume_shutdown,
};
+#else /* !CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
+#define alsa_card NULL
+
+static void inline volume_alsa_notify_change(void)
+{
+}
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ printk(TPACPI_INFO
+ "volume: disabled as there is no ALSA support in this kernel\n");
+
+ return 1;
+}
+
+static struct ibm_struct volume_driver_data = {
+ .name = "volume",
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
/*************************************************************************
* Fan subdriver
*/
@@ -8738,6 +8767,7 @@ MODULE_PARM_DESC(hotkey_report_mode,
"used for backwards compatibility with userspace, "
"see documentation");
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
module_param_named(volume_mode, volume_mode, uint, 0444);
MODULE_PARM_DESC(volume_mode,
"Selects volume control strategy: "
@@ -8760,6 +8790,7 @@ module_param_named(id, alsa_id, charp, 0444);
MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer");
module_param_named(enable, alsa_enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
#define TPACPI_PARAM(feature) \
module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 9f93d6c..b104302 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data)
if (!guid || !handler)
return AE_BAD_PARAMETER;
- find_guid(guid, &block);
- if (!block)
+ if (!find_guid(guid, &block))
return AE_NOT_EXIST;
if (block->handler)
@@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!guid)
return AE_BAD_PARAMETER;
- find_guid(guid, &block);
- if (!block)
+ if (!find_guid(guid, &block))
return AE_NOT_EXIST;
if (!block->handler)
@@ -716,6 +714,22 @@ static int wmi_class_init(void)
return ret;
}
+static bool guid_already_parsed(const char *guid_string)
+{
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+ if (strncmp(gblock->guid, guid_string, 16) == 0)
+ return true;
+ }
+ return false;
+}
+
/*
* Parse the _WDG method for the GUID data blocks
*/
@@ -725,6 +739,7 @@ static __init acpi_status parse_wdg(acpi_handle handle)
union acpi_object *obj;
struct guid_block *gblock;
struct wmi_block *wblock;
+ char guid_string[37];
acpi_status status;
u32 i, total;
@@ -747,6 +762,19 @@ static __init acpi_status parse_wdg(acpi_handle handle)
memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
for (i = 0; i < total; i++) {
+ /*
+ Some WMI devices, like those for nVidia hooks, have a
+ duplicate GUID. It's not clear what we should do in this
+ case yet, so for now, we'll just ignore the duplicate.
+ Anyone who wants to add support for that device can come
+ up with a better workaround for the mess then.
+ */
+ if (guid_already_parsed(gblock[i].guid) == true) {
+ wmi_gtoa(gblock[i].guid, guid_string);
+ printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n",
+ guid_string);
+ continue;
+ }
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
if (!wblock)
return AE_NO_MEMORY;
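For reference, the same duplicate check can be written with list_for_each_entry(), which folds the list_entry() step into the iterator; the 16-byte compare is over the binary GUID stored in struct guid_block, not a printable string (wmi_gtoa() is only used to format the log message). A sketch, not the code the patch adds:

	static bool guid_already_parsed(const char *guid_string)
	{
		struct wmi_block *wblock;

		list_for_each_entry(wblock, &wmi_blocks.list, list)
			if (memcmp(wblock->gblock.guid, guid_string, 16) == 0)
				return true;
		return false;
	}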
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index d033414..e1b700a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -10,5 +10,5 @@ obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
-qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff7748a..44f2f6a 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,6 +182,34 @@ struct scssc_area {
u32:32;
} __attribute__ ((packed));
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+};
+
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
@@ -269,6 +297,7 @@ struct qdio_irq {
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -286,9 +315,10 @@ struct qdio_irq {
struct ciw aqueue;
struct qdio_ssqd_desc ssqd_desc;
-
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+ struct qdio_dev_perf_stat perf_stat;
+ int perf_stat_enabled;
/*
* Warning: Leave these members together at the end so they won't be
* cleared in qdio_setup_irq.
@@ -311,6 +341,10 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
+#define qperf(qdev,attr) qdev->perf_stat.attr
+#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
+ q->irq_ptr->perf_stat.attr++
+
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
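qperf_inc() expands to a bare if-statement. Every call site in this patch uses it as a standalone statement followed by a semicolon, so that is harmless here, but the conventional way to make such a macro safe inside an if/else of its own would be a do { } while (0) wrapper, sketched below (not what the patch defines):

	#define qperf_inc(__q, __attr)					\
		do {							\
			if ((__q)->irq_ptr->perf_stat_enabled)		\
				(__q)->irq_ptr->perf_stat.__attr++;	\
		} while (0)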
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 7676997..f49761f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -55,13 +55,11 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
- seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d\n", q->first_to_check);
- seq_printf(m, "last_move: %d\n", q->last_move);
- seq_printf(m, "polling: %d\n", q->u.in.polling);
- seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
- seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
+ seq_printf(m, "DSCI: %d nr_used: %d\n",
+ *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
+ seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
@@ -110,7 +108,6 @@ static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
if (!q)
return 0;
-
if (q->is_input_q)
xchg(q->irq_ptr->dsci, 1);
local_bh_disable();
@@ -134,6 +131,98 @@ static const struct file_operations debugfs_fops = {
.release = single_release,
};
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
+ int i;
+
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_irq *irq_ptr = seq->private;
+ unsigned long val;
+ char buf[8];
+ int ret;
+
+ if (!irq_ptr)
+ return 0;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(&buf, ubuf, count))
+ return -EFAULT;
+ buf[count] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
+ }
+ return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qperf_show,
+ filp->f_path.dentry->d_inode->i_private);
+}
+
+static struct file_operations debugfs_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = qperf_seq_open,
+ .read = seq_read,
+ .write = qperf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
char name[QDIO_DEBUGFS_NAME_LEN];
@@ -156,6 +245,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
debugfs_root);
if (IS_ERR(irq_ptr->debugfs_dev))
irq_ptr->debugfs_dev = NULL;
+
+ irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr,
+ &debugfs_perf_fops);
+ if (IS_ERR(irq_ptr->debugfs_perf))
+ irq_ptr->debugfs_perf = NULL;
+
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
@@ -171,6 +268,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_perf);
debugfs_remove(irq_ptr->debugfs_dev);
}
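qperf_show() above walks struct qdio_dev_perf_stat as a flat array of unsigned int and pairs each slot with qperf_names[], so the two definitions have to stay in the same order and at the same length. One way to turn that implicit contract into a compile-time check (a sketch, not part of the patch) would be to open qperf_show() with:

	BUILD_BUG_ON(ARRAY_SIZE(qperf_names) !=
		     sizeof(struct qdio_dev_perf_stat) / sizeof(unsigned int));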
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index b2275c5..999fe80 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -22,7 +22,6 @@
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
@@ -126,7 +125,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+ qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -139,7 +138,7 @@ again:
* buffers later.
*/
if ((ccq == 96) && (count != tmp_count)) {
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
+ qperf_inc(q, eqbs_partial);
return (count - tmp_count);
}
@@ -182,7 +181,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
return 0;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+ qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -191,7 +190,7 @@ again:
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+ qperf_inc(q, sqbs_partial);
goto again;
}
if (rc < 0) {
@@ -285,7 +284,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_sync);
+ qperf_inc(q, siga_sync);
cc = do_siga_sync(q->irq_ptr->schid, output, input);
if (cc)
@@ -350,7 +349,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_in);
+ qperf_inc(q, siga_read);
cc = do_siga_input(q->irq_ptr->schid, q->mask);
if (cc)
@@ -382,7 +381,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
return;
q->u.in.polling = 0;
- qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+ qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
@@ -400,7 +399,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
(q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
- qdio_perf_stat_inc(&perf_stats.outbound_target_full);
+ qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
return;
@@ -487,7 +486,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
if (atomic_sub(count, &q->nr_buf_used) == 0)
- qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
+ qperf_inc(q, inbound_queue_full);
break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
@@ -567,9 +566,10 @@ static void qdio_kick_handler(struct qdio_q *q)
count = sub_buf(end, start);
if (q->is_input_q) {
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
+ qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
} else
+ qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
@@ -583,24 +583,28 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+ qperf_inc(q, tasklet_inbound);
again:
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
+ qperf_inc(q, tasklet_inbound_resched);
goto again;
+ }
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
goto again;
+ }
}
void qdio_inbound_processing(unsigned long data)
@@ -688,7 +692,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_out);
+ qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
switch (cc) {
@@ -711,7 +715,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
static void __qdio_outbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+ qperf_inc(q, tasklet_outbound);
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
@@ -739,12 +743,9 @@ static void __qdio_outbound_processing(struct qdio_q *q)
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
- else {
- if (!timer_pending(&q->u.out.timer)) {
+ else
+ if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
- }
- }
return;
sched:
@@ -784,7 +785,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qperf_inc(q, tasklet_inbound);
qdio_sync_after_thinint(q);
/*
@@ -799,7 +800,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ qperf_inc(q, tasklet_inbound_resched);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
@@ -812,7 +813,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
@@ -851,8 +852,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- qdio_perf_stat_inc(&perf_stats.pci_int);
-
for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
@@ -923,8 +922,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- qdio_perf_stat_inc(&perf_stats.qdio_int);
-
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
@@ -1383,6 +1380,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
{
int used, diff;
+ qperf_inc(q, inbound_call);
+
if (!q->u.in.polling)
goto set;
@@ -1438,14 +1437,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
unsigned char state;
int used, rc = 0;
- qdio_perf_stat_inc(&perf_stats.outbound_handler);
+ qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
- if (callflags & QDIO_FLAG_PCI_OUT)
+ if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
+ qperf_inc(q, pci_request_int);
+ }
else
q->u.out.pci_out_enabled = 0;
@@ -1484,7 +1485,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (state != SLSB_CU_OUTPUT_PRIMED)
rc = qdio_kick_outbound_q(q);
else
- qdio_perf_stat_inc(&perf_stats.fast_requeue);
+ qperf_inc(q, fast_requeue);
out:
tasklet_schedule(&q->tasklet);
@@ -1540,16 +1541,11 @@ static int __init init_QDIO(void)
rc = qdio_debug_init();
if (rc)
goto out_ti;
- rc = qdio_setup_perf_stats();
- if (rc)
- goto out_debug;
rc = tiqdio_register_thinints();
if (rc)
- goto out_perf;
+ goto out_debug;
return 0;
-out_perf:
- qdio_remove_perf_stats();
out_debug:
qdio_debug_exit();
out_ti:
@@ -1563,7 +1559,6 @@ static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_remove_perf_stats();
qdio_debug_exit();
qdio_setup_exit();
}
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 54f7c32..0000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.c
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/ccwdev.h>
-
-#include "cio.h"
-#include "css.h"
-#include "device.h"
-#include "ioasm.h"
-#include "chsc.h"
-#include "qdio_debug.h"
-#include "qdio_perf.h"
-
-int qdio_performance_stats;
-struct qdio_perf_stats perf_stats;
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *qdio_perf_pde;
-#endif
-
-/*
- * procfs functions
- */
-static int qdio_perf_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.qdio_int));
- seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.pci_int));
- seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.thin_int));
- seq_printf(m, "\n");
- seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_inbound));
- seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_outbound));
- seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.tasklet_thinint),
- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
- seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound),
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
- seq_printf(m, "\n");
- seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_in));
- seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_out));
- seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_sync));
- seq_printf(m, "\n");
- seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.inbound_handler));
- seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_handler));
- seq_printf(m, "\n");
- seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
- (long)atomic_long_read(&perf_stats.fast_requeue));
- seq_printf(m, "Number of outbound target full condition\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_target_full));
- seq_printf(m, "Number of inbound queue full condition\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.inbound_queue_full));
- seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
- seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_stop_polling));
- seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
- seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
- seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
- seq_printf(m, "\n");
- return 0;
-}
-static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, qdio_perf_proc_show, NULL);
-}
-
-static const struct file_operations qdio_perf_proc_fops = {
- .owner = THIS_MODULE,
- .open = qdio_perf_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*
- * sysfs functions
- */
-static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
-}
-
-static ssize_t qdio_perf_stats_store(struct bus_type *bus,
- const char *buf, size_t count)
-{
- unsigned long i;
-
- if (strict_strtoul(buf, 16, &i) != 0)
- return -EINVAL;
- if ((i != 0) && (i != 1))
- return -EINVAL;
- if (i == qdio_performance_stats)
- return count;
-
- qdio_performance_stats = i;
- /* reset performance statistics */
- if (i == 0)
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- return count;
-}
-
-static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
- qdio_perf_stats_store);
-
-int __init qdio_setup_perf_stats(void)
-{
- int rc;
-
- rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
- if (rc)
- return rc;
-
-#ifdef CONFIG_PROC_FS
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
- NULL, &qdio_perf_proc_fops);
-#endif
- return 0;
-}
-
-void qdio_remove_perf_stats(void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("qdio_perf", NULL);
-#endif
- bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index 1245423..0000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.h
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#ifndef QDIO_PERF_H
-#define QDIO_PERF_H
-
-#include <linux/types.h>
-#include <asm/atomic.h>
-
-struct qdio_perf_stats {
- /* interrupt handler calls */
- atomic_long_t qdio_int;
- atomic_long_t pci_int;
- atomic_long_t thin_int;
-
- /* tasklet runs */
- atomic_long_t tasklet_inbound;
- atomic_long_t tasklet_outbound;
- atomic_long_t tasklet_thinint;
- atomic_long_t tasklet_thinint_loop;
- atomic_long_t thinint_inbound;
- atomic_long_t thinint_inbound_loop;
- atomic_long_t thinint_inbound_loop2;
-
- /* signal adapter calls */
- atomic_long_t siga_out;
- atomic_long_t siga_in;
- atomic_long_t siga_sync;
-
- /* misc */
- atomic_long_t inbound_handler;
- atomic_long_t outbound_handler;
- atomic_long_t fast_requeue;
- atomic_long_t outbound_target_full;
- atomic_long_t inbound_queue_full;
-
- /* for debugging */
- atomic_long_t debug_tl_out_timer;
- atomic_long_t debug_stop_polling;
- atomic_long_t debug_eqbs_all;
- atomic_long_t debug_eqbs_incomplete;
- atomic_long_t debug_sqbs_all;
- atomic_long_t debug_sqbs_incomplete;
-};
-
-extern struct qdio_perf_stats perf_stats;
-extern int qdio_performance_stats;
-
-static inline void qdio_perf_stat_inc(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_inc(count);
-}
-
-int qdio_setup_perf_stats(void);
-void qdio_remove_perf_stats(void);
-
-#endif
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 981a77e..091d904 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,9 +1,7 @@
/*
* linux/drivers/s390/cio/thinint_qdio.c
*
- * thin interrupt support for qdio
- *
- * Copyright 2000-2008 IBM Corp.
+ * Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
@@ -19,7 +17,6 @@
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels would have its own indicator,
@@ -132,8 +129,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
struct qdio_q *q;
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
* avoidance (SVS clears adapter interrupt suppression overwrite)
@@ -154,6 +149,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
+ qperf_inc(q, adapter_int);
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr))
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 7c815d3..28d86f9 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -522,6 +522,40 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
set_fan_speeds(fp);
}
+static void destroy_one_temp(struct bbc_cpu_temperature *tp)
+{
+ bbc_i2c_detach(tp->client);
+ kfree(tp);
+}
+
+static void destroy_all_temps(struct bbc_i2c_bus *bp)
+{
+ struct bbc_cpu_temperature *tp, *tpos;
+
+ list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
+ list_del(&tp->bp_list);
+ list_del(&tp->glob_list);
+ destroy_one_temp(tp);
+ }
+}
+
+static void destroy_one_fan(struct bbc_fan_control *fp)
+{
+ bbc_i2c_detach(fp->client);
+ kfree(fp);
+}
+
+static void destroy_all_fans(struct bbc_i2c_bus *bp)
+{
+ struct bbc_fan_control *fp, *fpos;
+
+ list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
+ list_del(&fp->bp_list);
+ list_del(&fp->glob_list);
+ destroy_one_fan(fp);
+ }
+}
+
int bbc_envctrl_init(struct bbc_i2c_bus *bp)
{
struct of_device *op;
@@ -541,6 +575,8 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
int err = PTR_ERR(kenvctrld_task);
kenvctrld_task = NULL;
+ destroy_all_temps(bp);
+ destroy_all_fans(bp);
return err;
}
}
@@ -548,35 +584,11 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
return 0;
}
-static void destroy_one_temp(struct bbc_cpu_temperature *tp)
-{
- bbc_i2c_detach(tp->client);
- kfree(tp);
-}
-
-static void destroy_one_fan(struct bbc_fan_control *fp)
-{
- bbc_i2c_detach(fp->client);
- kfree(fp);
-}
-
void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
{
- struct bbc_cpu_temperature *tp, *tpos;
- struct bbc_fan_control *fp, *fpos;
-
if (kenvctrld_task)
kthread_stop(kenvctrld_task);
- list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
- list_del(&tp->bp_list);
- list_del(&tp->glob_list);
- destroy_one_temp(tp);
- }
-
- list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
- list_del(&fp->bp_list);
- list_del(&fp->glob_list);
- destroy_one_fan(fp);
- }
+ destroy_all_temps(bp);
+ destroy_all_fans(bp);
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 26ffdcd..15a00e8 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
static int is_cxgb3_dev(struct net_device *dev)
{
struct cxgb3i_sdev_data *cdata;
+ struct net_device *ndev = dev;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(dev);
write_lock(&cdata_rwlock);
list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev)
int i;
for (i = 0; i < ports->nports; i++)
- if (dev == ports->lldevs[i]) {
+ if (ndev == ports->lldevs[i]) {
write_unlock(&cdata_rwlock);
return 1;
}
@@ -1566,6 +1570,26 @@ out_err:
return -EINVAL;
}
+/**
+ * cxgb3i_find_dev - find the interface associated with the given address
+ * @ipaddr: ip address
+ */
+static struct net_device *
+cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
+{
+ struct flowi fl;
+ int err;
+ struct rtable *rt;
+
+ memset(&fl, 0, sizeof(fl));
+ fl.nl_u.ip4_u.daddr = ipaddr;
+
+ err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+ if (!err)
+ return (&rt->u.dst)->dev;
+
+ return NULL;
+}
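cxgb3i_find_dev() above returns the egress device straight out of the routing entry. A slightly fuller sketch of the same lookup on this kernel's routing API, with the route reference dropped once the device pointer has been read (the example_ name and the struct net parameter are illustrative, not the driver's code); a caller keeping the device around for longer would additionally need to hold its own reference on it:

	static struct net_device *example_find_egress_dev(struct net *net,
							  __be32 daddr)
	{
		struct net_device *dev = NULL;
		struct rtable *rt;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.nl_u.ip4_u.daddr = daddr;

		if (ip_route_output_key(net, &rt, &fl) == 0) {
			dev = rt->u.dst.dev;
			ip_rt_put(rt);	/* release the route, keep the dev pointer */
		}
		return dev;
	}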
/**
* cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
struct cxgb3i_sdev_data *cdata;
struct t3cdev *cdev;
__be32 sipv4;
+ struct net_device *dstdev;
int err;
c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
c3cn->daddr.sin_port = usin->sin_port;
c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
+ dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
+ if (!dstdev || !is_cxgb3_dev(dstdev))
+ return -ENETUNREACH;
+
+ if (dstdev->priv_flags & IFF_802_1Q_VLAN)
+ dev = dstdev;
+
rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
c3cn->daddr.sin_addr.s_addr,
c3cn->saddr.sin_port,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ce52270..2cc3968 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
if (vport->fc_rscn_flush) {
/* Another thread is walking fc_rscn_id_list on this vport */
- spin_unlock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0;
@@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_initial_fdisc(vport);
break;
}
-
} else {
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
if (vport == phba->pport)
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3b94244..2445e39 100755
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba)
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
+
+ /* Block all SCSI stack I/Os */
+ lpfc_scsi_dev_block(phba);
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
if (phba->link_state > LPFC_LINK_DOWN) {
@@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* to book keeping the FCFIs can be used.
*/
if (shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2521 READ_FCF_RECORD mailbox failed "
- "with status x%x add_status x%x, mbx\n",
- shdr_status, shdr_add_status);
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2726 READ_FCF_RECORD Indicates empty "
+ "FCF table.\n");
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2521 READ_FCF_RECORD mailbox failed "
+ "with status x%x add_status x%x, mbx\n",
+ shdr_status, shdr_add_status);
+ }
goto out;
}
/* Interpreting the returned information of FCF records */
@@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
+ spin_lock_irq(&phba->hbalock);
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(&phba->hbalock);
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vport);
@@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxStatus);
break;
}
+ spin_lock_irq(&phba->hbalock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
/*
@@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_mbx_unreg_vpi(vports[i]);
+ spin_lock_irq(&phba->hbalock);
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_destroy_vport_work_array(phba, vports);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1585148..8a2a1c5 100644..100755
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy {
};
#define LPFC_HDR_BUF_SIZE 128
-#define LPFC_DATA_BUF_SIZE 4096
+#define LPFC_DATA_BUF_SIZE 2048
struct rq_context {
uint32_t word0;
#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg {
#define STATUS_ERROR_ACITMAIN 0x2a
#define STATUS_REBOOT_REQUIRED 0x2c
#define STATUS_FCF_IN_USE 0x3a
+#define STATUS_FCF_TABLE_EMPTY 0x43
struct lpfc_mbx_sli4_config {
struct mbox_header header;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d4da6bd..b8eb1b6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
+ uint32_t link_state;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
@@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
break;
/*
* Currently, the driver supports only one FCF - so treat this as
- * a link down.
+ * a link down, but save the link state first so that it is not
+ * forced to Link Down unless it was already down.
*/
+ link_state = phba->link_state;
lpfc_linkdown(phba);
+ phba->link_state = link_state;
/* Unregister FCF if no devices connected to it */
lpfc_unregister_unused_fcf(phba);
break;
@@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
+ /* Default to single FCP EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ phba->cfg_fcp_eq_count = 1;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7935667..589549b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
- .entry_count = 200,
+ .entry_count = 256,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
@@ -1482,8 +1482,11 @@ err:
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->add_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count);
}
/**
@@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->init_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->entry_count);
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count);
}
/**
@@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc) {
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
return -EIO;
}
@@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.un.ulpWord[3]);
wqe->generic.word3 = 0;
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
- bf_set(wqe_xc, &wqe->generic, 1);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
return dmabuf;
}
temp_hdr = seq_dmabuf->hbuf.virt;
- if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_del_init(&seq_dmabuf->hbuf.list);
list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
@@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
+ if (list_empty(&seq_dmabuf->dbuf.list)) {
+ temp_hdr = dmabuf->hbuf.virt;
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
/* find the correct place in the sequence to insert this frame */
list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
* If the frame's sequence count is greater than the frame on
* the list then insert the frame right after this frame
*/
- if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* If there is a hole in the sequence count then fail. */
- if (++seq_count != hdr->fh_seq_cnt)
+ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
@@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length,
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
@@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
return;
}
/* If not last frame in sequence continue processing frames. */
- if (!lpfc_seq_complete(seq_dmabuf)) {
- /*
- * When saving off frames post a new one and mark this
- * frame to be freed when it is finished.
- **/
- lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
- dmabuf->tag = -1;
+ if (!lpfc_seq_complete(seq_dmabuf))
return;
- }
+
/* Send the complete sequence to the upper layer protocol */
lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
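The three be16_to_cpu() conversions added above matter because fh_seq_cnt is carried big-endian on the wire; on a little-endian host a raw compare of the __be16 values misorders neighbouring counts. A two-line illustration (not driver code):

	__be16 a = cpu_to_be16(256);	/* on-wire bytes 0x01 0x00, raw LE value 0x0001 */
	__be16 b = cpu_to_be16(255);	/* on-wire bytes 0x00 0xff, raw LE value 0xff00 */
	/* raw compare: (u16)a < (u16)b, i.e. 256 sorts below 255 (wrong);
	 * be16_to_cpu(a) > be16_to_cpu(b) gives the intended ordering.   */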
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 25d66d0..44e5f57 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -28,7 +28,7 @@
/* Multi-queue arrangement for fast-path FCP work queues */
#define LPFC_FN_EQN_MAX 8
#define LPFC_SP_EQN_DEF 1
-#define LPFC_FP_EQN_DEF 1
+#define LPFC_FP_EQN_DEF 4
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7f3aed..792f722 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.6"
+#define LPFC_DRIVER_VERSION "8.3.7"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 7d6dd83..e3c7fa6 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
+ spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
spin_unlock_irq(&phba->ndlp_lock);
}
- if (vport->vpi_state != LPFC_VPI_REGISTERED)
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e7d2688..b6f1ef9 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
sense_copied = 1;
}
- if (RES_IS_GSCSI(res->cfg_entry)) {
+ if (RES_IS_GSCSI(res->cfg_entry))
pmcraid_cancel_all(cmd, sense_copied);
- } else if (sense_copied) {
+ else if (sense_copied)
pmcraid_erp_done(cmd);
- return 0;
- } else {
+ else
pmcraid_request_sense(cmd);
- }
return 1;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 21e2bc4..3a9f5b2 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
if (off)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
return -EINVAL;
if (start > ha->optrom_size)
@@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
uint8_t *tmp_data;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
- rval = qla2x00_get_firmware_state(vha, state);
+ if (!vha->hw->flags.eeh_busy)
+ rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
@@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
- else
- qla2x00_abort_fcport_cmds(fcport);
+ return;
+ }
/*
* Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (!fcport)
return;
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
return;
@@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd7..d6d9c86 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
-/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
+/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/*
* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
#else
#define DEBUG16(x) do {} while (0)
#endif
+
+#if defined(QL_DEBUG_LEVEL_17)
+#define DEBUG17(x) do {x;} while (0)
+#else
+#define DEBUG17(x) do {} while (0)
+#endif
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 384afda..608e675 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2256,11 +2256,13 @@ struct qla_hw_data {
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
uint32_t npiv_supported :1;
+ uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
+ uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b6801f..f61fb8d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_isr.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 73a7935..b4a0eac 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
uint32_t cnt;
uint16_t cmd;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return;
+
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -786,6 +791,12 @@ void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
+ if (pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure) {
+ return;
+ }
+
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
@@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
+ qla2x00_get_data_rate(vha);
+
/* Determine what we need to do */
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 0;
+ return status;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
int ret, retries;
struct qla_hw_data *ha = vha->hw;
+ if (ha->flags.pci_channel_io_perm_failure)
+ return;
if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1692a88..ffd0efd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id)
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_WORD(&reg->hccr);
@@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
@@ -1992,7 +1995,7 @@ qla24xx_msix_default(int irq, void *dev_id)
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d..056e4d4 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
+ if (ha->flags.pci_channel_io_perm_failure) {
+ DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
+ "Exiting.\n", __func__, vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
- if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
- !ha->flags.mbox_int)
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "Waited %d sec\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
}
/* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (rval == QLA_FUNCTION_TIMEOUT &&
mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
- if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
DEBUG(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
base_vha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occurred. Scheduling ISP "
- "abort.\n");
+ "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
return rval;
}
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_DATA_RATE;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
+ __func__, vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+ }
+
+ return rval;
+}
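For context, a hypothetical caller of the new mailbox helper might look like the sketch below. Only qla2x00_get_data_rate(), QLA_SUCCESS, qla_printk() and ha->link_data_rate come from the hunks above; the wrapper name and the log text are illustrative assumptions, not part of the patch.

static void qla2x00_report_link_rate(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Refresh the cached rate via the MBC_DATA_RATE mailbox command. */
	if (qla2x00_get_data_rate(vha) == QLA_SUCCESS)
		qla_printk(KERN_INFO, ha, "Current link data rate: 0x%x.\n",
		    ha->link_data_rate);
}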
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2a4c7f4..b901aa2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -639,8 +639,10 @@ static void qla_do_work(struct work_struct *work)
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
+ spin_lock_irq(&rsp->hw->hardware_lock);
vha = qla25xx_get_host(rsp);
qla24xx_process_response_queue(vha, rsp);
+ spin_unlock_irq(&rsp->hw->hardware_lock);
}
/* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2f873d2..209f50e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
srb_t *sp;
int rval;
- if (unlikely(pci_channel_offline(ha->pdev))) {
- if (ha->pdev->error_state == pci_channel_io_frozen)
- cmd->result = DID_REQUEUE << 16;
- else
+ if (ha->flags.eeh_busy) {
+ if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
int ret = QLA_SUCCESS;
+ if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+ return ret;
+ }
+
while (CMD_SP(cmd) && wait_iter--) {
msleep(ABORT_POLLING_PERIOD);
}
@@ -1810,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
+
+ /* Set EEH reset type to fundamental if required by hba */
+ if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ pdev->needs_freset = 1;
+ pci_save_state(pdev);
+ }
+
/* Configure PCI I/O space */
ret = qla2x00_iospace_config(ha);
if (ret)
@@ -2174,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
+ /* Kill the kernel thread for this host */
+ if (ha->dpc_thread) {
+ struct task_struct *t = ha->dpc_thread;
+
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+ }
+
qla25xx_delete_queues(vha);
if (ha->flags.fce_enabled)
@@ -2185,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(vha);
+ vha->flags.online = 0;
+
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops->disable_intrs(ha);
@@ -2859,6 +2893,13 @@ qla2x00_do_dpc(void *data)
if (!base_vha->flags.init_done)
continue;
+ if (ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "qla2x00_do_dpc: dpc_flags: %lx\n",
+ base_vha->dpc_flags));
+ continue;
+ }
+
DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
ha->dpc_active = 1;
@@ -3049,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
int index;
srb_t *sp;
int t;
+ uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+
+ /* Hardware read to raise pending EEH errors during mailbox waits. */
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/*
* Ports - Port down timer.
*
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = vha->hw;
+
+ DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+ state));
switch (state) {
case pci_channel_io_normal:
+ ha->flags.eeh_busy = 0;
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ ha->flags.eeh_busy = 1;
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+ ha->flags.pci_channel_io_perm_failure = 1;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int rc;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
if (rc) {
qla_printk(KERN_WARNING, ha,
"Can't re-enable PCI device after reset.\n");
-
return ret;
}
- pci_set_master(pdev);
if (ha->isp_ops->pci_config(base_vha))
return ret;
+#ifdef QL_DEBUG_LEVEL_17
+ {
+ uint8_t b;
+ uint32_t i;
+
+ printk("slot_reset_1: ");
+ for (i = 0; i < 256; i++) {
+ pci_read_config_byte(ha->pdev, i, &b);
+ printk("%s%02x", (i%16) ? " " : "\n", b);
+ }
+ printk("\n");
+ }
+#endif
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "slot_reset-return:ret=%x\n", ret));
+
return ret;
}
@@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"the device failed to resume I/O "
"from slot/link_reset");
}
+
+ ha->flags.eeh_busy = 0;
+
pci_cleanup_aer_uncorrect_error_status(pdev);
}
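The three callbacks touched above are the standard PCI error-recovery entry points: on an EEH event the core calls error_detected(), then slot_reset() once the link has been reset, and finally resume(); the eeh_busy and pci_channel_io_perm_failure flags set in these hunks gate the I/O paths in between. As a minimal sketch, assuming the function names in this file (the qla2xxx driver already carries an equivalent table, so this is illustration only), the handlers are registered with the PCI core roughly like this:

static struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected	= qla2xxx_pci_error_detected,
	.slot_reset	= qla2xxx_pci_slot_reset,
	.resume		= qla2xxx_pci_resume,
};

static struct pci_driver qla2xxx_pci_driver = {
	/* .name, .id_table, .probe, .remove elided */
	.err_handler	= &qla2xxx_err_handler,
};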
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c482220..a65dd95 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k8"
+#define QLA2XXX_VERSION "8.03.01-k9"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1..fd7b15b 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
}
break;
case INQUIRY:
+ if (lun >= host->max_lun) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
if (id != host->max_id - 1)
break;
if (!lun && !cmd->device->channel &&
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 415858b..825b665 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf;
+ struct pxafb_mach_info *inf = fbi->dev->platform_data;
- if (!fbi || !fbi->dev->platform_data->smart_update) {
+ if (!inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;