Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 12
-rw-r--r--  drivers/block/nbd.c | 48
-rw-r--r--  drivers/block/nvme-core.c | 684
-rw-r--r--  drivers/block/nvme-scsi.c | 43
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 10
-rw-r--r--  drivers/char/virtio_console.c | 4
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 828
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 38
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 27
-rw-r--r--  drivers/irqchip/irq-vic.c | 6
-rw-r--r--  drivers/md/bitmap.c | 1
-rw-r--r--  drivers/md/md.c | 65
-rw-r--r--  drivers/md/md.h | 1
-rw-r--r--  drivers/md/raid1.c | 17
-rw-r--r--  drivers/md/raid5.c | 28
-rw-r--r--  drivers/media/dvb-frontends/drx39xyj/Kconfig | 2
-rw-r--r--  drivers/media/dvb-frontends/lgdt3305.c | 1
-rw-r--r--  drivers/media/dvb-frontends/m88rs2000.c | 8
-rw-r--r--  drivers/media/platform/ti-vpe/vpe.c | 45
-rw-r--r--  drivers/media/rc/img-ir/img-ir-hw.c | 15
-rw-r--r--  drivers/media/rc/img-ir/img-ir-nec.c | 27
-rw-r--r--  drivers/media/rc/ir-nec-decoder.c | 5
-rw-r--r--  drivers/media/rc/keymaps/rc-tivo.c | 86
-rw-r--r--  drivers/media/rc/rc-main.c | 98
-rw-r--r--  drivers/media/tuners/r820t.c | 3
-rw-r--r--  drivers/media/tuners/tuner-xc2028.c | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 2
-rw-r--r--  drivers/media/usb/gspca/jpeg.h | 4
-rw-r--r--  drivers/media/usb/stk1160/stk1160-ac97.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 4
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 124
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 20
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 31
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 30
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 12
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 10
-rw-r--r--  drivers/net/ntb_netdev.c | 27
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/usb/r8152.c | 48
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/cosa.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 2
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 14
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_core.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_debugfs.c | 35
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 8
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio.c | 5
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb.c | 26
-rw-r--r--  drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c | 10
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/ntb/ntb_hw.c | 192
-rw-r--r--  drivers/ntb/ntb_hw.h | 8
-rw-r--r--  drivers/ntb/ntb_transport.c | 20
-rw-r--r--  drivers/pinctrl/Kconfig | 8
-rw-r--r--  drivers/pinctrl/Makefile | 2
-rw-r--r--  drivers/pinctrl/pinctrl-bcm281xx.c | 1461
-rw-r--r--  drivers/pinctrl/pinctrl-capri.c | 1454
-rw-r--r--  drivers/pinctrl/pinctrl-msm.c | 6
-rw-r--r--  drivers/pinctrl/pinctrl-msm.h | 1
-rw-r--r--  drivers/pinctrl/pinctrl-nomadik.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 48
-rw-r--r--  drivers/remoteproc/da8xx_remoteproc.c | 16
-rw-r--r--  drivers/remoteproc/ste_modem_rproc.c | 4
-rw-r--r--  drivers/s390/char/sclp.c | 2
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 2
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 14
-rw-r--r--  drivers/scsi/Kconfig | 3
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 18
-rw-r--r--  drivers/scsi/scsi.c | 9
-rw-r--r--  drivers/scsi/scsi_pm.c | 128
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 4
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c | 64
-rw-r--r--  drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c | 24
-rw-r--r--  drivers/staging/lustre/lustre/llite/symlink.c | 23
-rw-r--r--  drivers/staging/media/msi3101/msi001.c | 2
-rw-r--r--  drivers/staging/media/msi3101/sdr-msi3101.c | 15
-rw-r--r--  drivers/staging/usbip/stub_dev.c | 8
-rw-r--r--  drivers/staging/usbip/usbip_common.c | 25
-rw-r--r--  drivers/staging/usbip/usbip_common.h | 1
-rw-r--r--  drivers/staging/usbip/vhci_hcd.c | 4
-rw-r--r--  drivers/staging/usbip/vhci_sysfs.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 33
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 21
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 12
-rw-r--r--  drivers/target/sbp/sbp_target.c | 8
-rw-r--r--  drivers/target/target_core_alua.c | 95
-rw-r--r--  drivers/target/target_core_configfs.c | 4
-rw-r--r--  drivers/target/target_core_file.c | 40
-rw-r--r--  drivers/target/target_core_iblock.c | 5
-rw-r--r--  drivers/target/target_core_rd.c | 14
-rw-r--r--  drivers/target/target_core_sbc.c | 178
-rw-r--r--  drivers/target/target_core_spc.c | 49
-rw-r--r--  drivers/target/target_core_tmr.c | 23
-rw-r--r--  drivers/target/target_core_transport.c | 92
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 13
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 5
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 76
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 10
-rw-r--r--  drivers/tty/tty_audit.c | 3
-rw-r--r--  drivers/usb/gadget/tcm_usb_gadget.c | 8
-rw-r--r--  drivers/vhost/net.c | 14
-rw-r--r--  drivers/vhost/scsi.c | 9
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 2
-rw-r--r--  drivers/video/da8xx-fb.c | 10
-rw-r--r--  drivers/video/omap2/dss/dispc.c | 67
-rw-r--r--  drivers/video/omap2/dss/dsi.c | 20
-rw-r--r--  drivers/video/omap2/dss/dss.c | 4
-rw-r--r--  drivers/video/omap2/dss/dss.h | 6
-rw-r--r--  drivers/video/omap2/dss/hdmi_common.c | 8
157 files changed, 4402 insertions(+), 3130 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 18c76e8..68e3992 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -469,24 +469,14 @@ static void drbd_wait_ee_list_empty(struct drbd_device *device,
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
- mm_segment_t oldfs;
struct kvec iov = {
.iov_base = buf,
.iov_len = size,
};
struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&iov,
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- int rv;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
- set_fs(oldfs);
-
- return rv;
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 55298db..3a70ea2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -630,37 +630,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
}
case NBD_CLEAR_SOCK: {
- struct file *file;
-
+ struct socket *sock = nbd->sock;
nbd->sock = NULL;
- file = nbd->file;
- nbd->file = NULL;
nbd_clear_que(nbd);
BUG_ON(!list_empty(&nbd->queue_head));
BUG_ON(!list_empty(&nbd->waiting_queue));
kill_bdev(bdev);
- if (file)
- fput(file);
+ if (sock)
+ sockfd_put(sock);
return 0;
}
case NBD_SET_SOCK: {
- struct file *file;
- if (nbd->file)
+ struct socket *sock;
+ int err;
+ if (nbd->sock)
return -EBUSY;
- file = fget(arg);
- if (file) {
- struct inode *inode = file_inode(file);
- if (S_ISSOCK(inode->i_mode)) {
- nbd->file = file;
- nbd->sock = SOCKET_I(inode);
- if (max_part > 0)
- bdev->bd_invalidated = 1;
- nbd->disconnect = 0; /* we're connected now */
- return 0;
- } else {
- fput(file);
- }
+ sock = sockfd_lookup(arg, &err);
+ if (sock) {
+ nbd->sock = sock;
+ if (max_part > 0)
+ bdev->bd_invalidated = 1;
+ nbd->disconnect = 0; /* we're connected now */
+ return 0;
}
return -EINVAL;
}
@@ -697,12 +689,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_DO_IT: {
struct task_struct *thread;
- struct file *file;
+ struct socket *sock;
int error;
if (nbd->pid)
return -EBUSY;
- if (!nbd->file)
+ if (!nbd->sock)
return -EINVAL;
mutex_unlock(&nbd->tx_lock);
@@ -731,15 +723,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (error)
return error;
sock_shutdown(nbd, 0);
- file = nbd->file;
- nbd->file = NULL;
+ sock = nbd->sock;
+ nbd->sock = NULL;
nbd_clear_que(nbd);
dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
kill_bdev(bdev);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
set_device_ro(bdev, false);
- if (file)
- fput(file);
+ if (sock)
+ sockfd_put(sock);
nbd->flags = 0;
nbd->bytesize = 0;
bdev->bd_inode->i_size = 0;
@@ -875,9 +867,7 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].file = NULL;
nbd_dev[i].magic = NBD_MAGIC;
- nbd_dev[i].flags = 0;
INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
spin_lock_init(&nbd_dev[i].queue_lock);
INIT_LIST_HEAD(&nbd_dev[i].queue_head);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da085ff..7c64fa7 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
/*
* NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
+#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -35,6 +37,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/percpu.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -47,6 +50,11 @@
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT (60 * HZ)
+#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT)
+
+unsigned char io_timeout = 30;
+module_param(io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
static void nvme_reset_failed_dev(struct work_struct *ws);
@@ -74,6 +83,7 @@ struct async_cmd_info {
* commands and one for I/O commands).
*/
struct nvme_queue {
+ struct rcu_head r_head;
struct device *q_dmadev;
struct nvme_dev *dev;
char irqname[24]; /* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
wait_queue_head_t sq_full;
wait_queue_t sq_cong_wait;
struct bio_list sq_cong;
+ struct list_head iod_bio;
u32 __iomem *q_db;
u16 q_depth;
u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ cpumask_var_t cpu_mask;
struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
struct nvme_completion *);
struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
-static void special_completion(struct nvme_dev *dev, void *ctx,
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
if (ctx == CMD_CTX_FLUSH)
return;
if (ctx == CMD_CTX_ABORT) {
- ++dev->abort_limit;
+ ++nvmeq->dev->abort_limit;
return;
}
if (ctx == CMD_CTX_COMPLETED) {
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(nvmeq->q_dmadev,
"completed id %d twice on queue %d\n",
cqe->command_id, le16_to_cpup(&cqe->sq_id));
return;
}
if (ctx == CMD_CTX_INVALID) {
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(nvmeq->q_dmadev,
"invalid id %d completed on queue %d\n",
cqe->command_id, le16_to_cpup(&cqe->sq_id));
return;
}
- dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+ dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
}
-static void async_completion(struct nvme_dev *dev, void *ctx,
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
return ctx;
}
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
+{
+ return rcu_dereference_raw(dev->queues[qid]);
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
+{
+ unsigned queue_id = get_cpu_var(*dev->io_queue);
+ rcu_read_lock();
+ return rcu_dereference(dev->queues[queue_id]);
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
- return dev->queues[get_cpu() + 1];
+ rcu_read_unlock();
+ put_cpu_var(nvmeq->dev->io_queue);
}
-void put_nvmeq(struct nvme_queue *nvmeq)
+static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+ __acquires(RCU)
{
- put_cpu();
+ rcu_read_lock();
+ return rcu_dereference(dev->queues[q_idx]);
+}
+
+static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+ rcu_read_unlock();
}
/**
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
unsigned long flags;
u16 tail;
spin_lock_irqsave(&nvmeq->q_lock, flags);
+ if (nvmeq->q_suspended) {
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+ return -EBUSY;
+ }
tail = nvmeq->sq_tail;
memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->npages = -1;
iod->length = nbytes;
iod->nents = 0;
+ iod->first_dma = 0ULL;
iod->start_time = jiffies;
}
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
part_stat_unlock();
}
-static void bio_completion(struct nvme_dev *dev, void *ctx,
+static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct nvme_iod *iod = ctx;
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
+ if (unlikely(status)) {
+ if (!(status & NVME_SC_DNR ||
+ bio->bi_rw & REQ_FAILFAST_MASK) &&
+ (jiffies - iod->start_time) < IOD_TIMEOUT) {
+ if (!waitqueue_active(&nvmeq->sq_full))
+ add_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ list_add_tail(&iod->node, &nvmeq->iod_bio);
+ wake_up(&nvmeq->sq_full);
+ return;
+ }
+ }
if (iod->nents) {
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_end_io_acct(bio, iod->start_time);
}
- nvme_free_iod(dev, iod);
+ nvme_free_iod(nvmeq->dev, iod);
if (status)
bio_endio(bio, -EIO);
else
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
- struct nvme_iod *iod, int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+ gfp_t gfp)
{
struct dma_pool *pool;
int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
dma_addr_t prp_dma;
int nprps, i;
- cmd->prp1 = cpu_to_le64(dma_addr);
length -= (PAGE_SIZE - offset);
if (length <= 0)
return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
}
if (length <= PAGE_SIZE) {
- cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->first_dma = dma_addr;
return total_len;
}
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
if (!prp_list) {
- cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->first_dma = dma_addr;
iod->npages = -1;
return (total_len - length) + PAGE_SIZE;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
- cmd->prp2 = cpu_to_le64(prp_dma);
i = 0;
for (;;) {
if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
bio_chain(split, bio);
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, split);
bio_list_add(&nvmeq->sq_cong, bio);
+ wake_up(&nvmeq->sq_full);
return 0;
}
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
return length;
}
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct bio *bio, struct nvme_iod *iod, int cmdid)
{
- struct nvme_dsm_range *range;
+ struct nvme_dsm_range *range =
+ (struct nvme_dsm_range *)iod_list(iod)[0];
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
- range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
- &iod->first_dma);
- if (!range)
- return -ENOMEM;
-
- iod_list(iod)[0] = (__le64 *)range;
- iod->npages = 0;
-
range->cattr = cpu_to_le32(0);
range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
return nvme_submit_flush(nvmeq, ns, cmdid);
}
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct bio *bio)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
{
+ struct bio *bio = iod->private;
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
struct nvme_command *cmnd;
- struct nvme_iod *iod;
- enum dma_data_direction dma_dir;
- int cmdid, length, result;
+ int cmdid;
u16 control;
u32 dsmgmt;
- int psegs = bio_phys_segments(ns->queue, bio);
-
- if ((bio->bi_rw & REQ_FLUSH) && psegs) {
- result = nvme_submit_flush_data(nvmeq, ns);
- if (result)
- return result;
- }
- result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
- if (!iod)
- goto nomem;
- iod->private = bio;
-
- result = -EBUSY;
cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
if (unlikely(cmdid < 0))
- goto free_iod;
+ return cmdid;
- if (bio->bi_rw & REQ_DISCARD) {
- result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
- if (result)
- goto free_cmdid;
- return result;
- }
- if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ if (bio->bi_rw & REQ_DISCARD)
+ return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+ if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
return nvme_submit_flush(nvmeq, ns, cmdid);
control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
memset(cmnd, 0, sizeof(*cmnd));
- if (bio_data_dir(bio)) {
- cmnd->rw.opcode = nvme_cmd_write;
- dma_dir = DMA_TO_DEVICE;
- } else {
- cmnd->rw.opcode = nvme_cmd_read;
- dma_dir = DMA_FROM_DEVICE;
- }
-
- result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
- if (result <= 0)
- goto free_cmdid;
- length = result;
+ cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
cmnd->rw.command_id = cmdid;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
- GFP_ATOMIC);
+ cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
- cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.length =
+ cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- nvme_start_io_acct(bio);
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
return 0;
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_iod *iod;
+ int psegs = bio_phys_segments(ns->queue, bio);
+ int result;
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+ if (!iod)
+ return -ENOMEM;
+
+ iod->private = bio;
+ if (bio->bi_rw & REQ_DISCARD) {
+ void *range;
+ /*
+ * We reuse the small pool to allocate the 16-byte range here
+ * as it is not worth having a special pool for these or
+ * additional cases to handle freeing the iod.
+ */
+ range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+ GFP_ATOMIC,
+ &iod->first_dma);
+ if (!range) {
+ result = -ENOMEM;
+ goto free_iod;
+ }
+ iod_list(iod)[0] = (__le64 *)range;
+ iod->npages = 0;
+ } else if (psegs) {
+ result = nvme_map_bio(nvmeq, iod, bio,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ psegs);
+ if (result <= 0)
+ goto free_iod;
+ if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+ result) {
+ result = -ENOMEM;
+ goto free_iod;
+ }
+ nvme_start_io_acct(bio);
+ }
+ if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+ if (!waitqueue_active(&nvmeq->sq_full))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ list_add_tail(&iod->node, &nvmeq->iod_bio);
+ }
+ return 0;
- free_cmdid:
- free_cmdid(nvmeq, cmdid, NULL);
free_iod:
nvme_free_iod(nvmeq->dev, iod);
- nomem:
return result;
}
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
}
ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
- fn(nvmeq->dev, ctx, &cqe);
+ fn(nvmeq, ctx, &cqe);
}
/* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
result = nvme_submit_bio_queue(nvmeq, ns, bio);
if (unlikely(result)) {
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, bio);
}
@@ -791,7 +848,7 @@ struct sync_cmd_info {
int status;
};
-static void sync_completion(struct nvme_dev *dev, void *ctx,
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+ struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
- int cmdid;
+ int cmdid, ret;
struct sync_cmd_info cmdinfo;
+ struct nvme_queue *nvmeq;
+
+ nvmeq = lock_nvmeq(dev, q_idx);
+ if (!nvmeq) {
+ unlock_nvmeq(nvmeq);
+ return -ENODEV;
+ }
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
- timeout);
- if (cmdid < 0)
+ cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+ if (cmdid < 0) {
+ unlock_nvmeq(nvmeq);
return cmdid;
+ }
cmd->common.command_id = cmdid;
set_current_state(TASK_KILLABLE);
- nvme_submit_cmd(nvmeq, cmd);
+ ret = nvme_submit_cmd(nvmeq, cmd);
+ if (ret) {
+ free_cmdid(nvmeq, cmdid, NULL);
+ unlock_nvmeq(nvmeq);
+ set_current_state(TASK_RUNNING);
+ return ret;
+ }
+ unlock_nvmeq(nvmeq);
schedule_timeout(timeout);
if (cmdinfo.status == -EINTR) {
- nvme_abort_command(nvmeq, cmdid);
+ nvmeq = lock_nvmeq(dev, q_idx);
+ if (nvmeq)
+ nvme_abort_command(nvmeq, cmdid);
+ unlock_nvmeq(nvmeq);
return -EINTR;
}
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
return cmdid;
cmdinfo->status = -EINTR;
cmd->common.command_id = cmdid;
- nvme_submit_cmd(nvmeq, cmd);
- return 0;
+ return nvme_submit_cmd(nvmeq, cmd);
}
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
- return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+ return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+}
+
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+ NVME_IO_TIMEOUT);
}
static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
{
- return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
ADMIN_TIMEOUT);
}
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
struct nvme_command cmd;
struct nvme_dev *dev = nvmeq->dev;
struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ struct nvme_queue *adminq;
if (!nvmeq->qid || info[cmdid].aborted) {
if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
if (!dev->abort_limit)
return;
- a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ adminq = rcu_dereference(dev->queues[0]);
+ a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
ADMIN_TIMEOUT);
if (a_cmdid < 0)
return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
nvmeq->qid);
- nvme_submit_cmd(dev->queues[0], &cmd);
+ nvme_submit_cmd(adminq, &cmd);
}
/**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
+ fn(nvmeq, ctx, &cqe);
}
}
-static void nvme_free_queue(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct rcu_head *r)
{
+ struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+
spin_lock_irq(&nvmeq->q_lock);
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
bio_endio(bio, -EIO);
}
+ while (!list_empty(&nvmeq->iod_bio)) {
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(
+ (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+ };
+ struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+ struct nvme_iod,
+ node);
+ list_del(&iod->node);
+ bio_completion(nvmeq, iod, &cqe);
+ }
spin_unlock_irq(&nvmeq->q_lock);
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ if (nvmeq->qid)
+ free_cpumask_var(nvmeq->cpu_mask);
kfree(nvmeq);
}
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->queue_count - 1; i >= lowest; i--) {
- nvme_free_queue(dev->queues[i]);
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+ rcu_assign_pointer(dev->queues[i], NULL);
+ call_rcu(&nvmeq->r_head, nvme_free_queue);
dev->queue_count--;
- dev->queues[i] = NULL;
}
}
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
return 1;
}
nvmeq->q_suspended = 1;
+ nvmeq->dev->online_queues--;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
if (!nvmeq)
return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
if (!nvmeq->sq_cmds)
goto free_cqdma;
+ if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+ goto free_sqdma;
+
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
+ INIT_LIST_HEAD(&nvmeq->iod_bio);
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
+ rcu_assign_pointer(dev->queues[qid], nvmeq);
return nvmeq;
+ free_sqdma:
+ dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+ nvmeq->sq_dma_addr);
free_cqdma:
dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
nvmeq->q_suspended = 0;
+ dev->online_queues++;
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
+ nvmeq = raw_nvmeq(dev, 0);
if (!nvmeq) {
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
return -ENOMEM;
- dev->queues[0] = nvmeq;
}
aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_dev *dev = ns->dev;
- struct nvme_queue *nvmeq;
struct nvme_user_io io;
struct nvme_command c;
unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.metadata = cpu_to_le64(meta_dma_addr);
}
- length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.rw.prp2 = cpu_to_le64(iod->first_dma);
- nvmeq = get_nvmeq(dev);
- /*
- * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
- * disabled. We may be preempted at any point, and be rescheduled
- * to a different CPU. That will cause cacheline bouncing, but no
- * additional races since q_lock already protects against other CPUs.
- */
- put_nvmeq(nvmeq);
if (length != (io.nblocks + 1) << ns->lba_shift)
status = -ENOMEM;
- else if (!nvmeq || nvmeq->q_suspended)
- status = -EBUSY;
else
- status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ status = nvme_submit_io_cmd(dev, &c, NULL);
if (meta_len) {
if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
length);
if (IS_ERR(iod))
return PTR_ERR(iod);
- length = nvme_setup_prps(dev, &c.common, iod, length,
- GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.common.prp2 = cpu_to_le64(iod->first_dma);
}
timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
- timeout);
+ status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
kref_put(&dev->kref, nvme_free_dev);
}
+static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+ /* some standard values */
+ geo->heads = 1 << 6;
+ geo->sectors = 1 << 5;
+ geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ return 0;
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
+ .getgeo = nvme_getgeo,
};
+static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+{
+ struct nvme_iod *iod, *next;
+
+ list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+ if (unlikely(nvme_submit_iod(nvmeq, iod)))
+ break;
+ list_del(&iod->node);
+ if (bio_list_empty(&nvmeq->sq_cong) &&
+ list_empty(&nvmeq->iod_bio))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (bio_list_empty(&nvmeq->sq_cong) &&
+ list_empty(&nvmeq->iod_bio))
remove_wait_queue(&nvmeq->sq_full,
&nvmeq->sq_cong_wait);
if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full,
&nvmeq->sq_cong_wait);
bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1700,8 +1826,10 @@ static int nvme_kthread(void *data)
queue_work(nvme_workq, &dev->reset_work);
continue;
}
+ rcu_read_lock();
for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq =
+ rcu_dereference(dev->queues[i]);
if (!nvmeq)
continue;
spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@ static int nvme_kthread(void *data)
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
+ nvme_resubmit_iods(nvmeq);
unlock:
spin_unlock_irq(&nvmeq->q_lock);
}
+ rcu_read_unlock();
}
spin_unlock(&dev_list_lock);
schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
+static int nvme_find_closest_node(int node)
+{
+ int n, val, min_val = INT_MAX, best_node = node;
+
+ for_each_online_node(n) {
+ if (n == node)
+ continue;
+ val = node_distance(node, n);
+ if (val < min_val) {
+ min_val = val;
+ best_node = n;
+ }
+ }
+ return best_node;
+}
+
+static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+ int count)
+{
+ int cpu;
+ for_each_cpu(cpu, qmask) {
+ if (cpumask_weight(nvmeq->cpu_mask) >= count)
+ break;
+ if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+ *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+ }
+}
+
+static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+ const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+{
+ int next_cpu;
+ for_each_cpu(next_cpu, new_mask) {
+ cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+ cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+ cpumask_and(mask, mask, unassigned_cpus);
+ nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+ }
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+ unsigned i, max;
+
+ max = min(dev->max_qid, num_online_cpus());
+ for (i = dev->queue_count; i <= max; i++)
+ if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ break;
+
+ max = min(dev->queue_count - 1, num_online_cpus());
+ for (i = dev->online_queues; i <= max; i++)
+ if (nvme_create_queue(raw_nvmeq(dev, i), i))
+ break;
+}
+
+/*
+ * If there are fewer queues than online cpus, this will try to optimally
+ * assign a queue to multiple cpus by grouping cpus that are "close" together:
+ * thread siblings, core, socket, closest node, then whatever else is
+ * available.
+ */
+static void nvme_assign_io_queues(struct nvme_dev *dev)
+{
+ unsigned cpu, cpus_per_queue, queues, remainder, i;
+ cpumask_var_t unassigned_cpus;
+
+ nvme_create_io_queues(dev);
+
+ queues = min(dev->online_queues - 1, num_online_cpus());
+ if (!queues)
+ return;
+
+ cpus_per_queue = num_online_cpus() / queues;
+ remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+
+ if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+ return;
+
+ cpumask_copy(unassigned_cpus, cpu_online_mask);
+ cpu = cpumask_first(unassigned_cpus);
+ for (i = 1; i <= queues; i++) {
+ struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+ cpumask_t mask;
+
+ cpumask_clear(nvmeq->cpu_mask);
+ if (!cpumask_weight(unassigned_cpus)) {
+ unlock_nvmeq(nvmeq);
+ break;
+ }
+
+ mask = *get_cpu_mask(cpu);
+ nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ topology_thread_cpumask(cpu),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ topology_core_cpumask(cpu),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ cpumask_of_node(cpu_to_node(cpu)),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ cpumask_of_node(
+ nvme_find_closest_node(
+ cpu_to_node(cpu))),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ unassigned_cpus,
+ nvmeq, cpus_per_queue);
+
+ WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+ "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+ dev->instance, i);
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ nvmeq->cpu_mask);
+ cpumask_andnot(unassigned_cpus, unassigned_cpus,
+ nvmeq->cpu_mask);
+ cpu = cpumask_next(cpu, unassigned_cpus);
+ if (remainder && !--remainder)
+ cpus_per_queue++;
+ unlock_nvmeq(nvmeq);
+ }
+ WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+ dev->instance);
+ i = 0;
+ cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, unassigned_cpus)
+ *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+ free_cpumask_var(unassigned_cpus);
+}
+
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1805,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
+static int nvme_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ nvme_assign_io_queues(dev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = raw_nvmeq(dev, 0);
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+ int result, i, vecs, nr_io_queues, size;
- nr_io_queues = num_online_cpus();
+ nr_io_queues = num_possible_cpus();
result = set_queue_count(dev, nr_io_queues);
if (result < 0)
return result;
@@ -1830,7 +2110,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
size = db_bar_size(dev, nr_io_queues);
} while (1);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
- dev->queues[0]->q_db = dev->dbs;
+ adminq->q_db = dev->dbs;
}
/* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* number of interrupts.
*/
nr_io_queues = vecs;
+ dev->max_qid = nr_io_queues;
result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
@@ -1864,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Free previously allocated queues that are no longer usable */
- spin_lock(&dev_list_lock);
- for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
-
- spin_lock_irq(&nvmeq->q_lock);
- nvme_cancel_ios(nvmeq, false);
- spin_unlock_irq(&nvmeq->q_lock);
-
- nvme_free_queue(nvmeq);
- dev->queue_count--;
- dev->queues[i] = NULL;
- }
- spin_unlock(&dev_list_lock);
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_io_queues; i++) {
- irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-
- q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
- NVME_Q_DEPTH);
- for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
- if (!dev->queues[i + 1]) {
- result = -ENOMEM;
- goto free_queues;
- }
- }
-
- for (; i < num_possible_cpus(); i++) {
- int target = i % rounddown_pow_of_two(dev->queue_count - 1);
- dev->queues[i + 1] = dev->queues[target + 1];
- }
+ nvme_free_queues(dev, nr_io_queues + 1);
+ nvme_assign_io_queues(dev);
- for (i = 1; i < dev->queue_count; i++) {
- result = nvme_create_queue(dev->queues[i], i);
- if (result) {
- for (--i; i > 0; i--)
- nvme_disable_queue(dev, i);
- goto free_queues;
- }
- }
+ dev->nb.notifier_call = &nvme_cpu_notify;
+ result = register_hotcpu_notifier(&dev->nb);
+ if (result)
+ goto free_queues;
return 0;
@@ -1985,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
static int nvme_dev_map(struct nvme_dev *dev)
{
+ u64 cap;
int bars, result = -ENOMEM;
struct pci_dev *pdev = dev->pci_dev;
@@ -2008,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
result = -ENODEV;
goto unmap;
}
- dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ cap = readq(&dev->bar->cap);
+ dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+ dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
@@ -2164,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
atomic_set(&dq.refcount, 0);
dq.worker = &worker;
for (i = dev->queue_count - 1; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
if (nvme_suspend_queue(nvmeq))
continue;
@@ -2177,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
kthread_stop(kworker_task);
}
+/*
+* Remove the node from the device list and check
+* for whether or not we need to stop the nvme_thread.
+*/
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+ struct task_struct *tmp = NULL;
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+ tmp = nvme_thread;
+ nvme_thread = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+
+ if (tmp)
+ kthread_stop(tmp);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
dev->initialized = 0;
+ unregister_hotcpu_notifier(&dev->nb);
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- spin_unlock(&dev_list_lock);
+ nvme_dev_list_remove(dev);
if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
nvme_suspend_queue(nvmeq);
nvme_clear_queue(nvmeq);
}
@@ -2282,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_free_namespaces(dev);
+ free_percpu(dev->io_queue);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2325,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
+ bool start_thread = false;
result = nvme_dev_map(dev);
if (result)
@@ -2335,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
goto unmap;
spin_lock(&dev_list_lock);
+ if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+ start_thread = true;
+ nvme_thread = NULL;
+ }
list_add(&dev->node, &dev_list);
spin_unlock(&dev_list_lock);
+ if (start_thread) {
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ wake_up(&nvme_kthread_wait);
+ } else
+ wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+ if (IS_ERR_OR_NULL(nvme_thread)) {
+ result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+ goto disable;
+ }
+
result = nvme_setup_io_queues(dev);
if (result && result != -EBUSY)
goto disable;
@@ -2346,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
disable:
nvme_disable_queue(dev, 0);
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- spin_unlock(&dev_list_lock);
+ nvme_dev_list_remove(dev);
unmap:
nvme_dev_unmap(dev);
return result;
@@ -2367,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
static void nvme_remove_disks(struct work_struct *ws)
{
- int i;
struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
nvme_dev_remove(dev);
- spin_lock(&dev_list_lock);
- for (i = dev->queue_count - 1; i > 0; i--) {
- BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
- nvme_free_queue(dev->queues[i]);
- dev->queue_count--;
- dev->queues[i] = NULL;
- }
- spin_unlock(&dev_list_lock);
+ nvme_free_queues(dev, 1);
}
static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
GFP_KERNEL);
if (!dev->queues)
goto free;
+ dev->io_queue = alloc_percpu(unsigned short);
+ if (!dev->io_queue)
+ goto free;
INIT_LIST_HEAD(&dev->namespaces);
dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release;
+ kref_init(&dev->kref);
result = nvme_dev_start(dev);
if (result) {
if (result == -EBUSY)
@@ -2462,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
- kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2491,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release:
nvme_release_instance(dev);
free:
+ free_percpu(dev->io_queue);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2517,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
nvme_free_queues(dev, 0);
+ rcu_barrier();
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
+#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@ static int nvme_resume(struct device *dev)
}
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2563,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS 0x010802
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
@@ -2585,14 +2866,11 @@ static int __init nvme_init(void)
{
int result;
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- if (IS_ERR(nvme_thread))
- return PTR_ERR(nvme_thread);
+ init_waitqueue_head(&nvme_kthread_wait);
- result = -ENOMEM;
nvme_workq = create_singlethread_workqueue("nvme");
if (!nvme_workq)
- goto kill_kthread;
+ return -ENOMEM;
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
@@ -2609,8 +2887,6 @@ static int __init nvme_init(void)
unregister_blkdev(nvme_major, "nvme");
kill_workq:
destroy_workqueue(nvme_workq);
- kill_kthread:
- kthread_stop(nvme_thread);
return result;
}
@@ -2619,11 +2895,11 @@ static void __exit nvme_exit(void)
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
destroy_workqueue(nvme_workq);
- kthread_stop(nvme_thread);
+ BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb6..2c3f5be 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = PTR_ERR(iod);
goto out;
}
- length = nvme_setup_prps(dev, &c.common, iod, tot_len,
- GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
if (length != tot_len) {
res = -ENOMEM;
goto out_unmap;
}
+ c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
} else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
- struct nvme_queue *nvmeq;
u32 num_cmds;
struct nvme_iod *iod;
u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_command c;
u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
u16 control;
- u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+ u32 max_blocks = queue_max_hw_sectors(ns->queue);
num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = PTR_ERR(iod);
goto out;
}
- retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
- GFP_KERNEL);
+ retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
if (retcode != unit_len) {
nvme_unmap_user_pages(dev,
(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = -ENOMEM;
goto out;
}
+ c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.rw.prp2 = cpu_to_le64(iod->first_dma);
nvme_offset += unit_num_blocks;
- nvmeq = get_nvmeq(dev);
- /*
- * Since nvme_submit_sync_cmd sleeps, we can't keep
- * preemption disabled. We may be preempted at any
- * point, and be rescheduled to a different CPU. That
- * will cause cacheline bouncing, but no additional
- * races since q_lock already protects against other
- * CPUs.
- */
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
- NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
if (nvme_sc != NVME_SC_SUCCESS) {
nvme_unmap_user_pages(dev,
(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
- struct nvme_queue *nvmeq;
struct nvme_command c;
u8 immed, pcmod, pc, no_flush, start;
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvmeq = get_nvmeq(ns->dev);
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+ nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
struct nvme_command c;
- struct nvme_queue *nvmeq;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvmeq = get_nvmeq(ns->dev);
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_dev *dev = ns->dev;
struct scsi_unmap_parm_list *plist;
struct nvme_dsm_range *range;
- struct nvme_queue *nvmeq;
struct nvme_command c;
int i, nvme_sc, res = -ENOMEM;
u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.dsm.nr = cpu_to_le32(ndesc - 1);
c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- nvmeq = get_nvmeq(dev);
- put_nvmeq(nvmeq);
-
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 8c3b255..e900961 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -61,18 +61,18 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
}
bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ /* set warm-up count & enable */
+ __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
+ __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+
/* register driver */
err = hwrng_register(&bcm2835_rng_ops);
if (err) {
dev_err(dev, "hwrng registration failed\n");
iounmap(rng_base);
- } else {
+ } else
dev_info(dev, "hwrng registered\n");
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
- }
return err;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6928d09..60aafb8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -901,9 +901,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = buf->ops->map(pipe, buf, 1);
+ src = kmap_atomic(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- buf->ops->unmap(pipe, buf, src);
+ kunmap_atomic(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e..c98fdb1 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
}
static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+ u8 protection)
{
struct isert_device *device = isert_conn->conn_device;
struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
+ if (protection)
+ attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
return ret;
/* asign function handlers */
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+ dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
device->use_fastreg = 1;
device->reg_rdma_mem = isert_reg_rdma;
device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
device->unreg_rdma_mem = isert_unmap_cmd;
}
+ /* Check signature cap */
+ device->pi_capable = dev_attr->device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
device->cqs_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
pr_debug("Using %d CQs, device %s supports %d vectors support "
- "Fast registration %d\n",
+ "Fast registration %d pi_capable %d\n",
device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg);
+ device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->pi_capable);
device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
device->cqs_used, GFP_KERNEL);
if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
+ if (fr_desc->pi_ctx) {
+ ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+ ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+ ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+ kfree(fr_desc->pi_ctx);
+ }
kfree(fr_desc);
++i;
}
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc)
+ struct fast_reg_descriptor *fr_desc, u8 protection)
{
+ int ret;
+
fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
if (IS_ERR(fr_desc->data_mr)) {
pr_err("Failed to allocate data frmr err=%ld\n",
PTR_ERR(fr_desc->data_mr));
- ib_free_fast_reg_page_list(fr_desc->data_frpl);
- return PTR_ERR(fr_desc->data_mr);
+ ret = PTR_ERR(fr_desc->data_mr);
+ goto err_data_frpl;
}
pr_debug("Create fr_desc %p page_list %p\n",
fr_desc, fr_desc->data_frpl->page_list);
+ fr_desc->ind |= ISERT_DATA_KEY_VALID;
+
+ if (protection) {
+ struct ib_mr_init_attr mr_init_attr = {0};
+ struct pi_context *pi_ctx;
+
+ fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+ if (!fr_desc->pi_ctx) {
+ pr_err("Failed to allocate pi context\n");
+ ret = -ENOMEM;
+ goto err_data_mr;
+ }
+ pi_ctx = fr_desc->pi_ctx;
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ pr_err("Failed to allocate prot frpl err=%ld\n",
+ PTR_ERR(pi_ctx->prot_frpl));
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto err_pi_ctx;
+ }
- fr_desc->valid = true;
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ pr_err("Failed to allocate prot frmr err=%ld\n",
+ PTR_ERR(pi_ctx->prot_mr));
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto err_prot_frpl;
+ }
+ fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ pr_err("Failed to allocate signature enabled mr err=%ld\n",
+ PTR_ERR(pi_ctx->sig_mr));
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto err_prot_mr;
+ }
+ fr_desc->ind |= ISERT_SIG_KEY_VALID;
+ }
+ fr_desc->ind &= ~ISERT_PROTECTED;
return 0;
+err_prot_mr:
+ ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+ ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+ kfree(fr_desc->pi_ctx);
+err_data_mr:
+ ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+ ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+ return ret;
}
static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
- int i, ret;
+ struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ int i, ret, tag_num;
+ /*
+ * Setup the number of FRMRs based upon the number of tags
+ * available to session in iscsi_target_locate_portal().
+ */
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
- INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
isert_conn->conn_fr_pool_size = 0;
- for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+ for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc);
+ isert_conn->conn_pd, fr_desc,
+ pi_support);
if (ret) {
pr_err("Failed to create fastreg descriptor err=%d\n",
ret);
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
+ u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
+ INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_mr;
}
- if (device->use_fastreg) {
- ret = isert_conn_create_fastreg_pool(isert_conn);
- if (ret) {
- pr_err("Conn: %p failed to create fastreg pool\n",
- isert_conn);
- goto out_fastreg;
- }
+ if (pi_support && !device->pi_capable) {
+ pr_err("Protection information requested but not supported\n");
+ ret = -EINVAL;
+ goto out_mr;
}
- ret = isert_conn_setup_qp(isert_conn, cma_id);
+ ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
if (ret)
goto out_conn_dev;
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
- if (device->use_fastreg)
- isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
ib_dereg_mr(isert_conn->conn_mr);
out_mr:
ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
+ if (isert_conn->conn_device->use_fastreg) {
+ u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+ ret = isert_conn_create_fastreg_pool(isert_conn,
+ pi_support);
+ if (ret) {
+ pr_err("Conn: %p failed to create"
+ " fastreg pool\n", isert_conn);
+ return ret;
+ }
+ }
+
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
}
}
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+ enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+ data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ data->len = length - offset;
+ data->offset = offset;
+ data->sg_off = data->offset / PAGE_SIZE;
+
+ data->sg = &sg[data->sg_off];
+ data->nents = min_t(unsigned int, nents - data->sg_off,
+ ISCSI_ISER_SG_TABLESIZE);
+ data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+ PAGE_SIZE);
+
+ data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+ data->dma_dir);
+ if (unlikely(!data->dma_nents)) {
+ pr_err("Cmd: unable to dma map SGs %p\n", sg);
+ return -EINVAL;
+ }
+
+ pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+ return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+ ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+ memset(data, 0, sizeof(*data));
+}
+
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
- if (wr->sge) {
+
+ if (wr->data.sg) {
pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
- ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- wr->sge = NULL;
+ isert_unmap_data_buf(isert_conn, &wr->data);
}
if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
LIST_HEAD(unmap_list);
pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
if (wr->fr_desc) {
pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
isert_cmd, wr->fr_desc);
+ if (wr->fr_desc->ind & ISERT_PROTECTED) {
+ isert_unmap_data_buf(isert_conn, &wr->prot);
+ wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ }
spin_lock_bh(&isert_conn->conn_lock);
list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
spin_unlock_bh(&isert_conn->conn_lock);
wr->fr_desc = NULL;
}
- if (wr->sge) {
+ if (wr->data.sg) {
pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
- ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- wr->sge = NULL;
+ isert_unmap_data_buf(isert_conn, &wr->data);
}
wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- if (cmd->data_direction == DMA_TO_DEVICE)
+ if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
+ /*
+ * Check for special case during comp_err where
+ * WRITE_PENDING has been handed off from core,
+ * but requires an extra target_put_sess_cmd()
+ * before transport_generic_free_cmd() below.
+ */
+ if (comp_err &&
+ cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
+ }
device->unreg_rdma_mem(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
- struct ib_device *ib_dev)
+ struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
}
isert_unmap_tx_desc(tx_desc, ib_dev);
- isert_put_cmd(isert_cmd);
+ isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ goto fail_mr_status;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ u64 sec_offset_err;
+ u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ }
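+		/*
+		 * sig_err_offset is a byte offset; convert it to a block
+		 * number using the wire block size (data block plus the
+		 * 8-byte DIF tuple) and add the command's starting LBA to
+		 * report the failing sector.
+		 */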
+ sec_offset_err = mr_status.sig_err.sig_err_offset;
+ do_div(sec_offset_err, block_size);
+ se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+ pr_err("isert: PI error found type %d at sector 0x%llx "
+ "expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)se_cmd->bad_sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ ret = 1;
+ }
+
+fail_mr_status:
+ return ret;
+}
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+ struct isert_cmd *isert_cmd)
+{
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_device *device = isert_conn->conn_device;
+ int ret = 0;
+
+ if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+ ret = isert_check_pi_status(se_cmd,
+ wr->fr_desc->pi_ctx->sig_mr);
+ wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ }
+
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
+ wr->send_wr_num = 0;
+ if (ret)
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->pi_err, 0);
+ else
+ isert_put_response(isert_conn->conn, cmd);
}
static void
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct isert_device *device = isert_conn->conn_device;
+ int ret = 0;
+
+ if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+ ret = isert_check_pi_status(se_cmd,
+ wr->fr_desc->pi_ctx->sig_mr);
+ wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ }
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = wr->cur_rdma_length;
+ cmd->write_data_done = wr->data.len;
wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
- target_execute_cmd(se_cmd);
+ if (ret)
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->pi_err, 0);
+ else
+ target_execute_cmd(se_cmd);
}
static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
iscsit_tmr_post_handler(cmd, cmd->conn);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_REJECT:
pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
case ISTATE_SEND_TEXTRSP:
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
default:
pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
- atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+	/*
+	 * If send_wr_num is 0, the RDMA completion already handled the
+	 * data transfer and cleared the count, so only decrement for
+	 * the response post. Otherwise the response is included in
+	 * send_wr_num, so subtract the whole count.
+	 */
+ if (wr->send_wr_num)
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ else
+ atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
isert_conn, ib_dev);
break;
case ISER_IB_RDMA_WRITE:
- pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
- dump_stack();
+ pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ isert_completion_rdma_write(tx_desc, isert_cmd);
break;
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
llnode = llist_next(llnode);
wr = &t->isert_cmd->rdma_wr;
- atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
- isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0, the RDMA completion already handled
+		 * the data transfer and cleared the count, so only decrement
+		 * for the response post. Otherwise the response is included
+		 * in send_wr_num, so subtract the whole count.
+		 */
+ if (wr->send_wr_num)
+ atomic_sub(wr->send_wr_num,
+ &isert_conn->post_send_buf_count);
+ else
+ atomic_dec(&isert_conn->post_send_buf_count);
+
+ isert_completion_put(t, t->isert_cmd, ib_dev, true);
}
}
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
llnode = llist_next(llnode);
wr = &t->isert_cmd->rdma_wr;
- atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
- isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0, the RDMA completion already handled
+		 * the data transfer and cleared the count, so only decrement
+		 * for the response post. Otherwise the response is included
+		 * in send_wr_num, so subtract the whole count.
+		 */
+ if (wr->send_wr_num)
+ atomic_sub(wr->send_wr_num,
+ &isert_conn->post_send_buf_count);
+ else
+ atomic_dec(&isert_conn->post_send_buf_count);
+
+ isert_completion_put(t, t->isert_cmd, ib_dev, true);
}
tx_desc->comp_llnode_batch = NULL;
if (!isert_cmd)
isert_unmap_tx_desc(tx_desc, ib_dev);
else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}
static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
return isert_post_response(isert_conn, isert_cmd);
}
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_device *device = isert_conn->conn_device;
+
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+ struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_device *device = isert_conn->conn_device;
+
+ if (device->pi_capable)
+ return TARGET_PROT_ALL;
+
+ return TARGET_PROT_NORMAL;
+}
+
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_data_buf *data = &wr->data;
struct ib_send_wr *send_wr;
struct ib_sge *ib_sge;
- struct scatterlist *sg_start;
- u32 sg_off = 0, sg_nents;
- u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
- int ret = 0, count, i, ib_sge_cnt;
+ u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+ int ret = 0, i, ib_sge_cnt;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
- data_left = se_cmd->data_length;
- } else {
- sg_off = cmd->write_data_done / PAGE_SIZE;
- data_left = se_cmd->data_length - cmd->write_data_done;
- offset = cmd->write_data_done;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
- }
+ isert_cmd->tx_desc.isert_cmd = isert_cmd;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = se_cmd->t_data_nents - sg_off;
+ offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->data_length,
+ offset, wr->iser_ib_op, &wr->data);
+ if (ret)
+ return ret;
- count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (unlikely(!count)) {
- pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
- return -EINVAL;
- }
- wr->sge = sg_start;
- wr->num_sge = sg_nents;
- wr->cur_rdma_length = data_left;
- pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, count, sg_start, sg_nents, data_left);
+ data_left = data->len;
+ offset = data->offset;
- ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+ ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
if (!ib_sge) {
pr_warn("Unable to allocate ib_sge\n");
ret = -ENOMEM;
- goto unmap_sg;
+ goto unmap_cmd;
}
wr->ib_sge = ib_sge;
- wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+ wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
GFP_KERNEL);
if (!wr->send_wr) {
pr_debug("Unable to allocate wr->send_wr\n");
ret = -ENOMEM;
- goto unmap_sg;
+ goto unmap_cmd;
}
wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
return 0;
-unmap_sg:
- ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+ isert_unmap_data_buf(isert_conn, data);
+
return ret;
}
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
}
static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
- struct isert_conn *isert_conn, struct scatterlist *sg_start,
- struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
- unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+ struct fast_reg_descriptor *fr_desc,
+ struct isert_data_buf *mem,
+ enum isert_indicator ind,
+ struct ib_sge *sge)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_mr *mr;
+ struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fr_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
int ret, pagelist_len;
u32 page_off;
u8 key;
- sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
- page_off = offset % PAGE_SIZE;
+ if (mem->dma_nents == 1) {
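+		/*
+		 * A single DMA entry can be described directly with the
+		 * connection's DMA MR lkey; no fast registration is needed.
+		 */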
+ sge->lkey = isert_conn->conn_mr->lkey;
+ sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+ sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+ pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+ __func__, __LINE__, sge->addr, sge->length,
+ sge->lkey);
+ return 0;
+ }
+
+ if (ind == ISERT_DATA_KEY_VALID) {
+ /* Registering data buffer */
+ mr = fr_desc->data_mr;
+ frpl = fr_desc->data_frpl;
+ } else {
+ /* Registering protection buffer */
+ mr = fr_desc->pi_ctx->prot_mr;
+ frpl = fr_desc->pi_ctx->prot_frpl;
+ }
+
+ page_off = mem->offset % PAGE_SIZE;
pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, sg_nents, offset);
+ fr_desc, mem->nents, mem->offset);
- pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
- &fr_desc->data_frpl->page_list[0]);
+ pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+ &frpl->page_list[0]);
- if (!fr_desc->valid) {
+ if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
memset(&inv_wr, 0, sizeof(inv_wr));
inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+ inv_wr.ex.invalidate_rkey = mr->rkey;
wr = &inv_wr;
/* Bump the key */
- key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+ key = (u8)(mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
- fr_wr.wr.fast_reg.iova_start =
- fr_desc->data_frpl->page_list[0] + page_off;
- fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+ fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+ fr_wr.wr.fast_reg.page_list = frpl;
fr_wr.wr.fast_reg.page_list_len = pagelist_len;
fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
- fr_wr.wr.fast_reg.length = data_len;
- fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+ fr_wr.wr.fast_reg.length = mem->len;
+ fr_wr.wr.fast_reg.rkey = mr->rkey;
fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
pr_err("fast registration failed, ret:%d\n", ret);
return ret;
}
- fr_desc->valid = false;
+ fr_desc->ind &= ~ind;
+
+ sge->lkey = mr->lkey;
+ sge->addr = frpl->page_list[0] + page_off;
+ sge->length = mem->len;
+
+ pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+ __func__, __LINE__, sge->addr, sge->length,
+ sge->lkey);
+
+ return ret;
+}
+
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+ switch (prot_type) {
+ case TARGET_DIF_TYPE0_PROT:
+ return IB_T10DIF_NONE;
+ case TARGET_DIF_TYPE1_PROT:
+ return IB_T10DIF_TYPE1;
+ case TARGET_DIF_TYPE2_PROT:
+ return IB_T10DIF_TYPE2;
+ case TARGET_DIF_TYPE3_PROT:
+ return IB_T10DIF_TYPE3;
+ default:
+ return IB_T10DIF_NONE;
+ }
+}
- ib_sge->lkey = fr_desc->data_mr->lkey;
- ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
- ib_sge->length = data_len;
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+ enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->mem.sig.dif.pi_interval =
+ se_cmd->se_dev->dev_attrib.block_size;
+ sig_attrs->wire.sig.dif.pi_interval =
+ se_cmd->se_dev->dev_attrib.block_size;
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+ sig_attrs->wire.sig.dif.type = ib_prot_type;
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+ break;
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ sig_attrs->mem.sig.dif.type = ib_prot_type;
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+ sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+ break;
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ sig_attrs->mem.sig.dif.type = ib_prot_type;
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+ sig_attrs->wire.sig.dif.type = ib_prot_type;
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+ break;
+ default:
+ pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+ return -EINVAL;
+ }
- pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
+ return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+ return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
+ (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+ (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+ struct fast_reg_descriptor *fr_desc,
+ struct ib_sge *data_sge, struct ib_sge *prot_sge,
+ struct ib_sge *sig_sge)
+{
+ struct ib_send_wr sig_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ struct pi_context *pi_ctx = fr_desc->pi_ctx;
+ struct ib_sig_attrs sig_attrs;
+ int ret;
+ u32 key;
+
+ memset(&sig_attrs, 0, sizeof(sig_attrs));
+ ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+ if (ret)
+ goto err;
+
+ sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+ if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+ }
+
+ memset(&sig_wr, 0, sizeof(sig_wr));
+ sig_wr.opcode = IB_WR_REG_SIG_MR;
+ sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+ sig_wr.sg_list = data_sge;
+ sig_wr.num_sge = 1;
+ sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+ sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+ sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+ if (se_cmd->t_prot_sg)
+ sig_wr.wr.sig_handover.prot = prot_sge;
+
+ if (!wr)
+ wr = &sig_wr;
+ else
+ wr->next = &sig_wr;
+
+ ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ if (ret) {
+ pr_err("fast registration failed, ret:%d\n", ret);
+ goto err;
+ }
+ fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+ sig_sge->lkey = pi_ctx->sig_mr->lkey;
+ sig_sge->addr = 0;
+ sig_sge->length = se_cmd->data_length;
+ if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+ se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+ /*
+ * We have protection guards on the wire
+		 * so we need to set a larger transfer
+ */
+ sig_sge->length += se_cmd->prot_length;
+ pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+ sig_sge->addr, sig_sge->length,
+ sig_sge->lkey);
+err:
return ret;
}
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_sge data_sge;
struct ib_send_wr *send_wr;
- struct ib_sge *ib_sge;
- struct scatterlist *sg_start;
- struct fast_reg_descriptor *fr_desc;
- u32 sg_off = 0, sg_nents;
- u32 offset = 0, data_len, data_left, rdma_write_max;
- int ret = 0, count;
+ struct fast_reg_descriptor *fr_desc = NULL;
+ u32 offset;
+ int ret = 0;
unsigned long flags;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
- data_left = se_cmd->data_length;
- } else {
- offset = cmd->write_data_done;
- sg_off = offset / PAGE_SIZE;
- data_left = se_cmd->data_length - cmd->write_data_done;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
- }
+ isert_cmd->tx_desc.isert_cmd = isert_cmd;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = se_cmd->t_data_nents - sg_off;
+ offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->data_length,
+ offset, wr->iser_ib_op, &wr->data);
+ if (ret)
+ return ret;
- count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (unlikely(!count)) {
- pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
- return -EINVAL;
+ if (wr->data.dma_nents != 1 ||
+ se_cmd->prot_op != TARGET_PROT_NORMAL) {
+ spin_lock_irqsave(&isert_conn->conn_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+ struct fast_reg_descriptor, list);
+ list_del(&fr_desc->list);
+ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ wr->fr_desc = fr_desc;
}
- wr->sge = sg_start;
- wr->num_sge = sg_nents;
- pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, count, sg_start, sg_nents, data_left);
- memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
- ib_sge = &wr->s_ib_sge;
- wr->ib_sge = ib_sge;
+ ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+ ISERT_DATA_KEY_VALID, &data_sge);
+ if (ret)
+ goto unmap_cmd;
+
+ if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+ struct ib_sge prot_sge, sig_sge;
+
+ if (se_cmd->t_prot_sg) {
+ ret = isert_map_data_buf(isert_conn, isert_cmd,
+ se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents,
+ se_cmd->prot_length,
+ 0, wr->iser_ib_op, &wr->prot);
+ if (ret)
+ goto unmap_cmd;
+
+ ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+ ISERT_PROT_KEY_VALID, &prot_sge);
+ if (ret)
+ goto unmap_prot_cmd;
+ }
+
+ ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+ &data_sge, &prot_sge, &sig_sge);
+ if (ret)
+ goto unmap_prot_cmd;
+ fr_desc->ind |= ISERT_PROTECTED;
+ memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+ } else
+ memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
+ wr->ib_sge = &wr->s_ib_sge;
wr->send_wr_num = 1;
memset(&wr->s_send_wr, 0, sizeof(*send_wr));
wr->send_wr = &wr->s_send_wr;
-
wr->isert_cmd = isert_cmd;
- rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
send_wr = &isert_cmd->rdma_wr.s_send_wr;
- send_wr->sg_list = ib_sge;
+ send_wr->sg_list = &wr->s_ib_sge;
send_wr->num_sge = 1;
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
send_wr->opcode = IB_WR_RDMA_WRITE;
send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- send_wr->send_flags = 0;
- send_wr->next = &isert_cmd->tx_desc.send_wr;
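+		/*
+		 * With protection enabled the RDMA_WRITE must be signalled
+		 * so the signature status can be checked in the completion
+		 * handler before the response is queued.
+		 */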
+ send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+ 0 : IB_SEND_SIGNALED;
} else {
send_wr->opcode = IB_WR_RDMA_READ;
send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
send_wr->send_flags = IB_SEND_SIGNALED;
}
- data_len = min(data_left, rdma_write_max);
- wr->cur_rdma_length = data_len;
-
- /* if there is a single dma entry, dma mr is sufficient */
- if (count == 1) {
- ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
- ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
- ib_sge->lkey = isert_conn->conn_mr->lkey;
- wr->fr_desc = NULL;
- } else {
+ return 0;
+unmap_prot_cmd:
+ if (se_cmd->t_prot_sg)
+ isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+ if (fr_desc) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
+ list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
- wr->fr_desc = fr_desc;
-
- ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
- ib_sge, sg_nents, offset, data_len);
- if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- goto unmap_sg;
- }
}
+ isert_unmap_data_buf(isert_conn, &wr->data);
- return 0;
-
-unmap_sg:
- ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
- (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
return ret;
}
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
return rc;
}
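+	/*
+	 * For TARGET_PROT_NORMAL the response PDU is chained to the
+	 * RDMA_WRITE here; with protection it is sent from
+	 * isert_completion_rdma_write() after the signature check.
+	 */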
- /*
- * Build isert_conn->tx_desc for iSCSI response PDU and attach
- */
- isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
- &isert_cmd->tx_desc.iscsi_header);
- isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd,
- &isert_cmd->tx_desc.send_wr, true);
+ if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+ /*
+ * Build isert_conn->tx_desc for iSCSI response PDU and attach
+ */
+ isert_create_send_desc(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc);
+ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc.send_wr, true);
+ isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+ wr->send_wr_num += 1;
+ }
- atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
- pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
- isert_cmd);
+
+ if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+ pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+ "READ\n", isert_cmd);
+ else
+ pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+ isert_cmd);
return 1;
}
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_get_dataout = isert_get_dataout,
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
+ .iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5..4c072ae 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@ struct iser_tx_desc {
struct ib_send_wr send_wr;
} __packed;
+enum isert_indicator {
+ ISERT_PROTECTED = 1 << 0,
+ ISERT_DATA_KEY_VALID = 1 << 1,
+ ISERT_PROT_KEY_VALID = 1 << 2,
+ ISERT_SIG_KEY_VALID = 1 << 3,
+};
+
+struct pi_context {
+ struct ib_mr *prot_mr;
+ struct ib_fast_reg_page_list *prot_frpl;
+ struct ib_mr *sig_mr;
+};
+
struct fast_reg_descriptor {
- struct list_head list;
- struct ib_mr *data_mr;
- struct ib_fast_reg_page_list *data_frpl;
- bool valid;
+ struct list_head list;
+ struct ib_mr *data_mr;
+ struct ib_fast_reg_page_list *data_frpl;
+ u8 ind;
+ struct pi_context *pi_ctx;
+};
+
+struct isert_data_buf {
+ struct scatterlist *sg;
+ int nents;
+ u32 sg_off;
+ u32 len; /* cur_rdma_length */
+ u32 offset;
+ unsigned int dma_nents;
+ enum dma_data_direction dma_dir;
};
struct isert_rdma_wr {
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
enum iser_ib_op_code iser_ib_op;
struct ib_sge *ib_sge;
struct ib_sge s_ib_sge;
- int num_sge;
- struct scatterlist *sge;
int send_wr_num;
struct ib_send_wr *send_wr;
struct ib_send_wr s_send_wr;
- u32 cur_rdma_length;
+ struct isert_data_buf data;
+ struct isert_data_buf prot;
struct fast_reg_descriptor *fr_desc;
};
@@ -141,6 +164,7 @@ struct isert_cq_desc {
struct isert_device {
int use_fastreg;
+ bool pi_capable;
int cqs_used;
int refcount;
int cq_active_qps[ISERT_MAX_CQ];
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8..fe09f27 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx)
{
+ struct ib_device *dev = ch->sport->sdev->device;
struct se_cmd *cmd;
struct scatterlist *sg, *sg_orig;
int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
db = ioctx->rbufs;
tsize = cmd->data_length;
- dma_len = sg_dma_len(&sg[0]);
+ dma_len = ib_sg_dma_len(dev, &sg[0]);
riu = ioctx->rdma_ius;
/*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++j;
if (j < count) {
sg = sg_next(sg);
- dma_len = sg_dma_len(sg);
+ dma_len = ib_sg_dma_len(
+ dev, sg);
}
}
} else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
tsize = cmd->data_length;
riu = ioctx->rdma_ius;
sg = sg_orig;
- dma_len = sg_dma_len(&sg[0]);
- dma_addr = sg_dma_address(&sg[0]);
+ dma_len = ib_sg_dma_len(dev, &sg[0]);
+ dma_addr = ib_sg_dma_address(dev, &sg[0]);
/* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++j;
if (j < count) {
sg = sg_next(sg);
- dma_len = sg_dma_len(sg);
- dma_addr = sg_dma_address(sg);
+ dma_len = ib_sg_dma_len(
+ dev, sg);
+ dma_addr = ib_sg_dma_address(
+ dev, sg);
}
}
} else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto destroy_ib;
}
- ch->sess = transport_init_session();
+ ch->sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(ch->sess)) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
srpt_queue_response(cmd);
}
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
static int srpt_queue_status(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
.queue_data_in = srpt_queue_data_in,
.queue_status = srpt_queue_status,
.queue_tm_rsp = srpt_queue_tm_rsp,
+ .aborted_task = srpt_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 37dab0b..7d35287 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -228,12 +229,17 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
{
u32 stat, hwirq;
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct vic_device *vic = irq_desc_get_handler_data(desc);
+ chained_irq_enter(host_chip, desc);
+
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
hwirq = ffs(stat) - 1;
generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
}
+
+ chained_irq_exit(host_chip, desc);
}
/*
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4195a01..9a8e66a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1988,7 +1988,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
mddev->bitmap_info.file = NULL;
- restore_bitmap_write_access(f);
fput(f);
}
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ad5cc4..8fda38d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5181,32 +5181,6 @@ static int restart_array(struct mddev *mddev)
return 0;
}
-/* similar to deny_write_access, but accounts for our holding a reference
- * to the file ourselves */
-static int deny_bitmap_write_access(struct file * file)
-{
- struct inode *inode = file->f_mapping->host;
-
- spin_lock(&inode->i_lock);
- if (atomic_read(&inode->i_writecount) > 1) {
- spin_unlock(&inode->i_lock);
- return -ETXTBSY;
- }
- atomic_set(&inode->i_writecount, -1);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-
-void restore_bitmap_write_access(struct file *file)
-{
- struct inode *inode = file->f_mapping->host;
-
- spin_lock(&inode->i_lock);
- atomic_set(&inode->i_writecount, 1);
- spin_unlock(&inode->i_lock);
-}
-
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
@@ -5427,7 +5401,6 @@ static int do_md_stop(struct mddev * mddev, int mode,
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
- restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
}
@@ -5979,7 +5952,7 @@ abort_export:
static int set_bitmap_file(struct mddev *mddev, int fd)
{
- int err;
+ int err = 0;
if (mddev->pers) {
if (!mddev->pers->quiesce)
@@ -5991,6 +5964,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (fd >= 0) {
+ struct inode *inode;
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
mddev->bitmap_info.file = fget(fd);
@@ -6001,10 +5975,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
return -EBADF;
}
- err = deny_bitmap_write_access(mddev->bitmap_info.file);
- if (err) {
+ inode = mddev->bitmap_info.file->f_mapping->host;
+ if (!S_ISREG(inode->i_mode)) {
+ printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
+ err = -EBADF;
+ } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
+			printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
+ mdname(mddev));
+ err = -EBADF;
+ } else if (atomic_read(&inode->i_writecount) != 1) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
+ err = -EBUSY;
+ }
+ if (err) {
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
return err;
@@ -6027,10 +6012,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
- if (mddev->bitmap_info.file) {
- restore_bitmap_write_access(mddev->bitmap_info.file);
+ if (mddev->bitmap_info.file)
fput(mddev->bitmap_info.file);
- }
mddev->bitmap_info.file = NULL;
}
@@ -7182,11 +7165,14 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
}
+static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
+ if (md_unloading)
+		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
@@ -8672,6 +8658,7 @@ static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
+ int delay = 1;
blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
@@ -8680,7 +8667,19 @@ static __exit void md_exit(void)
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
+
+	/* We cannot unload the module while some process is
+ * waiting for us in select() or poll() - wake them up
+ */
+ md_unloading = 1;
+ while (waitqueue_active(&md_event_waiters)) {
+ /* not safe to leave yet */
+ wake_up(&md_event_waiters);
+ msleep(delay);
+ delay += delay;
+ }
remove_proc_entry("mdstat", NULL);
+
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 07bba96..a49d991 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
-extern void restore_bitmap_write_access(struct file *file);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a6ca1c..56e24c0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
struct pool_info *pi = data;
struct r1bio *r1_bio;
struct bio *bio;
+ int need_pages;
int i, j;
r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* RESYNC_PAGES for each bio.
*/
if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- j = pi->raid_disks;
+ need_pages = pi->raid_disks;
else
- j = 1;
- while(j--) {
+ need_pages = 1;
+ for (j = 0; j < need_pages; j++) {
bio = r1_bio->bios[j];
bio->bi_vcnt = RESYNC_PAGES;
if (bio_alloc_pages(bio, gfp_flags))
- goto out_free_bio;
+ goto out_free_pages;
}
/* If not user-requests, copy the page pointers to all bios */
if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
+out_free_pages:
+ while (--j >= 0) {
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, r1_bio->bios[j], i)
+ __free_page(bv->bv_page);
+ }
+
out_free_bio:
while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 16f5c21..25247a8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
init_stripe(sh, sector, previous);
atomic_inc(&sh->count);
}
- } else {
+ } else if (!atomic_inc_not_zero(&sh->count)) {
spin_lock(&conf->device_lock);
- if (atomic_read(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru)
- && !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
- );
- } else {
+ if (!atomic_read(&sh->count)) {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&sh->lru) &&
@@ -4552,6 +4547,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
+ DEFINE_WAIT(w);
+ bool do_prepare;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -4575,15 +4572,18 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
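+	/*
+	 * Register on wait_for_overlap once up front; re-arm it (via
+	 * do_prepare) only after a schedule() inside the loop, and call
+	 * finish_wait() once after the whole bio has been processed.
+	 */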
+ prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
- DEFINE_WAIT(w);
int previous;
int seq;
+ do_prepare = false;
retry:
seq = read_seqcount_begin(&conf->gen_lock);
previous = 0;
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+ if (do_prepare)
+ prepare_to_wait(&conf->wait_for_overlap, &w,
+ TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
/* spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
@@ -4604,6 +4604,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
: logical_sector >= conf->reshape_safe) {
spin_unlock_irq(&conf->device_lock);
schedule();
+ do_prepare = true;
goto retry;
}
}
@@ -4640,6 +4641,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if (must_retry) {
release_stripe(sh);
schedule();
+ do_prepare = true;
goto retry;
}
}
@@ -4663,8 +4665,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi)
+ logical_sector < mddev->suspend_hi) {
schedule();
+ do_prepare = true;
+ }
goto retry;
}
@@ -4677,9 +4681,9 @@ static void make_request(struct mddev *mddev, struct bio * bi)
md_wakeup_thread(mddev->thread);
release_stripe(sh);
schedule();
+ do_prepare = true;
goto retry;
}
- finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((bi->bi_rw & REQ_SYNC) &&
@@ -4689,10 +4693,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
} else {
/* cannot get stripe for read-ahead, just give-up */
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- finish_wait(&conf->wait_for_overlap, &w);
break;
}
}
+ finish_wait(&conf->wait_for_overlap, &w);
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
diff --git a/drivers/media/dvb-frontends/drx39xyj/Kconfig b/drivers/media/dvb-frontends/drx39xyj/Kconfig
index 15628eb..6c2ccb6 100644
--- a/drivers/media/dvb-frontends/drx39xyj/Kconfig
+++ b/drivers/media/dvb-frontends/drx39xyj/Kconfig
@@ -1,7 +1,7 @@
config DVB_DRX39XYJ
tristate "Micronas DRX-J demodulator"
depends on DVB_CORE && I2C
- default m if DVB_FE_CUSTOMISE
+ default m if !MEDIA_SUBDRV_AUTOSELECT
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 1d2c473..92c891a 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1176,6 +1176,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
},
.i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
.init = lgdt3305_init,
+ .sleep = lgdt3305_sleep,
.set_frontend = lgdt3304_set_parameters,
.get_frontend = lgdt3305_get_frontend,
.get_tune_settings = lgdt3305_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 32cffca..d63bc9c 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -297,7 +297,7 @@ struct inittab {
u8 val;
};
-struct inittab m88rs2000_setup[] = {
+static struct inittab m88rs2000_setup[] = {
{DEMOD_WRITE, 0x9a, 0x30},
{DEMOD_WRITE, 0x00, 0x01},
{WRITE_DELAY, 0x19, 0x00},
@@ -315,7 +315,7 @@ struct inittab m88rs2000_setup[] = {
{0xff, 0xaa, 0xff}
};
-struct inittab m88rs2000_shutdown[] = {
+static struct inittab m88rs2000_shutdown[] = {
{DEMOD_WRITE, 0x9a, 0x30},
{DEMOD_WRITE, 0xb0, 0x00},
{DEMOD_WRITE, 0xf1, 0x89},
@@ -325,7 +325,7 @@ struct inittab m88rs2000_shutdown[] = {
{0xff, 0xaa, 0xff}
};
-struct inittab fe_reset[] = {
+static struct inittab fe_reset[] = {
{DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0x20, 0x81},
{DEMOD_WRITE, 0x21, 0x80},
@@ -363,7 +363,7 @@ struct inittab fe_reset[] = {
{0xff, 0xaa, 0xff}
};
-struct inittab fe_trigger[] = {
+static struct inittab fe_trigger[] = {
{DEMOD_WRITE, 0x97, 0x04},
{DEMOD_WRITE, 0x99, 0x77},
{DEMOD_WRITE, 0x9b, 0x64},
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 7a77a5b..5c42188 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -49,8 +49,8 @@
#define VPE_MODULE_NAME "vpe"
/* minimum and maximum frame sizes */
-#define MIN_W 128
-#define MIN_H 128
+#define MIN_W 32
+#define MIN_H 32
#define MAX_W 1920
#define MAX_H 1080
@@ -887,6 +887,9 @@ static int job_ready(void *priv)
if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
return 0;
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+ return 0;
+
return 1;
}
@@ -1277,18 +1280,17 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
s_buf = &s_vb->v4l2_buf;
d_buf = &d_vb->v4l2_buf;
+ d_buf->flags = s_buf->flags;
+
d_buf->timestamp = s_buf->timestamp;
- d_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- d_buf->flags |= s_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
- d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+ if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
d_buf->timecode = s_buf->timecode;
- }
+
d_buf->sequence = ctx->sequence;
- d_buf->field = ctx->field;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_DATA_INTERLACED) {
+ d_buf->field = ctx->field;
if (ctx->field == V4L2_FIELD_BOTTOM) {
ctx->sequence++;
ctx->field = V4L2_FIELD_TOP;
@@ -1297,6 +1299,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->field = V4L2_FIELD_BOTTOM;
}
} else {
+ d_buf->field = V4L2_FIELD_NONE;
ctx->sequence++;
}
@@ -1335,8 +1338,9 @@ static int vpe_querycap(struct file *file, void *priv,
{
strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
- strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ VPE_MODULE_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1476,6 +1480,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
}
}
+ memset(pix->reserved, 0, sizeof(pix->reserved));
for (i = 0; i < pix->num_planes; i++) {
plane_fmt = &pix->plane_fmt[i];
depth = fmt->vpdma_fmt[i]->depth;
@@ -1487,6 +1492,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
plane_fmt->sizeimage =
(pix->height * pix->width * depth) >> 3;
+
+ memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
}
return 0;
@@ -1717,6 +1724,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
q_data = get_q_data(ctx, vb->vb2_queue->type);
num_planes = q_data->fmt->coplanar ? 2 : 1;
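+	/*
+	 * Source (output) buffers must carry a valid field: force
+	 * V4L2_FIELD_NONE for progressive content and require TOP or
+	 * BOTTOM when the queue is configured interlaced.
+	 */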
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!(q_data->flags & Q_DATA_INTERLACED)) {
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ } else {
+ if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
+ vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+ return -EINVAL;
+ }
+ }
+
for (i = 0; i < num_planes; i++) {
if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
vpe_err(ctx->dev,
@@ -1866,9 +1883,11 @@ static int vpe_open(struct file *file)
s_q_data->fmt = &vpe_formats[2];
s_q_data->width = 1920;
s_q_data->height = 1080;
- s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
+ s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
+ s_q_data->height);
+ s_q_data->colorspace = V4L2_COLORSPACE_REC709;
s_q_data->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
@@ -2002,7 +2021,7 @@ static struct video_device vpe_videodev = {
.fops = &vpe_fops,
.ioctl_ops = &vpe_ioctl_ops,
.minor = -1,
- .release = video_device_release,
+ .release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
};
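
The vpe_open() hunk above now derives the default sizeimage from bytesperline rather than straight from width, height and depth. A minimal sketch of that arithmetic, assuming an 8-bit luma plane (the real depth comes from the selected vpdma_fmt entry, so the numbers here are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080, depth = 8;	/* assumed values */
	unsigned int bytesperline = (width * depth) >> 3;	/* 1920 bytes per row */
	unsigned int sizeimage = bytesperline * height;		/* 2073600 bytes */

	printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);
	return 0;
}

Deriving sizeimage from bytesperline keeps the two values consistent if the line stride is ever padded for alignment.
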
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 579a52b..0127dd2 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -504,6 +504,18 @@ unlock:
return ret;
}
+static int img_ir_set_normal_filter(struct rc_dev *dev,
+ struct rc_scancode_filter *sc_filter)
+{
+ return img_ir_set_filter(dev, RC_FILTER_NORMAL, sc_filter);
+}
+
+static int img_ir_set_wakeup_filter(struct rc_dev *dev,
+ struct rc_scancode_filter *sc_filter)
+{
+ return img_ir_set_filter(dev, RC_FILTER_WAKEUP, sc_filter);
+}
+
/**
* img_ir_set_decoder() - Set the current decoder.
* @priv: IR private data.
@@ -986,7 +998,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
rdev->map_name = RC_MAP_EMPTY;
rc_set_allowed_protocols(rdev, img_ir_allowed_protos(priv));
rdev->input_name = "IMG Infrared Decoder";
- rdev->s_filter = img_ir_set_filter;
+ rdev->s_filter = img_ir_set_normal_filter;
+ rdev->s_wakeup_filter = img_ir_set_wakeup_filter;
/* Register hardware decoder */
error = rc_register_device(rdev);
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index e7a731bc..751d9d9 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -5,6 +5,7 @@
*/
#include "img-ir-hw.h"
+#include <linux/bitrev.h>
/* Convert NEC data to a scancode */
static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
@@ -22,11 +23,11 @@ static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
data_inv = (raw >> 24) & 0xff;
if ((data_inv ^ data) != 0xff) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
- /* scan encoding: aaAAddDD */
- *scancode = addr_inv << 24 |
- addr << 16 |
- data_inv << 8 |
- data;
+ /* scan encoding: as transmitted, MSBit = first received bit */
+ *scancode = bitrev8(addr) << 24 |
+ bitrev8(addr_inv) << 16 |
+ bitrev8(data) << 8 |
+ bitrev8(data_inv);
} else if ((addr_inv ^ addr) != 0xff) {
/* Extended NEC */
/* scan encoding: AAaaDD */
@@ -54,13 +55,15 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
if ((in->data | in->mask) & 0xff000000) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
- /* scan encoding: aaAAddDD */
- addr_inv = (in->data >> 24) & 0xff;
- addr_inv_m = (in->mask >> 24) & 0xff;
- addr = (in->data >> 16) & 0xff;
- addr_m = (in->mask >> 16) & 0xff;
- data_inv = (in->data >> 8) & 0xff;
- data_inv_m = (in->mask >> 8) & 0xff;
+ /* scan encoding: as transmitted, MSBit = first received bit */
+ addr = bitrev8(in->data >> 24);
+ addr_m = bitrev8(in->mask >> 24);
+ addr_inv = bitrev8(in->data >> 16);
+ addr_inv_m = bitrev8(in->mask >> 16);
+ data = bitrev8(in->data >> 8);
+ data_m = bitrev8(in->mask >> 8);
+ data_inv = bitrev8(in->data >> 0);
+ data_inv_m = bitrev8(in->mask >> 0);
} else if ((in->data | in->mask) & 0x00ff0000) {
/* Extended NEC */
/* scan encoding AAaaDD */
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 9de1791..35c42e5 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -172,10 +172,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (send_32bits) {
/* NEC transport, but modified protocol, used by at
* least Apple and TiVo remotes */
- scancode = not_address << 24 |
- address << 16 |
- not_command << 8 |
- command;
+ scancode = data->bits;
IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
} else if ((address ^ not_address) != 0xff) {
/* Extended NEC */
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 5cc1b45..454e062 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -15,62 +15,62 @@
* Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
* which also ships with a TiVo-branded IR transceiver, supported by the mceusb
* driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0x3085, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
* NEC extended checksums do pass, so the table presently has the intended
* values and the checksum-passed versions for those keys.
*/
static struct rc_map_table tivo[] = {
- { 0x3085f009, KEY_MEDIA }, /* TiVo Button */
- { 0x3085e010, KEY_POWER2 }, /* TV Power */
- { 0x3085e011, KEY_TV }, /* Live TV/Swap */
- { 0x3085c034, KEY_VIDEO_NEXT }, /* TV Input */
- { 0x3085e013, KEY_INFO },
- { 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
+ { 0xa10c900f, KEY_MEDIA }, /* TiVo Button */
+ { 0xa10c0807, KEY_POWER2 }, /* TV Power */
+ { 0xa10c8807, KEY_TV }, /* Live TV/Swap */
+ { 0xa10c2c03, KEY_VIDEO_NEXT }, /* TV Input */
+ { 0xa10cc807, KEY_INFO },
+ { 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
{ 0x0085305f, KEY_CYCLEWINDOWS },
- { 0x3085c036, KEY_EPG }, /* Guide */
+ { 0xa10c6c03, KEY_EPG }, /* Guide */
- { 0x3085e014, KEY_UP },
- { 0x3085e016, KEY_DOWN },
- { 0x3085e017, KEY_LEFT },
- { 0x3085e015, KEY_RIGHT },
+ { 0xa10c2807, KEY_UP },
+ { 0xa10c6807, KEY_DOWN },
+ { 0xa10ce807, KEY_LEFT },
+ { 0xa10ca807, KEY_RIGHT },
- { 0x3085e018, KEY_SCROLLDOWN }, /* Red Thumbs Down */
- { 0x3085e019, KEY_SELECT },
- { 0x3085e01a, KEY_SCROLLUP }, /* Green Thumbs Up */
+ { 0xa10c1807, KEY_SCROLLDOWN }, /* Red Thumbs Down */
+ { 0xa10c9807, KEY_SELECT },
+ { 0xa10c5807, KEY_SCROLLUP }, /* Green Thumbs Up */
- { 0x3085e01c, KEY_VOLUMEUP },
- { 0x3085e01d, KEY_VOLUMEDOWN },
- { 0x3085e01b, KEY_MUTE },
- { 0x3085d020, KEY_RECORD },
- { 0x3085e01e, KEY_CHANNELUP },
- { 0x3085e01f, KEY_CHANNELDOWN },
+ { 0xa10c3807, KEY_VOLUMEUP },
+ { 0xa10cb807, KEY_VOLUMEDOWN },
+ { 0xa10cd807, KEY_MUTE },
+ { 0xa10c040b, KEY_RECORD },
+ { 0xa10c7807, KEY_CHANNELUP },
+ { 0xa10cf807, KEY_CHANNELDOWN },
{ 0x0085301f, KEY_CHANNELDOWN },
- { 0x3085d021, KEY_PLAY },
- { 0x3085d023, KEY_PAUSE },
- { 0x3085d025, KEY_SLOW },
- { 0x3085d022, KEY_REWIND },
- { 0x3085d024, KEY_FASTFORWARD },
- { 0x3085d026, KEY_PREVIOUS },
- { 0x3085d027, KEY_NEXT }, /* ->| */
+ { 0xa10c840b, KEY_PLAY },
+ { 0xa10cc40b, KEY_PAUSE },
+ { 0xa10ca40b, KEY_SLOW },
+ { 0xa10c440b, KEY_REWIND },
+ { 0xa10c240b, KEY_FASTFORWARD },
+ { 0xa10c640b, KEY_PREVIOUS },
+ { 0xa10ce40b, KEY_NEXT }, /* ->| */
- { 0x3085b044, KEY_ZOOM }, /* Aspect */
- { 0x3085b048, KEY_STOP },
- { 0x3085b04a, KEY_DVD }, /* DVD Menu */
+ { 0xa10c220d, KEY_ZOOM }, /* Aspect */
+ { 0xa10c120d, KEY_STOP },
+ { 0xa10c520d, KEY_DVD }, /* DVD Menu */
- { 0x3085d028, KEY_NUMERIC_1 },
- { 0x3085d029, KEY_NUMERIC_2 },
- { 0x3085d02a, KEY_NUMERIC_3 },
- { 0x3085d02b, KEY_NUMERIC_4 },
- { 0x3085d02c, KEY_NUMERIC_5 },
- { 0x3085d02d, KEY_NUMERIC_6 },
- { 0x3085d02e, KEY_NUMERIC_7 },
- { 0x3085d02f, KEY_NUMERIC_8 },
+ { 0xa10c140b, KEY_NUMERIC_1 },
+ { 0xa10c940b, KEY_NUMERIC_2 },
+ { 0xa10c540b, KEY_NUMERIC_3 },
+ { 0xa10cd40b, KEY_NUMERIC_4 },
+ { 0xa10c340b, KEY_NUMERIC_5 },
+ { 0xa10cb40b, KEY_NUMERIC_6 },
+ { 0xa10c740b, KEY_NUMERIC_7 },
+ { 0xa10cf40b, KEY_NUMERIC_8 },
{ 0x0085302f, KEY_NUMERIC_8 },
- { 0x3085c030, KEY_NUMERIC_9 },
- { 0x3085c031, KEY_NUMERIC_0 },
- { 0x3085c033, KEY_ENTER },
- { 0x3085c032, KEY_CLEAR },
+ { 0xa10c0c03, KEY_NUMERIC_9 },
+ { 0xa10c8c03, KEY_NUMERIC_0 },
+ { 0xa10ccc03, KEY_ENTER },
+ { 0xa10c4c03, KEY_CLEAR },
};
static struct rc_map_list tivo_map = {
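
The img-ir-nec.c and ir-nec-decoder.c hunks earlier switch 32-bit NEC scancodes to an "as transmitted" bit order, which is why every 0x3085xxxx entry in the TiVo table above becomes 0xa10cxxxx. A small userspace sketch of the conversion for the KEY_MEDIA entry; bitrev8() is a local stand-in for the helper from <linux/bitrev.h>:

#include <stdio.h>
#include <stdint.h>

static uint8_t bitrev8(uint8_t x)		/* stand-in for the kernel helper */
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= ((x >> i) & 1) << (7 - i);
	return r;
}

int main(void)
{
	/* Old encoding of KEY_MEDIA was aaAAddDD = 0x3085f009. */
	uint8_t addr_inv = 0x30, addr = 0x85, data_inv = 0xf0, data = 0x09;
	uint32_t scancode = (uint32_t)bitrev8(addr) << 24 |
			    (uint32_t)bitrev8(addr_inv) << 16 |
			    bitrev8(data) << 8 |
			    bitrev8(data_inv);

	printf("0x%08x\n", scancode);		/* prints 0xa10c900f */
	return 0;
}
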
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 99697aa..970b93d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -633,19 +633,13 @@ EXPORT_SYMBOL_GPL(rc_repeat);
static void ir_do_keydown(struct rc_dev *dev, int scancode,
u32 keycode, u8 toggle)
{
- struct rc_scancode_filter *filter;
- bool new_event = !dev->keypressed ||
- dev->last_scancode != scancode ||
- dev->last_toggle != toggle;
+ bool new_event = (!dev->keypressed ||
+ dev->last_scancode != scancode ||
+ dev->last_toggle != toggle);
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
- /* Generic scancode filtering */
- filter = &dev->scancode_filters[RC_FILTER_NORMAL];
- if (filter->mask && ((scancode ^ filter->data) & filter->mask))
- return;
-
input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
if (new_event && keycode != KEY_RESERVED) {
@@ -923,6 +917,7 @@ static ssize_t store_protocols(struct device *device,
int rc, i, count = 0;
ssize_t ret;
int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
+ int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
struct rc_scancode_filter local_filter, *filter;
/* Device is being removed */
@@ -1007,24 +1002,23 @@ static ssize_t store_protocols(struct device *device,
* Fall back to clearing the filter.
*/
filter = &dev->scancode_filters[fattr->type];
- if (old_type != type && filter->mask) {
+ set_filter = (fattr->type == RC_FILTER_NORMAL)
+ ? dev->s_filter : dev->s_wakeup_filter;
+
+ if (set_filter && old_type != type && filter->mask) {
local_filter = *filter;
if (!type) {
/* no protocol => clear filter */
ret = -1;
- } else if (!dev->s_filter) {
- /* generic filtering => accept any filter */
- ret = 0;
} else {
/* hardware filtering => try setting, otherwise clear */
- ret = dev->s_filter(dev, fattr->type, &local_filter);
+ ret = set_filter(dev, &local_filter);
}
if (ret < 0) {
/* clear the filter */
local_filter.data = 0;
local_filter.mask = 0;
- if (dev->s_filter)
- dev->s_filter(dev, fattr->type, &local_filter);
+ set_filter(dev, &local_filter);
}
/* commit the new filter */
@@ -1068,7 +1062,10 @@ static ssize_t show_filter(struct device *device,
return -EINVAL;
mutex_lock(&dev->lock);
- if (fattr->mask)
+ if ((fattr->type == RC_FILTER_NORMAL && !dev->s_filter) ||
+ (fattr->type == RC_FILTER_WAKEUP && !dev->s_wakeup_filter))
+ val = 0;
+ else if (fattr->mask)
val = dev->scancode_filters[fattr->type].mask;
else
val = dev->scancode_filters[fattr->type].data;
@@ -1106,6 +1103,7 @@ static ssize_t store_filter(struct device *device,
struct rc_scancode_filter local_filter, *filter;
int ret;
unsigned long val;
+ int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
/* Device is being removed */
if (!dev)
@@ -1115,9 +1113,11 @@ static ssize_t store_filter(struct device *device,
if (ret < 0)
return ret;
- /* Scancode filter not supported (but still accept 0) */
- if (!dev->s_filter && fattr->type != RC_FILTER_NORMAL)
- return val ? -EINVAL : count;
+ /* Can the scancode filter be set? */
+ set_filter = (fattr->type == RC_FILTER_NORMAL) ? dev->s_filter :
+ dev->s_wakeup_filter;
+ if (!set_filter)
+ return -EINVAL;
mutex_lock(&dev->lock);
@@ -1128,16 +1128,16 @@ static ssize_t store_filter(struct device *device,
local_filter.mask = val;
else
local_filter.data = val;
+
if (!dev->enabled_protocols[fattr->type] && local_filter.mask) {
/* refuse to set a filter unless a protocol is enabled */
ret = -EINVAL;
goto unlock;
}
- if (dev->s_filter) {
- ret = dev->s_filter(dev, fattr->type, &local_filter);
- if (ret < 0)
- goto unlock;
- }
+
+ ret = set_filter(dev, &local_filter);
+ if (ret < 0)
+ goto unlock;
/* Success, commit the new filter */
*filter = local_filter;
@@ -1189,27 +1189,45 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
-static struct attribute *rc_dev_attrs[] = {
+static struct attribute *rc_dev_protocol_attrs[] = {
&dev_attr_protocols.attr.attr,
+ NULL,
+};
+
+static struct attribute_group rc_dev_protocol_attr_grp = {
+ .attrs = rc_dev_protocol_attrs,
+};
+
+static struct attribute *rc_dev_wakeup_protocol_attrs[] = {
&dev_attr_wakeup_protocols.attr.attr,
+ NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_protocol_attr_grp = {
+ .attrs = rc_dev_wakeup_protocol_attrs,
+};
+
+static struct attribute *rc_dev_filter_attrs[] = {
&dev_attr_filter.attr.attr,
&dev_attr_filter_mask.attr.attr,
- &dev_attr_wakeup_filter.attr.attr,
- &dev_attr_wakeup_filter_mask.attr.attr,
NULL,
};
-static struct attribute_group rc_dev_attr_grp = {
- .attrs = rc_dev_attrs,
+static struct attribute_group rc_dev_filter_attr_grp = {
+ .attrs = rc_dev_filter_attrs,
};
-static const struct attribute_group *rc_dev_attr_groups[] = {
- &rc_dev_attr_grp,
- NULL
+static struct attribute *rc_dev_wakeup_filter_attrs[] = {
+ &dev_attr_wakeup_filter.attr.attr,
+ &dev_attr_wakeup_filter_mask.attr.attr,
+ NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_filter_attr_grp = {
+ .attrs = rc_dev_wakeup_filter_attrs,
};
static struct device_type rc_dev_type = {
- .groups = rc_dev_attr_groups,
.release = rc_dev_release,
.uevent = rc_dev_uevent,
};
@@ -1266,7 +1284,7 @@ int rc_register_device(struct rc_dev *dev)
static bool raw_init = false; /* raw decoders loaded? */
struct rc_map *rc_map;
const char *path;
- int rc, devno;
+ int rc, devno, attr = 0;
if (!dev || !dev->map_name)
return -EINVAL;
@@ -1294,6 +1312,16 @@ int rc_register_device(struct rc_dev *dev)
return -ENOMEM;
} while (test_and_set_bit(devno, ir_core_dev_number));
+ dev->dev.groups = dev->sysfs_groups;
+ dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+ if (dev->s_filter)
+ dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
+ if (dev->s_wakeup_filter)
+ dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
+ if (dev->change_wakeup_protocol)
+ dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
+ dev->sysfs_groups[attr++] = NULL;
+
/*
* Take the lock here, as the device sysfs node will appear
* when device_add() is called, which may trigger an ir-keytable udev
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 319adc4..96ccfeb 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1468,7 +1468,8 @@ static int r820t_imr_prepare(struct r820t_priv *priv)
static int r820t_multi_read(struct r820t_priv *priv)
{
int rc, i;
- u8 data[2], min = 0, max = 255, sum = 0;
+ u16 sum = 0;
+ u8 data[2], min = 255, max = 0;
usleep_range(5000, 6000);
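
The r820t_multi_read() change above fixes two accumulation bugs at once: a running minimum has to start at the largest possible value (and the maximum at the smallest), and a sum of u8 readings needs a wider type so it cannot wrap at 255. A minimal userspace sketch of the corrected pattern, using made-up sample values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t samples[4] = { 200, 180, 220, 190 };	/* hypothetical reads */
	uint16_t sum = 0;				/* a u8 sum would wrap here */
	uint8_t min = 255, max = 0;			/* not min = 0, max = 255 */

	for (int i = 0; i < 4; i++) {
		sum += samples[i];
		if (samples[i] < min)
			min = samples[i];
		if (samples[i] > max)
			max = samples[i];
	}
	printf("min=%u max=%u sum=%u\n", min, max, sum);	/* 180 220 790 */
	return 0;
}
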
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 76a8165..6ef93ee 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1107,6 +1107,7 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
offset += 200000;
}
#endif
+ break;
default:
tuner_err("Unsupported tuner type %d.\n", new_type);
break;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c83c16c..61d196e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1503,8 +1503,6 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
&rtl2832u_props, "Astrometa DVB-T2", NULL) },
- { DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
- &rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/gspca/jpeg.h b/drivers/media/usb/gspca/jpeg.h
index ab54910..0aa2b67 100644
--- a/drivers/media/usb/gspca/jpeg.h
+++ b/drivers/media/usb/gspca/jpeg.h
@@ -154,7 +154,9 @@ static void jpeg_set_qual(u8 *jpeg_hdr,
{
int i, sc;
- if (quality < 50)
+ if (quality <= 0)
+ sc = 5000;
+ else if (quality < 50)
sc = 5000 / quality;
else
sc = 200 - quality * 2;
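
The jpeg_set_qual() change above guards the 5000 / quality division against a zero or negative quality value. The mapping itself is the usual IJG-style scale factor; a short runnable sketch with a few sample values (the function name is illustrative):

#include <stdio.h>

static int jpeg_scale(int quality)		/* sketch of the guarded mapping */
{
	if (quality <= 0)
		return 5000;			/* clamp instead of dividing by zero */
	if (quality < 50)
		return 5000 / quality;
	return 200 - quality * 2;
}

int main(void)
{
	int q[] = { 0, 1, 25, 50, 75, 100 };

	for (int i = 0; i < 6; i++)		/* 5000 5000 200 100 50 0 */
		printf("quality=%3d scale=%d\n", q[i], jpeg_scale(q[i]));
	return 0;
}
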
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index c46c8be..2dd308f 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -108,7 +108,7 @@ int stk1160_ac97_register(struct stk1160 *dev)
"stk1160-mixer");
snprintf(card->longname, sizeof(card->longname),
"stk1160 ac97 codec mixer control");
- strncpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
+ strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
if (rc)
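
The stk1160 change above swaps strncpy() for strlcpy() when copying the driver name: when the source string is as long as or longer than the destination, strncpy() can leave the buffer without a terminating NUL, while strlcpy() always terminates and truncates. A userspace sketch (with a local strlcpy() stand-in, since glibc does not provide one):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';				/* always NUL-terminated */
	}
	return len;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "stk1160-driver", sizeof(a));	/* no NUL left in a */
	my_strlcpy(b, "stk1160-driver", sizeof(b));	/* b == "stk1160" */
	printf("%.8s / %s\n", a, b);
	return 0;
}
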
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d9f8546..69aff72 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4492,6 +4492,7 @@ static int __init bonding_init(void)
out:
return res;
err:
+ bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 30104b6..c56ac9e 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -560,9 +560,7 @@ static struct net_device *apne_dev;
static int __init apne_module_init(void)
{
apne_dev = apne_probe(-1);
- if (IS_ERR(apne_dev))
- return PTR_ERR(apne_dev);
- return 0;
+ return PTR_ERR_OR_ZERO(apne_dev);
}
static void __exit apne_module_exit(void)
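
The apne change above folds the IS_ERR()/PTR_ERR() pair into PTR_ERR_OR_ZERO(). A sketch of roughly what that helper from <linux/err.h> boils down to:

/* Roughly equivalent open-coded form of PTR_ERR_OR_ZERO(ptr): */
static inline int ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}
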
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index fcaeeb8..2846067 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -268,15 +268,6 @@ static unsigned int emac_setup(struct net_device *ndev)
writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
db->membase + EMAC_TX_MODE_REG);
- /* set up RX */
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
-
- writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
- EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
- EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
- EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
- db->membase + EMAC_RX_CTL_REG);
-
/* set MAC */
/* set MAC CTL0 */
reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
@@ -309,6 +300,26 @@ static unsigned int emac_setup(struct net_device *ndev)
return 0;
}
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+ struct emac_board_info *db = netdev_priv(ndev);
+ unsigned int reg_val;
+
+ /* set up RX */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+
+ if (ndev->flags & IFF_PROMISC)
+ reg_val |= EMAC_RX_CTL_PASS_ALL_EN;
+ else
+ reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN;
+
+ writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
+ EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
+ EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
+ EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
+ db->membase + EMAC_RX_CTL_REG);
+}
+
static unsigned int emac_powerup(struct net_device *ndev)
{
struct emac_board_info *db = netdev_priv(ndev);
@@ -782,6 +793,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_stop = emac_stop,
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
+ .ndo_set_rx_mode = emac_set_rx_mode,
.ndo_do_ioctl = emac_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18..0ab8370 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+#ifdef CONFIG_PM_SLEEP
static int
bnx2_suspend(struct device *device)
{
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)
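
The bnx2 hunks above move the #ifdef CONFIG_PM_SLEEP so that it also covers bnx2_suspend()/bnx2_resume(), not just the dev_pm_ops, which avoids "defined but not used" warnings in !CONFIG_PM_SLEEP builds. The resulting shape, sketched with the function bodies elided:

#ifdef CONFIG_PM_SLEEP
static int bnx2_suspend(struct device *device) { /* ... */ return 0; }
static int bnx2_resume(struct device *device)  { /* ... */ return 0; }

static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)
#endif
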
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7..7e49c43 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
config NET_CADENCE
bool "Cadence devices"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on HAS_DMA
+ depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAS_DMA
+ depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402..8a96572 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
req->l2t_idx = htons(e->idx);
req->vlan = htons(e->vlan);
- if (e->neigh)
+ if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
if (e) {
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
e->state = L2T_STATE_RESOLVING;
+ if (neigh->dev->flags & IFF_LOOPBACK)
+ memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
memcpy(e->addr, addr, addr_len);
e->ifindex = ifidx;
e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65..bba6768 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
- FW_MAX_SIZE = 512 * 1024,
+ FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa25..97db5a7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
+#define BE_FLAGS_SETUP_DONE (1 << 13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47..a186454 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
bool dummy_wrb;
int i, pending_txqs;
- /* Wait for a max of 200ms for all the tx-completions to arrive. */
+ /* Stop polling for compls when HW has been silent for 10ms */
do {
pending_txqs = adapter->num_tx_qs;
for_all_tx_queues(adapter, txo, i) {
+ cmpl = 0;
+ num_wrbs = 0;
txq = &txo->q;
while ((txcp = be_tx_compl_get(&txo->cq))) {
end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
if (cmpl) {
be_cq_notify(adapter, txo->cq.id, false, cmpl);
atomic_sub(num_wrbs, &txq->used);
- cmpl = 0;
- num_wrbs = 0;
+ timeo = 0;
}
if (atomic_read(&txq->used) == 0)
pending_txqs--;
}
- if (pending_txqs == 0 || ++timeo > 200)
+ if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
break;
mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *eqo;
int i;
+ /* This protection is needed as be_close() may be called even when the
+ * adapter is in cleared state (after eeh perm failure)
+ */
+ if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+ return 0;
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
be_clear_queues(adapter);
be_msix_disable(adapter);
+ adapter->flags &= ~BE_FLAGS_SETUP_DONE;
return 0;
}
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
adapter->phy.fc_autoneg = 1;
be_schedule_worker(adapter);
+ adapter->flags |= BE_FLAGS_SETUP_DONE;
return 0;
err:
be_clear(adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 2879b96..c1d3fdb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -115,8 +115,6 @@ static DEFINE_SPINLOCK(e1000_phy_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- e_dbg("e1000_set_phy_type");
-
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -159,8 +157,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- e_dbg("e1000_phy_init_script");
-
if (hw->phy_init_script) {
msleep(20);
@@ -253,8 +249,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- e_dbg("e1000_set_mac_type");
-
switch (hw->device_id) {
case E1000_DEV_ID_82542:
switch (hw->revision_id) {
@@ -365,8 +359,6 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- e_dbg("e1000_set_media_type");
-
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
hw->tbi_compatibility_en = false;
@@ -415,8 +407,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- e_dbg("e1000_reset_hw");
-
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
e_dbg("Disabling MWI on 82542 rev 2.0\n");
@@ -566,8 +556,6 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- e_dbg("e1000_init_hw");
-
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
@@ -683,8 +671,6 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- e_dbg("e1000_adjust_serdes_amplitude");
-
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -730,8 +716,6 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- e_dbg("e1000_setup_link");
-
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
@@ -848,8 +832,6 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- e_dbg("e1000_setup_fiber_serdes_link");
-
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
* cleared when there is a signal. This applies to fiber media only.
@@ -1051,8 +1033,6 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_preconfig");
-
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to
* what the PHY speed and duplex configuration is. In addition, we need
@@ -1112,8 +1092,6 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_igp_setup");
-
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1254,8 +1232,6 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_mgp_setup");
-
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1362,8 +1338,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_autoneg");
-
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
*/
@@ -1432,7 +1406,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- e_dbg("e1000_copper_link_postconfig");
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
e1000_config_collision_dist(hw);
@@ -1473,8 +1446,6 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- e_dbg("e1000_setup_copper_link");
-
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
if (ret_val)
@@ -1554,8 +1525,6 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- e_dbg("e1000_phy_setup_autoneg");
-
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
@@ -1707,8 +1676,6 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- e_dbg("e1000_phy_force_speed_duplex");
-
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
@@ -1939,8 +1906,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- e_dbg("e1000_config_collision_dist");
-
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
else
@@ -1970,8 +1935,6 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_config_mac_to_phy");
-
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.
*/
@@ -2049,8 +2012,6 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- e_dbg("e1000_force_mac_fc");
-
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -2120,8 +2081,6 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- e_dbg("e1000_config_fc_after_link_up");
-
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
@@ -2337,8 +2296,6 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_check_for_serdes_link_generic");
-
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
@@ -2449,8 +2406,6 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_for_link");
-
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2632,8 +2587,6 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_get_speed_and_duplex");
-
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
@@ -2699,7 +2652,6 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- e_dbg("e1000_wait_autoneg");
e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -2866,8 +2818,6 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
u32 ret_val;
unsigned long flags;
- e_dbg("e1000_read_phy_reg");
-
spin_lock_irqsave(&e1000_phy_lock, flags);
if ((hw->phy_type == e1000_phy_igp) &&
@@ -2894,8 +2844,6 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
- e_dbg("e1000_read_phy_reg_ex");
-
if (reg_addr > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
@@ -3008,8 +2956,6 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
u32 ret_val;
unsigned long flags;
- e_dbg("e1000_write_phy_reg");
-
spin_lock_irqsave(&e1000_phy_lock, flags);
if ((hw->phy_type == e1000_phy_igp) &&
@@ -3036,8 +2982,6 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
- e_dbg("e1000_write_phy_reg_ex");
-
if (reg_addr > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
@@ -3129,8 +3073,6 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 ctrl, ctrl_ext;
u32 led_ctrl;
- e_dbg("e1000_phy_hw_reset");
-
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
@@ -3189,8 +3131,6 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_phy_reset");
-
switch (hw->phy_type) {
case e1000_phy_igp:
ret_val = e1000_phy_hw_reset(hw);
@@ -3229,8 +3169,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- e_dbg("e1000_detect_gig_phy");
-
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3301,7 +3239,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3333,8 +3270,6 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- e_dbg("e1000_phy_igp_get_info");
-
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter.
*/
@@ -3414,8 +3349,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- e_dbg("e1000_phy_m88_get_info");
-
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter.
*/
@@ -3487,8 +3420,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_phy_get_info");
-
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
phy_info->cable_polarity = e1000_rev_polarity_undefined;
@@ -3527,8 +3458,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- e_dbg("e1000_validate_mdi_settings");
-
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
@@ -3551,8 +3480,6 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- e_dbg("e1000_init_eeprom_params");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -3770,8 +3697,6 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- e_dbg("e1000_acquire_eeprom");
-
eecd = er32(EECD);
/* Request EEPROM Access */
@@ -3871,8 +3796,6 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- e_dbg("e1000_release_eeprom");
-
eecd = er32(EECD);
if (hw->eeprom.type == e1000_eeprom_spi) {
@@ -3920,8 +3843,6 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- e_dbg("e1000_spi_eeprom_ready");
-
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
* bit 0 of the internal status register. If it's not cleared within
@@ -3974,8 +3895,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- e_dbg("e1000_read_eeprom");
-
if (hw->mac_type == e1000_ce4100) {
GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
data);
@@ -4076,8 +3995,6 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- e_dbg("e1000_validate_eeprom_checksum");
-
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
e_dbg("EEPROM Read Error\n");
@@ -4112,8 +4029,6 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- e_dbg("e1000_update_eeprom_checksum");
-
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
e_dbg("EEPROM Read Error\n");
@@ -4154,8 +4069,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- e_dbg("e1000_write_eeprom");
-
if (hw->mac_type == e1000_ce4100) {
GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
data);
@@ -4205,8 +4118,6 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- e_dbg("e1000_write_eeprom_spi");
-
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4274,8 +4185,6 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- e_dbg("e1000_write_eeprom_microwire");
-
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
* the 11 of the dummy address as part of the opcode than it is to shift
@@ -4354,8 +4263,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- e_dbg("e1000_read_mac_addr");
-
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
@@ -4394,8 +4301,6 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- e_dbg("e1000_init_rx_addrs");
-
/* Setup the receive address. */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -4553,8 +4458,6 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- e_dbg("e1000_id_led_init");
-
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
return E1000_SUCCESS;
@@ -4626,8 +4529,6 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_setup_led");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4678,8 +4579,6 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_cleanup_led");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4714,8 +4613,6 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- e_dbg("e1000_led_on");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4760,8 +4657,6 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- e_dbg("e1000_led_off");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4889,8 +4784,6 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- e_dbg("e1000_reset_adaptive");
-
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
hw->current_ifs_val = 0;
@@ -4917,8 +4810,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- e_dbg("e1000_update_adaptive");
-
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
if (hw->tx_packet_delta > MIN_NUM_XMITS) {
@@ -5114,8 +5005,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- e_dbg("e1000_get_cable_length");
-
*min_length = *max_length = 0;
/* Use old method for Phy older than IGP */
@@ -5231,8 +5120,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_polarity");
-
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
@@ -5299,8 +5186,6 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_downshift");
-
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -5411,8 +5296,6 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
- e_dbg("e1000_config_dsp_after_link_change");
-
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5546,8 +5429,6 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- e_dbg("e1000_set_phy_mode");
-
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
ret_val =
@@ -5594,7 +5475,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5699,8 +5579,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- e_dbg("e1000_set_vco_speed");
-
switch (hw->mac_type) {
case e1000_82545_rev_3:
case e1000_82546_rev_3:
@@ -5872,7 +5750,6 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5887,7 +5764,6 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- e_dbg("e1000_get_phy_cfg_done");
msleep(10);
return E1000_SUCCESS;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 46e6544..27058df 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2682,14 +2682,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
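
The e1000 hunk above is the first of several identical conversions in this section (e1000e, i40evf, igb, igbvf, ixgb, ixgbe and ixgbevf follow): the open-coded skb_header_cloned()/pskb_expand_head() sequence becomes a single skb_cow_head() call, which reallocates the header only when it is actually shared. The common TSO prologue pattern, sketched with driver-specific details elided:

static int example_tso_prologue(struct sk_buff *skb)	/* illustrative name */
{
	int err;

	if (!skb_is_gso(skb))
		return 0;

	/* Make the headers writable; copies only if the head is shared. */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ... hdr_len, mss and descriptor setup continue here ... */
	return 0;
}
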
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index dce377b..d50c91e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5100,16 +5100,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
u32 cmd_length = 0;
u16 ipcse = 0, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 53be5f4..b9f50f4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1114,20 +1114,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
+ struct ipv6hdr *ipv6h;
struct tcphdr *tcph;
struct iphdr *iph;
u32 l4len;
int err;
- struct ipv6hdr *ipv6h;
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index e35e66f..2797548 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1412,6 +1412,14 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
+/**
+ * next_queue - increment to next available tx queue
+ * @adapter: board private structure
+ * @j: queue counter
+ *
+ * Helper function for RSS programming to increment through available
+ * queues. Returns the next queue value.
+ **/
static int next_queue(struct i40evf_adapter *adapter, int j)
{
j += 1;
@@ -1451,10 +1459,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
/* Populate the LUT with max no. of queues in round robin fashion */
j = adapter->vsi_res->num_queue_pairs;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- lut = next_queue(adapter, j);
- lut |= next_queue(adapter, j) << 8;
- lut |= next_queue(adapter, j) << 16;
- lut |= next_queue(adapter, j) << 24;
+ j = next_queue(adapter, j);
+ lut = j;
+ j = next_queue(adapter, j);
+ lut |= j << 8;
+ j = next_queue(adapter, j);
+ lut |= j << 16;
+ j = next_queue(adapter, j);
+ lut |= j << 24;
wr32(hw, I40E_VFQF_HLUT(i), lut);
}
i40e_flush(hw);
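
The i40evf hunk above fixes the LUT fill: the old code never fed the return value of next_queue() back into j, so with j left at num_queue_pairs every call returned 0, every LUT word was 0x00000000 and all RSS traffic landed on queue 0. A small userspace sketch of one corrected iteration, assuming num_queue_pairs = 4:

#include <stdio.h>
#include <stdint.h>

static int next_queue(int num_queue_pairs, int j)	/* mirrors the helper above */
{
	j += 1;
	return j >= num_queue_pairs ? 0 : j;
}

int main(void)
{
	int num = 4, j = num;
	uint32_t lut;

	j = next_queue(num, j); lut  = j;		/* 0 */
	j = next_queue(num, j); lut |= j << 8;		/* 1 */
	j = next_queue(num, j); lut |= j << 16;		/* 2 */
	j = next_queue(num, j); lut |= j << 24;		/* 3 */

	printf("0x%08x\n", lut);	/* 0x03020100 instead of 0x00000000 */
	return 0;
}
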
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7fbe1e9..2713006 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -241,7 +241,6 @@ struct igb_ring {
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
- unsigned long last_rx_timestamp;
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
@@ -437,6 +436,7 @@ struct igb_adapter {
struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -533,20 +533,6 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
- /* Update the last_rx_timestamp timer in order to enable watchdog check
- * for error case of latched timestamp on a dropped packet.
- */
- rx_ring->last_rx_timestamp = jiffies;
-}
-
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
#ifdef CONFIG_IGB_HWMON
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3019818..fb98d46 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4605,6 +4605,7 @@ static int igb_tso(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
@@ -4612,11 +4613,9 @@ static int igb_tso(struct igb_ring *tx_ring,
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -6955,7 +6954,9 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2cca8fd..9209d65 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -427,10 +427,8 @@ static void igb_ptp_overflow_check(struct work_struct *work)
void igb_ptp_rx_hang(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct igb_ring *rx_ring;
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
unsigned long rx_event;
- int n;
if (hw->mac.type != e1000_82576)
return;
@@ -445,11 +443,8 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
/* Determine the most recent watchdog or rx_timestamp event */
rx_event = adapter->last_rx_ptp_check;
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
- }
+ if (time_after(adapter->last_rx_timestamp, rx_event))
+ rx_event = adapter->last_rx_timestamp;
/* Only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
@@ -540,6 +535,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
}
/**
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b7ab03a..d608599 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1910,20 +1910,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
struct igbvf_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
u32 mss_l4len_idx, l4len;
+ unsigned int i;
+ int err;
+
*hdr_len = 0;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "igbvf_tso returning an error\n");
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ return err;
}
l4len = tcp_hdrlen(skb);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index f42c201..6080127 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1220,17 +1220,15 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
unsigned int i;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
u16 ipcse, tucse, mss;
- int err;
if (likely(skb_is_gso(skb))) {
struct ixgb_buffer *buffer_info;
struct iphdr *iph;
+ int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 55c53a1..1a12c1d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -811,6 +811,7 @@ enum ixgbe_state_t {
__IXGBE_DISABLED,
__IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
+ __IXGBE_SERVICE_INITED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8436c65..c4c526b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -297,7 +297,8 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
return;
hw->hw_addr = NULL;
e_dev_err("Adapter removed\n");
- ixgbe_service_event_schedule(adapter);
+ if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+ ixgbe_service_event_schedule(adapter);
}
void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -6509,6 +6510,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
@@ -6516,11 +6518,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -7077,8 +7077,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
struct vlan_ethhdr *vhdr;
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+
+ if (skb_cow_head(skb, 0))
goto out_drop;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI = htons(tx_flags >>
@@ -8023,6 +8023,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (ixgbe_removed(hw->hw_addr)) {
+ err = -EIO;
+ goto err_ioremap;
+ }
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
if (!(eec & (1 << 8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
@@ -8185,7 +8189,12 @@ skip_sriov:
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
+ if (ixgbe_removed(hw->hw_addr)) {
+ err = -EIO;
+ goto err_sw_init;
+ }
INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
err = ixgbe_init_interrupt_scheme(adapter);
@@ -8494,6 +8503,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
+ if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+ return PCI_ERS_RESULT_DISCONNECT;
+
rtnl_lock();
netif_device_detach(netdev);
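
The ixgbe hunks above (and the matching ixgbevf ones below) introduce a __IXGBE_SERVICE_INITED state bit so that surprise-removal and error-recovery paths never schedule the service task before INIT_WORK() has run. The guard pattern, sketched:

/* During probe, only after the work item exists: */
INIT_WORK(&adapter->service_task, ixgbe_service_task);
set_bit(__IXGBE_SERVICE_INITED, &adapter->state);

/* In removal/error paths that may run arbitrarily early: */
if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
	ixgbe_service_event_schedule(adapter);
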
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e7e7d69..a0a1de9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -421,6 +421,7 @@ enum ixbgevf_state_t {
__IXGBEVF_DOWN,
__IXGBEVF_DISABLED,
__IXGBEVF_REMOVING,
+ __IXGBEVF_WORK_INIT,
};
struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4ba139b..d0799e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -107,7 +107,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
return;
hw->hw_addr = NULL;
dev_err(&adapter->pdev->dev, "Adapter removed\n");
- schedule_work(&adapter->watchdog_task);
+ if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+ schedule_work(&adapter->watchdog_task);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -2838,6 +2839,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
@@ -2845,11 +2847,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -3573,8 +3573,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->watchdog_timer.function = ixgbevf_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
+ if (IXGBE_REMOVED(hw->hw_addr)) {
+ err = -EIO;
+ goto err_sw_init;
+ }
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+ set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
err = ixgbevf_init_interrupt_scheme(adapter);
if (err)
@@ -3667,6 +3672,9 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+ return PCI_ERS_RESULT_DISCONNECT;
+
rtnl_lock();
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3..b248bcb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -89,9 +89,8 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SERDES_CFG 0x24A0
+#define MVNETA_SGMII_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
-#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -712,6 +711,35 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
+
+
+/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ if (enable)
+ val |= MVNETA_GMAC2_PORT_RGMII;
+ else
+ val &= ~MVNETA_GMAC2_PORT_RGMII;
+
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Config SGMII port */
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val |= MVNETA_GMAC2_PCS_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+ mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+}
+
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2729,15 +2757,12 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- else
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+ mvneta_port_sgmii_config(pp);
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ mvneta_gmac_rgmii_set(pp, 1);
/* Cancel Port Reset */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
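
The mvneta change above replaces an open-coded update of the GMAC control register with small helpers that each perform a read-modify-write of one field. A stand-alone C sketch of that read-modify-write idiom follows; the register name, bit position, and helpers are invented for illustration and do not reflect mvneta's real register layout.

	#include <stdint.h>
	#include <stdio.h>

	#define PORT_RGMII_BIT  (1u << 4)       /* hypothetical bit position */

	static uint32_t fake_ctrl2;             /* stands in for the memory-mapped register */

	static uint32_t reg_read(void)          { return fake_ctrl2; }
	static void     reg_write(uint32_t v)   { fake_ctrl2 = v; }

	/* Enable or disable one bit without disturbing the rest of the register. */
	static void gmac_rgmii_set(int enable)
	{
		uint32_t val = reg_read();

		if (enable)
			val |= PORT_RGMII_BIT;
		else
			val &= ~PORT_RGMII_BIT;

		reg_write(val);
	}

	int main(void)
	{
		fake_ctrl2 = 0x0000ff00;        /* pretend other fields are already programmed */
		gmac_rgmii_set(1);
		printf("after enable:  0x%08x\n", fake_ctrl2);
		gmac_rgmii_set(0);
		printf("after disable: 0x%08x\n", fake_ctrl2);
		return 0;
	}
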
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f..cef267e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- dev = &priv->dev;
+ dev = pci_get_drvdata(pdev);
+ priv = mlx4_priv(dev);
dev->pdev = pdev;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
} else {
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
- atomic_dec(&pf_loading);
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
+ atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
@@ -2535,8 +2530,10 @@ slave_start:
mlx4_sense_init(dev);
mlx4_start_sense(dev);
- priv->pci_dev_data = pci_dev_data;
- pci_set_drvdata(pdev, dev);
+ priv->removed = 0;
+
+ if (mlx4_is_master(dev) && dev->num_vfs)
+ atomic_dec(&pf_loading);
return 0;
@@ -2588,6 +2585,9 @@ err_rel_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ if (mlx4_is_master(dev) && dev->num_vfs)
+ atomic_dec(&pf_loading);
+
kfree(priv->dev.dev_vfs);
err_free_dev:
@@ -2604,85 +2604,110 @@ err_disable_pdev:
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+
printk_once(KERN_INFO "%s", mlx4_version);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev = &priv->dev;
+ pci_set_drvdata(pdev, dev);
+ priv->pci_dev_data = id->driver_data;
+
return __mlx4_init_one(pdev, id->driver_data);
}
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
{
struct mlx4_dev *dev = pci_get_drvdata(pdev);
struct mlx4_priv *priv = mlx4_priv(dev);
+ int pci_dev_data;
int p;
- if (dev) {
- /* in SRIOV it is not allowed to unload the pf's
- * driver while there are alive vf's */
- if (mlx4_is_master(dev)) {
- if (mlx4_how_many_lives_vf(dev))
- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
- }
- mlx4_stop_sense(dev);
- mlx4_unregister_device(dev);
+ if (priv->removed)
+ return;
- for (p = 1; p <= dev->caps.num_ports; p++) {
- mlx4_cleanup_port_info(&priv->port[p]);
- mlx4_CLOSE_PORT(dev, p);
- }
+ pci_dev_data = priv->pci_dev_data;
- if (mlx4_is_master(dev))
- mlx4_free_resource_tracker(dev,
- RES_TR_FREE_SLAVES_ONLY);
-
- mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_qp_table(dev);
- mlx4_cleanup_srq_table(dev);
- mlx4_cleanup_cq_table(dev);
- mlx4_cmd_use_polling(dev);
- mlx4_cleanup_eq_table(dev);
- mlx4_cleanup_mcg_table(dev);
- mlx4_cleanup_mr_table(dev);
- mlx4_cleanup_xrcd_table(dev);
- mlx4_cleanup_pd_table(dev);
+ /* In SR-IOV mode it is not allowed to unload the PF's
+ * driver while there are active VFs. */
+ if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ mlx4_stop_sense(dev);
+ mlx4_unregister_device(dev);
- if (mlx4_is_master(dev))
- mlx4_free_resource_tracker(dev,
- RES_TR_FREE_STRUCTS_ONLY);
-
- iounmap(priv->kar);
- mlx4_uar_free(dev, &priv->driver_uar);
- mlx4_cleanup_uar_table(dev);
- if (!mlx4_is_slave(dev))
- mlx4_clear_steering(dev);
- mlx4_free_eq_table(dev);
- if (mlx4_is_master(dev))
- mlx4_multi_func_cleanup(dev);
- mlx4_close_hca(dev);
- if (mlx4_is_slave(dev))
- mlx4_multi_func_cleanup(dev);
- mlx4_cmd_cleanup(dev);
-
- if (dev->flags & MLX4_FLAG_MSI_X)
- pci_disable_msix(pdev);
- if (dev->flags & MLX4_FLAG_SRIOV) {
- mlx4_warn(dev, "Disabling SR-IOV\n");
- pci_disable_sriov(pdev);
- }
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
+ mlx4_CLOSE_PORT(dev, p);
+ }
- if (!mlx4_is_slave(dev))
- mlx4_free_ownership(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev,
+ RES_TR_FREE_SLAVES_ONLY);
- kfree(dev->caps.qp0_tunnel);
- kfree(dev->caps.qp0_proxy);
- kfree(dev->caps.qp1_tunnel);
- kfree(dev->caps.qp1_proxy);
- kfree(dev->dev_vfs);
+ mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_xrcd_table(dev);
+ mlx4_cleanup_pd_table(dev);
- kfree(priv);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev,
+ RES_TR_FREE_STRUCTS_ONLY);
+
+ iounmap(priv->kar);
+ mlx4_uar_free(dev, &priv->driver_uar);
+ mlx4_cleanup_uar_table(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
+ mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+ mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+ mlx4_cmd_cleanup(dev);
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ mlx4_warn(dev, "Disabling SR-IOV\n");
+ pci_disable_sriov(pdev);
+ dev->num_vfs = 0;
}
+
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ kfree(dev->dev_vfs);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ memset(priv, 0, sizeof(*priv));
+ priv->pci_dev_data = pci_dev_data;
+ priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ __mlx4_remove_one(pdev);
+ kfree(priv);
+ pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2717,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
int pci_dev_data;
pci_dev_data = priv->pci_dev_data;
- mlx4_remove_one(pdev);
+ __mlx4_remove_one(pdev);
return __mlx4_init_one(pdev, pci_dev_data);
}
@@ -2747,7 +2772,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- mlx4_remove_one(pdev);
+ __mlx4_remove_one(pdev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2780,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- const struct pci_device_id *id;
- int ret;
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int ret;
- id = pci_match_id(mlx4_pci_table, pdev);
- ret = __mlx4_init_one(pdev, id->driver_data);
+ ret = __mlx4_init_one(pdev, priv->pci_dev_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
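
The mlx4 rework above separates one-time allocation of the private structure (mlx4_init_one() allocates it and sets it as drvdata) from hardware bring-up and teardown (__mlx4_init_one() / __mlx4_remove_one()), so PCI error recovery and mlx4_restart_one() can tear the device down and re-initialize it without freeing the allocation; a 'removed' flag makes teardown idempotent. A rough user-space sketch of that split is below; all names and fields are invented for illustration only.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct priv {
		int removed;        /* set once hardware teardown has run */
		int pci_dev_data;   /* survives re-initialization */
		int hw_state;       /* stands in for all per-device HW resources */
	};

	/* Bring the "hardware" up; may be called again after teardown. */
	static int device_init(struct priv *p, int dev_data)
	{
		p->pci_dev_data = dev_data;
		p->hw_state = 1;
		p->removed = 0;
		return 0;
	}

	/* Tear down the hardware but keep the allocation; safe to call twice. */
	static void device_teardown(struct priv *p)
	{
		int dev_data = p->pci_dev_data;

		if (p->removed)
			return;

		p->hw_state = 0;
		memset(p, 0, sizeof(*p));
		p->pci_dev_data = dev_data;       /* preserved across teardown */
		p->removed = 1;
	}

	int main(void)
	{
		struct priv *p = calloc(1, sizeof(*p));   /* "probe" allocates exactly once */

		if (!p)
			return 1;
		device_init(p, 42);
		device_teardown(p);                       /* e.g. PCI error detected */
		device_init(p, p->pci_dev_data);          /* slot reset re-inits in place */
		printf("hw_state=%d dev_data=%d\n", p->hw_state, p->pci_dev_data);

		device_teardown(p);                       /* final remove */
		free(p);                                  /* only now is the priv freed */
		return 0;
	}
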
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41..f9c4651 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
spinlock_t ctx_lock;
int pci_dev_data;
+ int removed;
struct list_head pgdir_list;
struct mutex pgdir_mutex;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737d..ba20c72 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
- adapter->max_tx_rings = ahw->max_tx_ques;
- adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 max_sds_rings, max_tx_rings;
int ret;
ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
- adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
- adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+ max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
- adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
return -EIO;
}
+ adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+ adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
return 0;
}
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
goto disable_intr;
}
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
err = qlcnic_83xx_setup_mbx_intr(adapter);
if (err)
goto disable_mbx_intr;
qlcnic_83xx_clear_function_resources(adapter);
-
- INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
+ qlcnic_dcb_enable(adapter->dcb);
qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf3..c1e11f5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
- adapter->max_tx_rings = npar_info->max_tx_ques;
- adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg2 &= ~BIT_3;
break;
case QLCNIC_ADD_VLAN:
+ arg1 &= ~(0x0ffff << 16);
arg1 |= (BIT_2 | BIT_5);
arg1 |= (esw_cfg->vlan_id << 16);
break;
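
The QLCNIC_ADD_VLAN fix above is the classic "clear the field before OR-ing in the new value" correction: without the mask-off, bits of a previously programmed VLAN id would be OR-ed together with the new one. A tiny self-contained demonstration follows; the field width and position are chosen arbitrarily and are not the firmware's actual layout.

	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_SHIFT 16
	#define VLAN_MASK  (0xffffu << VLAN_SHIFT)

	static uint32_t set_vlan_buggy(uint32_t arg, uint16_t vid)
	{
		return arg | ((uint32_t)vid << VLAN_SHIFT);     /* old bits survive */
	}

	static uint32_t set_vlan_fixed(uint32_t arg, uint16_t vid)
	{
		arg &= ~VLAN_MASK;                              /* clear the field first */
		return arg | ((uint32_t)vid << VLAN_SHIFT);
	}

	int main(void)
	{
		uint32_t arg = set_vlan_fixed(0, 0x123);        /* program VLAN 0x123 */

		printf("buggy : 0x%08x\n", set_vlan_buggy(arg, 0x456)); /* 0x123 and 0x456 mixed */
		printf("fixed : 0x%08x\n", set_vlan_fixed(arg, 0x456)); /* only 0x456 */
		return 0;
	}
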
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f549..a51fe18 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
goto out_free_cfg;
}
- qlcnic_dcb_get_info(dcb);
-
return 0;
out_free_cfg:
kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d056..dbf7539 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
else
num_msix += adapter->drv_tx_rings;
- if (adapter->drv_rss_rings > 0)
+ if (adapter->drv_rss_rings > 0)
num_msix += adapter->drv_rss_rings;
else
num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
return -ENOMEM;
}
-restore:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
+restore:
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
- adapter->ahw->num_msix = num_msix;
- if (adapter->drv_tss_rings > 0)
- adapter->drv_tx_rings = adapter->drv_tss_rings;
+ if (err > 0) {
+ if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+ return -ENOSPC;
- if (adapter->drv_rss_rings > 0)
- adapter->drv_sds_rings = adapter->drv_rss_rings;
- } else {
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -716,12 +712,20 @@ restore:
"Restoring %d Tx, %d SDS rings for total %d vectors.\n",
adapter->drv_tx_rings, adapter->drv_sds_rings,
num_msix);
- goto restore;
- err = -EIO;
+ goto restore;
+ } else if (err < 0) {
+ return err;
}
- return err;
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+ return 0;
}
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
@@ -2528,8 +2532,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
- qlcnic_dcb_enable(adapter->dcb);
-
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2549,7 +2551,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_dcb_get_info(adapter->dcb);
err = qlcnic_setup_intr(adapter);
+
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748c..2801379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ if (pci_vfs_assigned(adapter->pdev)) {
+ netdev_err(adapter->netdev,
+ "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+ adapter->portnum);
+ netdev_info(adapter->netdev,
+ "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+ adapter->portnum);
+ return -EPERM;
+ }
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156..cd346e2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
- for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
+ u8 pci_func;
int i, ret;
u32 count;
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
- if (qlcnic_is_valid_nic_func(adapter, i) < 0)
- continue;
if (adapter->npars[i].pci_func >= count) {
dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
__func__, adapter->ahw->total_nic_func, count);
continue;
}
- ret = qlcnic_get_nic_info(adapter, &nic_info, i);
- if (ret)
- return ret;
if (!adapter->npars[i].eswitch_status)
continue;
- np_cfg[i].pci_func = i;
- np_cfg[i].op_mode = (u8)nic_info.op_mode;
- np_cfg[i].port_num = nic_info.phys_port;
- np_cfg[i].fw_capab = nic_info.capabilities;
- np_cfg[i].min_bw = nic_info.min_tx_bw;
- np_cfg[i].max_bw = nic_info.max_tx_bw;
- np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
- np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ pci_func = adapter->npars[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+
+ np_cfg[pci_func].pci_func = pci_func;
+ np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+ np_cfg[pci_func].port_num = nic_info.phys_port;
+ np_cfg[pci_func].fw_capab = nic_info.capabilities;
+ np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+ np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+ np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
}
return size;
}
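
The sysfs hunk above stops indexing the output array with the loop counter and instead keys every entry by the NPAR's own PCI function number, validating it before use. A small sketch of that "index by the record's id, not by the iteration count" pattern is below; the structures, limits, and values are illustrative only.

	#include <stdio.h>
	#include <string.h>

	#define MAX_FUNCS 8

	struct npar { int pci_func; int min_bw; };
	struct cfg  { int pci_func; int min_bw; };

	int main(void)
	{
		/* Discovered NPARs; their function numbers are sparse, not 0..n-1. */
		struct npar npars[] = { { .pci_func = 2, .min_bw = 10 },
		                        { .pci_func = 5, .min_bw = 40 } };
		struct cfg out[MAX_FUNCS];
		size_t i;

		memset(out, 0, sizeof(out));

		for (i = 0; i < sizeof(npars) / sizeof(npars[0]); i++) {
			int fn = npars[i].pci_func;

			if (fn < 0 || fn >= MAX_FUNCS)  /* validate before using as index */
				continue;

			out[fn].pci_func = fn;          /* slot chosen by function id, not by i */
			out[fn].min_bw   = npars[i].min_bw;
		}

		for (i = 0; i < MAX_FUNCS; i++)
			if (out[i].min_bw)
				printf("func %d: min_bw %d\n", out[i].pci_func, out[i].min_bw);
		return 0;
	}
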
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5d5fec6..36aa109 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -687,7 +687,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
- if (unlikely(status < 0)) {
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* the interface is going down, skbs are purged */
dev_kfree_skb_any(skb);
return;
@@ -1201,8 +1201,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
for_each_slave(priv, cpsw_slave_open, priv);
/* Add default VLAN */
- if (!priv->data.dual_emac)
- cpsw_add_default_vlan(priv);
+ cpsw_add_default_vlan(priv);
if (!cpsw_common_res_usage_state(priv)) {
/* setup tx dma to fixed prio and zero offset */
@@ -1253,6 +1252,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_set_coalesce(ndev, &coal);
}
+ napi_enable(&priv->napi);
+ cpdma_ctlr_start(priv->dma);
+ cpsw_intr_enable(priv);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
@@ -1261,12 +1266,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
}
- napi_enable(&priv->napi);
- cpdma_ctlr_start(priv->dma);
- cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
if (priv->data.dual_emac)
priv->slaves[priv->emac_port].open_stat = true;
return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 13010b4..d18f711d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -747,6 +747,7 @@ struct ndis_oject_header {
#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
+#define VERSION_4_OFFLOAD_SIZE 22
/*
* New offload OIDs for NDIS 6
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index daddea2..f7629ec 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -344,7 +344,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
memset(init_packet, 0, sizeof(struct nvsp_message));
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
- ndis_version = 0x00050001;
+ ndis_version = 0x00060001;
else
ndis_version = 0x0006001e;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4e4cf9e..31e55fb 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -319,7 +319,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_data_pgs * sizeof(struct hv_page_buffer)) +
sizeof(struct rndis_message) +
- NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
+ NDIS_VLAN_PPI_SIZE +
+ NDIS_CSUM_PPI_SIZE +
+ NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -396,7 +398,30 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.tcp_header_offset = hdr_offset;
} else if (net_trans_info & INFO_UDP) {
- csum_info->transmit.udp_checksum = 1;
+ /* UDP checksum offload is not supported on ws2008r2.
+ * Furthermore, on ws2012 and ws2012r2 there are
+ * host-side issues with UDP checksum offload from
+ * Linux guests, so for now compute the checksum here.
+ */
+ struct udphdr *uh;
+ u16 udp_len;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ goto drop;
+
+ uh = udp_hdr(skb);
+ udp_len = ntohs(uh->len);
+ uh->check = 0;
+ uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ udp_len, IPPROTO_UDP,
+ csum_partial(uh, udp_len, 0));
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ csum_info->transmit.udp_checksum = 0;
}
goto do_send;
@@ -436,6 +461,7 @@ do_send:
ret = netvsc_send(net_device_ctx->device_ctx, packet);
+drop:
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
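
Since the host cannot be trusted with UDP checksum offload here, the driver computes the checksum itself: csum_tcpudp_magic() covers the IPv4 pseudo-header, csum_partial() covers the UDP header and payload, and a result of 0 is replaced with CSUM_MANGLED_0 (all ones) because 0 in a UDP header means "no checksum". Below is a plain C version of the same RFC 768 / RFC 1071 arithmetic, independent of the kernel helpers; the addresses and payload are made up.

	#include <stdint.h>
	#include <stdio.h>

	/* RFC 1071 one's-complement sum over a byte buffer, starting from 'sum'. */
	static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
	{
		while (len > 1) {
			sum += (uint32_t)data[0] << 8 | data[1];
			data += 2;
			len -= 2;
		}
		if (len)                        /* odd trailing byte, padded with zero */
			sum += (uint32_t)data[0] << 8;
		return sum;
	}

	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* UDP checksum over the IPv4 pseudo-header + UDP header + payload. */
	static uint16_t udp_checksum(uint32_t saddr, uint32_t daddr,
				     const uint8_t *udp, uint16_t udp_len)
	{
		uint8_t pseudo[12];
		uint32_t sum;
		uint16_t check;

		pseudo[0] = saddr >> 24;  pseudo[1] = saddr >> 16;
		pseudo[2] = saddr >> 8;   pseudo[3] = saddr;
		pseudo[4] = daddr >> 24;  pseudo[5] = daddr >> 16;
		pseudo[6] = daddr >> 8;   pseudo[7] = daddr;
		pseudo[8] = 0;            pseudo[9] = 17;           /* IPPROTO_UDP */
		pseudo[10] = udp_len >> 8; pseudo[11] = udp_len & 0xff;

		sum = csum_add(0, pseudo, sizeof(pseudo));
		sum = csum_add(sum, udp, udp_len);
		check = csum_fold(sum);
		return check ? check : 0xffff;  /* 0 means "no checksum" in UDP */
	}

	int main(void)
	{
		/* 8-byte UDP header (checksum field zeroed) + 4 bytes of payload. */
		uint8_t dgram[12] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0c, 0x00, 0x00,
				      'p', 'i', 'n', 'g' };

		printf("udp check = 0x%04x\n",
		       udp_checksum(0xc0a80001, 0xc0a80002, dgram, sizeof(dgram)));
		return 0;
	}
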
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 4a37e3d..143a98c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -641,6 +641,16 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
int ret, t;
+ u32 vsp_version = nvdev->nvsp_version;
+
+ if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
+ extlen = VERSION_4_OFFLOAD_SIZE;
+ /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
+ * UDP checksum offload.
+ */
+ req_offloads->udp_ip_v4_csum = 0;
+ req_offloads->udp_ip_v6_csum = 0;
+ }
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -674,7 +684,7 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
} else {
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
set_complete->status);
ret = -EINVAL;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0d..e36f194 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
if (status == 0)
- *data = buf[1];
+ *data = (buf[1] & mask) >> shift;
return status;
}
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return -EINVAL;
}
- rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
- if (rc)
- return rc;
- if (!status) {
- dev_err(&lp->spi->dev, "AVDD error\n");
- return -EINVAL;
- }
-
return 0;
}
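
The at86rf230 fix above makes __at86rf230_read_subreg() actually apply the sub-register's mask and shift instead of handing back the raw byte. The idiom, shown stand-alone with an invented register layout rather than the radio's real one:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical 8-bit register: bits 7..6 = mode, bits 5..1 = gain, bit 0 = enable */
	#define GAIN_MASK  0x3e
	#define GAIN_SHIFT 1

	static uint8_t read_subreg(uint8_t reg_value, uint8_t mask, uint8_t shift)
	{
		return (reg_value & mask) >> shift;   /* isolate just the requested field */
	}

	int main(void)
	{
		uint8_t raw = 0xd7;                   /* mode = 3, gain = 0b01011, enable = 1 */

		printf("raw  = 0x%02x\n", raw);
		printf("gain = %u\n", read_subreg(raw, GAIN_MASK, GAIN_SHIFT));
		return 0;
	}
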
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index f3cdf64..63aa9d9 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -78,11 +78,19 @@ static void ntb_netdev_event_handler(void *data, int status)
netdev_dbg(ndev, "Event %x, Link %x\n", status,
ntb_transport_link_query(dev->qp));
- /* Currently, only link status event is supported */
- if (status)
- netif_carrier_on(ndev);
- else
+ switch (status) {
+ case NTB_LINK_DOWN:
netif_carrier_off(ndev);
+ break;
+ case NTB_LINK_UP:
+ if (!ntb_transport_link_query(dev->qp))
+ return;
+
+ netif_carrier_on(ndev);
+ break;
+ default:
+ netdev_warn(ndev, "Unsupported event type %d\n", status);
+ }
}
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
@@ -182,8 +190,10 @@ static int ntb_netdev_open(struct net_device *ndev)
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
- if (rc == -EINVAL)
+ if (rc == -EINVAL) {
+ dev_kfree_skb(skb);
goto err;
+ }
}
netif_carrier_off(ndev);
@@ -367,12 +377,15 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
{
struct net_device *ndev;
struct ntb_netdev *dev;
+ bool found = false;
list_for_each_entry(dev, &dev_list, list) {
- if (dev->pdev == pdev)
+ if (dev->pdev == pdev) {
+ found = true;
break;
+ }
}
- if (dev == NULL)
+ if (!found)
return;
list_del(&dev->list);
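
The ntb_netdev_remove() fix matters because list_for_each_entry() never leaves the cursor NULL when the list is exhausted: it ends up pointing at a bogus entry derived from the list head, so the old "if (dev == NULL)" test could never fire. The replacement tracks the match with an explicit flag. The same situation in ordinary C, using a circular list with a sentinel head so the cursor is likewise never NULL after a full traversal (all data here is invented):

	#include <stdbool.h>
	#include <stdio.h>

	struct node {
		int id;
		struct node *next;
	};

	/*
	 * Circular list with a sentinel head, like the kernel's struct list_head:
	 * after a full traversal the cursor points back at the head, never at NULL,
	 * so an explicit 'found' flag is required.
	 */
	static struct node *find_node(struct node *head, int id)
	{
		struct node *n;
		bool found = false;

		for (n = head->next; n != head; n = n->next) {
			if (n->id == id) {
				found = true;
				break;
			}
		}
		return found ? n : NULL;
	}

	int main(void)
	{
		struct node head, a, b;

		head.id = -1; head.next = &a;
		a.id = 1;     a.next = &b;
		b.id = 2;     b.next = &head;     /* close the circle */

		printf("lookup 2: %s\n", find_node(&head, 2) ? "found" : "missing");
		printf("lookup 9: %s\n", find_node(&head, 9) ? "found" : "missing");
		return 0;
	}
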
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d788f1..1b6d09a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -756,12 +756,8 @@ void phy_state_machine(struct work_struct *work)
netif_carrier_on(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
- } else if (0 == phydev->link_timeout--) {
+ } else if (0 == phydev->link_timeout--)
needs_aneg = 1;
- /* If we have the magic_aneg bit, we try again */
- if (phydev->drv->flags & PHY_HAS_MAGICANEG)
- break;
- }
break;
case PHY_NOLINK:
err = phy_read_status(phydev);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 18e12a3..3fbfb08 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -929,6 +929,9 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
struct r8152 *tp = netdev_priv(netdev);
int ret;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
if (phy_id != R8152_PHY_ID)
return -EINVAL;
@@ -949,6 +952,9 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
if (phy_id != R8152_PHY_ID)
return;
@@ -1962,6 +1968,9 @@ static int rtl_enable(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
@@ -1994,6 +2003,9 @@ static void r8153_set_rx_agg(struct r8152 *tp)
static int rtl8153_enable(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
r8153_set_rx_agg(tp);
@@ -2006,6 +2018,11 @@ static void rtl8152_disable(struct r8152 *tp)
u32 ocp_data;
int i;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2232,6 +2249,9 @@ static void r8152b_exit_oob(struct r8152 *tp)
u32 ocp_data;
int i;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2460,6 +2480,9 @@ static void r8153_first_init(struct r8152 *tp)
u32 ocp_data;
int i;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
@@ -2687,6 +2710,11 @@ out:
static void rtl8152_down(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+
r8152_power_cut_en(tp, false);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
@@ -2695,6 +2723,11 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+
r8153_u1u2en(tp, false);
r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
@@ -2904,6 +2937,9 @@ static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
@@ -2939,6 +2975,9 @@ static void r8153_init(struct r8152 *tp)
u32 ocp_data;
int i;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@@ -3213,6 +3252,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *data = if_mii(rq);
int res;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
res = usb_autopm_get_interface(tp->intf);
if (res < 0)
goto out;
@@ -3293,12 +3335,18 @@ static void r8152b_get_version(struct r8152 *tp)
static void rtl8152_unload(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
if (tp->version != RTL_VER_01)
r8152_power_cut_en(tp, true);
}
static void rtl8153_unload(struct r8152 *tp)
{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
r8153_power_cut_en(tp, true);
}
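
The r8152 hunks all add the same guard: once RTL8152_UNPLUG is set, routines that would touch device registers return immediately, or only drop queued transmit work, because the USB device is already gone. A rough user-space analogue using a C11 atomic flag is sketched below; every name is hypothetical, and the real driver uses test_bit() on tp->flags rather than this struct.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <errno.h>
	#include <stdio.h>

	struct dev {
		atomic_bool unplugged;
		int queued_tx;            /* stands in for queued skbs */
	};

	static int dev_enable(struct dev *d)
	{
		if (atomic_load(&d->unplugged))
			return -ENODEV;   /* don't touch registers of a device that is gone */
		puts("writing enable registers");
		return 0;
	}

	static void dev_down(struct dev *d)
	{
		if (atomic_load(&d->unplugged)) {
			d->queued_tx = 0; /* still release software-side resources */
			puts("dropped queued tx only");
			return;
		}
		puts("normal hardware shutdown");
	}

	int main(void)
	{
		struct dev d = { .queued_tx = 3 };

		dev_enable(&d);                     /* normal path */
		atomic_store(&d.unplugged, true);   /* surprise removal detected */
		printf("enable after unplug: %d\n", dev_enable(&d));
		dev_down(&d);
		return 0;
	}
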
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316..82355d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1755,8 +1755,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
if (err)
return err;
- return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
- false);
+ return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+ tos, ttl, df, false);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a8..83c39e2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
cosa_putstatus(cosa, 0);
cosa_getdata8(cosa);
cosa_putstatus(cosa, SR_RST);
-#ifdef MODULE
msleep(500);
-#else
- udelay(5*100000);
-#endif
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 3b3e910..00fb8ba 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1004,11 +1004,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
- value = level * 2;
+ value = level;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP, value);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
- AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 471e0f6..bd9e634 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -312,10 +312,9 @@ static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
void ath9k_csa_update(struct ath_softc *sc)
{
- ieee80211_iterate_active_interfaces(sc->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- ath9k_csa_update_vif,
- sc);
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif, sc);
}
void ath9k_beacon_tasklet(unsigned long data)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index e8149e3..289f3d8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -471,8 +471,11 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
if (!txok || !vif || !txs)
goto send_mac80211;
- if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK)
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) {
tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ }
if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c0a4e86..cbbb02a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -670,6 +670,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
+#ifdef CONFIG_ATH9K_DFS_CERTIFIED
{
.limits = if_dfs_limits,
.n_limits = ARRAY_SIZE(if_dfs_limits),
@@ -679,6 +680,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20),
}
+#endif
};
static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 05ee7f1..24ccbe9 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -5176,22 +5176,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
int ch = new_channel->hw_value;
u16 old_band_5ghz;
- u32 tmp32;
+ u16 tmp16;
old_band_5ghz =
b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
} else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
}
b43_chantab_phy_upload(dev, e);
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index e89535e..1a8d321 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -102,10 +102,10 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
}
get_queue_num:
- q_num = 0;
recontend_queue = false;
q_num = rsi_determine_min_weight_queue(common);
+
q_len = skb_queue_len(&common->tx_queue[ii]);
ii = q_num;
@@ -118,7 +118,9 @@ get_queue_num:
}
}
- common->tx_qinfo[q_num].pkt_contended = 0;
+ if (q_num < NUM_EDCA_QUEUES)
+ common->tx_qinfo[q_num].pkt_contended = 0;
+
/* Adjust the back off values for all queues again */
recontend_queue = rsi_recalculate_weights(common);
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index 7e4ef45..c466246 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -289,32 +289,29 @@ int rsi_init_dbgfs(struct rsi_hw *adapter)
const struct rsi_dbg_files *files;
dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
+ if (!dev_dbgfs)
+ return -ENOMEM;
+
adapter->dfsentry = dev_dbgfs;
snprintf(devdir, sizeof(devdir), "%s",
wiphy_name(adapter->hw->wiphy));
- dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
- if (IS_ERR(dev_dbgfs->subdir)) {
- if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
- rsi_dbg(ERR_ZONE,
- "%s:Debugfs has not been mounted\n", __func__);
- else
- rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
+ dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
- adapter->dfsentry = NULL;
+ if (!dev_dbgfs->subdir) {
kfree(dev_dbgfs);
- return (int)PTR_ERR(dev_dbgfs->subdir);
- } else {
- for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
- files = &dev_debugfs_files[ii];
- dev_dbgfs->rsi_files[ii] =
- debugfs_create_file(files->name,
- files->perms,
- dev_dbgfs->subdir,
- common,
- &files->fops);
- }
+ return -ENOMEM;
+ }
+
+ for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
+ files = &dev_debugfs_files[ii];
+ dev_dbgfs->rsi_files[ii] =
+ debugfs_create_file(files->name,
+ files->perms,
+ dev_dbgfs->subdir,
+ common,
+ &files->fops);
}
return 0;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 2361a68..7369429 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -738,7 +738,7 @@ int rsi_hal_load_key(struct rsi_common *common,
*
* Return: 0 on success, corresponding error code on failure.
*/
-static u8 rsi_load_bootup_params(struct rsi_common *common)
+static int rsi_load_bootup_params(struct rsi_common *common)
{
struct sk_buff *skb;
struct rsi_boot_params *boot_params;
@@ -1272,6 +1272,7 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
{
s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
u16 msg_type = (msg[2]);
+ int ret;
rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
__func__, msg_len, msg_type);
@@ -1284,8 +1285,9 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
if (common->fsm_state == FSM_CARD_NOT_READY) {
rsi_set_default_parameters(common);
- if (rsi_load_bootup_params(common))
- return -ENOMEM;
+ ret = rsi_load_bootup_params(common);
+ if (ret)
+ return ret;
else
common->fsm_state = FSM_BOOT_PARAMS_SENT;
} else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 852453f..2e39d38 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -756,12 +756,13 @@ fail:
static void rsi_disconnect(struct sdio_func *pfunction)
{
struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
- struct rsi_91x_sdiodev *dev =
- (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ struct rsi_91x_sdiodev *dev;
if (!adapter)
return;
+ dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
dev->write_fail = 2;
rsi_mac80211_detach(adapter);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index f1cb99c..20d11cc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -247,7 +247,7 @@ static int rsi_process_pkt(struct rsi_common *common)
if (!common->rx_data_pkt) {
rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
__func__);
- return -1;
+ return -ENOMEM;
}
status = rsi_sdio_host_intf_read_pkt(adapter,
@@ -260,12 +260,10 @@ static int rsi_process_pkt(struct rsi_common *common)
}
status = rsi_read_pkt(common, rcv_pkt_len);
- kfree(common->rx_data_pkt);
- return status;
fail:
kfree(common->rx_data_pkt);
- return -1;
+ return status;
}
/**
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index bb1bf96..4c46e56 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -154,24 +154,30 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
u16 *value,
u16 len)
{
- u8 temp_buf[4];
- int status = 0;
+ u8 *buf;
+ int status = -ENOMEM;
+
+ buf = kmalloc(0x04, GFP_KERNEL);
+ if (!buf)
+ return status;
status = usb_control_msg(usbdev,
usb_rcvctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_READ,
USB_TYPE_VENDOR,
((reg & 0xffff0000) >> 16), (reg & 0xffff),
- (void *)temp_buf,
+ (void *)buf,
len,
HZ * 5);
- *value = (temp_buf[0] | (temp_buf[1] << 8));
+ *value = (buf[0] | (buf[1] << 8));
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: Reg read failed with error code :%d\n",
__func__, status);
}
+ kfree(buf);
+
return status;
}
@@ -190,8 +196,12 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
u16 value,
u16 len)
{
- u8 usb_reg_buf[4];
- int status = 0;
+ u8 *usb_reg_buf;
+ int status = -ENOMEM;
+
+ usb_reg_buf = kmalloc(0x04, GFP_KERNEL);
+ if (!usb_reg_buf)
+ return status;
usb_reg_buf[0] = (value & 0x00ff);
usb_reg_buf[1] = (value & 0xff00) >> 8;
@@ -212,6 +222,8 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
"%s: Reg write failed with error code :%d\n",
__func__, status);
}
+ kfree(usb_reg_buf);
+
return status;
}
@@ -286,7 +298,7 @@ int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
return -ENOMEM;
while (count) {
- transfer = min_t(int, count, 4096);
+ transfer = (u8)(min_t(u32, count, 4096));
memcpy(buf, data, transfer);
status = usb_control_msg(dev->usbdev,
usb_sndctrlpipe(dev->usbdev, 0),
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
index b6722de6..33da3df 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -625,17 +625,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
else
btcoexist->binded = true;
-#if (defined(CONFIG_PCI_HCI))
- btcoexist->chip_interface = BTC_INTF_PCI;
-#elif (defined(CONFIG_USB_HCI))
- btcoexist->chip_interface = BTC_INTF_USB;
-#elif (defined(CONFIG_SDIO_HCI))
- btcoexist->chip_interface = BTC_INTF_SDIO;
-#elif (defined(CONFIG_GSPI_HCI))
- btcoexist->chip_interface = BTC_INTF_GSPI;
-#else
btcoexist->chip_interface = BTC_INTF_UNKNOWN;
-#endif
if (NULL == btcoexist->adapter)
btcoexist->adapter = adapter;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 057b0570..158b5e6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1291,13 +1291,13 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
for (i = 0; i < NET_TX_RING_SIZE; i++) {
skb_entry_set_link(&np->tx_skbs[i], i+1);
np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ np->grant_tx_page[i] = NULL;
}
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
np->rx_skbs[i] = NULL;
np->grant_rx_ref[i] = GRANT_INVALID_REF;
- np->grant_tx_page[i] = NULL;
}
/* A grant for every tx ring slot */
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 170e8e60..372e08c 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -91,7 +91,7 @@ static struct dentry *debugfs_dir;
/* Translate memory window 0,1 to BAR 2,4 */
#define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2)
-static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+static const struct pci_device_id ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
@@ -120,7 +120,8 @@ MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_register_event_callback(struct ntb_device *ndev,
- void (*func)(void *handle, enum ntb_hw_event event))
+ void (*func)(void *handle,
+ enum ntb_hw_event event))
{
if (ndev->event_cb)
return -EINVAL;
@@ -715,9 +716,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
SNB_PBAR4LMT_OFFSET);
/* HW errata on the Limit registers. They can only be
* written when the base register is 4GB aligned and
- * < 32bit. This should already be the case based on the
- * driver defaults, but write the Limit registers first
- * just in case.
+ * < 32bit. This should already be the case based on
+ * the driver defaults, but write the Limit registers
+ * first just in case.
*/
} else {
ndev->limits.max_mw = SNB_MAX_MW;
@@ -739,9 +740,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
/* HW errata on the Limit registers. They can only be
* written when the base register is 4GB aligned and
- * < 32bit. This should already be the case based on the
- * driver defaults, but write the Limit registers first
- * just in case.
+ * < 32bit. This should already be the case based on
+ * the driver defaults, but write the Limit registers
+ * first just in case.
*/
}
@@ -785,7 +786,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
/* B2B_XLAT_OFFSET is a 64bit register, but can
* only take 32bit writes
*/
- writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+ writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
writel(SNB_MBAR01_USD_ADDR >> 32,
ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
@@ -803,7 +804,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
ndev->conn_type = NTB_CONN_RP;
if (xeon_errata_workaround) {
- dev_err(&ndev->pdev->dev,
+ dev_err(&ndev->pdev->dev,
"NTB-RP disabled due to hardware errata. To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
return -EINVAL;
}
@@ -1079,111 +1080,131 @@ static irqreturn_t ntb_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static int ntb_setup_msix(struct ntb_device *ndev)
+static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
{
struct pci_dev *pdev = ndev->pdev;
struct msix_entry *msix;
- int msix_entries;
int rc, i;
- u16 val;
- if (!pdev->msix_cap) {
- rc = -EIO;
- goto err;
- }
+ if (msix_entries < ndev->limits.msix_cnt)
+ return -ENOSPC;
- rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val);
- if (rc)
- goto err;
+ rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
+ if (rc < 0)
+ return rc;
- msix_entries = msix_table_size(val);
- if (msix_entries > ndev->limits.msix_cnt) {
- rc = -EINVAL;
- goto err;
+ for (i = 0; i < msix_entries; i++) {
+ msix = &ndev->msix_entries[i];
+ WARN_ON(!msix->vector);
+
+ if (i == msix_entries - 1) {
+ rc = request_irq(msix->vector,
+ xeon_event_msix_irq, 0,
+ "ntb-event-msix", ndev);
+ if (rc)
+ goto err;
+ } else {
+ rc = request_irq(msix->vector,
+ xeon_callback_msix_irq, 0,
+ "ntb-callback-msix",
+ &ndev->db_cb[i]);
+ if (rc)
+ goto err;
+ }
}
- ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
- GFP_KERNEL);
- if (!ndev->msix_entries) {
- rc = -ENOMEM;
- goto err;
+ ndev->num_msix = msix_entries;
+ ndev->max_cbs = msix_entries - 1;
+
+ return 0;
+
+err:
+ while (--i >= 0) {
+ /* Code never reaches here for entry nr 'ndev->num_msix - 1' */
+ msix = &ndev->msix_entries[i];
+ free_irq(msix->vector, &ndev->db_cb[i]);
}
- for (i = 0; i < msix_entries; i++)
- ndev->msix_entries[i].entry = i;
+ pci_disable_msix(pdev);
+ ndev->num_msix = 0;
- rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
- if (rc < 0)
- goto err1;
- if (rc > 0) {
- /* On SNB, the link interrupt is always tied to 4th vector. If
- * we can't get all 4, then we can't use MSI-X.
- */
- if (ndev->hw_type != BWD_HW) {
- rc = -EIO;
- goto err1;
- }
+ return rc;
+}
- dev_warn(&pdev->dev,
- "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
- rc);
- msix_entries = rc;
+static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ struct msix_entry *msix;
+ int rc, i;
- rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
- if (rc)
- goto err1;
- }
+ msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
+ 1, msix_entries);
+ if (msix_entries < 0)
+ return msix_entries;
for (i = 0; i < msix_entries; i++) {
msix = &ndev->msix_entries[i];
WARN_ON(!msix->vector);
- /* Use the last MSI-X vector for Link status */
- if (ndev->hw_type == BWD_HW) {
- rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
- "ntb-callback-msix", &ndev->db_cb[i]);
- if (rc)
- goto err2;
- } else {
- if (i == msix_entries - 1) {
- rc = request_irq(msix->vector,
- xeon_event_msix_irq, 0,
- "ntb-event-msix", ndev);
- if (rc)
- goto err2;
- } else {
- rc = request_irq(msix->vector,
- xeon_callback_msix_irq, 0,
- "ntb-callback-msix",
- &ndev->db_cb[i]);
- if (rc)
- goto err2;
- }
- }
+ rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+ "ntb-callback-msix", &ndev->db_cb[i]);
+ if (rc)
+ goto err;
}
ndev->num_msix = msix_entries;
+ ndev->max_cbs = msix_entries;
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ free_irq(msix->vector, &ndev->db_cb[i]);
+
+ pci_disable_msix(pdev);
+ ndev->num_msix = 0;
+
+ return rc;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int msix_entries;
+ int rc, i;
+
+ msix_entries = pci_msix_vec_count(pdev);
+ if (msix_entries < 0) {
+ rc = msix_entries;
+ goto err;
+ } else if (msix_entries > ndev->limits.msix_cnt) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+ GFP_KERNEL);
+ if (!ndev->msix_entries) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < msix_entries; i++)
+ ndev->msix_entries[i].entry = i;
+
if (ndev->hw_type == BWD_HW)
- ndev->max_cbs = msix_entries;
+ rc = ntb_setup_bwd_msix(ndev, msix_entries);
else
- ndev->max_cbs = msix_entries - 1;
+ rc = ntb_setup_snb_msix(ndev, msix_entries);
+ if (rc)
+ goto err1;
return 0;
-err2:
- while (--i >= 0) {
- msix = &ndev->msix_entries[i];
- if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
- free_irq(msix->vector, ndev);
- else
- free_irq(msix->vector, &ndev->db_cb[i]);
- }
- pci_disable_msix(pdev);
err1:
kfree(ndev->msix_entries);
- dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
err:
- ndev->num_msix = 0;
+ dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
return rc;
}
@@ -1281,6 +1302,7 @@ static void ntb_free_interrupts(struct ntb_device *ndev)
free_irq(msix->vector, &ndev->db_cb[i]);
}
pci_disable_msix(pdev);
+ kfree(ndev->msix_entries);
} else {
free_irq(pdev->irq, ndev);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index bbdb7ed..465517b 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -45,6 +45,7 @@
* Contact Information:
* Jon Mason <jon.mason@intel.com>
*/
+#include <linux/ntb.h>
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
@@ -60,8 +61,6 @@
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
-#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
@@ -83,9 +82,6 @@ static inline void writeq(u64 val, void __iomem *addr)
#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
(1 << NTB_BAR_45))
-#define NTB_LINK_DOWN 0
-#define NTB_LINK_UP 1
-
#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
#define NTB_MAX_NUM_MW 2
@@ -233,7 +229,7 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
int db_num));
void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
int ntb_register_event_callback(struct ntb_device *ndev,
- void (*event_cb_func) (void *handle,
+ void (*event_cb_func)(void *handle,
enum ntb_hw_event event));
void ntb_unregister_event_callback(struct ntb_device *ndev);
int ntb_get_max_spads(struct ntb_device *ndev);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3217f39..9dd63b8 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -56,7 +56,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/ntb.h>
#include "ntb_hw.h"
#define NTB_TRANSPORT_VERSION 3
@@ -107,8 +106,8 @@ struct ntb_transport_qp {
struct ntb_rx_info __iomem *rx_info;
struct ntb_rx_info *remote_rx_info;
- void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
+ void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
struct list_head tx_free_q;
spinlock_t ntb_tx_free_q_lock;
void __iomem *tx_mw;
@@ -117,8 +116,8 @@ struct ntb_transport_qp {
unsigned int tx_max_entry;
unsigned int tx_max_frame;
- void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
+ void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
struct list_head rx_pend_q;
struct list_head rx_free_q;
spinlock_t ntb_rx_pend_q_lock;
@@ -129,7 +128,7 @@ struct ntb_transport_qp {
unsigned int rx_max_frame;
dma_cookie_t last_cookie;
- void (*event_handler) (void *data, int status);
+ void (*event_handler)(void *data, int status);
struct delayed_work link_work;
struct work_struct link_cleanup;
@@ -480,7 +479,7 @@ static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
}
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
- struct list_head *list)
+ struct list_head *list)
{
struct ntb_queue_entry *entry;
unsigned long flags;
@@ -839,7 +838,7 @@ static void ntb_qp_link_work(struct work_struct *work)
}
static int ntb_transport_init_queue(struct ntb_transport *nt,
- unsigned int qp_num)
+ unsigned int qp_num)
{
struct ntb_transport_qp *qp;
unsigned int num_qps_mw, tx_size;
@@ -1055,7 +1054,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
if (!chan)
goto err;
- if (len < copy_bytes)
+ if (len < copy_bytes)
goto err_wait;
device = chan->device;
@@ -1190,8 +1189,7 @@ out:
return 0;
err:
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
- &qp->rx_pend_q);
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
hdr->flags = 0;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e493240..e00c02d 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -104,16 +104,16 @@ config PINCTRL_BCM2835
select PINMUX
select PINCONF
-config PINCTRL_CAPRI
- bool "Broadcom Capri pinctrl driver"
+config PINCTRL_BCM281XX
+ bool "Broadcom BCM281xx pinctrl driver"
depends on OF
select PINMUX
select PINCONF
select GENERIC_PINCONF
select REGMAP_MMIO
help
- Say Y here to support Broadcom Capri pinctrl driver, which is used for
- the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
+ Say Y here to support Broadcom BCM281xx pinctrl driver, which is used
+ for the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
framework. GPIO is provided by a separate GPIO driver.
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 4b83588..6d3fd62 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
-obj-$(CONFIG_PINCTRL_CAPRI) += pinctrl-capri.o
+obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
new file mode 100644
index 0000000..3bed792
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -0,0 +1,1461 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+/* BCM281XX Pin Control Registers Definitions */
+
+/* Function Select bits are the same for all pin control registers */
+#define BCM281XX_PIN_REG_F_SEL_MASK 0x0700
+#define BCM281XX_PIN_REG_F_SEL_SHIFT 8
+
+/* Standard pin register */
+#define BCM281XX_STD_PIN_REG_DRV_STR_MASK 0x0007
+#define BCM281XX_STD_PIN_REG_DRV_STR_SHIFT 0
+#define BCM281XX_STD_PIN_REG_INPUT_DIS_MASK 0x0008
+#define BCM281XX_STD_PIN_REG_INPUT_DIS_SHIFT 3
+#define BCM281XX_STD_PIN_REG_SLEW_MASK 0x0010
+#define BCM281XX_STD_PIN_REG_SLEW_SHIFT 4
+#define BCM281XX_STD_PIN_REG_PULL_UP_MASK 0x0020
+#define BCM281XX_STD_PIN_REG_PULL_UP_SHIFT 5
+#define BCM281XX_STD_PIN_REG_PULL_DN_MASK 0x0040
+#define BCM281XX_STD_PIN_REG_PULL_DN_SHIFT 6
+#define BCM281XX_STD_PIN_REG_HYST_MASK 0x0080
+#define BCM281XX_STD_PIN_REG_HYST_SHIFT 7
+
+/* I2C pin register */
+#define BCM281XX_I2C_PIN_REG_INPUT_DIS_MASK 0x0004
+#define BCM281XX_I2C_PIN_REG_INPUT_DIS_SHIFT 2
+#define BCM281XX_I2C_PIN_REG_SLEW_MASK 0x0008
+#define BCM281XX_I2C_PIN_REG_SLEW_SHIFT 3
+#define BCM281XX_I2C_PIN_REG_PULL_UP_STR_MASK 0x0070
+#define BCM281XX_I2C_PIN_REG_PULL_UP_STR_SHIFT 4
+
+/* HDMI pin register */
+#define BCM281XX_HDMI_PIN_REG_INPUT_DIS_MASK 0x0008
+#define BCM281XX_HDMI_PIN_REG_INPUT_DIS_SHIFT 3
+#define BCM281XX_HDMI_PIN_REG_MODE_MASK 0x0010
+#define BCM281XX_HDMI_PIN_REG_MODE_SHIFT 4
+
+/**
+ * bcm281xx_pin_type - types of pin register
+ */
+enum bcm281xx_pin_type {
+ BCM281XX_PIN_TYPE_UNKNOWN = 0,
+ BCM281XX_PIN_TYPE_STD,
+ BCM281XX_PIN_TYPE_I2C,
+ BCM281XX_PIN_TYPE_HDMI,
+};
+
+static enum bcm281xx_pin_type std_pin = BCM281XX_PIN_TYPE_STD;
+static enum bcm281xx_pin_type i2c_pin = BCM281XX_PIN_TYPE_I2C;
+static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
+
+/**
+ * bcm281xx_pin_function - define pin function
+ */
+struct bcm281xx_pin_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned ngroups;
+};
+
+/**
+ * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data
+ * @reg_base - base of pinctrl registers
+ */
+struct bcm281xx_pinctrl_data {
+ void __iomem *reg_base;
+
+ /* List of all pins */
+ const struct pinctrl_pin_desc *pins;
+ const unsigned npins;
+
+ const struct bcm281xx_pin_function *functions;
+ const unsigned nfunctions;
+
+ struct regmap *regmap;
+};
+
+/*
+ * Pin number definition. The order here must be the same as defined in the
+ * PADCTRLREG block in the RDB.
+ */
+#define BCM281XX_PIN_ADCSYNC 0
+#define BCM281XX_PIN_BAT_RM 1
+#define BCM281XX_PIN_BSC1_SCL 2
+#define BCM281XX_PIN_BSC1_SDA 3
+#define BCM281XX_PIN_BSC2_SCL 4
+#define BCM281XX_PIN_BSC2_SDA 5
+#define BCM281XX_PIN_CLASSGPWR 6
+#define BCM281XX_PIN_CLK_CX8 7
+#define BCM281XX_PIN_CLKOUT_0 8
+#define BCM281XX_PIN_CLKOUT_1 9
+#define BCM281XX_PIN_CLKOUT_2 10
+#define BCM281XX_PIN_CLKOUT_3 11
+#define BCM281XX_PIN_CLKREQ_IN_0 12
+#define BCM281XX_PIN_CLKREQ_IN_1 13
+#define BCM281XX_PIN_CWS_SYS_REQ1 14
+#define BCM281XX_PIN_CWS_SYS_REQ2 15
+#define BCM281XX_PIN_CWS_SYS_REQ3 16
+#define BCM281XX_PIN_DIGMIC1_CLK 17
+#define BCM281XX_PIN_DIGMIC1_DQ 18
+#define BCM281XX_PIN_DIGMIC2_CLK 19
+#define BCM281XX_PIN_DIGMIC2_DQ 20
+#define BCM281XX_PIN_GPEN13 21
+#define BCM281XX_PIN_GPEN14 22
+#define BCM281XX_PIN_GPEN15 23
+#define BCM281XX_PIN_GPIO00 24
+#define BCM281XX_PIN_GPIO01 25
+#define BCM281XX_PIN_GPIO02 26
+#define BCM281XX_PIN_GPIO03 27
+#define BCM281XX_PIN_GPIO04 28
+#define BCM281XX_PIN_GPIO05 29
+#define BCM281XX_PIN_GPIO06 30
+#define BCM281XX_PIN_GPIO07 31
+#define BCM281XX_PIN_GPIO08 32
+#define BCM281XX_PIN_GPIO09 33
+#define BCM281XX_PIN_GPIO10 34
+#define BCM281XX_PIN_GPIO11 35
+#define BCM281XX_PIN_GPIO12 36
+#define BCM281XX_PIN_GPIO13 37
+#define BCM281XX_PIN_GPIO14 38
+#define BCM281XX_PIN_GPS_PABLANK 39
+#define BCM281XX_PIN_GPS_TMARK 40
+#define BCM281XX_PIN_HDMI_SCL 41
+#define BCM281XX_PIN_HDMI_SDA 42
+#define BCM281XX_PIN_IC_DM 43
+#define BCM281XX_PIN_IC_DP 44
+#define BCM281XX_PIN_KP_COL_IP_0 45
+#define BCM281XX_PIN_KP_COL_IP_1 46
+#define BCM281XX_PIN_KP_COL_IP_2 47
+#define BCM281XX_PIN_KP_COL_IP_3 48
+#define BCM281XX_PIN_KP_ROW_OP_0 49
+#define BCM281XX_PIN_KP_ROW_OP_1 50
+#define BCM281XX_PIN_KP_ROW_OP_2 51
+#define BCM281XX_PIN_KP_ROW_OP_3 52
+#define BCM281XX_PIN_LCD_B_0 53
+#define BCM281XX_PIN_LCD_B_1 54
+#define BCM281XX_PIN_LCD_B_2 55
+#define BCM281XX_PIN_LCD_B_3 56
+#define BCM281XX_PIN_LCD_B_4 57
+#define BCM281XX_PIN_LCD_B_5 58
+#define BCM281XX_PIN_LCD_B_6 59
+#define BCM281XX_PIN_LCD_B_7 60
+#define BCM281XX_PIN_LCD_G_0 61
+#define BCM281XX_PIN_LCD_G_1 62
+#define BCM281XX_PIN_LCD_G_2 63
+#define BCM281XX_PIN_LCD_G_3 64
+#define BCM281XX_PIN_LCD_G_4 65
+#define BCM281XX_PIN_LCD_G_5 66
+#define BCM281XX_PIN_LCD_G_6 67
+#define BCM281XX_PIN_LCD_G_7 68
+#define BCM281XX_PIN_LCD_HSYNC 69
+#define BCM281XX_PIN_LCD_OE 70
+#define BCM281XX_PIN_LCD_PCLK 71
+#define BCM281XX_PIN_LCD_R_0 72
+#define BCM281XX_PIN_LCD_R_1 73
+#define BCM281XX_PIN_LCD_R_2 74
+#define BCM281XX_PIN_LCD_R_3 75
+#define BCM281XX_PIN_LCD_R_4 76
+#define BCM281XX_PIN_LCD_R_5 77
+#define BCM281XX_PIN_LCD_R_6 78
+#define BCM281XX_PIN_LCD_R_7 79
+#define BCM281XX_PIN_LCD_VSYNC 80
+#define BCM281XX_PIN_MDMGPIO0 81
+#define BCM281XX_PIN_MDMGPIO1 82
+#define BCM281XX_PIN_MDMGPIO2 83
+#define BCM281XX_PIN_MDMGPIO3 84
+#define BCM281XX_PIN_MDMGPIO4 85
+#define BCM281XX_PIN_MDMGPIO5 86
+#define BCM281XX_PIN_MDMGPIO6 87
+#define BCM281XX_PIN_MDMGPIO7 88
+#define BCM281XX_PIN_MDMGPIO8 89
+#define BCM281XX_PIN_MPHI_DATA_0 90
+#define BCM281XX_PIN_MPHI_DATA_1 91
+#define BCM281XX_PIN_MPHI_DATA_2 92
+#define BCM281XX_PIN_MPHI_DATA_3 93
+#define BCM281XX_PIN_MPHI_DATA_4 94
+#define BCM281XX_PIN_MPHI_DATA_5 95
+#define BCM281XX_PIN_MPHI_DATA_6 96
+#define BCM281XX_PIN_MPHI_DATA_7 97
+#define BCM281XX_PIN_MPHI_DATA_8 98
+#define BCM281XX_PIN_MPHI_DATA_9 99
+#define BCM281XX_PIN_MPHI_DATA_10 100
+#define BCM281XX_PIN_MPHI_DATA_11 101
+#define BCM281XX_PIN_MPHI_DATA_12 102
+#define BCM281XX_PIN_MPHI_DATA_13 103
+#define BCM281XX_PIN_MPHI_DATA_14 104
+#define BCM281XX_PIN_MPHI_DATA_15 105
+#define BCM281XX_PIN_MPHI_HA0 106
+#define BCM281XX_PIN_MPHI_HAT0 107
+#define BCM281XX_PIN_MPHI_HAT1 108
+#define BCM281XX_PIN_MPHI_HCE0_N 109
+#define BCM281XX_PIN_MPHI_HCE1_N 110
+#define BCM281XX_PIN_MPHI_HRD_N 111
+#define BCM281XX_PIN_MPHI_HWR_N 112
+#define BCM281XX_PIN_MPHI_RUN0 113
+#define BCM281XX_PIN_MPHI_RUN1 114
+#define BCM281XX_PIN_MTX_SCAN_CLK 115
+#define BCM281XX_PIN_MTX_SCAN_DATA 116
+#define BCM281XX_PIN_NAND_AD_0 117
+#define BCM281XX_PIN_NAND_AD_1 118
+#define BCM281XX_PIN_NAND_AD_2 119
+#define BCM281XX_PIN_NAND_AD_3 120
+#define BCM281XX_PIN_NAND_AD_4 121
+#define BCM281XX_PIN_NAND_AD_5 122
+#define BCM281XX_PIN_NAND_AD_6 123
+#define BCM281XX_PIN_NAND_AD_7 124
+#define BCM281XX_PIN_NAND_ALE 125
+#define BCM281XX_PIN_NAND_CEN_0 126
+#define BCM281XX_PIN_NAND_CEN_1 127
+#define BCM281XX_PIN_NAND_CLE 128
+#define BCM281XX_PIN_NAND_OEN 129
+#define BCM281XX_PIN_NAND_RDY_0 130
+#define BCM281XX_PIN_NAND_RDY_1 131
+#define BCM281XX_PIN_NAND_WEN 132
+#define BCM281XX_PIN_NAND_WP 133
+#define BCM281XX_PIN_PC1 134
+#define BCM281XX_PIN_PC2 135
+#define BCM281XX_PIN_PMU_INT 136
+#define BCM281XX_PIN_PMU_SCL 137
+#define BCM281XX_PIN_PMU_SDA 138
+#define BCM281XX_PIN_RFST2G_MTSLOTEN3G 139
+#define BCM281XX_PIN_RGMII_0_RX_CTL 140
+#define BCM281XX_PIN_RGMII_0_RXC 141
+#define BCM281XX_PIN_RGMII_0_RXD_0 142
+#define BCM281XX_PIN_RGMII_0_RXD_1 143
+#define BCM281XX_PIN_RGMII_0_RXD_2 144
+#define BCM281XX_PIN_RGMII_0_RXD_3 145
+#define BCM281XX_PIN_RGMII_0_TX_CTL 146
+#define BCM281XX_PIN_RGMII_0_TXC 147
+#define BCM281XX_PIN_RGMII_0_TXD_0 148
+#define BCM281XX_PIN_RGMII_0_TXD_1 149
+#define BCM281XX_PIN_RGMII_0_TXD_2 150
+#define BCM281XX_PIN_RGMII_0_TXD_3 151
+#define BCM281XX_PIN_RGMII_1_RX_CTL 152
+#define BCM281XX_PIN_RGMII_1_RXC 153
+#define BCM281XX_PIN_RGMII_1_RXD_0 154
+#define BCM281XX_PIN_RGMII_1_RXD_1 155
+#define BCM281XX_PIN_RGMII_1_RXD_2 156
+#define BCM281XX_PIN_RGMII_1_RXD_3 157
+#define BCM281XX_PIN_RGMII_1_TX_CTL 158
+#define BCM281XX_PIN_RGMII_1_TXC 159
+#define BCM281XX_PIN_RGMII_1_TXD_0 160
+#define BCM281XX_PIN_RGMII_1_TXD_1 161
+#define BCM281XX_PIN_RGMII_1_TXD_2 162
+#define BCM281XX_PIN_RGMII_1_TXD_3 163
+#define BCM281XX_PIN_RGMII_GPIO_0 164
+#define BCM281XX_PIN_RGMII_GPIO_1 165
+#define BCM281XX_PIN_RGMII_GPIO_2 166
+#define BCM281XX_PIN_RGMII_GPIO_3 167
+#define BCM281XX_PIN_RTXDATA2G_TXDATA3G1 168
+#define BCM281XX_PIN_RTXEN2G_TXDATA3G2 169
+#define BCM281XX_PIN_RXDATA3G0 170
+#define BCM281XX_PIN_RXDATA3G1 171
+#define BCM281XX_PIN_RXDATA3G2 172
+#define BCM281XX_PIN_SDIO1_CLK 173
+#define BCM281XX_PIN_SDIO1_CMD 174
+#define BCM281XX_PIN_SDIO1_DATA_0 175
+#define BCM281XX_PIN_SDIO1_DATA_1 176
+#define BCM281XX_PIN_SDIO1_DATA_2 177
+#define BCM281XX_PIN_SDIO1_DATA_3 178
+#define BCM281XX_PIN_SDIO4_CLK 179
+#define BCM281XX_PIN_SDIO4_CMD 180
+#define BCM281XX_PIN_SDIO4_DATA_0 181
+#define BCM281XX_PIN_SDIO4_DATA_1 182
+#define BCM281XX_PIN_SDIO4_DATA_2 183
+#define BCM281XX_PIN_SDIO4_DATA_3 184
+#define BCM281XX_PIN_SIM_CLK 185
+#define BCM281XX_PIN_SIM_DATA 186
+#define BCM281XX_PIN_SIM_DET 187
+#define BCM281XX_PIN_SIM_RESETN 188
+#define BCM281XX_PIN_SIM2_CLK 189
+#define BCM281XX_PIN_SIM2_DATA 190
+#define BCM281XX_PIN_SIM2_DET 191
+#define BCM281XX_PIN_SIM2_RESETN 192
+#define BCM281XX_PIN_SRI_C 193
+#define BCM281XX_PIN_SRI_D 194
+#define BCM281XX_PIN_SRI_E 195
+#define BCM281XX_PIN_SSP_EXTCLK 196
+#define BCM281XX_PIN_SSP0_CLK 197
+#define BCM281XX_PIN_SSP0_FS 198
+#define BCM281XX_PIN_SSP0_RXD 199
+#define BCM281XX_PIN_SSP0_TXD 200
+#define BCM281XX_PIN_SSP2_CLK 201
+#define BCM281XX_PIN_SSP2_FS_0 202
+#define BCM281XX_PIN_SSP2_FS_1 203
+#define BCM281XX_PIN_SSP2_FS_2 204
+#define BCM281XX_PIN_SSP2_FS_3 205
+#define BCM281XX_PIN_SSP2_RXD_0 206
+#define BCM281XX_PIN_SSP2_RXD_1 207
+#define BCM281XX_PIN_SSP2_TXD_0 208
+#define BCM281XX_PIN_SSP2_TXD_1 209
+#define BCM281XX_PIN_SSP3_CLK 210
+#define BCM281XX_PIN_SSP3_FS 211
+#define BCM281XX_PIN_SSP3_RXD 212
+#define BCM281XX_PIN_SSP3_TXD 213
+#define BCM281XX_PIN_SSP4_CLK 214
+#define BCM281XX_PIN_SSP4_FS 215
+#define BCM281XX_PIN_SSP4_RXD 216
+#define BCM281XX_PIN_SSP4_TXD 217
+#define BCM281XX_PIN_SSP5_CLK 218
+#define BCM281XX_PIN_SSP5_FS 219
+#define BCM281XX_PIN_SSP5_RXD 220
+#define BCM281XX_PIN_SSP5_TXD 221
+#define BCM281XX_PIN_SSP6_CLK 222
+#define BCM281XX_PIN_SSP6_FS 223
+#define BCM281XX_PIN_SSP6_RXD 224
+#define BCM281XX_PIN_SSP6_TXD 225
+#define BCM281XX_PIN_STAT_1 226
+#define BCM281XX_PIN_STAT_2 227
+#define BCM281XX_PIN_SYSCLKEN 228
+#define BCM281XX_PIN_TRACECLK 229
+#define BCM281XX_PIN_TRACEDT00 230
+#define BCM281XX_PIN_TRACEDT01 231
+#define BCM281XX_PIN_TRACEDT02 232
+#define BCM281XX_PIN_TRACEDT03 233
+#define BCM281XX_PIN_TRACEDT04 234
+#define BCM281XX_PIN_TRACEDT05 235
+#define BCM281XX_PIN_TRACEDT06 236
+#define BCM281XX_PIN_TRACEDT07 237
+#define BCM281XX_PIN_TRACEDT08 238
+#define BCM281XX_PIN_TRACEDT09 239
+#define BCM281XX_PIN_TRACEDT10 240
+#define BCM281XX_PIN_TRACEDT11 241
+#define BCM281XX_PIN_TRACEDT12 242
+#define BCM281XX_PIN_TRACEDT13 243
+#define BCM281XX_PIN_TRACEDT14 244
+#define BCM281XX_PIN_TRACEDT15 245
+#define BCM281XX_PIN_TXDATA3G0 246
+#define BCM281XX_PIN_TXPWRIND 247
+#define BCM281XX_PIN_UARTB1_UCTS 248
+#define BCM281XX_PIN_UARTB1_URTS 249
+#define BCM281XX_PIN_UARTB1_URXD 250
+#define BCM281XX_PIN_UARTB1_UTXD 251
+#define BCM281XX_PIN_UARTB2_URXD 252
+#define BCM281XX_PIN_UARTB2_UTXD 253
+#define BCM281XX_PIN_UARTB3_UCTS 254
+#define BCM281XX_PIN_UARTB3_URTS 255
+#define BCM281XX_PIN_UARTB3_URXD 256
+#define BCM281XX_PIN_UARTB3_UTXD 257
+#define BCM281XX_PIN_UARTB4_UCTS 258
+#define BCM281XX_PIN_UARTB4_URTS 259
+#define BCM281XX_PIN_UARTB4_URXD 260
+#define BCM281XX_PIN_UARTB4_UTXD 261
+#define BCM281XX_PIN_VC_CAM1_SCL 262
+#define BCM281XX_PIN_VC_CAM1_SDA 263
+#define BCM281XX_PIN_VC_CAM2_SCL 264
+#define BCM281XX_PIN_VC_CAM2_SDA 265
+#define BCM281XX_PIN_VC_CAM3_SCL 266
+#define BCM281XX_PIN_VC_CAM3_SDA 267
+
+#define BCM281XX_PIN_DESC(a, b, c) \
+ { .number = a, .name = b, .drv_data = &c##_pin }
+
+/*
+ * Pin description definition. The order here must be the same as defined in
+ * the PADCTRLREG block in the RDB, since the pin number is used as an index
+ * into this array.
+ */
+static const struct pinctrl_pin_desc bcm281xx_pinctrl_pins[] = {
+ BCM281XX_PIN_DESC(BCM281XX_PIN_ADCSYNC, "adcsync", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_BAT_RM, "bat_rm", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_BSC1_SCL, "bsc1_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_BSC1_SDA, "bsc1_sda", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_BSC2_SCL, "bsc2_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_BSC2_SDA, "bsc2_sda", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLASSGPWR, "classgpwr", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLK_CX8, "clk_cx8", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_0, "clkout_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_1, "clkout_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_2, "clkout_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_3, "clkout_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKREQ_IN_0, "clkreq_in_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CLKREQ_IN_1, "clkreq_in_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ1, "cws_sys_req1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ2, "cws_sys_req2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ3, "cws_sys_req3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC1_CLK, "digmic1_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC1_DQ, "digmic1_dq", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC2_CLK, "digmic2_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC2_DQ, "digmic2_dq", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN13, "gpen13", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN14, "gpen14", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN15, "gpen15", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO00, "gpio00", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO01, "gpio01", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO02, "gpio02", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO03, "gpio03", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO04, "gpio04", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO05, "gpio05", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO06, "gpio06", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO07, "gpio07", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO08, "gpio08", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO09, "gpio09", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO10, "gpio10", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO11, "gpio11", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO12, "gpio12", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO13, "gpio13", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO14, "gpio14", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPS_PABLANK, "gps_pablank", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_GPS_TMARK, "gps_tmark", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_HDMI_SCL, "hdmi_scl", hdmi),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_HDMI_SDA, "hdmi_sda", hdmi),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_IC_DM, "ic_dm", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_IC_DP, "ic_dp", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_0, "kp_col_ip_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_1, "kp_col_ip_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_2, "kp_col_ip_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_3, "kp_col_ip_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_0, "kp_row_op_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_1, "kp_row_op_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_2, "kp_row_op_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_3, "kp_row_op_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_0, "lcd_b_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_1, "lcd_b_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_2, "lcd_b_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_3, "lcd_b_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_4, "lcd_b_4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_5, "lcd_b_5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_6, "lcd_b_6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_7, "lcd_b_7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_0, "lcd_g_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_1, "lcd_g_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_2, "lcd_g_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_3, "lcd_g_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_4, "lcd_g_4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_5, "lcd_g_5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_6, "lcd_g_6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_7, "lcd_g_7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_HSYNC, "lcd_hsync", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_OE, "lcd_oe", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_PCLK, "lcd_pclk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_0, "lcd_r_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_1, "lcd_r_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_2, "lcd_r_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_3, "lcd_r_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_4, "lcd_r_4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_5, "lcd_r_5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_6, "lcd_r_6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_7, "lcd_r_7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_VSYNC, "lcd_vsync", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO0, "mdmgpio0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO1, "mdmgpio1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO2, "mdmgpio2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO3, "mdmgpio3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO4, "mdmgpio4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO5, "mdmgpio5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO6, "mdmgpio6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO7, "mdmgpio7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO8, "mdmgpio8", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_0, "mphi_data_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_1, "mphi_data_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_2, "mphi_data_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_3, "mphi_data_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_4, "mphi_data_4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_5, "mphi_data_5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_6, "mphi_data_6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_7, "mphi_data_7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_8, "mphi_data_8", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_9, "mphi_data_9", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_10, "mphi_data_10", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_11, "mphi_data_11", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_12, "mphi_data_12", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_13, "mphi_data_13", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_14, "mphi_data_14", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_15, "mphi_data_15", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HA0, "mphi_ha0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HAT0, "mphi_hat0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HAT1, "mphi_hat1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HCE0_N, "mphi_hce0_n", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HCE1_N, "mphi_hce1_n", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HRD_N, "mphi_hrd_n", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HWR_N, "mphi_hwr_n", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_RUN0, "mphi_run0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_RUN1, "mphi_run1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MTX_SCAN_CLK, "mtx_scan_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_MTX_SCAN_DATA, "mtx_scan_data", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_0, "nand_ad_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_1, "nand_ad_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_2, "nand_ad_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_3, "nand_ad_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_4, "nand_ad_4", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_5, "nand_ad_5", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_6, "nand_ad_6", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_7, "nand_ad_7", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_ALE, "nand_ale", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CEN_0, "nand_cen_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CEN_1, "nand_cen_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CLE, "nand_cle", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_OEN, "nand_oen", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_RDY_0, "nand_rdy_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_RDY_1, "nand_rdy_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_WEN, "nand_wen", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_WP, "nand_wp", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_PC1, "pc1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_PC2, "pc2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_INT, "pmu_int", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_SCL, "pmu_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_SDA, "pmu_sda", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g",
+ std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RX_CTL, "rgmii_0_rx_ctl", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXC, "rgmii_0_rxc", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_0, "rgmii_0_rxd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_1, "rgmii_0_rxd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_2, "rgmii_0_rxd_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_3, "rgmii_0_rxd_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TX_CTL, "rgmii_0_tx_ctl", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXC, "rgmii_0_txc", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_0, "rgmii_0_txd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_1, "rgmii_0_txd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_2, "rgmii_0_txd_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_3, "rgmii_0_txd_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RX_CTL, "rgmii_1_rx_ctl", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXC, "rgmii_1_rxc", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_0, "rgmii_1_rxd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_1, "rgmii_1_rxd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_2, "rgmii_1_rxd_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_3, "rgmii_1_rxd_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TX_CTL, "rgmii_1_tx_ctl", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXC, "rgmii_1_txc", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_0, "rgmii_1_txd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_1, "rgmii_1_txd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_2, "rgmii_1_txd_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_3, "rgmii_1_txd_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_0, "rgmii_gpio_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_1, "rgmii_gpio_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_2, "rgmii_gpio_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_3, "rgmii_gpio_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RTXDATA2G_TXDATA3G1,
+ "rtxdata2g_txdata3g1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2",
+ std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G0, "rxdata3g0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G1, "rxdata3g1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G2, "rxdata3g2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_CLK, "sdio1_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_CMD, "sdio1_cmd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_0, "sdio1_data_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_1, "sdio1_data_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_2, "sdio1_data_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_3, "sdio1_data_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_CLK, "sdio4_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_CMD, "sdio4_cmd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_0, "sdio4_data_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_1, "sdio4_data_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_2, "sdio4_data_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_3, "sdio4_data_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_CLK, "sim_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_DATA, "sim_data", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_DET, "sim_det", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_RESETN, "sim_resetn", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_CLK, "sim2_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_DATA, "sim2_data", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_DET, "sim2_det", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_RESETN, "sim2_resetn", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_C, "sri_c", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_D, "sri_d", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_E, "sri_e", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP_EXTCLK, "ssp_extclk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_CLK, "ssp0_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_FS, "ssp0_fs", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_RXD, "ssp0_rxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_TXD, "ssp0_txd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_CLK, "ssp2_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_0, "ssp2_fs_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_1, "ssp2_fs_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_2, "ssp2_fs_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_3, "ssp2_fs_3", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_RXD_0, "ssp2_rxd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_RXD_1, "ssp2_rxd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_TXD_0, "ssp2_txd_0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_TXD_1, "ssp2_txd_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_CLK, "ssp3_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_FS, "ssp3_fs", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_RXD, "ssp3_rxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_TXD, "ssp3_txd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_CLK, "ssp4_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_FS, "ssp4_fs", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_RXD, "ssp4_rxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_TXD, "ssp4_txd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_CLK, "ssp5_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_FS, "ssp5_fs", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_RXD, "ssp5_rxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_TXD, "ssp5_txd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_CLK, "ssp6_clk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_FS, "ssp6_fs", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_RXD, "ssp6_rxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_TXD, "ssp6_txd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_STAT_1, "stat_1", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_STAT_2, "stat_2", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_SYSCLKEN, "sysclken", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACECLK, "traceclk", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT00, "tracedt00", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT01, "tracedt01", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT02, "tracedt02", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT03, "tracedt03", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT04, "tracedt04", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT05, "tracedt05", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT06, "tracedt06", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT07, "tracedt07", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT08, "tracedt08", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT09, "tracedt09", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT10, "tracedt10", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT11, "tracedt11", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT12, "tracedt12", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT13, "tracedt13", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT14, "tracedt14", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT15, "tracedt15", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TXDATA3G0, "txdata3g0", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_TXPWRIND, "txpwrind", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_UCTS, "uartb1_ucts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_URTS, "uartb1_urts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_URXD, "uartb1_urxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_UTXD, "uartb1_utxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB2_URXD, "uartb2_urxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB2_UTXD, "uartb2_utxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_UCTS, "uartb3_ucts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_URTS, "uartb3_urts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_URXD, "uartb3_urxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_UTXD, "uartb3_utxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_UCTS, "uartb4_ucts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_URTS, "uartb4_urts", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_URXD, "uartb4_urxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_UTXD, "uartb4_utxd", std),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM1_SCL, "vc_cam1_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM1_SDA, "vc_cam1_sda", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM2_SCL, "vc_cam2_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM2_SDA, "vc_cam2_sda", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM3_SCL, "vc_cam3_scl", i2c),
+ BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM3_SDA, "vc_cam3_sda", i2c),
+};
+
+static const char * const bcm281xx_alt_groups[] = {
+ "adcsync",
+ "bat_rm",
+ "bsc1_scl",
+ "bsc1_sda",
+ "bsc2_scl",
+ "bsc2_sda",
+ "classgpwr",
+ "clk_cx8",
+ "clkout_0",
+ "clkout_1",
+ "clkout_2",
+ "clkout_3",
+ "clkreq_in_0",
+ "clkreq_in_1",
+ "cws_sys_req1",
+ "cws_sys_req2",
+ "cws_sys_req3",
+ "digmic1_clk",
+ "digmic1_dq",
+ "digmic2_clk",
+ "digmic2_dq",
+ "gpen13",
+ "gpen14",
+ "gpen15",
+ "gpio00",
+ "gpio01",
+ "gpio02",
+ "gpio03",
+ "gpio04",
+ "gpio05",
+ "gpio06",
+ "gpio07",
+ "gpio08",
+ "gpio09",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gps_pablank",
+ "gps_tmark",
+ "hdmi_scl",
+ "hdmi_sda",
+ "ic_dm",
+ "ic_dp",
+ "kp_col_ip_0",
+ "kp_col_ip_1",
+ "kp_col_ip_2",
+ "kp_col_ip_3",
+ "kp_row_op_0",
+ "kp_row_op_1",
+ "kp_row_op_2",
+ "kp_row_op_3",
+ "lcd_b_0",
+ "lcd_b_1",
+ "lcd_b_2",
+ "lcd_b_3",
+ "lcd_b_4",
+ "lcd_b_5",
+ "lcd_b_6",
+ "lcd_b_7",
+ "lcd_g_0",
+ "lcd_g_1",
+ "lcd_g_2",
+ "lcd_g_3",
+ "lcd_g_4",
+ "lcd_g_5",
+ "lcd_g_6",
+ "lcd_g_7",
+ "lcd_hsync",
+ "lcd_oe",
+ "lcd_pclk",
+ "lcd_r_0",
+ "lcd_r_1",
+ "lcd_r_2",
+ "lcd_r_3",
+ "lcd_r_4",
+ "lcd_r_5",
+ "lcd_r_6",
+ "lcd_r_7",
+ "lcd_vsync",
+ "mdmgpio0",
+ "mdmgpio1",
+ "mdmgpio2",
+ "mdmgpio3",
+ "mdmgpio4",
+ "mdmgpio5",
+ "mdmgpio6",
+ "mdmgpio7",
+ "mdmgpio8",
+ "mphi_data_0",
+ "mphi_data_1",
+ "mphi_data_2",
+ "mphi_data_3",
+ "mphi_data_4",
+ "mphi_data_5",
+ "mphi_data_6",
+ "mphi_data_7",
+ "mphi_data_8",
+ "mphi_data_9",
+ "mphi_data_10",
+ "mphi_data_11",
+ "mphi_data_12",
+ "mphi_data_13",
+ "mphi_data_14",
+ "mphi_data_15",
+ "mphi_ha0",
+ "mphi_hat0",
+ "mphi_hat1",
+ "mphi_hce0_n",
+ "mphi_hce1_n",
+ "mphi_hrd_n",
+ "mphi_hwr_n",
+ "mphi_run0",
+ "mphi_run1",
+ "mtx_scan_clk",
+ "mtx_scan_data",
+ "nand_ad_0",
+ "nand_ad_1",
+ "nand_ad_2",
+ "nand_ad_3",
+ "nand_ad_4",
+ "nand_ad_5",
+ "nand_ad_6",
+ "nand_ad_7",
+ "nand_ale",
+ "nand_cen_0",
+ "nand_cen_1",
+ "nand_cle",
+ "nand_oen",
+ "nand_rdy_0",
+ "nand_rdy_1",
+ "nand_wen",
+ "nand_wp",
+ "pc1",
+ "pc2",
+ "pmu_int",
+ "pmu_scl",
+ "pmu_sda",
+ "rfst2g_mtsloten3g",
+ "rgmii_0_rx_ctl",
+ "rgmii_0_rxc",
+ "rgmii_0_rxd_0",
+ "rgmii_0_rxd_1",
+ "rgmii_0_rxd_2",
+ "rgmii_0_rxd_3",
+ "rgmii_0_tx_ctl",
+ "rgmii_0_txc",
+ "rgmii_0_txd_0",
+ "rgmii_0_txd_1",
+ "rgmii_0_txd_2",
+ "rgmii_0_txd_3",
+ "rgmii_1_rx_ctl",
+ "rgmii_1_rxc",
+ "rgmii_1_rxd_0",
+ "rgmii_1_rxd_1",
+ "rgmii_1_rxd_2",
+ "rgmii_1_rxd_3",
+ "rgmii_1_tx_ctl",
+ "rgmii_1_txc",
+ "rgmii_1_txd_0",
+ "rgmii_1_txd_1",
+ "rgmii_1_txd_2",
+ "rgmii_1_txd_3",
+ "rgmii_gpio_0",
+ "rgmii_gpio_1",
+ "rgmii_gpio_2",
+ "rgmii_gpio_3",
+ "rtxdata2g_txdata3g1",
+ "rtxen2g_txdata3g2",
+ "rxdata3g0",
+ "rxdata3g1",
+ "rxdata3g2",
+ "sdio1_clk",
+ "sdio1_cmd",
+ "sdio1_data_0",
+ "sdio1_data_1",
+ "sdio1_data_2",
+ "sdio1_data_3",
+ "sdio4_clk",
+ "sdio4_cmd",
+ "sdio4_data_0",
+ "sdio4_data_1",
+ "sdio4_data_2",
+ "sdio4_data_3",
+ "sim_clk",
+ "sim_data",
+ "sim_det",
+ "sim_resetn",
+ "sim2_clk",
+ "sim2_data",
+ "sim2_det",
+ "sim2_resetn",
+ "sri_c",
+ "sri_d",
+ "sri_e",
+ "ssp_extclk",
+ "ssp0_clk",
+ "ssp0_fs",
+ "ssp0_rxd",
+ "ssp0_txd",
+ "ssp2_clk",
+ "ssp2_fs_0",
+ "ssp2_fs_1",
+ "ssp2_fs_2",
+ "ssp2_fs_3",
+ "ssp2_rxd_0",
+ "ssp2_rxd_1",
+ "ssp2_txd_0",
+ "ssp2_txd_1",
+ "ssp3_clk",
+ "ssp3_fs",
+ "ssp3_rxd",
+ "ssp3_txd",
+ "ssp4_clk",
+ "ssp4_fs",
+ "ssp4_rxd",
+ "ssp4_txd",
+ "ssp5_clk",
+ "ssp5_fs",
+ "ssp5_rxd",
+ "ssp5_txd",
+ "ssp6_clk",
+ "ssp6_fs",
+ "ssp6_rxd",
+ "ssp6_txd",
+ "stat_1",
+ "stat_2",
+ "sysclken",
+ "traceclk",
+ "tracedt00",
+ "tracedt01",
+ "tracedt02",
+ "tracedt03",
+ "tracedt04",
+ "tracedt05",
+ "tracedt06",
+ "tracedt07",
+ "tracedt08",
+ "tracedt09",
+ "tracedt10",
+ "tracedt11",
+ "tracedt12",
+ "tracedt13",
+ "tracedt14",
+ "tracedt15",
+ "txdata3g0",
+ "txpwrind",
+ "uartb1_ucts",
+ "uartb1_urts",
+ "uartb1_urxd",
+ "uartb1_utxd",
+ "uartb2_urxd",
+ "uartb2_utxd",
+ "uartb3_ucts",
+ "uartb3_urts",
+ "uartb3_urxd",
+ "uartb3_utxd",
+ "uartb4_ucts",
+ "uartb4_urts",
+ "uartb4_urxd",
+ "uartb4_utxd",
+ "vc_cam1_scl",
+ "vc_cam1_sda",
+ "vc_cam2_scl",
+ "vc_cam2_sda",
+ "vc_cam3_scl",
+ "vc_cam3_sda",
+};
+
+/* Every pin can implement all ALT1-ALT4 functions */
+#define BCM281XX_PIN_FUNCTION(fcn_name) \
+{ \
+ .name = #fcn_name, \
+ .groups = bcm281xx_alt_groups, \
+ .ngroups = ARRAY_SIZE(bcm281xx_alt_groups), \
+}
+
+static const struct bcm281xx_pin_function bcm281xx_functions[] = {
+ BCM281XX_PIN_FUNCTION(alt1),
+ BCM281XX_PIN_FUNCTION(alt2),
+ BCM281XX_PIN_FUNCTION(alt3),
+ BCM281XX_PIN_FUNCTION(alt4),
+};
+
+static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = {
+ .pins = bcm281xx_pinctrl_pins,
+ .npins = ARRAY_SIZE(bcm281xx_pinctrl_pins),
+ .functions = bcm281xx_functions,
+ .nfunctions = ARRAY_SIZE(bcm281xx_functions),
+};
+
+static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev,
+ unsigned pin)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ if (pin >= pdata->npins)
+ return BCM281XX_PIN_TYPE_UNKNOWN;
+
+ return *(enum bcm281xx_pin_type *)(pdata->pins[pin].drv_data);
+}
+
+#define BCM281XX_PIN_SHIFT(type, param) \
+ (BCM281XX_ ## type ## _PIN_REG_ ## param ## _SHIFT)
+
+#define BCM281XX_PIN_MASK(type, param) \
+ (BCM281XX_ ## type ## _PIN_REG_ ## param ## _MASK)
+
+/*
+ * This helper function is used to build up the value and mask used to write to
+ * a pin register, but does not actually write to the register.
+ */
+static inline void bcm281xx_pin_update(u32 *reg_val, u32 *reg_mask,
+ u32 param_val, u32 param_shift,
+ u32 param_mask)
+{
+ *reg_val &= ~param_mask;
+ *reg_val |= (param_val << param_shift) & param_mask;
+ *reg_mask |= param_mask;
+}
+
+static struct regmap_config bcm281xx_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = BCM281XX_PIN_VC_CAM3_SDA,
+};
+
+static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->npins;
+}
+
+static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->pins[group].name;
+}
+
+static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pdata->pins[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+
+static struct pinctrl_ops bcm281xx_pinctrl_ops = {
+ .get_groups_count = bcm281xx_pinctrl_get_groups_count,
+ .get_group_name = bcm281xx_pinctrl_get_group_name,
+ .get_group_pins = bcm281xx_pinctrl_get_group_pins,
+ .pin_dbg_show = bcm281xx_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->nfunctions;
+}
+
+static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->functions[function].name;
+}
+
+static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pdata->functions[function].groups;
+ *num_groups = pdata->functions[function].ngroups;
+
+ return 0;
+}
+
+static int bcm281xx_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm281xx_pin_function *f = &pdata->functions[function];
+ u32 offset = 4 * pdata->pins[group].number;
+ int rc = 0;
+
+ dev_dbg(pctldev->dev,
+ "%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
+ __func__, f->name, function, pdata->pins[group].name,
+ pdata->pins[group].number, offset);
+
+ rc = regmap_update_bits(pdata->regmap, offset,
+ BCM281XX_PIN_REG_F_SEL_MASK,
+ function << BCM281XX_PIN_REG_F_SEL_SHIFT);
+ if (rc)
+ dev_err(pctldev->dev,
+ "Error updating register for pin %s (%d).\n",
+ pdata->pins[group].name, pdata->pins[group].number);
+
+ return rc;
+}
+
+static struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
+ .get_functions_count = bcm281xx_pinctrl_get_fcns_count,
+ .get_function_name = bcm281xx_pinctrl_get_fcn_name,
+ .get_function_groups = bcm281xx_pinctrl_get_fcn_groups,
+ .enable = bcm281xx_pinmux_enable,
+};
+
+static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+
+/* Goes through the configs and updates the register val/mask */
+static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ arg = (arg >= 1 ? 1 : 0);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(STD, HYST),
+ BCM281XX_PIN_MASK(STD, HYST));
+ break;
+ /*
+ * The pin bias can only be one of pull-up, pull-down, or
+ * disable. The user does not need to specify a value for the
+ * property, and the default value from pinconf-generic is
+ * ignored.
+ */
+ case PIN_CONFIG_BIAS_DISABLE:
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(STD, PULL_UP),
+ BCM281XX_PIN_MASK(STD, PULL_UP));
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(STD, PULL_DN),
+ BCM281XX_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ bcm281xx_pin_update(val, mask, 1,
+ BCM281XX_PIN_SHIFT(STD, PULL_UP),
+ BCM281XX_PIN_MASK(STD, PULL_UP));
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(STD, PULL_DN),
+ BCM281XX_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(STD, PULL_UP),
+ BCM281XX_PIN_MASK(STD, PULL_UP));
+ bcm281xx_pin_update(val, mask, 1,
+ BCM281XX_PIN_SHIFT(STD, PULL_DN),
+ BCM281XX_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(STD, SLEW),
+ BCM281XX_PIN_MASK(STD, SLEW));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+			/* inverted, since the register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(STD, INPUT_DIS),
+ BCM281XX_PIN_MASK(STD, INPUT_DIS));
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* Valid range is 2-16 mA, even numbers only */
+ if ((arg < 2) || (arg > 16) || (arg % 2)) {
+ dev_err(pctldev->dev,
+ "Invalid Drive Strength value (%d) for "
+ "pin %s (%d). Valid values are "
+ "(2..16) mA, even numbers only.\n",
+ arg, pdata->pins[pin].name, pin);
+ return -EINVAL;
+ }
+ bcm281xx_pin_update(val, mask, (arg/2)-1,
+ BCM281XX_PIN_SHIFT(STD, DRV_STR),
+ BCM281XX_PIN_MASK(STD, DRV_STR));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+/*
+ * The pull-up strength for an I2C pin is represented by bits 4-6 in the
+ * register with the following mapping:
+ * 0b000: No pull-up
+ * 0b001: 1200 Ohm
+ * 0b010: 1800 Ohm
+ * 0b011: 720 Ohm
+ * 0b100: 2700 Ohm
+ * 0b101: 831 Ohm
+ * 0b110: 1080 Ohm
+ * 0b111: 568 Ohm
+ * This array maps pull-up strength in Ohms to register values (1+index).
+ */
+static const u16 bcm281xx_pullup_map[] = {
+ 1200, 1800, 720, 2700, 831, 1080, 568
+};
+
+/* Goes through the configs and updates the register val/mask */
+static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i, j;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ for (j = 0; j < ARRAY_SIZE(bcm281xx_pullup_map); j++)
+ if (bcm281xx_pullup_map[j] == arg)
+ break;
+
+ if (j == ARRAY_SIZE(bcm281xx_pullup_map)) {
+ dev_err(pctldev->dev,
+ "Invalid pull-up value (%d) for pin %s "
+ "(%d). Valid values are 568, 720, 831, "
+ "1080, 1200, 1800, 2700 Ohms.\n",
+ arg, pdata->pins[pin].name, pin);
+ return -EINVAL;
+ }
+
+ bcm281xx_pin_update(val, mask, j+1,
+ BCM281XX_PIN_SHIFT(I2C, PULL_UP_STR),
+ BCM281XX_PIN_MASK(I2C, PULL_UP_STR));
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(I2C, PULL_UP_STR),
+ BCM281XX_PIN_MASK(I2C, PULL_UP_STR));
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(I2C, SLEW),
+ BCM281XX_PIN_MASK(I2C, SLEW));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+			/* inverted, since the register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(I2C, INPUT_DIS),
+ BCM281XX_PIN_MASK(I2C, INPUT_DIS));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+/* Goes through the configs and updates the register val/mask */
+static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(HDMI, MODE),
+ BCM281XX_PIN_MASK(HDMI, MODE));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+			/* inverted, since the register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(HDMI, INPUT_DIS),
+ BCM281XX_PIN_MASK(HDMI, INPUT_DIS));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ enum bcm281xx_pin_type pin_type;
+ u32 offset = 4 * pin;
+ u32 cfg_val, cfg_mask;
+ int rc;
+
+ cfg_val = 0;
+ cfg_mask = 0;
+ pin_type = pin_type_get(pctldev, pin);
+
+ /* Different pins have different configuration options */
+ switch (pin_type) {
+ case BCM281XX_PIN_TYPE_STD:
+ rc = bcm281xx_std_pin_update(pctldev, pin, configs,
+ num_configs, &cfg_val, &cfg_mask);
+ break;
+
+ case BCM281XX_PIN_TYPE_I2C:
+ rc = bcm281xx_i2c_pin_update(pctldev, pin, configs,
+ num_configs, &cfg_val, &cfg_mask);
+ break;
+
+ case BCM281XX_PIN_TYPE_HDMI:
+ rc = bcm281xx_hdmi_pin_update(pctldev, pin, configs,
+ num_configs, &cfg_val, &cfg_mask);
+ break;
+
+ default:
+ dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
+ pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch pin type */
+
+ if (rc)
+ return rc;
+
+ dev_dbg(pctldev->dev,
+ "%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
+ __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
+
+ rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
+ if (rc) {
+ dev_err(pctldev->dev,
+ "Error updating register for pin %s (%d).\n",
+ pdata->pins[pin].name, pin);
+ return rc;
+ }
+
+ return 0;
+}
+
+static struct pinconf_ops bcm281xx_pinctrl_pinconf_ops = {
+ .pin_config_get = bcm281xx_pinctrl_pin_config_get,
+ .pin_config_set = bcm281xx_pinctrl_pin_config_set,
+};
+
+static struct pinctrl_desc bcm281xx_pinctrl_desc = {
+ /* name, pins, npins members initialized in probe function */
+ .pctlops = &bcm281xx_pinctrl_ops,
+ .pmxops = &bcm281xx_pinctrl_pinmux_ops,
+ .confops = &bcm281xx_pinctrl_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
+ struct resource *res;
+ struct pinctrl_dev *pctl;
+
+	/* So far we can assume there is only one bank of registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing MEM resource\n");
+ return -ENODEV;
+ }
+
+ pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->reg_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
+ return -ENODEV;
+ }
+
+	pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
+					      &bcm281xx_pinctrl_regmap_config);
+	if (IS_ERR(pdata->regmap)) {
+		dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
+		return -ENODEV;
+	}
+
+	/* Initialize the dynamic part of pinctrl_desc */
+	bcm281xx_pinctrl_desc.name = dev_name(&pdev->dev);
+ bcm281xx_pinctrl_desc.pins = bcm281xx_pinctrl.pins;
+ bcm281xx_pinctrl_desc.npins = bcm281xx_pinctrl.npins;
+
+ pctl = pinctrl_register(&bcm281xx_pinctrl_desc,
+ &pdev->dev,
+ pdata);
+ if (!pctl) {
+ dev_err(&pdev->dev, "Failed to register pinctrl\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+
+ return 0;
+}
+
+static struct of_device_id bcm281xx_pinctrl_of_match[] = {
+ { .compatible = "brcm,bcm11351-pinctrl", },
+ { },
+};
+
+static struct platform_driver bcm281xx_pinctrl_driver = {
+ .driver = {
+ .name = "bcm281xx-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm281xx_pinctrl_of_match,
+ },
+};
+
+module_platform_driver_probe(bcm281xx_pinctrl_driver, bcm281xx_pinctrl_probe);
+
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
+MODULE_AUTHOR("Sherman Yin <syin@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom BCM281xx pinctrl driver");
+MODULE_LICENSE("GPL v2");
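For context, the rename above does not change how the driver is consumed: client devices never call it directly, but claim a pin state through the pinctrl framework, which then invokes bcm281xx_pinmux_enable() and bcm281xx_pinctrl_pin_config_set() shown above. A minimal consumer sketch, not part of this patch (example_client_probe is an illustrative name; devm_pinctrl_get_select_default() is the standard pinctrl consumer API):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct pinctrl *p;

	/*
	 * Claim the "default" pinctrl state for this device; the groups and
	 * alt functions named in that state are applied through the bcm281xx
	 * pinmux/pinconf callbacks registered above.
	 */
	p = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	return 0;
}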
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
deleted file mode 100644
index eb25002..0000000
--- a/drivers/pinctrl/pinctrl-capri.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-/*
- * Copyright (C) 2013 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-#include "core.h"
-#include "pinctrl-utils.h"
-
-/* Capri Pin Control Registers Definitions */
-
-/* Function Select bits are the same for all pin control registers */
-#define CAPRI_PIN_REG_F_SEL_MASK 0x0700
-#define CAPRI_PIN_REG_F_SEL_SHIFT 8
-
-/* Standard pin register */
-#define CAPRI_STD_PIN_REG_DRV_STR_MASK 0x0007
-#define CAPRI_STD_PIN_REG_DRV_STR_SHIFT 0
-#define CAPRI_STD_PIN_REG_INPUT_DIS_MASK 0x0008
-#define CAPRI_STD_PIN_REG_INPUT_DIS_SHIFT 3
-#define CAPRI_STD_PIN_REG_SLEW_MASK 0x0010
-#define CAPRI_STD_PIN_REG_SLEW_SHIFT 4
-#define CAPRI_STD_PIN_REG_PULL_UP_MASK 0x0020
-#define CAPRI_STD_PIN_REG_PULL_UP_SHIFT 5
-#define CAPRI_STD_PIN_REG_PULL_DN_MASK 0x0040
-#define CAPRI_STD_PIN_REG_PULL_DN_SHIFT 6
-#define CAPRI_STD_PIN_REG_HYST_MASK 0x0080
-#define CAPRI_STD_PIN_REG_HYST_SHIFT 7
-
-/* I2C pin register */
-#define CAPRI_I2C_PIN_REG_INPUT_DIS_MASK 0x0004
-#define CAPRI_I2C_PIN_REG_INPUT_DIS_SHIFT 2
-#define CAPRI_I2C_PIN_REG_SLEW_MASK 0x0008
-#define CAPRI_I2C_PIN_REG_SLEW_SHIFT 3
-#define CAPRI_I2C_PIN_REG_PULL_UP_STR_MASK 0x0070
-#define CAPRI_I2C_PIN_REG_PULL_UP_STR_SHIFT 4
-
-/* HDMI pin register */
-#define CAPRI_HDMI_PIN_REG_INPUT_DIS_MASK 0x0008
-#define CAPRI_HDMI_PIN_REG_INPUT_DIS_SHIFT 3
-#define CAPRI_HDMI_PIN_REG_MODE_MASK 0x0010
-#define CAPRI_HDMI_PIN_REG_MODE_SHIFT 4
-
-/**
- * capri_pin_type - types of pin register
- */
-enum capri_pin_type {
- CAPRI_PIN_TYPE_UNKNOWN = 0,
- CAPRI_PIN_TYPE_STD,
- CAPRI_PIN_TYPE_I2C,
- CAPRI_PIN_TYPE_HDMI,
-};
-
-static enum capri_pin_type std_pin = CAPRI_PIN_TYPE_STD;
-static enum capri_pin_type i2c_pin = CAPRI_PIN_TYPE_I2C;
-static enum capri_pin_type hdmi_pin = CAPRI_PIN_TYPE_HDMI;
-
-/**
- * capri_pin_function- define pin function
- */
-struct capri_pin_function {
- const char *name;
- const char * const *groups;
- const unsigned ngroups;
-};
-
-/**
- * capri_pinctrl_data - Broadcom-specific pinctrl data
- * @reg_base - base of pinctrl registers
- */
-struct capri_pinctrl_data {
- void __iomem *reg_base;
-
- /* List of all pins */
- const struct pinctrl_pin_desc *pins;
- const unsigned npins;
-
- const struct capri_pin_function *functions;
- const unsigned nfunctions;
-
- struct regmap *regmap;
-};
-
-/*
- * Pin number definition. The order here must be the same as defined in the
- * PADCTRLREG block in the RDB.
- */
-#define CAPRI_PIN_ADCSYNC 0
-#define CAPRI_PIN_BAT_RM 1
-#define CAPRI_PIN_BSC1_SCL 2
-#define CAPRI_PIN_BSC1_SDA 3
-#define CAPRI_PIN_BSC2_SCL 4
-#define CAPRI_PIN_BSC2_SDA 5
-#define CAPRI_PIN_CLASSGPWR 6
-#define CAPRI_PIN_CLK_CX8 7
-#define CAPRI_PIN_CLKOUT_0 8
-#define CAPRI_PIN_CLKOUT_1 9
-#define CAPRI_PIN_CLKOUT_2 10
-#define CAPRI_PIN_CLKOUT_3 11
-#define CAPRI_PIN_CLKREQ_IN_0 12
-#define CAPRI_PIN_CLKREQ_IN_1 13
-#define CAPRI_PIN_CWS_SYS_REQ1 14
-#define CAPRI_PIN_CWS_SYS_REQ2 15
-#define CAPRI_PIN_CWS_SYS_REQ3 16
-#define CAPRI_PIN_DIGMIC1_CLK 17
-#define CAPRI_PIN_DIGMIC1_DQ 18
-#define CAPRI_PIN_DIGMIC2_CLK 19
-#define CAPRI_PIN_DIGMIC2_DQ 20
-#define CAPRI_PIN_GPEN13 21
-#define CAPRI_PIN_GPEN14 22
-#define CAPRI_PIN_GPEN15 23
-#define CAPRI_PIN_GPIO00 24
-#define CAPRI_PIN_GPIO01 25
-#define CAPRI_PIN_GPIO02 26
-#define CAPRI_PIN_GPIO03 27
-#define CAPRI_PIN_GPIO04 28
-#define CAPRI_PIN_GPIO05 29
-#define CAPRI_PIN_GPIO06 30
-#define CAPRI_PIN_GPIO07 31
-#define CAPRI_PIN_GPIO08 32
-#define CAPRI_PIN_GPIO09 33
-#define CAPRI_PIN_GPIO10 34
-#define CAPRI_PIN_GPIO11 35
-#define CAPRI_PIN_GPIO12 36
-#define CAPRI_PIN_GPIO13 37
-#define CAPRI_PIN_GPIO14 38
-#define CAPRI_PIN_GPS_PABLANK 39
-#define CAPRI_PIN_GPS_TMARK 40
-#define CAPRI_PIN_HDMI_SCL 41
-#define CAPRI_PIN_HDMI_SDA 42
-#define CAPRI_PIN_IC_DM 43
-#define CAPRI_PIN_IC_DP 44
-#define CAPRI_PIN_KP_COL_IP_0 45
-#define CAPRI_PIN_KP_COL_IP_1 46
-#define CAPRI_PIN_KP_COL_IP_2 47
-#define CAPRI_PIN_KP_COL_IP_3 48
-#define CAPRI_PIN_KP_ROW_OP_0 49
-#define CAPRI_PIN_KP_ROW_OP_1 50
-#define CAPRI_PIN_KP_ROW_OP_2 51
-#define CAPRI_PIN_KP_ROW_OP_3 52
-#define CAPRI_PIN_LCD_B_0 53
-#define CAPRI_PIN_LCD_B_1 54
-#define CAPRI_PIN_LCD_B_2 55
-#define CAPRI_PIN_LCD_B_3 56
-#define CAPRI_PIN_LCD_B_4 57
-#define CAPRI_PIN_LCD_B_5 58
-#define CAPRI_PIN_LCD_B_6 59
-#define CAPRI_PIN_LCD_B_7 60
-#define CAPRI_PIN_LCD_G_0 61
-#define CAPRI_PIN_LCD_G_1 62
-#define CAPRI_PIN_LCD_G_2 63
-#define CAPRI_PIN_LCD_G_3 64
-#define CAPRI_PIN_LCD_G_4 65
-#define CAPRI_PIN_LCD_G_5 66
-#define CAPRI_PIN_LCD_G_6 67
-#define CAPRI_PIN_LCD_G_7 68
-#define CAPRI_PIN_LCD_HSYNC 69
-#define CAPRI_PIN_LCD_OE 70
-#define CAPRI_PIN_LCD_PCLK 71
-#define CAPRI_PIN_LCD_R_0 72
-#define CAPRI_PIN_LCD_R_1 73
-#define CAPRI_PIN_LCD_R_2 74
-#define CAPRI_PIN_LCD_R_3 75
-#define CAPRI_PIN_LCD_R_4 76
-#define CAPRI_PIN_LCD_R_5 77
-#define CAPRI_PIN_LCD_R_6 78
-#define CAPRI_PIN_LCD_R_7 79
-#define CAPRI_PIN_LCD_VSYNC 80
-#define CAPRI_PIN_MDMGPIO0 81
-#define CAPRI_PIN_MDMGPIO1 82
-#define CAPRI_PIN_MDMGPIO2 83
-#define CAPRI_PIN_MDMGPIO3 84
-#define CAPRI_PIN_MDMGPIO4 85
-#define CAPRI_PIN_MDMGPIO5 86
-#define CAPRI_PIN_MDMGPIO6 87
-#define CAPRI_PIN_MDMGPIO7 88
-#define CAPRI_PIN_MDMGPIO8 89
-#define CAPRI_PIN_MPHI_DATA_0 90
-#define CAPRI_PIN_MPHI_DATA_1 91
-#define CAPRI_PIN_MPHI_DATA_2 92
-#define CAPRI_PIN_MPHI_DATA_3 93
-#define CAPRI_PIN_MPHI_DATA_4 94
-#define CAPRI_PIN_MPHI_DATA_5 95
-#define CAPRI_PIN_MPHI_DATA_6 96
-#define CAPRI_PIN_MPHI_DATA_7 97
-#define CAPRI_PIN_MPHI_DATA_8 98
-#define CAPRI_PIN_MPHI_DATA_9 99
-#define CAPRI_PIN_MPHI_DATA_10 100
-#define CAPRI_PIN_MPHI_DATA_11 101
-#define CAPRI_PIN_MPHI_DATA_12 102
-#define CAPRI_PIN_MPHI_DATA_13 103
-#define CAPRI_PIN_MPHI_DATA_14 104
-#define CAPRI_PIN_MPHI_DATA_15 105
-#define CAPRI_PIN_MPHI_HA0 106
-#define CAPRI_PIN_MPHI_HAT0 107
-#define CAPRI_PIN_MPHI_HAT1 108
-#define CAPRI_PIN_MPHI_HCE0_N 109
-#define CAPRI_PIN_MPHI_HCE1_N 110
-#define CAPRI_PIN_MPHI_HRD_N 111
-#define CAPRI_PIN_MPHI_HWR_N 112
-#define CAPRI_PIN_MPHI_RUN0 113
-#define CAPRI_PIN_MPHI_RUN1 114
-#define CAPRI_PIN_MTX_SCAN_CLK 115
-#define CAPRI_PIN_MTX_SCAN_DATA 116
-#define CAPRI_PIN_NAND_AD_0 117
-#define CAPRI_PIN_NAND_AD_1 118
-#define CAPRI_PIN_NAND_AD_2 119
-#define CAPRI_PIN_NAND_AD_3 120
-#define CAPRI_PIN_NAND_AD_4 121
-#define CAPRI_PIN_NAND_AD_5 122
-#define CAPRI_PIN_NAND_AD_6 123
-#define CAPRI_PIN_NAND_AD_7 124
-#define CAPRI_PIN_NAND_ALE 125
-#define CAPRI_PIN_NAND_CEN_0 126
-#define CAPRI_PIN_NAND_CEN_1 127
-#define CAPRI_PIN_NAND_CLE 128
-#define CAPRI_PIN_NAND_OEN 129
-#define CAPRI_PIN_NAND_RDY_0 130
-#define CAPRI_PIN_NAND_RDY_1 131
-#define CAPRI_PIN_NAND_WEN 132
-#define CAPRI_PIN_NAND_WP 133
-#define CAPRI_PIN_PC1 134
-#define CAPRI_PIN_PC2 135
-#define CAPRI_PIN_PMU_INT 136
-#define CAPRI_PIN_PMU_SCL 137
-#define CAPRI_PIN_PMU_SDA 138
-#define CAPRI_PIN_RFST2G_MTSLOTEN3G 139
-#define CAPRI_PIN_RGMII_0_RX_CTL 140
-#define CAPRI_PIN_RGMII_0_RXC 141
-#define CAPRI_PIN_RGMII_0_RXD_0 142
-#define CAPRI_PIN_RGMII_0_RXD_1 143
-#define CAPRI_PIN_RGMII_0_RXD_2 144
-#define CAPRI_PIN_RGMII_0_RXD_3 145
-#define CAPRI_PIN_RGMII_0_TX_CTL 146
-#define CAPRI_PIN_RGMII_0_TXC 147
-#define CAPRI_PIN_RGMII_0_TXD_0 148
-#define CAPRI_PIN_RGMII_0_TXD_1 149
-#define CAPRI_PIN_RGMII_0_TXD_2 150
-#define CAPRI_PIN_RGMII_0_TXD_3 151
-#define CAPRI_PIN_RGMII_1_RX_CTL 152
-#define CAPRI_PIN_RGMII_1_RXC 153
-#define CAPRI_PIN_RGMII_1_RXD_0 154
-#define CAPRI_PIN_RGMII_1_RXD_1 155
-#define CAPRI_PIN_RGMII_1_RXD_2 156
-#define CAPRI_PIN_RGMII_1_RXD_3 157
-#define CAPRI_PIN_RGMII_1_TX_CTL 158
-#define CAPRI_PIN_RGMII_1_TXC 159
-#define CAPRI_PIN_RGMII_1_TXD_0 160
-#define CAPRI_PIN_RGMII_1_TXD_1 161
-#define CAPRI_PIN_RGMII_1_TXD_2 162
-#define CAPRI_PIN_RGMII_1_TXD_3 163
-#define CAPRI_PIN_RGMII_GPIO_0 164
-#define CAPRI_PIN_RGMII_GPIO_1 165
-#define CAPRI_PIN_RGMII_GPIO_2 166
-#define CAPRI_PIN_RGMII_GPIO_3 167
-#define CAPRI_PIN_RTXDATA2G_TXDATA3G1 168
-#define CAPRI_PIN_RTXEN2G_TXDATA3G2 169
-#define CAPRI_PIN_RXDATA3G0 170
-#define CAPRI_PIN_RXDATA3G1 171
-#define CAPRI_PIN_RXDATA3G2 172
-#define CAPRI_PIN_SDIO1_CLK 173
-#define CAPRI_PIN_SDIO1_CMD 174
-#define CAPRI_PIN_SDIO1_DATA_0 175
-#define CAPRI_PIN_SDIO1_DATA_1 176
-#define CAPRI_PIN_SDIO1_DATA_2 177
-#define CAPRI_PIN_SDIO1_DATA_3 178
-#define CAPRI_PIN_SDIO4_CLK 179
-#define CAPRI_PIN_SDIO4_CMD 180
-#define CAPRI_PIN_SDIO4_DATA_0 181
-#define CAPRI_PIN_SDIO4_DATA_1 182
-#define CAPRI_PIN_SDIO4_DATA_2 183
-#define CAPRI_PIN_SDIO4_DATA_3 184
-#define CAPRI_PIN_SIM_CLK 185
-#define CAPRI_PIN_SIM_DATA 186
-#define CAPRI_PIN_SIM_DET 187
-#define CAPRI_PIN_SIM_RESETN 188
-#define CAPRI_PIN_SIM2_CLK 189
-#define CAPRI_PIN_SIM2_DATA 190
-#define CAPRI_PIN_SIM2_DET 191
-#define CAPRI_PIN_SIM2_RESETN 192
-#define CAPRI_PIN_SRI_C 193
-#define CAPRI_PIN_SRI_D 194
-#define CAPRI_PIN_SRI_E 195
-#define CAPRI_PIN_SSP_EXTCLK 196
-#define CAPRI_PIN_SSP0_CLK 197
-#define CAPRI_PIN_SSP0_FS 198
-#define CAPRI_PIN_SSP0_RXD 199
-#define CAPRI_PIN_SSP0_TXD 200
-#define CAPRI_PIN_SSP2_CLK 201
-#define CAPRI_PIN_SSP2_FS_0 202
-#define CAPRI_PIN_SSP2_FS_1 203
-#define CAPRI_PIN_SSP2_FS_2 204
-#define CAPRI_PIN_SSP2_FS_3 205
-#define CAPRI_PIN_SSP2_RXD_0 206
-#define CAPRI_PIN_SSP2_RXD_1 207
-#define CAPRI_PIN_SSP2_TXD_0 208
-#define CAPRI_PIN_SSP2_TXD_1 209
-#define CAPRI_PIN_SSP3_CLK 210
-#define CAPRI_PIN_SSP3_FS 211
-#define CAPRI_PIN_SSP3_RXD 212
-#define CAPRI_PIN_SSP3_TXD 213
-#define CAPRI_PIN_SSP4_CLK 214
-#define CAPRI_PIN_SSP4_FS 215
-#define CAPRI_PIN_SSP4_RXD 216
-#define CAPRI_PIN_SSP4_TXD 217
-#define CAPRI_PIN_SSP5_CLK 218
-#define CAPRI_PIN_SSP5_FS 219
-#define CAPRI_PIN_SSP5_RXD 220
-#define CAPRI_PIN_SSP5_TXD 221
-#define CAPRI_PIN_SSP6_CLK 222
-#define CAPRI_PIN_SSP6_FS 223
-#define CAPRI_PIN_SSP6_RXD 224
-#define CAPRI_PIN_SSP6_TXD 225
-#define CAPRI_PIN_STAT_1 226
-#define CAPRI_PIN_STAT_2 227
-#define CAPRI_PIN_SYSCLKEN 228
-#define CAPRI_PIN_TRACECLK 229
-#define CAPRI_PIN_TRACEDT00 230
-#define CAPRI_PIN_TRACEDT01 231
-#define CAPRI_PIN_TRACEDT02 232
-#define CAPRI_PIN_TRACEDT03 233
-#define CAPRI_PIN_TRACEDT04 234
-#define CAPRI_PIN_TRACEDT05 235
-#define CAPRI_PIN_TRACEDT06 236
-#define CAPRI_PIN_TRACEDT07 237
-#define CAPRI_PIN_TRACEDT08 238
-#define CAPRI_PIN_TRACEDT09 239
-#define CAPRI_PIN_TRACEDT10 240
-#define CAPRI_PIN_TRACEDT11 241
-#define CAPRI_PIN_TRACEDT12 242
-#define CAPRI_PIN_TRACEDT13 243
-#define CAPRI_PIN_TRACEDT14 244
-#define CAPRI_PIN_TRACEDT15 245
-#define CAPRI_PIN_TXDATA3G0 246
-#define CAPRI_PIN_TXPWRIND 247
-#define CAPRI_PIN_UARTB1_UCTS 248
-#define CAPRI_PIN_UARTB1_URTS 249
-#define CAPRI_PIN_UARTB1_URXD 250
-#define CAPRI_PIN_UARTB1_UTXD 251
-#define CAPRI_PIN_UARTB2_URXD 252
-#define CAPRI_PIN_UARTB2_UTXD 253
-#define CAPRI_PIN_UARTB3_UCTS 254
-#define CAPRI_PIN_UARTB3_URTS 255
-#define CAPRI_PIN_UARTB3_URXD 256
-#define CAPRI_PIN_UARTB3_UTXD 257
-#define CAPRI_PIN_UARTB4_UCTS 258
-#define CAPRI_PIN_UARTB4_URTS 259
-#define CAPRI_PIN_UARTB4_URXD 260
-#define CAPRI_PIN_UARTB4_UTXD 261
-#define CAPRI_PIN_VC_CAM1_SCL 262
-#define CAPRI_PIN_VC_CAM1_SDA 263
-#define CAPRI_PIN_VC_CAM2_SCL 264
-#define CAPRI_PIN_VC_CAM2_SDA 265
-#define CAPRI_PIN_VC_CAM3_SCL 266
-#define CAPRI_PIN_VC_CAM3_SDA 267
-
-#define CAPRI_PIN_DESC(a, b, c) \
- { .number = a, .name = b, .drv_data = &c##_pin }
-
-/*
- * Pin description definition. The order here must be the same as defined in
- * the PADCTRLREG block in the RDB, since the pin number is used as an index
- * into this array.
- */
-static const struct pinctrl_pin_desc capri_pinctrl_pins[] = {
- CAPRI_PIN_DESC(CAPRI_PIN_ADCSYNC, "adcsync", std),
- CAPRI_PIN_DESC(CAPRI_PIN_BAT_RM, "bat_rm", std),
- CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SCL, "bsc1_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SDA, "bsc1_sda", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SCL, "bsc2_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SDA, "bsc2_sda", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_CLASSGPWR, "classgpwr", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLK_CX8, "clk_cx8", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_0, "clkout_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_1, "clkout_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_2, "clkout_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_3, "clkout_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_0, "clkreq_in_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_1, "clkreq_in_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ1, "cws_sys_req1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ2, "cws_sys_req2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ3, "cws_sys_req3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_CLK, "digmic1_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_DQ, "digmic1_dq", std),
- CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_CLK, "digmic2_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_DQ, "digmic2_dq", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPEN13, "gpen13", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPEN14, "gpen14", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPEN15, "gpen15", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO00, "gpio00", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO01, "gpio01", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO02, "gpio02", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO03, "gpio03", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO04, "gpio04", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO05, "gpio05", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO06, "gpio06", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO07, "gpio07", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO08, "gpio08", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO09, "gpio09", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO10, "gpio10", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO11, "gpio11", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO12, "gpio12", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO13, "gpio13", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPIO14, "gpio14", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPS_PABLANK, "gps_pablank", std),
- CAPRI_PIN_DESC(CAPRI_PIN_GPS_TMARK, "gps_tmark", std),
- CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SCL, "hdmi_scl", hdmi),
- CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SDA, "hdmi_sda", hdmi),
- CAPRI_PIN_DESC(CAPRI_PIN_IC_DM, "ic_dm", std),
- CAPRI_PIN_DESC(CAPRI_PIN_IC_DP, "ic_dp", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_0, "kp_col_ip_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_1, "kp_col_ip_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_2, "kp_col_ip_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_3, "kp_col_ip_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_0, "kp_row_op_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_1, "kp_row_op_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_2, "kp_row_op_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_3, "kp_row_op_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_0, "lcd_b_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_1, "lcd_b_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_2, "lcd_b_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_3, "lcd_b_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_4, "lcd_b_4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_5, "lcd_b_5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_6, "lcd_b_6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_7, "lcd_b_7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_0, "lcd_g_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_1, "lcd_g_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_2, "lcd_g_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_3, "lcd_g_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_4, "lcd_g_4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_5, "lcd_g_5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_6, "lcd_g_6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_7, "lcd_g_7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_HSYNC, "lcd_hsync", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_OE, "lcd_oe", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_PCLK, "lcd_pclk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_0, "lcd_r_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_1, "lcd_r_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_2, "lcd_r_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_3, "lcd_r_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_4, "lcd_r_4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_5, "lcd_r_5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_6, "lcd_r_6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_7, "lcd_r_7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_LCD_VSYNC, "lcd_vsync", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO0, "mdmgpio0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO1, "mdmgpio1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO2, "mdmgpio2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO3, "mdmgpio3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO4, "mdmgpio4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO5, "mdmgpio5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO6, "mdmgpio6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO7, "mdmgpio7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO8, "mdmgpio8", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_0, "mphi_data_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_1, "mphi_data_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_2, "mphi_data_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_3, "mphi_data_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_4, "mphi_data_4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_5, "mphi_data_5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_6, "mphi_data_6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_7, "mphi_data_7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_8, "mphi_data_8", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_9, "mphi_data_9", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_10, "mphi_data_10", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_11, "mphi_data_11", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_12, "mphi_data_12", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_13, "mphi_data_13", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_14, "mphi_data_14", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_15, "mphi_data_15", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HA0, "mphi_ha0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT0, "mphi_hat0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT1, "mphi_hat1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE0_N, "mphi_hce0_n", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE1_N, "mphi_hce1_n", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HRD_N, "mphi_hrd_n", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HWR_N, "mphi_hwr_n", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN0, "mphi_run0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN1, "mphi_run1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_CLK, "mtx_scan_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_DATA, "mtx_scan_data", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_0, "nand_ad_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_1, "nand_ad_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_2, "nand_ad_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_3, "nand_ad_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_4, "nand_ad_4", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_5, "nand_ad_5", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_6, "nand_ad_6", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_7, "nand_ad_7", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_ALE, "nand_ale", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_0, "nand_cen_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_1, "nand_cen_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_CLE, "nand_cle", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_OEN, "nand_oen", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_0, "nand_rdy_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_1, "nand_rdy_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_WEN, "nand_wen", std),
- CAPRI_PIN_DESC(CAPRI_PIN_NAND_WP, "nand_wp", std),
- CAPRI_PIN_DESC(CAPRI_PIN_PC1, "pc1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_PC2, "pc2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_PMU_INT, "pmu_int", std),
- CAPRI_PIN_DESC(CAPRI_PIN_PMU_SCL, "pmu_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_PMU_SDA, "pmu_sda", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RX_CTL, "rgmii_0_rx_ctl", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXC, "rgmii_0_rxc", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_0, "rgmii_0_rxd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_1, "rgmii_0_rxd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_2, "rgmii_0_rxd_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_3, "rgmii_0_rxd_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TX_CTL, "rgmii_0_tx_ctl", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXC, "rgmii_0_txc", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_0, "rgmii_0_txd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_1, "rgmii_0_txd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_2, "rgmii_0_txd_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_3, "rgmii_0_txd_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RX_CTL, "rgmii_1_rx_ctl", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXC, "rgmii_1_rxc", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_0, "rgmii_1_rxd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_1, "rgmii_1_rxd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_2, "rgmii_1_rxd_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_3, "rgmii_1_rxd_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TX_CTL, "rgmii_1_tx_ctl", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXC, "rgmii_1_txc", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_0, "rgmii_1_txd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_1, "rgmii_1_txd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_2, "rgmii_1_txd_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_3, "rgmii_1_txd_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_0, "rgmii_gpio_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_1, "rgmii_gpio_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_2, "rgmii_gpio_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_3, "rgmii_gpio_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RTXDATA2G_TXDATA3G1, "rtxdata2g_txdata3g1",
- std),
- CAPRI_PIN_DESC(CAPRI_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G0, "rxdata3g0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G1, "rxdata3g1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G2, "rxdata3g2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CLK, "sdio1_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CMD, "sdio1_cmd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_0, "sdio1_data_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_1, "sdio1_data_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_2, "sdio1_data_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_3, "sdio1_data_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CLK, "sdio4_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CMD, "sdio4_cmd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_0, "sdio4_data_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_1, "sdio4_data_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_2, "sdio4_data_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_3, "sdio4_data_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM_CLK, "sim_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM_DATA, "sim_data", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM_DET, "sim_det", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM_RESETN, "sim_resetn", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM2_CLK, "sim2_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DATA, "sim2_data", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DET, "sim2_det", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SIM2_RESETN, "sim2_resetn", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SRI_C, "sri_c", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SRI_D, "sri_d", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SRI_E, "sri_e", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP_EXTCLK, "ssp_extclk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP0_CLK, "ssp0_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP0_FS, "ssp0_fs", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP0_RXD, "ssp0_rxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP0_TXD, "ssp0_txd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_CLK, "ssp2_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_0, "ssp2_fs_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_1, "ssp2_fs_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_2, "ssp2_fs_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_3, "ssp2_fs_3", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_0, "ssp2_rxd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_1, "ssp2_rxd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_0, "ssp2_txd_0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_1, "ssp2_txd_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP3_CLK, "ssp3_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP3_FS, "ssp3_fs", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP3_RXD, "ssp3_rxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP3_TXD, "ssp3_txd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP4_CLK, "ssp4_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP4_FS, "ssp4_fs", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP4_RXD, "ssp4_rxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP4_TXD, "ssp4_txd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP5_CLK, "ssp5_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP5_FS, "ssp5_fs", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP5_RXD, "ssp5_rxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP5_TXD, "ssp5_txd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP6_CLK, "ssp6_clk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP6_FS, "ssp6_fs", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP6_RXD, "ssp6_rxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SSP6_TXD, "ssp6_txd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_STAT_1, "stat_1", std),
- CAPRI_PIN_DESC(CAPRI_PIN_STAT_2, "stat_2", std),
- CAPRI_PIN_DESC(CAPRI_PIN_SYSCLKEN, "sysclken", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACECLK, "traceclk", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT00, "tracedt00", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT01, "tracedt01", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT02, "tracedt02", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT03, "tracedt03", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT04, "tracedt04", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT05, "tracedt05", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT06, "tracedt06", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT07, "tracedt07", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT08, "tracedt08", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT09, "tracedt09", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT10, "tracedt10", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT11, "tracedt11", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT12, "tracedt12", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT13, "tracedt13", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT14, "tracedt14", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT15, "tracedt15", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TXDATA3G0, "txdata3g0", std),
- CAPRI_PIN_DESC(CAPRI_PIN_TXPWRIND, "txpwrind", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UCTS, "uartb1_ucts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URTS, "uartb1_urts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URXD, "uartb1_urxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UTXD, "uartb1_utxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_URXD, "uartb2_urxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_UTXD, "uartb2_utxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UCTS, "uartb3_ucts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URTS, "uartb3_urts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URXD, "uartb3_urxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UTXD, "uartb3_utxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UCTS, "uartb4_ucts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URTS, "uartb4_urts", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URXD, "uartb4_urxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UTXD, "uartb4_utxd", std),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SCL, "vc_cam1_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SDA, "vc_cam1_sda", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SCL, "vc_cam2_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SDA, "vc_cam2_sda", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SCL, "vc_cam3_scl", i2c),
- CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SDA, "vc_cam3_sda", i2c),
-};
-
-static const char * const capri_alt_groups[] = {
- "adcsync",
- "bat_rm",
- "bsc1_scl",
- "bsc1_sda",
- "bsc2_scl",
- "bsc2_sda",
- "classgpwr",
- "clk_cx8",
- "clkout_0",
- "clkout_1",
- "clkout_2",
- "clkout_3",
- "clkreq_in_0",
- "clkreq_in_1",
- "cws_sys_req1",
- "cws_sys_req2",
- "cws_sys_req3",
- "digmic1_clk",
- "digmic1_dq",
- "digmic2_clk",
- "digmic2_dq",
- "gpen13",
- "gpen14",
- "gpen15",
- "gpio00",
- "gpio01",
- "gpio02",
- "gpio03",
- "gpio04",
- "gpio05",
- "gpio06",
- "gpio07",
- "gpio08",
- "gpio09",
- "gpio10",
- "gpio11",
- "gpio12",
- "gpio13",
- "gpio14",
- "gps_pablank",
- "gps_tmark",
- "hdmi_scl",
- "hdmi_sda",
- "ic_dm",
- "ic_dp",
- "kp_col_ip_0",
- "kp_col_ip_1",
- "kp_col_ip_2",
- "kp_col_ip_3",
- "kp_row_op_0",
- "kp_row_op_1",
- "kp_row_op_2",
- "kp_row_op_3",
- "lcd_b_0",
- "lcd_b_1",
- "lcd_b_2",
- "lcd_b_3",
- "lcd_b_4",
- "lcd_b_5",
- "lcd_b_6",
- "lcd_b_7",
- "lcd_g_0",
- "lcd_g_1",
- "lcd_g_2",
- "lcd_g_3",
- "lcd_g_4",
- "lcd_g_5",
- "lcd_g_6",
- "lcd_g_7",
- "lcd_hsync",
- "lcd_oe",
- "lcd_pclk",
- "lcd_r_0",
- "lcd_r_1",
- "lcd_r_2",
- "lcd_r_3",
- "lcd_r_4",
- "lcd_r_5",
- "lcd_r_6",
- "lcd_r_7",
- "lcd_vsync",
- "mdmgpio0",
- "mdmgpio1",
- "mdmgpio2",
- "mdmgpio3",
- "mdmgpio4",
- "mdmgpio5",
- "mdmgpio6",
- "mdmgpio7",
- "mdmgpio8",
- "mphi_data_0",
- "mphi_data_1",
- "mphi_data_2",
- "mphi_data_3",
- "mphi_data_4",
- "mphi_data_5",
- "mphi_data_6",
- "mphi_data_7",
- "mphi_data_8",
- "mphi_data_9",
- "mphi_data_10",
- "mphi_data_11",
- "mphi_data_12",
- "mphi_data_13",
- "mphi_data_14",
- "mphi_data_15",
- "mphi_ha0",
- "mphi_hat0",
- "mphi_hat1",
- "mphi_hce0_n",
- "mphi_hce1_n",
- "mphi_hrd_n",
- "mphi_hwr_n",
- "mphi_run0",
- "mphi_run1",
- "mtx_scan_clk",
- "mtx_scan_data",
- "nand_ad_0",
- "nand_ad_1",
- "nand_ad_2",
- "nand_ad_3",
- "nand_ad_4",
- "nand_ad_5",
- "nand_ad_6",
- "nand_ad_7",
- "nand_ale",
- "nand_cen_0",
- "nand_cen_1",
- "nand_cle",
- "nand_oen",
- "nand_rdy_0",
- "nand_rdy_1",
- "nand_wen",
- "nand_wp",
- "pc1",
- "pc2",
- "pmu_int",
- "pmu_scl",
- "pmu_sda",
- "rfst2g_mtsloten3g",
- "rgmii_0_rx_ctl",
- "rgmii_0_rxc",
- "rgmii_0_rxd_0",
- "rgmii_0_rxd_1",
- "rgmii_0_rxd_2",
- "rgmii_0_rxd_3",
- "rgmii_0_tx_ctl",
- "rgmii_0_txc",
- "rgmii_0_txd_0",
- "rgmii_0_txd_1",
- "rgmii_0_txd_2",
- "rgmii_0_txd_3",
- "rgmii_1_rx_ctl",
- "rgmii_1_rxc",
- "rgmii_1_rxd_0",
- "rgmii_1_rxd_1",
- "rgmii_1_rxd_2",
- "rgmii_1_rxd_3",
- "rgmii_1_tx_ctl",
- "rgmii_1_txc",
- "rgmii_1_txd_0",
- "rgmii_1_txd_1",
- "rgmii_1_txd_2",
- "rgmii_1_txd_3",
- "rgmii_gpio_0",
- "rgmii_gpio_1",
- "rgmii_gpio_2",
- "rgmii_gpio_3",
- "rtxdata2g_txdata3g1",
- "rtxen2g_txdata3g2",
- "rxdata3g0",
- "rxdata3g1",
- "rxdata3g2",
- "sdio1_clk",
- "sdio1_cmd",
- "sdio1_data_0",
- "sdio1_data_1",
- "sdio1_data_2",
- "sdio1_data_3",
- "sdio4_clk",
- "sdio4_cmd",
- "sdio4_data_0",
- "sdio4_data_1",
- "sdio4_data_2",
- "sdio4_data_3",
- "sim_clk",
- "sim_data",
- "sim_det",
- "sim_resetn",
- "sim2_clk",
- "sim2_data",
- "sim2_det",
- "sim2_resetn",
- "sri_c",
- "sri_d",
- "sri_e",
- "ssp_extclk",
- "ssp0_clk",
- "ssp0_fs",
- "ssp0_rxd",
- "ssp0_txd",
- "ssp2_clk",
- "ssp2_fs_0",
- "ssp2_fs_1",
- "ssp2_fs_2",
- "ssp2_fs_3",
- "ssp2_rxd_0",
- "ssp2_rxd_1",
- "ssp2_txd_0",
- "ssp2_txd_1",
- "ssp3_clk",
- "ssp3_fs",
- "ssp3_rxd",
- "ssp3_txd",
- "ssp4_clk",
- "ssp4_fs",
- "ssp4_rxd",
- "ssp4_txd",
- "ssp5_clk",
- "ssp5_fs",
- "ssp5_rxd",
- "ssp5_txd",
- "ssp6_clk",
- "ssp6_fs",
- "ssp6_rxd",
- "ssp6_txd",
- "stat_1",
- "stat_2",
- "sysclken",
- "traceclk",
- "tracedt00",
- "tracedt01",
- "tracedt02",
- "tracedt03",
- "tracedt04",
- "tracedt05",
- "tracedt06",
- "tracedt07",
- "tracedt08",
- "tracedt09",
- "tracedt10",
- "tracedt11",
- "tracedt12",
- "tracedt13",
- "tracedt14",
- "tracedt15",
- "txdata3g0",
- "txpwrind",
- "uartb1_ucts",
- "uartb1_urts",
- "uartb1_urxd",
- "uartb1_utxd",
- "uartb2_urxd",
- "uartb2_utxd",
- "uartb3_ucts",
- "uartb3_urts",
- "uartb3_urxd",
- "uartb3_utxd",
- "uartb4_ucts",
- "uartb4_urts",
- "uartb4_urxd",
- "uartb4_utxd",
- "vc_cam1_scl",
- "vc_cam1_sda",
- "vc_cam2_scl",
- "vc_cam2_sda",
- "vc_cam3_scl",
- "vc_cam3_sda",
-};
-
-/* Every pin can implement all ALT1-ALT4 functions */
-#define CAPRI_PIN_FUNCTION(fcn_name) \
-{ \
- .name = #fcn_name, \
- .groups = capri_alt_groups, \
- .ngroups = ARRAY_SIZE(capri_alt_groups), \
-}
-
-static const struct capri_pin_function capri_functions[] = {
- CAPRI_PIN_FUNCTION(alt1),
- CAPRI_PIN_FUNCTION(alt2),
- CAPRI_PIN_FUNCTION(alt3),
- CAPRI_PIN_FUNCTION(alt4),
-};
-
-static struct capri_pinctrl_data capri_pinctrl = {
- .pins = capri_pinctrl_pins,
- .npins = ARRAY_SIZE(capri_pinctrl_pins),
- .functions = capri_functions,
- .nfunctions = ARRAY_SIZE(capri_functions),
-};
-
-static inline enum capri_pin_type pin_type_get(struct pinctrl_dev *pctldev,
- unsigned pin)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- if (pin >= pdata->npins)
- return CAPRI_PIN_TYPE_UNKNOWN;
-
- return *(enum capri_pin_type *)(pdata->pins[pin].drv_data);
-}
-
-#define CAPRI_PIN_SHIFT(type, param) \
- (CAPRI_ ## type ## _PIN_REG_ ## param ## _SHIFT)
-
-#define CAPRI_PIN_MASK(type, param) \
- (CAPRI_ ## type ## _PIN_REG_ ## param ## _MASK)
-
-/*
- * This helper function is used to build up the value and mask used to write to
- * a pin register, but does not actually write to the register.
- */
-static inline void capri_pin_update(u32 *reg_val, u32 *reg_mask, u32 param_val,
- u32 param_shift, u32 param_mask)
-{
- *reg_val &= ~param_mask;
- *reg_val |= (param_val << param_shift) & param_mask;
- *reg_mask |= param_mask;
-}
-
-static struct regmap_config capri_pinctrl_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = CAPRI_PIN_VC_CAM3_SDA,
-};
-
-static int capri_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- return pdata->npins;
-}
-
-static const char *capri_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
- unsigned group)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- return pdata->pins[group].name;
-}
-
-static int capri_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned group,
- const unsigned **pins,
- unsigned *num_pins)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- *pins = &pdata->pins[group].number;
- *num_pins = 1;
-
- return 0;
-}
-
-static void capri_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
- struct seq_file *s,
- unsigned offset)
-{
- seq_printf(s, " %s", dev_name(pctldev->dev));
-}
-
-static struct pinctrl_ops capri_pinctrl_ops = {
- .get_groups_count = capri_pinctrl_get_groups_count,
- .get_group_name = capri_pinctrl_get_group_name,
- .get_group_pins = capri_pinctrl_get_group_pins,
- .pin_dbg_show = capri_pinctrl_pin_dbg_show,
- .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
-};
-
-static int capri_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- return pdata->nfunctions;
-}
-
-static const char *capri_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
- unsigned function)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- return pdata->functions[function].name;
-}
-
-static int capri_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
- unsigned function,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
-
- *groups = pdata->functions[function].groups;
- *num_groups = pdata->functions[function].ngroups;
-
- return 0;
-}
-
-static int capri_pinmux_enable(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- const struct capri_pin_function *f = &pdata->functions[function];
- u32 offset = 4 * pdata->pins[group].number;
- int rc = 0;
-
- dev_dbg(pctldev->dev,
- "%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
- __func__, f->name, function, pdata->pins[group].name,
- pdata->pins[group].number, offset);
-
- rc = regmap_update_bits(pdata->regmap, offset, CAPRI_PIN_REG_F_SEL_MASK,
- function << CAPRI_PIN_REG_F_SEL_SHIFT);
- if (rc)
- dev_err(pctldev->dev,
- "Error updating register for pin %s (%d).\n",
- pdata->pins[group].name, pdata->pins[group].number);
-
- return rc;
-}
-
-static struct pinmux_ops capri_pinctrl_pinmux_ops = {
- .get_functions_count = capri_pinctrl_get_fcns_count,
- .get_function_name = capri_pinctrl_get_fcn_name,
- .get_function_groups = capri_pinctrl_get_fcn_groups,
- .enable = capri_pinmux_enable,
-};
-
-static int capri_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
- unsigned pin,
- unsigned long *config)
-{
- return -ENOTSUPP;
-}
-
-
-/* Goes through the configs and updates register val/mask */
-static int capri_std_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
- unsigned long *configs,
- unsigned num_configs,
- u32 *val,
- u32 *mask)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- int i;
- enum pin_config_param param;
- u16 arg;
-
- for (i = 0; i < num_configs; i++) {
- param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
-
- switch (param) {
- case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- arg = (arg >= 1 ? 1 : 0);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(STD, HYST),
- CAPRI_PIN_MASK(STD, HYST));
- break;
- /*
- * The pin bias can only be one of pull-up, pull-down, or
- * disable. The user does not need to specify a value for the
- * property, and the default value from pinconf-generic is
- * ignored.
- */
- case PIN_CONFIG_BIAS_DISABLE:
- capri_pin_update(val, mask, 0,
- CAPRI_PIN_SHIFT(STD, PULL_UP),
- CAPRI_PIN_MASK(STD, PULL_UP));
- capri_pin_update(val, mask, 0,
- CAPRI_PIN_SHIFT(STD, PULL_DN),
- CAPRI_PIN_MASK(STD, PULL_DN));
- break;
-
- case PIN_CONFIG_BIAS_PULL_UP:
- capri_pin_update(val, mask, 1,
- CAPRI_PIN_SHIFT(STD, PULL_UP),
- CAPRI_PIN_MASK(STD, PULL_UP));
- capri_pin_update(val, mask, 0,
- CAPRI_PIN_SHIFT(STD, PULL_DN),
- CAPRI_PIN_MASK(STD, PULL_DN));
- break;
-
- case PIN_CONFIG_BIAS_PULL_DOWN:
- capri_pin_update(val, mask, 0,
- CAPRI_PIN_SHIFT(STD, PULL_UP),
- CAPRI_PIN_MASK(STD, PULL_UP));
- capri_pin_update(val, mask, 1,
- CAPRI_PIN_SHIFT(STD, PULL_DN),
- CAPRI_PIN_MASK(STD, PULL_DN));
- break;
-
- case PIN_CONFIG_SLEW_RATE:
- arg = (arg >= 1 ? 1 : 0);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(STD, SLEW),
- CAPRI_PIN_MASK(STD, SLEW));
- break;
-
- case PIN_CONFIG_INPUT_ENABLE:
- /* inverted since register is for input _disable_ */
- arg = (arg >= 1 ? 0 : 1);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(STD, INPUT_DIS),
- CAPRI_PIN_MASK(STD, INPUT_DIS));
- break;
-
- case PIN_CONFIG_DRIVE_STRENGTH:
- /* Valid range is 2-16 mA, even numbers only */
- if ((arg < 2) || (arg > 16) || (arg % 2)) {
- dev_err(pctldev->dev,
- "Invalid Drive Strength value (%d) for "
- "pin %s (%d). Valid values are "
- "(2..16) mA, even numbers only.\n",
- arg, pdata->pins[pin].name, pin);
- return -EINVAL;
- }
- capri_pin_update(val, mask, (arg/2)-1,
- CAPRI_PIN_SHIFT(STD, DRV_STR),
- CAPRI_PIN_MASK(STD, DRV_STR));
- break;
-
- default:
- dev_err(pctldev->dev,
- "Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
- return -EINVAL;
-
- } /* switch config */
- } /* for each config */
-
- return 0;
-}
-
-/*
- * The pull-up strength for an I2C pin is represented by bits 4-6 in the
- * register with the following mapping:
- * 0b000: No pull-up
- * 0b001: 1200 Ohm
- * 0b010: 1800 Ohm
- * 0b011: 720 Ohm
- * 0b100: 2700 Ohm
- * 0b101: 831 Ohm
- * 0b110: 1080 Ohm
- * 0b111: 568 Ohm
- * This array maps pull-up strength in Ohms to register values (1+index).
- */
-static const u16 capri_pullup_map[] = {1200, 1800, 720, 2700, 831, 1080, 568};
-
-/* Goes through the configs and updates register val/mask */
-static int capri_i2c_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
- unsigned long *configs,
- unsigned num_configs,
- u32 *val,
- u32 *mask)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- int i, j;
- enum pin_config_param param;
- u16 arg;
-
- for (i = 0; i < num_configs; i++) {
- param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
-
- switch (param) {
- case PIN_CONFIG_BIAS_PULL_UP:
- for (j = 0; j < ARRAY_SIZE(capri_pullup_map); j++)
- if (capri_pullup_map[j] == arg)
- break;
-
- if (j == ARRAY_SIZE(capri_pullup_map)) {
- dev_err(pctldev->dev,
- "Invalid pull-up value (%d) for pin %s "
- "(%d). Valid values are 568, 720, 831, "
- "1080, 1200, 1800, 2700 Ohms.\n",
- arg, pdata->pins[pin].name, pin);
- return -EINVAL;
- }
-
- capri_pin_update(val, mask, j+1,
- CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
- CAPRI_PIN_MASK(I2C, PULL_UP_STR));
- break;
-
- case PIN_CONFIG_BIAS_DISABLE:
- capri_pin_update(val, mask, 0,
- CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
- CAPRI_PIN_MASK(I2C, PULL_UP_STR));
- break;
-
- case PIN_CONFIG_SLEW_RATE:
- arg = (arg >= 1 ? 1 : 0);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(I2C, SLEW),
- CAPRI_PIN_MASK(I2C, SLEW));
- break;
-
- case PIN_CONFIG_INPUT_ENABLE:
- /* inverted since register is for input _disable_ */
- arg = (arg >= 1 ? 0 : 1);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(I2C, INPUT_DIS),
- CAPRI_PIN_MASK(I2C, INPUT_DIS));
- break;
-
- default:
- dev_err(pctldev->dev,
- "Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
- return -EINVAL;
-
- } /* switch config */
- } /* for each config */
-
- return 0;
-}
-
-/* Goes through the configs and updates register val/mask */
-static int capri_hdmi_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
- unsigned long *configs,
- unsigned num_configs,
- u32 *val,
- u32 *mask)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- int i;
- enum pin_config_param param;
- u16 arg;
-
- for (i = 0; i < num_configs; i++) {
- param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
-
- switch (param) {
- case PIN_CONFIG_SLEW_RATE:
- arg = (arg >= 1 ? 1 : 0);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(HDMI, MODE),
- CAPRI_PIN_MASK(HDMI, MODE));
- break;
-
- case PIN_CONFIG_INPUT_ENABLE:
- /* inverted since register is for input _disable_ */
- arg = (arg >= 1 ? 0 : 1);
- capri_pin_update(val, mask, arg,
- CAPRI_PIN_SHIFT(HDMI, INPUT_DIS),
- CAPRI_PIN_MASK(HDMI, INPUT_DIS));
- break;
-
- default:
- dev_err(pctldev->dev,
- "Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
- return -EINVAL;
-
- } /* switch config */
- } /* for each config */
-
- return 0;
-}
-
-static int capri_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
- unsigned pin,
- unsigned long *configs,
- unsigned num_configs)
-{
- struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- enum capri_pin_type pin_type;
- u32 offset = 4 * pin;
- u32 cfg_val, cfg_mask;
- int rc;
-
- cfg_val = 0;
- cfg_mask = 0;
- pin_type = pin_type_get(pctldev, pin);
-
- /* Different pins have different configuration options */
- switch (pin_type) {
- case CAPRI_PIN_TYPE_STD:
- rc = capri_std_pin_update(pctldev, pin, configs, num_configs,
- &cfg_val, &cfg_mask);
- break;
-
- case CAPRI_PIN_TYPE_I2C:
- rc = capri_i2c_pin_update(pctldev, pin, configs, num_configs,
- &cfg_val, &cfg_mask);
- break;
-
- case CAPRI_PIN_TYPE_HDMI:
- rc = capri_hdmi_pin_update(pctldev, pin, configs, num_configs,
- &cfg_val, &cfg_mask);
- break;
-
- default:
- dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
- pdata->pins[pin].name, pin);
- return -EINVAL;
-
- } /* switch pin type */
-
- if (rc)
- return rc;
-
- dev_dbg(pctldev->dev,
- "%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
- __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
-
- rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
- if (rc) {
- dev_err(pctldev->dev,
- "Error updating register for pin %s (%d).\n",
- pdata->pins[pin].name, pin);
- return rc;
- }
-
- return 0;
-}
-
-static struct pinconf_ops capri_pinctrl_pinconf_ops = {
- .pin_config_get = capri_pinctrl_pin_config_get,
- .pin_config_set = capri_pinctrl_pin_config_set,
-};
-
-static struct pinctrl_desc capri_pinctrl_desc = {
- /* name, pins, npins members initialized in probe function */
- .pctlops = &capri_pinctrl_ops,
- .pmxops = &capri_pinctrl_pinmux_ops,
- .confops = &capri_pinctrl_pinconf_ops,
- .owner = THIS_MODULE,
-};
-
-int __init capri_pinctrl_probe(struct platform_device *pdev)
-{
- struct capri_pinctrl_data *pdata = &capri_pinctrl;
- struct resource *res;
- struct pinctrl_dev *pctl;
-
- /* So far we can assume there is only 1 bank of registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pdata->reg_base)) {
- dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
- return -ENODEV;
- }
-
- /* Initialize the dynamic part of pinctrl_desc */
- pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
- &capri_pinctrl_regmap_config);
- if (IS_ERR(pdata->regmap)) {
- dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
- return -ENODEV;
- }
-
- capri_pinctrl_desc.name = dev_name(&pdev->dev);
- capri_pinctrl_desc.pins = capri_pinctrl.pins;
- capri_pinctrl_desc.npins = capri_pinctrl.npins;
-
- pctl = pinctrl_register(&capri_pinctrl_desc,
- &pdev->dev,
- pdata);
- if (!pctl) {
- dev_err(&pdev->dev, "Failed to register pinctrl\n");
- return -ENODEV;
- }
-
- platform_set_drvdata(pdev, pdata);
-
- return 0;
-}
-
-static struct of_device_id capri_pinctrl_of_match[] = {
- { .compatible = "brcm,bcm11351-pinctrl", },
- { },
-};
-
-static struct platform_driver capri_pinctrl_driver = {
- .driver = {
- .name = "bcm-capri-pinctrl",
- .owner = THIS_MODULE,
- .of_match_table = capri_pinctrl_of_match,
- },
-};
-
-module_platform_driver_probe(capri_pinctrl_driver, capri_pinctrl_probe);
-
-MODULE_AUTHOR("Sherman Yin <syin@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom Capri pinctrl driver");
-MODULE_LICENSE("GPL v2");
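For reference, a small standalone sketch of the two value encodings implemented by the pinconf handlers deleted above. The pull-up table is copied from the removed capri_pullup_map; the function names and the main() driver are illustrative only and are not part of the patch.

/* Illustrative sketch, not kernel code: shows the field encodings used by
 * the deleted capri_std_pin_update()/capri_i2c_pin_update() handlers. */
#include <stdio.h>

/* copied from the removed capri_pullup_map[] */
static const unsigned pullup_ohms[] = {1200, 1800, 720, 2700, 831, 1080, 568};

/* drive strength: 2..16 mA in 2 mA steps is stored as (mA / 2) - 1 */
static int drv_str_field(unsigned mA)
{
        if (mA < 2 || mA > 16 || (mA % 2))
                return -1;              /* rejected, as in the driver */
        return (mA / 2) - 1;            /* 2 mA -> 0 ... 16 mA -> 7 */
}

/* I2C pull-up: N Ohms is stored as (table index + 1); 0 means no pull-up */
static int i2c_pullup_field(unsigned ohms)
{
        unsigned i;

        for (i = 0; i < sizeof(pullup_ohms) / sizeof(pullup_ohms[0]); i++)
                if (pullup_ohms[i] == ohms)
                        return i + 1;
        return -1;                      /* rejected, as in the driver */
}

int main(void)
{
        printf("8 mA     -> field %d\n", drv_str_field(8));        /* 3 */
        printf("1200 Ohm -> field %d\n", i2c_pullup_field(1200));  /* 0b001 */
        return 0;
}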
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
index 38d579b..e43fbce 100644
--- a/drivers/pinctrl/pinctrl-msm.c
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -665,7 +665,10 @@ static void msm_gpio_irq_ack(struct irq_data *d)
spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
- val &= ~BIT(g->intr_status_bit);
+ if (g->intr_ack_high)
+ val |= BIT(g->intr_status_bit);
+ else
+ val &= ~BIT(g->intr_status_bit);
writel(val, pctrl->regs + g->intr_status_reg);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -744,6 +747,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
break;
case IRQ_TYPE_EDGE_BOTH:
val |= BIT(g->intr_detection_bit);
+ val |= BIT(g->intr_polarity_bit);
break;
case IRQ_TYPE_LEVEL_LOW:
break;
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
index 8fbe9fb..6e26f1b 100644
--- a/drivers/pinctrl/pinctrl-msm.h
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -84,6 +84,7 @@ struct msm_pingroup {
unsigned intr_enable_bit:5;
unsigned intr_status_bit:5;
+ unsigned intr_ack_high:1;
unsigned intr_target_bit:5;
unsigned intr_raw_status_bit:5;
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 208341f..8f6f16e 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -877,7 +877,6 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
u32 status;
- pr_err("PLONK IRQ %d\n", irq);
clk_enable(nmk_chip->clk);
status = readl(nmk_chip->addr + NMK_GPIO_IS);
clk_disable(nmk_chip->clk);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 46dddc1..96c60d2 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -342,7 +342,7 @@ static const struct pinctrl_ops rockchip_pctrl_ops = {
* @pin: pin to change
* @mux: new mux function to set
*/
-static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
+static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
void __iomem *reg = info->reg_base + info->ctrl->mux_offset;
@@ -350,6 +350,20 @@ static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
u8 bit;
u32 data;
+ /*
+ * The first 16 pins of rk3188_bank0 are always gpios and do not have
+ * a mux register at all.
+ */
+ if (bank->bank_type == RK3188_BANK0 && pin < 16) {
+ if (mux != RK_FUNC_GPIO) {
+ dev_err(info->dev,
+ "pin %d only supports a gpio mux\n", pin);
+ return -ENOTSUPP;
+ } else {
+ return 0;
+ }
+ }
+
dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
bank->bank_num, pin, mux);
@@ -365,6 +379,8 @@ static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
writel(data, reg);
spin_unlock_irqrestore(&bank->slock, flags);
+
+ return 0;
}
#define RK2928_PULL_OFFSET 0x118
@@ -560,7 +576,7 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
const unsigned int *pins = info->groups[group].pins;
const struct rockchip_pin_config *data = info->groups[group].data;
struct rockchip_pin_bank *bank;
- int cnt;
+ int cnt, ret = 0;
dev_dbg(info->dev, "enable function %s group %s\n",
info->functions[selector].name, info->groups[group].name);
@@ -571,8 +587,18 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
*/
for (cnt = 0; cnt < info->groups[group].npins; cnt++) {
bank = pin_to_bank(info, pins[cnt]);
- rockchip_set_mux(bank, pins[cnt] - bank->pin_base,
- data[cnt].func);
+ ret = rockchip_set_mux(bank, pins[cnt] - bank->pin_base,
+ data[cnt].func);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ /* revert the already done pin settings */
+ for (cnt--; cnt >= 0; cnt--)
+ rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+
+ return ret;
}
return 0;
@@ -607,7 +633,7 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank;
struct gpio_chip *chip;
- int pin;
+ int pin, ret;
u32 data;
chip = range->gc;
@@ -617,7 +643,9 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
offset, range->name, pin, input ? "input" : "output");
- rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
+ ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
+ if (ret < 0)
+ return ret;
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
/* set bit to 1 for output, 0 for input */
@@ -1144,9 +1172,13 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
u32 polarity;
u32 level;
u32 data;
+ int ret;
/* make sure the pin is configured as gpio input */
- rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
+ ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
+ if (ret < 0)
+ return ret;
+
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
data &= ~mask;
writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
@@ -1534,7 +1566,7 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = {
.nr_banks = ARRAY_SIZE(rk3188_pin_banks),
.label = "RK3188-GPIO",
.type = RK3188,
- .mux_offset = 0x68,
+ .mux_offset = 0x60,
.pull_calc_reg = rk3188_calc_pull_reg_and_bit,
};
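The rockchip change above turns rockchip_set_mux() into a fallible operation and makes rockchip_pmx_enable() unwind partial work on failure. Below is a generic, driver-agnostic sketch of that unwind-on-error pattern; apply_one() and revert_one() are placeholders, not kernel APIs.

static int apply_all(const int *items, int n,
                     int (*apply_one)(int), void (*revert_one)(int))
{
        int i, ret = 0;

        for (i = 0; i < n; i++) {
                ret = apply_one(items[i]);
                if (ret)
                        break;
        }

        if (ret) {
                /* revert only the entries that already succeeded */
                for (i--; i >= 0; i--)
                        revert_one(items[i]);
        }

        return ret;
}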
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 129f7b9..3841b98 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -201,23 +201,11 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
}
bootreg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!bootreg_res) {
- dev_err(dev,
- "platform_get_resource(IORESOURCE_MEM, 0): NULL\n");
- return -EADDRNOTAVAIL;
- }
-
- chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!chipsig_res) {
- dev_err(dev,
- "platform_get_resource(IORESOURCE_MEM, 1): NULL\n");
- return -EADDRNOTAVAIL;
- }
-
bootreg = devm_ioremap_resource(dev, bootreg_res);
if (IS_ERR(bootreg))
return PTR_ERR(bootreg);
+ chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
chipsig = devm_ioremap_resource(dev, chipsig_res);
if (IS_ERR(chipsig))
return PTR_ERR(chipsig);
@@ -301,8 +289,6 @@ static int da8xx_rproc_remove(struct platform_device *pdev)
*/
disable_irq(drproc->irq);
- devm_clk_put(dev, drproc->dsp_clk);
-
rproc_del(rproc);
rproc_put(rproc);
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index 1ec39a4..c4ac910 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -164,7 +164,7 @@ sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
}
/* STE modem firmware handler operations */
-const struct rproc_fw_ops sproc_fw_ops = {
+static const struct rproc_fw_ops sproc_fw_ops = {
.load = sproc_load_segments,
.find_rsc_table = sproc_find_rsc_table,
.find_loaded_rsc_table = sproc_find_loaded_rsc_table,
@@ -193,7 +193,7 @@ static void sproc_kick_callback(struct ste_modem_device *mdev, int vqid)
sproc_dbg(sproc, "no message was found in vqid %d\n", vqid);
}
-struct ste_modem_dev_cb sproc_dev_cb = {
+static struct ste_modem_dev_cb sproc_dev_cb = {
.kick = sproc_kick_callback,
};
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 1990285..c316051 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1252,7 +1252,7 @@ static __init int sclp_initcall(void)
return rc;
sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
- rc = PTR_RET(sclp_pdev);
+ rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto fail_platform_driver_unregister;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 6e8f90f..6e14999 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -515,7 +515,7 @@ static int __init sclp_detect_standby_memory(void)
if (rc)
goto out;
sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
- rc = PTR_RET(sclp_pdev);
+ rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto out_driver;
sclp_add_standby_memory();
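Both sclp hunks are a pure rename: PTR_RET() was the older name for PTR_ERR_OR_ZERO(), which lives in include/linux/err.h. Their shared semantics, open-coded as a sketch so the conversion is obvious:

/* equivalent of PTR_ERR_OR_ZERO(ptr), shown for clarity only */
static inline int ptr_err_or_zero(const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        return 0;
}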
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 4eed38c..cd9c919 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -97,13 +97,16 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
-/* Registration structure for our interest in SCLP event buffers */
+/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
+ .pm_event_fn = sclp_vt220_pm_event_fn,
+};
+
+/* Registration structure for SCLP input event buffers */
+static struct sclp_register sclp_vt220_register_input = {
.receive_mask = EVTYP_VT220MSG_MASK,
- .state_change_fn = NULL,
.receiver_fn = sclp_vt220_receiver_fn,
- .pm_event_fn = sclp_vt220_pm_event_fn,
};
@@ -715,9 +718,14 @@ static int __init sclp_vt220_tty_init(void)
rc = tty_register_driver(driver);
if (rc)
goto out_init;
+ rc = sclp_register(&sclp_vt220_register_input);
+ if (rc)
+ goto out_reg;
sclp_vt220_driver = driver;
return 0;
+out_reg:
+ tty_unregister_driver(driver);
out_init:
__sclp_vt220_cleanup();
out_driver:
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c8bd092..02832d6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC
You can override this choice by specifying "scsi_mod.scan=sync"
or async on the kernel's command line.
+ Note that this setting also affects whether resuming from
+ system suspend will be performed asynchronously.
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bfb6d07..1185484 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -125,7 +125,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
{
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 666fe09..f42ecb23 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -40,7 +40,7 @@ struct iscsi_sw_tcp_conn {
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
- void (*old_data_ready)(struct sock *, int);
+ void (*old_data_ready)(struct sock *);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 788c4fe..68fb66f 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!cmd->sg_mapped)
+ return;
+
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+}
+
/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
}
se_tpg = &tpg->se_tpg;
- se_sess = transport_init_session();
+ se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(se_sess)) {
pr_err("Unable to initialize struct se_session\n");
return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .aborted_task = tcm_qla2xxx_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .aborted_task = tcm_qla2xxx_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c4d632c..88d46fe 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -91,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level);
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
+/*
+ * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+ * asynchronous system resume operations. It is marked 'exclusive' to avoid
+ * being included in the async_synchronize_full() that is invoked by
+ * dpm_resume()
+ */
+ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+EXPORT_SYMBOL(scsi_sd_pm_domain);
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 001e9ce..7454498 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -18,35 +18,77 @@
#ifdef CONFIG_PM_SLEEP
-static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
+static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+
+static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+
+static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+
+static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+
+static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+
+static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+
+static int scsi_dev_type_suspend(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err;
+ /* flush pending in-flight resume operations, suspend is synchronous */
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
- if (cb) {
- err = cb(dev);
- if (err)
- scsi_device_resume(to_scsi_device(dev));
- }
+ err = cb(dev, pm);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;
}
-static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
+static int scsi_dev_type_resume(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- if (cb)
- err = cb(dev);
+ err = cb(dev, pm);
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
+
+ if (err == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
return err;
}
static int
-scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
+scsi_bus_suspend_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
{
int err = 0;
@@ -66,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
return err;
}
-static int
-scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
+static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
- int err = 0;
+ scsi_dev_type_resume(dev, do_scsi_resume);
+}
- if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev, cb);
+static void async_sdev_thaw(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_thaw);
+}
- if (err == 0) {
+static void async_sdev_restore(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_restore);
+}
+
+static int scsi_bus_resume_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ async_func_t fn;
+
+ if (!scsi_is_sdev_device(dev))
+ fn = NULL;
+ else if (cb == do_scsi_resume)
+ fn = async_sdev_resume;
+ else if (cb == do_scsi_thaw)
+ fn = async_sdev_thaw;
+ else if (cb == do_scsi_restore)
+ fn = async_sdev_restore;
+ else
+ fn = NULL;
+
+ if (fn) {
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+ /*
+ * If a user has disabled async probing a likely reason
+ * is due to a storage enclosure that does not inject
+ * staggered spin-ups. For safety, make resume
+ * synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+ } else {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
- return err;
+ return 0;
}
static int scsi_bus_prepare(struct device *dev)
@@ -97,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev)
static int scsi_bus_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_suspend);
}
static int scsi_bus_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_resume);
}
static int scsi_bus_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_freeze);
}
static int scsi_bus_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_thaw);
}
static int scsi_bus_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}
static int scsi_bus_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_restore);
}
#else /* CONFIG_PM_SLEEP */
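The scsi_pm.c rework above relies on the async-domain API: resume work is queued into a private, exclusive domain so dpm_resume() does not wait on it, and consumers flush that domain when they actually need the devices. A driver-agnostic sketch of the shape of that pattern; ASYNC_DOMAIN_EXCLUSIVE(), async_schedule_domain() and async_synchronize_full_domain() are the real <linux/async.h> APIs, while the my_* names are placeholders.

#include <linux/async.h>
#include <linux/device.h>

static ASYNC_DOMAIN_EXCLUSIVE(my_pm_domain);

/* per-device work, runs concurrently and off the dpm_resume() path */
static void my_resume_one(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        dev_dbg(dev, "resumed asynchronously\n");
}

static void my_bus_resume(struct device *dev)
{
        /* queue the work in the private domain ... */
        async_schedule_domain(my_resume_one, dev, &my_pm_domain);
}

static void my_wait_for_resumes(void)
{
        /* ... and flush that domain before anything needs the devices ready */
        async_synchronize_full_domain(&my_pm_domain);
}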
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f079a59..48e5b65 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -112,6 +112,7 @@ extern void scsi_exit_procfs(void);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
+extern char scsi_scan_type[];
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, unsigned int, int);
@@ -166,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM_RUNTIME */
+extern struct async_domain scsi_sd_pm_domain;
extern struct async_domain scsi_sd_probe_domain;
/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 27f96d5..e02b3aa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
MODULE_PARM_DESC(scan, "sync, async or none");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 89e6c04..efcbcd1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3026,6 +3026,7 @@ static int sd_remove(struct device *dev)
devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
async_synchronize_full_domain(&scsi_sd_probe_domain);
blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index d92fe40..6b349e3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
if ((target == -1 || cp->target == target) &&
(lun == -1 || cp->lun == lun) &&
(task == -1 || cp->tag == task)) {
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
+#else
+ sym_set_cam_status(cp->cmd, DID_REQUEUE);
+#endif
sym_remque(&cp->link_ccbq);
sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index a54b506..37758d1 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -99,16 +99,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_niov;
#endif
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = scratchiov,
- .msg_iovlen = niov,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = MSG_DONTWAIT
- };
- mm_segment_t oldmm = get_fs();
+ struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
for (nob = i = 0; i < niov; i++) {
@@ -120,9 +111,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
- set_fs (KERNEL_DS);
- rc = sock_sendmsg(sock, &msg, nob);
- set_fs (oldmm);
+ rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
}
return rc;
}
@@ -174,16 +163,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_nkiov;
#endif
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = scratchiov,
- .msg_iovlen = niov,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = MSG_DONTWAIT
- };
- mm_segment_t oldmm = get_fs();
+ struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
for (nob = i = 0; i < niov; i++) {
@@ -196,9 +176,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
- set_fs (KERNEL_DS);
- rc = sock_sendmsg(sock, &msg, nob);
- set_fs (oldmm);
+ rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
for (i = 0; i < niov; i++)
kunmap(kiov[i].kiov_page);
@@ -237,15 +215,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
#endif
struct iovec *iov = conn->ksnc_rx_iov;
struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = scratchiov,
- .msg_iovlen = niov,
- .msg_control = NULL,
- .msg_controllen = 0,
.msg_flags = 0
};
- mm_segment_t oldmm = get_fs();
int nob;
int i;
int rc;
@@ -263,10 +234,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
- set_fs (KERNEL_DS);
- rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
- /* NB this is just a boolean..........................^ */
- set_fs (oldmm);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+ (struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -355,14 +324,8 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
#endif
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = scratchiov,
- .msg_control = NULL,
- .msg_controllen = 0,
.msg_flags = 0
};
- mm_segment_t oldmm = get_fs();
int nob;
int i;
int rc;
@@ -370,13 +333,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
void *addr;
int sum;
int fragnob;
+ int n;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
if (addr != NULL) {
nob = scratchiov[0].iov_len;
- msg.msg_iovlen = 1;
+ n = 1;
} else {
for (nob = i = 0; i < niov; i++) {
@@ -384,15 +348,13 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
}
- msg.msg_iovlen = niov;
+ n = niov;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
- set_fs (KERNEL_DS);
- rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
- /* NB this is just a boolean.......................^ */
- set_fs (oldmm);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+ (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
if (conn->ksnc_msg.ksm_csum != 0) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
@@ -655,7 +617,7 @@ extern void ksocknal_write_callback (ksock_conn_t *conn);
* socket call back in Linux
*/
static void
-ksocknal_data_ready (struct sock *sk, int n)
+ksocknal_data_ready (struct sock *sk)
{
ksock_conn_t *conn;
@@ -666,7 +628,7 @@ ksocknal_data_ready (struct sock *sk, int n)
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
- sk->sk_data_ready (sk, n);
+ sk->sk_data_ready (sk);
} else
ksocknal_read_callback(conn);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index e6069d7..7539fe1 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -265,17 +265,11 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
* empty enough to take the whole message immediately */
for (;;) {
- struct iovec iov = {
+ struct kvec iov = {
.iov_base = buffer,
.iov_len = nob
};
struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &iov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
.msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
};
@@ -297,11 +291,9 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
}
}
- set_fs (KERNEL_DS);
then = jiffies;
- rc = sock_sendmsg (sock, &msg, iov.iov_len);
+ rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
ticks -= jiffies - then;
- set_fs (oldmm);
if (rc == nob)
return 0;
@@ -338,17 +330,11 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
LASSERT (ticks > 0);
for (;;) {
- struct iovec iov = {
+ struct kvec iov = {
.iov_base = buffer,
.iov_len = nob
};
struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &iov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
.msg_flags = 0
};
@@ -367,11 +353,9 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
return rc;
}
- set_fs(KERNEL_DS);
then = jiffies;
- rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
+ rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
ticks -= jiffies - then;
- set_fs(oldmm);
if (rc < 0)
return rc;
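Both socklnd and libcfs above drop the open-coded set_fs(KERNEL_DS) / sock_sendmsg() / set_fs(oldmm) sequence in favour of kernel_sendmsg()/kernel_recvmsg(), which take a struct kvec array describing kernel memory directly. A minimal sketch of the new call shape (helper name and error handling are illustrative only):

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int send_kernel_buf(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

        /* no address-limit games: the kvec is known to be kernel memory */
        return kernel_sendmsg(sock, &msg, &iov, 1, len);
}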
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index ab06891..80d48b5 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -115,27 +115,6 @@ failed:
return rc;
}
-static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
-{
- struct inode *inode = dentry->d_inode;
- struct ptlrpc_request *request;
- char *symname;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op\n");
-
- ll_inode_size_lock(inode);
- rc = ll_readlink_internal(inode, &request, &symname);
- if (rc)
- GOTO(out, rc);
-
- rc = vfs_readlink(dentry, buffer, buflen, symname);
- out:
- ptlrpc_req_finished(request);
- ll_inode_size_unlock(inode);
- return rc;
-}
-
static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
@@ -175,7 +154,7 @@ static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cooki
}
struct inode_operations ll_fast_symlink_inode_operations = {
- .readlink = ll_readlink,
+ .readlink = generic_readlink,
.setattr = ll_setattr,
.follow_link = ll_follow_link,
.put_link = ll_put_link,
diff --git a/drivers/staging/media/msi3101/msi001.c b/drivers/staging/media/msi3101/msi001.c
index ac43bae..bd0b93c 100644
--- a/drivers/staging/media/msi3101/msi001.c
+++ b/drivers/staging/media/msi3101/msi001.c
@@ -201,7 +201,7 @@ static int msi001_set_tuner(struct msi001 *s)
dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
__func__, bandwidth_lut[i].freq);
- f_vco = (f_rf + f_if + f_if1) * lo_div;
+ f_vco = (u64) (f_rf + f_if + f_if1) * lo_div;
tmp64 = f_vco;
m = do_div(tmp64, F_REF * R_REF);
n = (unsigned int) tmp64;
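The msi001 fix above is a classic integer-promotion bug: f_rf, f_if, f_if1 and lo_div are 32-bit, so without the cast the multiplication is done in 32 bits and can wrap before the result reaches the 64-bit f_vco. Illustrative values (not taken from the driver):

        u32 f_rf = 2000000000U, f_if = 0, f_if1 = 0, lo_div = 4;

        u64 bad  = (f_rf + f_if + f_if1) * lo_div;       /* 32-bit multiply, wraps */
        u64 good = (u64)(f_rf + f_if + f_if1) * lo_div;  /* widened first: 8000000000 */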
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 260d1b7..65d351f 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -913,7 +913,6 @@ static int msi3101_set_usb_adc(struct msi3101_state *s)
/* set tuner, subdev, filters according to sampling rate */
bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
- bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
@@ -1078,6 +1077,7 @@ static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
static int msi3101_stop_streaming(struct vb2_queue *vq)
{
struct msi3101_state *s = vb2_get_drv_priv(vq);
+ int ret;
dev_dbg(&s->udev->dev, "%s:\n", __func__);
if (mutex_lock_interruptible(&s->v4l2_lock))
@@ -1090,17 +1090,22 @@ static int msi3101_stop_streaming(struct vb2_queue *vq)
/* according to tests, at least 700us delay is required */
msleep(20);
- msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+ ret = msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+ if (ret)
+ goto err_sleep_tuner;
/* sleep USB IF / ADC */
- msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+ if (ret)
+ goto err_sleep_tuner;
+err_sleep_tuner:
/* sleep tuner */
- v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
+ ret = v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
mutex_unlock(&s->v4l2_lock);
- return 0;
+ return ret;
}
static struct vb2_ops msi3101_vb2_ops = {
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 773d8ca..de692d7 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -86,7 +86,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
struct stub_device *sdev = dev_get_drvdata(dev);
int sockfd = 0;
struct socket *socket;
- ssize_t err = -EINVAL;
int rv;
if (!sdev) {
@@ -99,6 +98,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (sockfd != -1) {
+ int err;
dev_info(dev, "stub up\n");
spin_lock_irq(&sdev->ud.lock);
@@ -108,7 +108,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
goto err;
}
- socket = sockfd_to_socket(sockfd);
+ socket = sockfd_lookup(sockfd, &err);
if (!socket)
goto err;
@@ -141,7 +141,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
err:
spin_unlock_irq(&sdev->ud.lock);
- return err;
+ return -EINVAL;
}
static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
@@ -211,7 +211,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
* not touch NULL socket.
*/
if (ud->tcp_socket) {
- fput(ud->tcp_socket->file);
+ sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
}
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 184fa70..facaaf0 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -382,31 +382,6 @@ err:
}
EXPORT_SYMBOL_GPL(usbip_recv);
-struct socket *sockfd_to_socket(unsigned int sockfd)
-{
- struct socket *socket;
- struct file *file;
- struct inode *inode;
-
- file = fget(sockfd);
- if (!file) {
- pr_err("invalid sockfd\n");
- return NULL;
- }
-
- inode = file_inode(file);
-
- if (!inode || !S_ISSOCK(inode->i_mode)) {
- fput(file);
- return NULL;
- }
-
- socket = SOCKET_I(inode);
-
- return socket;
-}
-EXPORT_SYMBOL_GPL(sockfd_to_socket);
-
/* there may be more cases to tweak the flags. */
static unsigned int tweak_transfer_flags(unsigned int flags)
{
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 732fb63..f555d83 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -299,7 +299,6 @@ void usbip_dump_urb(struct urb *purb);
void usbip_dump_header(struct usbip_header *pdu);
int usbip_recv(struct socket *sock, void *buf, int size);
-struct socket *sockfd_to_socket(unsigned int sockfd);
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
int pack);
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 1e84577..70e1755 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -788,7 +788,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
/* active connection is closed */
if (vdev->ud.tcp_socket) {
- fput(vdev->ud.tcp_socket->file);
+ sockfd_put(vdev->ud.tcp_socket);
vdev->ud.tcp_socket = NULL;
}
pr_info("release socket\n");
@@ -835,7 +835,7 @@ static void vhci_device_reset(struct usbip_device *ud)
vdev->udev = NULL;
if (ud->tcp_socket) {
- fput(ud->tcp_socket->file);
+ sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
}
ud->status = VDEV_ST_NULL;
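All of the usbip call sites above move from the driver-private sockfd_to_socket() to the core sockfd_lookup()/sockfd_put() pair, which also changes the reference-counting rule: sockfd_lookup() takes a reference on the socket's file, and the matching release is sockfd_put() rather than fput() on the socket's file. A sketch of the pairing with placeholder helper names:

#include <linux/net.h>

static struct socket *attach_socket(int sockfd)
{
        int err;
        struct socket *sock = sockfd_lookup(sockfd, &err);  /* grabs a file ref */

        return sock;            /* NULL on failure, err holds the reason */
}

static void detach_socket(struct socket *sock)
{
        sockfd_put(sock);       /* drops the reference taken by sockfd_lookup() */
}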
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index e098032..47bddcd 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -176,6 +176,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
struct socket *socket;
int sockfd = 0;
__u32 rhport = 0, devid = 0, speed = 0;
+ int err;
/*
* @rhport: port number of vhci_hcd
@@ -194,8 +195,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/* Extract socket from fd. */
- /* The correct way to clean this up is to fput(socket->file). */
- socket = sockfd_to_socket(sockfd);
+ socket = sockfd_lookup(sockfd, &err);
if (!socket)
return -EINVAL;
@@ -211,7 +211,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
spin_unlock(&vdev->ud.lock);
spin_unlock(&the_controller->lock);
- fput(socket->file);
+ sockfd_put(socket);
dev_err(dev, "port %d already used\n", rhport);
return -EINVAL;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b83ec37..78cab13 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -499,6 +499,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
return 0;
}
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ __iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
static struct iscsit_transport iscsi_target_transport = {
.name = "iSCSI/TCP",
.transport_type = ISCSI_TCP,
@@ -513,6 +530,8 @@ static struct iscsit_transport iscsi_target_transport = {
.iscsit_response_queue = iscsit_response_queue,
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
u32 payload_length = ntoh24(hdr->dlength);
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ }
+
if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
@@ -2468,6 +2497,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd;
struct iscsi_conn *conn_p;
+ bool found = false;
/*
* Only send an Asynchronous Message on connections whose network
@@ -2476,11 +2506,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
iscsit_inc_conn_usage_count(conn_p);
+ found = true;
break;
}
}
- if (!conn_p)
+ if (!found)
return;
cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
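The added found flag works around a well-known list_for_each_entry() pitfall: if the loop finishes without a break, the cursor does not become NULL (it points at memory computed from the list head), so the old `if (!conn_p)` test could never fire. Generic form of the fix:

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head node;
        int id;
};

static struct item *find_item(struct list_head *head, int id)
{
        struct item *it;
        bool found = false;

        list_for_each_entry(it, head, node) {
                if (it->id == id) {
                        found = true;
                        break;
                }
        }
        /* 'it' is never NULL here; only 'found' says whether we matched */
        return found ? it : NULL;
}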
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1c0088f..ae03f3e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(default_erl);
TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
&iscsi_tpg_attrib_demo_mode_discovery.attr,
&iscsi_tpg_attrib_default_erl.attr,
+ &iscsi_tpg_attrib_t10_pi.attr,
NULL,
};
@@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void)
fabric->tf_ops.queue_data_in = &lio_queue_data_in;
fabric->tf_ops.queue_status = &lio_queue_status;
fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+ fabric->tf_ops.aborted_task = &lio_aborted_task;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
*/
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 48f7b3b..6960f22 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -58,7 +58,8 @@
#define TA_DEMO_MODE_DISCOVERY 1
#define TA_DEFAULT_ERL 0
#define TA_CACHE_CORE_NPS 0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI 0
#define ISCSI_IOV_DATA_BUFFER 5
@@ -556,7 +557,7 @@ struct iscsi_conn {
struct completion rx_half_close_comp;
/* socket used by this connection */
struct socket *sock;
- void (*orig_data_ready)(struct sock *, int);
+ void (*orig_data_ready)(struct sock *);
void (*orig_state_change)(struct sock *);
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
@@ -765,6 +766,7 @@ struct iscsi_tpg_attrib {
u32 prod_mode_write_protect;
u32 demo_mode_discovery;
u32 default_erl;
+ u8 t10_pi;
struct iscsi_portal_group *tpg;
};
@@ -787,6 +789,7 @@ struct iscsi_np {
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
+ struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e29279e..8739b98 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1(
{
struct iscsi_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+ enum target_prot_op sup_pro_ops;
int ret;
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
+ sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session(sup_pro_ops);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
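transport_init_session() now records, per session, which T10-PI operations the fabric can offload; iSCSI/TCP reports TARGET_PROT_NORMAL through iscsit_get_sup_prot_ops(), so the core falls back to software PI for it. A transport with hardware DIF support would instead advertise its capabilities, roughly like this hypothetical callback:

/* hypothetical fabric callback: claim full hardware PI offload */
static enum target_prot_op my_sup_prot_ops(struct iscsi_conn *conn)
{
        return TARGET_PROT_ALL;
}

The mask ends up in se_sess->sup_prot_ops and is consulted later (READ CAPACITY(16), INQUIRY, the control mode page, and the software insert/strip fallbacks).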
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 582ba84..75b6859 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -375,7 +375,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
return 0;
}
-static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+static void iscsi_target_sk_data_ready(struct sock *sk)
{
struct iscsi_conn *conn = sk->sk_user_data;
bool rc;
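This is another instance of the sk_data_ready signature change: the callback now receives only the struct sock pointer, the unused byte count having been dropped tree-wide. Code that hooks the callback (as both socklnd and the iSCSI login path do) saves the original under sk_callback_lock and chains to it with the new one-argument form; a reduced sketch with placeholder names:

#include <net/sock.h>

static void (*saved_data_ready)(struct sock *sk);

static void my_data_ready(struct sock *sk)
{
        /* driver-specific wakeup, then fall through to the saved callback */
        if (saved_data_ready)
                saved_data_ready(sk);
}

static void hook_data_ready(struct sock *sk)
{
        write_lock_bh(&sk->sk_callback_lock);
        saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = my_data_ready;
        write_unlock_bh(&sk->sk_callback_lock);
}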
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 44a5471..eb96b20 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -225,6 +225,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
a->default_erl = TA_DEFAULT_ERL;
+ a->t10_pi = TA_DEFAULT_T10_PI;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
+ np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@ int iscsit_ta_default_erl(
return 0;
}
+
+int iscsit_ta_t10_pi(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->t10_pi = flag;
+ pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+ " %s\n", tpg->tpgt, (a->t10_pi) ?
+ "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 213c0fc..0a182f2 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index e655b04..53e157c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
}
EXPORT_SYMBOL(iscsit_release_cmd);
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
- bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+ bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 561a424..a68508c 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fadad7c..c886ad1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -212,6 +212,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
se_cmd->se_cmd_flags |= SCF_BIDI;
}
+
+ if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+ se_cmd->prot_pto = true;
+
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
wake_up(&tl_tmr->tl_tmr_wait);
}
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@ static int tcm_loop_make_nexus(
/*
* Initialize the struct se_session pointer
*/
- tl_nexus->se_sess = transport_init_session();
+ tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
goto out;
@@ -1483,6 +1492,7 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+ fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 24884ca..e7e9372 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(
return ERR_PTR(-ENOMEM);
}
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
pr_err("failed to init se_session\n");
@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {
.queue_data_in = sbp_queue_data_in,
.queue_status = sbp_queue_status,
.queue_tm_rsp = sbp_queue_tm_rsp,
+ .aborted_task = sbp_aborted_task,
.check_stop_free = sbp_check_stop_free,
.fabric_make_wwn = sbp_make_tport,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c3d9df6..fcbe612 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -455,11 +455,26 @@ out:
return rc;
}
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state.
+ */
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ cmd->scsi_asc = 0x04;
+ cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
- int nonop_delay_msecs,
- u8 *alua_ascq)
+ int nonop_delay_msecs)
{
/*
* Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@ static inline int core_alua_state_nonoptimized(
*/
cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
cmd->alua_nonop_delay = nonop_delay_msecs;
- return 0;
}
static inline int core_alua_state_lba_dependent(
struct se_cmd *cmd,
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- u8 *alua_ascq)
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_device *dev = cmd->se_dev;
u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@ static inline int core_alua_state_lba_dependent(
}
if (!cur_map) {
spin_unlock(&dev->t10_alua.lba_map_lock);
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@ static inline int core_alua_state_lba_dependent(
switch(map_mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_STANDBY:
spin_unlock(&dev->t10_alua.lba_map_lock);
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
case ALUA_ACCESS_STATE_UNAVAILABLE:
spin_unlock(&dev->t10_alua.lba_map_lock);
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
default:
break;
@@ -548,8 +561,7 @@ static inline int core_alua_state_lba_dependent(
static inline int core_alua_state_standby(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@ static inline int core_alua_state_standby(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@ static inline int core_alua_state_standby(
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
case REQUEST_SENSE:
@@ -588,7 +600,7 @@ static inline int core_alua_state_standby(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
@@ -597,8 +609,7 @@ static inline int core_alua_state_standby(
static inline int core_alua_state_unavailable(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@ static inline int core_alua_state_unavailable(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@ static inline int core_alua_state_unavailable(
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
case REQUEST_SENSE:
@@ -629,7 +640,7 @@ static inline int core_alua_state_unavailable(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
@@ -638,8 +649,7 @@ static inline int core_alua_state_unavailable(
static inline int core_alua_state_transition(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@ static inline int core_alua_state_transition(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
return 1;
}
case REQUEST_SENSE:
@@ -662,7 +672,7 @@ static inline int core_alua_state_transition(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
return 1;
}
@@ -684,8 +694,6 @@ target_alua_state_check(struct se_cmd *cmd)
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
- u8 alua_ascq;
- int ret;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
@@ -701,9 +709,8 @@ target_alua_state_check(struct se_cmd *cmd)
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- ret = 1;
- goto out;
+ set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+ return TCM_CHECK_CONDITION_NOT_READY;
}
/*
* Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@ target_alua_state_check(struct se_cmd *cmd)
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
- ret = core_alua_state_nonoptimized(cmd, cdb,
- nonop_delay_msecs, &alua_ascq);
+ core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
break;
case ALUA_ACCESS_STATE_STANDBY:
- ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+ if (core_alua_state_standby(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+ if (core_alua_state_unavailable(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
break;
case ALUA_ACCESS_STATE_TRANSITION:
- ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+ if (core_alua_state_transition(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
- ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+ if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+ return TCM_CHECK_CONDITION_NOT_READY;
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@ target_alua_state_check(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
-out:
- if (ret > 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-
- cmd->scsi_asc = 0x04;
- cmd->scsi_ascq = alua_ascq;
- return TCM_CHECK_CONDITION_NOT_READY;
- }
-
return 0;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f0e85b1..60a9ae6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
+ if (!tfo->aborted_task) {
+ pr_err("Missing tfo->aborted_task()\n");
+ return -EINVAL;
+ }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cf991a9..7d6cdda 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -854,25 +854,6 @@ static int fd_init_prot(struct se_device *dev)
return 0;
}
-static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
- u32 unit_size, u32 *ref_tag, u16 app_tag,
- bool inc_reftag)
-{
- unsigned char *p = buf;
- int i;
-
- for (i = 0; i < unit_size; i += dev->prot_length) {
- *((u16 *)&p[0]) = 0xffff;
- *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
- *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
-
- if (inc_reftag)
- (*ref_tag)++;
-
- p += dev->prot_length;
- }
-}
-
static int fd_format_prot(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@ static int fd_format_prot(struct se_device *dev)
sector_t prot_length, prot;
unsigned char *buf;
loff_t pos = 0;
- u32 ref_tag = 0;
int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
int rc, ret = 0, size, len;
- bool inc_reftag = false;
if (!dev->dev_attrib.pi_prot_type) {
pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@ static int fd_format_prot(struct se_device *dev)
return -ENODEV;
}
- switch (dev->dev_attrib.pi_prot_type) {
- case TARGET_DIF_TYPE3_PROT:
- ref_tag = 0xffffffff;
- break;
- case TARGET_DIF_TYPE2_PROT:
- case TARGET_DIF_TYPE1_PROT:
- inc_reftag = true;
- break;
- default:
- break;
- }
-
buf = vzalloc(unit_size);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
-
prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
size = prot_length;
pr_debug("Using FILEIO prot_length: %llu\n",
(unsigned long long)prot_length);
+ memset(buf, 0xff, unit_size);
for (prot = 0; prot < prot_length; prot += unit_size) {
-
- fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
- inc_reftag);
-
len = min(unit_size, size);
-
rc = kernel_write(prot_fd, buf, len, pos);
if (rc != len) {
pr_err("vfs_write to prot file failed: %d\n", rc);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 554d4f7..9e0232cc 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- if (ib_dev->ibd_bio_set != NULL) {
- bioset_integrity_free(ib_dev->ibd_bio_set);
+ if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
- }
+
kfree(ib_dev);
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 66a5aba..b920db3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
rd_dev->sg_prot_count = 0;
}
-static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
struct rd_dev_sg_table *sg_table;
u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
-
- total_sg_needed = rd_dev->rd_page_count / prot_length;
+ /*
+ * prot_length = 8 bytes of DIF data per logical block
+ * tot sg needed = rd_page_count * (PGSZ/block_size) *
+ * (prot_length/PGSZ) + pad
+ * The PGSZ terms cancel, leaving rd_page_count * prot_length / block_size.
+ */
+ total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
@@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
if (!dev->dev_attrib.pi_prot_type)
return 0;
- return rd_build_prot_space(rd_dev, dev->prot_length);
+ return rd_build_prot_space(rd_dev, dev->prot_length,
+ dev->dev_attrib.block_size);
}
static void rd_free_prot(struct se_device *dev)
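The new sizing can be checked with concrete numbers (values assumed for illustration, not taken from the driver): with 4 KiB pages, 512-byte blocks and 8 bytes of DIF per block, each data page carries 8 blocks and therefore needs 64 bytes of protection data, i.e. one protection page per 64 data pages.

        u32 rd_page_count = 4096;        /* data pages backing the ramdisk */
        u32 block_size    = 512;         /* logical block size */
        u32 prot_length   = 8;           /* DIF bytes per logical block */

        /* blocks = pages * PAGE_SIZE / block_size; PI bytes = blocks * prot_length;
         * PI pages = PI bytes / PAGE_SIZE -- the PAGE_SIZE terms cancel.
         */
        u32 prot_sg_needed = rd_page_count * prot_length / block_size + 1;
        /* = 4096 * 8 / 512 + 1 = 65 entries */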
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 77e6531..e022959 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -89,6 +89,7 @@ static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
unsigned char *rbuf;
unsigned char buf[32];
unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
/*
* Set P_TYPE and PROT_EN bits for DIF support
*/
- if (dev->dev_attrib.pi_prot_type)
- buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+ }
if (dev->transport->get_lbppbe)
buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -425,13 +428,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
goto out;
}
- write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
GFP_KERNEL);
if (!write_sg) {
pr_err("Unable to allocate compare_and_write sg\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
+ sg_init_table(write_sg, cmd->t_data_nents);
/*
* Setup verify and write data payloads from total NumberLBAs.
*/
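The kzalloc-to-kmalloc switch above is only safe because sg_init_table() now follows the allocation: a scatterlist table needs its end marker set before it is walked or DMA-mapped, and zeroed memory alone does not provide that marker. The required pairing, reduced to a sketch:

#include <linux/scatterlist.h>
#include <linux/slab.h>

        struct scatterlist *sgl;
        unsigned int nents = 4;                  /* example count */

        sgl = kmalloc(nents * sizeof(*sgl), GFP_KERNEL);
        if (!sgl)
                return -ENOMEM;
        sg_init_table(sgl, nents);               /* zeroes entries, marks the last */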
@@ -569,30 +573,85 @@ sbc_compare_and_write(struct se_cmd *cmd)
return TCM_NO_SENSE;
}
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+ bool is_write, struct se_cmd *cmd)
+{
+ if (is_write) {
+ cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+ TARGET_PROT_DOUT_INSERT;
+ switch (protect) {
+ case 0x0:
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ } else {
+ cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+ TARGET_PROT_DIN_STRIP;
+ switch (protect) {
+ case 0x0:
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
- u32 sectors)
+ u32 sectors, bool is_write)
{
- if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+ u8 protect = cdb[1] >> 5;
+
+ if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
return true;
switch (dev->dev_attrib.pi_prot_type) {
case TARGET_DIF_TYPE3_PROT:
- if (!(cdb[1] & 0xe0))
- return true;
-
cmd->reftag_seed = 0xffffffff;
break;
case TARGET_DIF_TYPE2_PROT:
- if (cdb[1] & 0xe0)
+ if (protect)
return false;
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE1_PROT:
- if (!(cdb[1] & 0xe0))
- return true;
-
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
return true;
}
+ if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+ is_write, cmd))
+ return false;
+
cmd->prot_type = dev->dev_attrib.pi_prot_type;
cmd->prot_length = dev->prot_length * sectors;
- cmd->prot_handover = PROT_SEPERATED;
+ pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
+ __func__, cmd->prot_type, cmd->prot_length,
+ cmd->prot_op, cmd->prot_checks);
return true;
}
@@ -628,7 +693,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
- if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@ err:
}
EXPORT_SYMBOL(sbc_execute_unmap);
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ sector_t sector = cmd->t_task_lba;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+ sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+ dev->dev_attrib.block_size));
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+ sdt->app_tag = 0;
+
+ pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ sector++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+}
+
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
const void *p, sector_t sector, unsigned int ei_lba)
@@ -1162,9 +1271,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
}
EXPORT_SYMBOL(sbc_dif_verify_write);
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
- unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
kunmap_atomic(paddr);
kunmap_atomic(daddr);
}
- sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
return 0;
}
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 sectors = cmd->prot_length / dev->prot_length;
+
+ return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+ cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ sense_reason_t rc;
+
+ rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+ if (rc)
+ return rc;
+
+ sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+ return 0;
+}
EXPORT_SYMBOL(sbc_dif_verify_read);
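sbc_dif_generate() above emits one 8-byte DIF tuple per logical block: a CRC16-T10DIF guard over the block data, a zero application tag, and, for Type 1 protection, the low 32 bits of the LBA as reference tag. The per-block step, reduced to a standalone sketch (the struct merely mirrors se_dif_v1_tuple):

#include <linux/crc-t10dif.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct dif_tuple {
        __be16 guard_tag;
        __be16 app_tag;
        __be32 ref_tag;
};

static void fill_dif_tuple(struct dif_tuple *t, const void *block,
                           unsigned int block_size, sector_t lba, bool type1)
{
        t->guard_tag = cpu_to_be16(crc_t10dif(block, block_size));
        t->app_tag   = 0;
        t->ref_tag   = type1 ? cpu_to_be32(lba & 0xffffffff) : 0;
}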
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 3bebc71..8653666 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
/* Set RMB (removable media) for tape devices */
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
if (dev->dev_attrib.emulate_3pc)
buf[5] |= 0x8;
/*
- * Set Protection (PROTECT) bit when DIF has been enabled.
+ * Set Protection (PROTECT) bit when DIF has been enabled on the
+ * device, and the transport supports VERIFY + PASS.
*/
- if (dev->dev_attrib.pi_prot_type)
- buf[5] |= 0x1;
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ buf[5] |= 0x1;
+ }
buf[7] = 0x2; /* CmdQue=1 */
@@ -473,16 +477,19 @@ static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
buf[3] = 0x3c;
/*
* Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
* only for TYPE3 protection.
*/
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
- buf[4] = 0x5;
- else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
- buf[4] = 0x4;
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ buf[4] = 0x5;
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ buf[4] = 0x4;
+ }
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -762,7 +769,7 @@ out:
return ret;
}
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
p[0] = 0x01;
p[1] = 0x0a;
@@ -775,8 +782,11 @@ out:
return 12;
}
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
p[0] = 0x0a;
p[1] = 0x0a;
@@ -868,8 +878,10 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
* type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
* TAG field.
*/
- if (dev->dev_attrib.pi_prot_type)
- p[5] |= 0x80;
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ p[5] |= 0x80;
+ }
p[8] = 0xff;
p[9] = 0xff;
@@ -879,8 +891,10 @@ out:
return 12;
}
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
+ struct se_device *dev = cmd->se_dev;
+
p[0] = 0x08;
p[1] = 0x12;
@@ -896,7 +910,7 @@ out:
return 20;
}
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
p[0] = 0x1c;
p[1] = 0x0a;
@@ -912,7 +926,7 @@ out:
static struct {
uint8_t page;
uint8_t subpage;
- int (*emulate)(struct se_device *, u8, unsigned char *);
+ int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
* the only two possibilities).
*/
if ((modesense_handlers[i].subpage & ~subpage) == 0) {
- ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
if (!ten && length + ret >= 255)
break;
length += ret;
@@ -1063,7 +1077,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
- length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
goto set_length;
}
@@ -1095,7 +1109,6 @@ set_length:
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
bool ten = cdb[0] == MODE_SELECT_10;
int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
memset(tbuf, 0, SE_MODE_PAGE_BUF);
- length = modesense_handlers[i].emulate(dev, 0, tbuf);
+ length = modesense_handlers[i].emulate(cmd, 0, tbuf);
goto check_contents;
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 70c638f..f7cd95e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
struct se_cmd *cmd,
int tas)
{
+ bool remove = true;
/*
* TASK ABORTED status (TAS) bit support
*/
if ((tmr_nacl &&
- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+ (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+ remove = false;
transport_send_task_abort(cmd);
+ }
- transport_cmd_finish_abort(cmd, 0);
+ transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -127,6 +130,11 @@ void core_tmr_abort_task(
if (dev != se_cmd->se_dev)
continue;
+
+ /* skip se_cmd associated with tmr */
+ if (tmr->task_cmd == se_cmd)
+ continue;
+
ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
if (tmr->ref_task_tag != ref_tag)
continue;
@@ -150,18 +158,9 @@ void core_tmr_abort_task(
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- /*
- * Now send SAM_STAT_TASK_ABORTED status for the referenced
- * se_cmd descriptor..
- */
- transport_send_task_abort(se_cmd);
- /*
- * Also deal with possible extra acknowledge reference..
- */
- if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
- target_put_sess_cmd(se_sess, se_cmd);
target_put_sess_cmd(se_sess, se_cmd);
+ transport_cmd_finish_abort(se_cmd, true);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %d\n", ref_tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2956250..d4b9869 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -235,7 +235,7 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -251,6 +251,7 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
+ se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
@@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
EXPORT_SYMBOL(transport_alloc_session_tags);
struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size)
+ unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
- se_sess = transport_init_session();
+ se_sess = transport_init_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -603,6 +605,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_lun_remove_cmd(cmd);
+ /*
+ * Allow the fabric driver to unmap any resources before
+ * releasing the descriptor via TFO->release_cmd()
+ */
+ if (remove)
+ cmd->se_tfo->aborted_task(cmd);
+
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
@@ -1365,6 +1376,13 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
+
+ rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+ if (rc != 0) {
+ transport_generic_request_failure(se_cmd, rc);
+ return 0;
+ }
+
/*
* Save pointers for SGLs containing protection information,
* if present.
@@ -1374,11 +1392,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
se_cmd->t_prot_nents = sgl_prot_count;
}
- rc = target_setup_cmd_from_cdb(se_cmd, cdb);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
/*
* When a non zero sgl_count has been passed perform SGL passthrough
* mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@ void target_execute_cmd(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
+ /*
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
+ */
+ if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
+ }
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
@@ -1883,6 +1905,21 @@ static void transport_handle_queue_full(
schedule_work(&cmd->se_dev->qf_work_queue);
}
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+ sense_reason_t rc;
+
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ rc = sbc_dif_read_strip(cmd);
+ if (rc) {
+ cmd->pi_err = rc;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void target_complete_ok_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@ static void target_complete_ok_work(struct work_struct *work)
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * Perform READ_STRIP of PI using software emulation when
+ * backend had PI enabled, if the transport will not be
+ * performing hardware READ_STRIP offload.
+ */
+ if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+ target_check_read_strip(cmd)) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->pi_err, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
+
+ transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
}
/**
@@ -2202,6 +2259,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+ if (cmd->prot_op != TARGET_PROT_NORMAL) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg,
+ &cmd->t_prot_nents,
+ cmd->prot_length, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, zero_flag);
if (ret < 0)
@@ -2770,13 +2835,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ /*
+ * If cmd has been aborted but either no status is to be sent or it has
+ * already been sent, just return
+ */
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
return 1;
pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
@@ -2805,6 +2874,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
smp_mb__after_atomic_inc();
return;
}
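
The sup_prot_ops field added above is what lets the core choose between software T10-PI emulation (sbc_dif_generate()/sbc_dif_read_strip()) and hardware offload. A minimal sketch, assuming a hypothetical fabric whose hardware can perform WRITE_INSERT and READ_STRIP; the function and structure names below are illustrative only and are not part of this series:

/* Hypothetical fabric nexus setup, showing only the new session API. */
static int example_fabric_make_nexus(struct example_tpg *tpg)
{
	/* sup_prot_ops is a bitmask; the core tests it with '&' above. */
	enum target_prot_op sup_prot_ops =
		TARGET_PROT_DOUT_INSERT | TARGET_PROT_DIN_STRIP;
	struct se_session *se_sess;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	/*
	 * With these bits advertised, target_execute_cmd() and
	 * target_complete_ok_work() skip the sbc_dif_* software paths
	 * and leave PI generation/stripping to the fabric hardware.
	 */
	tpg->se_sess = se_sess;
	return 0;
}

Fabrics without any offload simply pass TARGET_PROT_NORMAL, as the tcm_fc, tcm_usb_gadget and vhost-scsi conversions below do.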
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863a..a0bcfd3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@ struct ft_lun {
*/
struct ft_tpg {
u32 index;
- struct ft_lport_acl *lport_acl;
+ struct ft_lport_wwn *lport_wwn;
struct ft_tport *tport; /* active tport or NULL */
- struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
struct workqueue_struct *workqueue;
};
-struct ft_lport_acl {
+struct ft_lport_wwn {
u64 wwpn;
char name[FT_NAMELEN];
- struct list_head list;
- struct list_head tpg_list;
- struct se_wwn fc_lport_wwn;
+ struct list_head ft_wwn_node;
+ struct ft_tpg *tpg;
+ struct se_wwn se_wwn;
};
/*
@@ -128,7 +127,6 @@ struct ft_cmd {
u32 sg_cnt; /* No. of item in scatterlist */
};
-extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
/*
* other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 8b2c1aa..01cf37f 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
ft_send_resp_code(cmd, code);
}
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
static void ft_send_work(struct work_struct *work);
/*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e879da8..efdcb96 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
struct target_fabric_configfs *ft_configfs;
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
struct config_group *group,
const char *name)
{
- struct ft_lport_acl *lacl;
+ struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
struct workqueue_struct *wq;
unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
if (index > UINT_MAX)
return NULL;
- lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+ if ((index != 1)) {
+ pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return NULL;
tpg->index = index;
- tpg->lport_acl = lacl;
+ tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
tpg->workqueue = wq;
mutex_lock(&ft_lport_lock);
- list_add_tail(&tpg->list, &lacl->tpg_list);
+ ft_wwn->tpg = tpg;
mutex_unlock(&ft_lport_lock);
return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+ struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
synchronize_rcu();
mutex_lock(&ft_lport_lock);
- list_del(&tpg->list);
+ ft_wwn->tpg = NULL;
if (tpg->tport) {
tpg->tport->tpg = NULL;
tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
- struct ft_lport_acl *lacl;
- struct ft_tpg *tpg;
+ struct ft_lport_wwn *ft_wwn;
- list_for_each_entry(lacl, &ft_lport_list, list) {
- if (lacl->wwpn == lport->wwpn) {
- list_for_each_entry(tpg, &lacl->tpg_list, list)
- return tpg; /* XXX for now return first entry */
- return NULL;
- }
+ list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (ft_wwn->wwpn == lport->wwpn)
+ return ft_wwn->tpg;
}
return NULL;
}
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
* Add lport to allowed config.
* The name is the WWPN in lower-case ASCII, colon-separated bytes.
*/
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- struct ft_lport_acl *lacl;
- struct ft_lport_acl *old_lacl;
+ struct ft_lport_wwn *ft_wwn;
+ struct ft_lport_wwn *old_ft_wwn;
u64 wwpn;
- pr_debug("add lport %s\n", name);
+ pr_debug("add wwn %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
- lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
- if (!lacl)
+ ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+ if (!ft_wwn)
return NULL;
- lacl->wwpn = wwpn;
- INIT_LIST_HEAD(&lacl->tpg_list);
+ ft_wwn->wwpn = wwpn;
mutex_lock(&ft_lport_lock);
- list_for_each_entry(old_lacl, &ft_lport_list, list) {
- if (old_lacl->wwpn == wwpn) {
+ list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (old_ft_wwn->wwpn == wwpn) {
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
return NULL;
}
}
- list_add_tail(&lacl->list, &ft_lport_list);
- ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+ list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+ ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
mutex_unlock(&ft_lport_lock);
- return &lacl->fc_lport_wwn;
+ return &ft_wwn->se_wwn;
}
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
{
- struct ft_lport_acl *lacl = container_of(wwn,
- struct ft_lport_acl, fc_lport_wwn);
+ struct ft_lport_wwn *ft_wwn = container_of(wwn,
+ struct ft_lport_wwn, se_wwn);
- pr_debug("del lport %s\n", lacl->name);
+ pr_debug("del wwn %s\n", ft_wwn->name);
mutex_lock(&ft_lport_lock);
- list_del(&lacl->list);
+ list_del(&ft_wwn->ft_wwn_node);
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
}
static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
- return tpg->lport_acl->name;
+ return tpg->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
+ .aborted_task = ft_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
- .fabric_make_wwn = &ft_add_lport,
- .fabric_drop_wwn = &ft_del_lport,
+ .fabric_make_wwn = &ft_add_wwn,
+ .fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
.fabric_post_link = NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08..21ce508 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
* Lookup or allocate target local port.
* Caller holds ft_lport_lock.
*/
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
struct ft_tpg *tpg;
struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
if (tport) {
tport->tpg = tpg;
+ tpg->tport = tport;
return tport;
}
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
- ft_tport_create(lport);
+ ft_tport_get(lport);
mutex_unlock(&ft_lport_lock);
}
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
return NULL;
sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
- sizeof(struct ft_cmd));
+ sizeof(struct ft_cmd),
+ TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
struct ft_node_acl *acl;
u32 fcp_parm;
- tport = ft_tport_create(rdata->local_port);
+ tport = ft_tport_get(rdata->local_port);
if (!tport)
goto not_target; /* not a target for this local port */
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index b0e5401..90ca082 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -65,6 +65,7 @@ static void tty_audit_log(const char *description, int major, int minor,
{
struct audit_buffer *ab;
struct task_struct *tsk = current;
+ pid_t pid = task_pid_nr(tsk);
uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
unsigned int sessionid = audit_get_sessionid(tsk);
@@ -74,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
char name[sizeof(tsk->comm)];
audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
- " minor=%d comm=", description, tsk->pid, uid,
+ " minor=%d comm=", description, pid, uid,
loginuid, sessionid, major, minor);
get_task_comm(name, tsk);
audit_log_untrustedstring(ab, name);
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 460c266..f058c03 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
+static void usbg_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
static const char *usbg_check_wwn(const char *name)
{
const char *n;
@@ -1726,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
pr_err("Unable to allocate struct tcm_vhost_nexus\n");
goto err_unlock;
}
- tv_nexus->tvn_se_sess = transport_init_session();
+ tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(tv_nexus->tvn_se_sess))
goto err_free;
@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = {
.queue_data_in = usbg_send_read_response,
.queue_status = usbg_send_status_response,
.queue_tm_rsp = usbg_queue_tm_rsp,
+ .aborted_task = usbg_aborted_task,
.check_stop_free = usbg_check_stop_free,
.fabric_make_wwn = usbg_make_tport,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e1e22e0..be414d2 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -818,9 +818,9 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_dev_cleanup(&n->dev, false);
vhost_net_vq_reset(n);
if (tx_sock)
- fput(tx_sock->file);
+ sockfd_put(tx_sock);
if (rx_sock)
- fput(rx_sock->file);
+ sockfd_put(rx_sock);
/* Make sure no callbacks are outstanding */
synchronize_rcu_bh();
/* We do an extra flush before freeing memory,
@@ -860,7 +860,7 @@ static struct socket *get_raw_socket(int fd)
}
return sock;
err:
- fput(sock->file);
+ sockfd_put(sock);
return ERR_PTR(r);
}
@@ -966,7 +966,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
if (oldsock) {
vhost_net_flush_vq(n, index);
- fput(oldsock->file);
+ sockfd_put(oldsock);
}
mutex_unlock(&n->dev.mutex);
@@ -978,7 +978,7 @@ err_used:
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
- fput(sock->file);
+ sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
@@ -1009,9 +1009,9 @@ static long vhost_net_reset_owner(struct vhost_net *n)
done:
mutex_unlock(&n->dev.mutex);
if (tx_sock)
- fput(tx_sock->file);
+ sockfd_put(tx_sock);
if (rx_sock)
- fput(rx_sock->file);
+ sockfd_put(rx_sock);
return err;
}
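
The fput() to sockfd_put() conversions above do not change behaviour; they use the dedicated helper for sockets obtained through sockfd_lookup(), which at the time was essentially the following wrapper in include/linux/net.h:

static inline void sockfd_put(struct socket *sock)
{
	fput(sock->file);
}

Using the helper keeps the acquire/release pairing (sockfd_lookup()/sockfd_put()) explicit at the call sites.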
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e48d4a6..cf50ce9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
return;
}
+static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
*/
tv_nexus->tvn_se_sess = transport_init_session_tags(
TCM_VHOST_DEFAULT_TAGS,
- sizeof(struct tcm_vhost_cmd));
+ sizeof(struct tcm_vhost_cmd),
+ TARGET_PROT_NORMAL);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
@@ -2131,6 +2137,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
.queue_data_in = tcm_vhost_queue_data_in,
.queue_status = tcm_vhost_queue_status,
.queue_tm_rsp = tcm_vhost_queue_tm_rsp,
+ .aborted_task = tcm_vhost_aborted_task,
/*
* Setup callers for generic logic in target_core_fabric_configfs.c
*/
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6c793bc..3ad7ebe 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -359,6 +359,8 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && ARCH_MXC
+ select BACKLIGHT_LCD_SUPPORT
+ select LCD_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 42b8f9d..e2c42ad 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -49,13 +49,13 @@
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <asm/blackfin.h>
#include <asm/irq.h>
#include <asm/dpmc.h>
#include <asm/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/gpio.h>
#include <asm/portmux.h>
#include <mach/bf54x-lq043.h>
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 0c0ba92..6b23508 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -663,15 +663,7 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
- switch (info->var.bits_per_pixel) {
- case 16:
- ((u16 *) (info->pseudo_palette))[regno] = v;
- break;
- case 24:
- case 32:
- ((u32 *) (info->pseudo_palette))[regno] = v;
- break;
- }
+ ((u32 *) (info->pseudo_palette))[regno] = v;
if (palette[0] != 0x4000) {
update_hw = 1;
palette[0] = 0x4000;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 2bbdb7f..f18397c 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -101,6 +101,8 @@ static struct {
void __iomem *base;
int irq;
+ irq_handler_t user_handler;
+ void *user_data;
unsigned long core_clk_rate;
unsigned long tv_pclk_rate;
@@ -113,6 +115,8 @@ static struct {
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
const struct dispc_features *feat;
+
+ bool is_enabled;
} dispc;
enum omap_color_component {
@@ -141,12 +145,18 @@ enum mgr_reg_fields {
DISPC_MGR_FLD_NUM,
};
+struct dispc_reg_field {
+ u16 reg;
+ u8 high;
+ u8 low;
+};
+
static const struct {
const char *name;
u32 vsync_irq;
u32 framedone_irq;
u32 sync_lost_irq;
- struct reg_field reg_desc[DISPC_MGR_FLD_NUM];
+ struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
} mgr_desc[] = {
[OMAP_DSS_CHANNEL_LCD] = {
.name = "LCD",
@@ -238,13 +248,13 @@ static inline u32 dispc_read_reg(const u16 idx)
static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
{
- const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
return REG_GET(rfld.reg, rfld.high, rfld.low);
}
static void mgr_fld_write(enum omap_channel channel,
enum mgr_reg_fields regfld, int val) {
- const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
}
@@ -3669,16 +3679,44 @@ static int __init dispc_init_features(struct platform_device *pdev)
return 0;
}
+static irqreturn_t dispc_irq_handler(int irq, void *arg)
+{
+ if (!dispc.is_enabled)
+ return IRQ_NONE;
+
+ return dispc.user_handler(irq, dispc.user_data);
+}
+
int dispc_request_irq(irq_handler_t handler, void *dev_id)
{
- return devm_request_irq(&dispc.pdev->dev, dispc.irq, handler,
- IRQF_SHARED, "OMAP DISPC", dev_id);
+ int r;
+
+ if (dispc.user_handler != NULL)
+ return -EBUSY;
+
+ dispc.user_handler = handler;
+ dispc.user_data = dev_id;
+
+ /* ensure the dispc_irq_handler sees the values above */
+ smp_wmb();
+
+ r = devm_request_irq(&dispc.pdev->dev, dispc.irq, dispc_irq_handler,
+ IRQF_SHARED, "OMAP DISPC", &dispc);
+ if (r) {
+ dispc.user_handler = NULL;
+ dispc.user_data = NULL;
+ }
+
+ return r;
}
EXPORT_SYMBOL(dispc_request_irq);
void dispc_free_irq(void *dev_id)
{
- devm_free_irq(&dispc.pdev->dev, dispc.irq, dev_id);
+ devm_free_irq(&dispc.pdev->dev, dispc.irq, &dispc);
+
+ dispc.user_handler = NULL;
+ dispc.user_data = NULL;
}
EXPORT_SYMBOL(dispc_free_irq);
@@ -3750,6 +3788,12 @@ static int __exit omap_dispchw_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
+ dispc.is_enabled = false;
+ /* ensure the dispc_irq_handler sees the is_enabled value */
+ smp_wmb();
+ /* wait for current handler to finish before turning the DISPC off */
+ synchronize_irq(dispc.irq);
+
dispc_save_context();
return 0;
@@ -3763,12 +3807,15 @@ static int dispc_runtime_resume(struct device *dev)
* _omap_dispc_initial_config(). We can thus use it to detect if
* we have lost register context.
*/
- if (REG_GET(DISPC_CONFIG, 2, 1) == OMAP_DSS_LOAD_FRAME_ONLY)
- return 0;
+ if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
+ _omap_dispc_initial_config();
- _omap_dispc_initial_config();
+ dispc_restore_context();
+ }
- dispc_restore_context();
+ dispc.is_enabled = true;
+ /* ensure the dispc_irq_handler sees the is_enabled value */
+ smp_wmb();
return 0;
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 121d104..8be9b04 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -297,6 +297,8 @@ struct dsi_data {
int irq;
+ bool is_enabled;
+
struct clk *dss_clk;
struct clk *sys_clk;
@@ -795,6 +797,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
dsidev = (struct platform_device *) arg;
dsi = dsi_get_dsidrv_data(dsidev);
+ if (!dsi->is_enabled)
+ return IRQ_NONE;
+
spin_lock(&dsi->irq_lock);
irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
@@ -5671,6 +5676,15 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
static int dsi_runtime_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+
+ dsi->is_enabled = false;
+ /* ensure the irq handler sees the is_enabled value */
+ smp_wmb();
+ /* wait for current handler to finish before turning the DSI off */
+ synchronize_irq(dsi->irq);
+
dispc_runtime_put();
return 0;
@@ -5678,12 +5692,18 @@ static int dsi_runtime_suspend(struct device *dev)
static int dsi_runtime_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
int r;
r = dispc_runtime_get();
if (r)
return r;
+ dsi->is_enabled = true;
+ /* ensure the irq handler sees the is_enabled value */
+ smp_wmb();
+
return 0;
}
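
The DISPC and DSI changes above apply the same quiescing pattern to a shared interrupt line: clear an is_enabled flag, publish it with smp_wmb(), then synchronize_irq() so a handler already running on another CPU finishes before the module is suspended and its registers become inaccessible. A condensed sketch of the pattern, with driver specifics stripped and names chosen only for illustration:

static int drv_runtime_suspend(struct device *dev)
{
	drv->is_enabled = false;
	smp_wmb();			/* publish the flag before waiting */
	synchronize_irq(drv->irq);	/* let any running handler finish */
	/* ... save context; clocks may now be gated ... */
	return 0;
}

static irqreturn_t drv_irq_handler(int irq, void *arg)
{
	if (!drv->is_enabled)		/* registers may be unclocked */
		return IRQ_NONE;
	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}

The matching runtime_resume path sets is_enabled back to true once the clocks are on, again followed by smp_wmb(), as both drivers do above.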
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 825c019..d55266c 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -457,7 +457,7 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
- fck = prate / fckd * m;
+ fck = DIV_ROUND_UP(prate, fckd) * m;
if (func(fck, data))
return true;
@@ -506,7 +506,7 @@ static int dss_setup_default_clock(void)
fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
max_dss_fck);
- fck = prate / fck_div * dss.feat->dss_fck_multiplier;
+ fck = DIV_ROUND_UP(prate, fck_div) * dss.feat->dss_fck_multiplier;
}
r = dss_set_fck_rate(fck);
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 918fec1..560078f 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -131,12 +131,6 @@ struct dsi_clock_info {
u16 lp_clk_div;
};
-struct reg_field {
- u16 reg;
- u8 high;
- u8 low;
-};
-
struct dss_lcd_mgr_config {
enum dss_io_pad_mode io_pad_mode;
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
index b11afac..0b12a3f 100644
--- a/drivers/video/omap2/dss/hdmi_common.c
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -347,17 +347,17 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
case 96000:
case 192000:
if (deep_color == 125)
- if (pclk == 27027 || pclk == 74250)
+ if (pclk == 27027000 || pclk == 74250000)
deep_color_correct = true;
if (deep_color == 150)
- if (pclk == 27027)
+ if (pclk == 27027000)
deep_color_correct = true;
break;
case 44100:
case 88200:
case 176400:
if (deep_color == 125)
- if (pclk == 27027)
+ if (pclk == 27027000)
deep_color_correct = true;
break;
default:
@@ -418,7 +418,7 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
}
}
/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+ *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10);
return 0;
}
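
The constants change above because hdmi_compute_acr() now receives pclk in Hz rather than kHz, and the CTS computation divides by 1000 to compensate. As a worked check, assuming the HDMI specification's recommended N = 6144 for 48 kHz audio and deep_color = 100 (no deep color): the spec relation 128 * fs = f_TMDS * N / CTS gives CTS = 74250000 * 6144 / (128 * 48000) = 74250, and the code computes (74250000 / 1000) * (6144 / 128) * 100 / (48000 / 10) = 74250 * 48 * 100 / 4800 = 74250, the same value.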