author     Bjorn Helgaas <bhelgaas@google.com>  2013-09-27 13:06:41 -0600
committer  Bjorn Helgaas <bhelgaas@google.com>  2013-09-27 13:06:41 -0600
commit     645e40f9c4c7b905843e4a8968e1136ee34e0206 (patch)
tree       d58d4756abebe6089eaa880892d5656379e2d144 /drivers/pci
parent     63495fff27dfa0ce89ceb8f6350976e822398c5c (diff)
parent     f62b878b4dfc71de24ea4bc085d042862cba88e4 (diff)
Merge branch 'pci/host-exynos' into next
* pci/host-exynos:
  PCI: exynos: Turn off power of phy block when link failed
  PCI: exynos: Add support for MSI
  MAINTAINERS: Add Jingoo Han as Samsung Exynos PCIe driver maintainer
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/host/pci-exynos.c      | 112
-rw-r--r--  drivers/pci/host/pcie-designware.c | 240
-rw-r--r--  drivers/pci/host/pcie-designware.h |  14
3 files changed, 366 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096b..ee692c2 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@ struct exynos_pcie {
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
+#define IRQ_MSI_ENABLE (0x1 << 2)
#define PCIE_IRQ_EN_SPECIAL 0x014
#define PCIE_PWR_RESET 0x018
#define PCIE_CORE_RESET 0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
#define PCIE_PHY_PLL_BIAS 0x00c
#define PCIE_PHY_DCC_FEEDBACK 0x014
#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_COMMON_POWER 0x064
+#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_POWER 0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV0_LVCC 0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_POWER 0x184
+#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV1_LVCC 0x19c
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_POWER 0x244
+#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV2_LVCC 0x25c
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_POWER 0x304
+#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val &= ~PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val &= ~PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val &= ~PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val &= ~PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val |= PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val |= PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val |= PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val |= PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val |= PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
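Both helpers above apply the same read-modify-write pattern: power-on clears the power-down bit, power-off sets it, first in the PHY common block and then in each of the four transceiver (TRSV) banks. A minimal user-space sketch of that pattern, with an in-memory array standing in for the memory-mapped PHY block (the register model and helper names are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define PCIE_PHY_COMMON_POWER   0x064
#define PCIE_PHY_COMMON_PD_CMN  (0x1 << 3)
#define PCIE_PHY_TRSV0_POWER    0x0c4
#define PCIE_PHY_TRSV0_PD_TSV   (0x1 << 7)

static uint32_t phy_regs[0x400 / 4];	/* stand-in for the PHY register block */

static uint32_t phy_readl(uint32_t off)             { return phy_regs[off / 4]; }
static void phy_writel(uint32_t val, uint32_t off)  { phy_regs[off / 4] = val; }

/* Power a block up (clear its power-down bit) or down (set it). */
static void phy_set_power(uint32_t off, uint32_t pd_bit, int power_on)
{
	uint32_t val = phy_readl(off);

	if (power_on)
		val &= ~pd_bit;
	else
		val |= pd_bit;
	phy_writel(val, off);
}

int main(void)
{
	phy_set_power(PCIE_PHY_COMMON_POWER, PCIE_PHY_COMMON_PD_CMN, 0);
	phy_set_power(PCIE_PHY_TRSV0_POWER, PCIE_PHY_TRSV0_PD_TSV, 0);
	printf("TRSV0 power reg: 0x%02x\n", phy_readl(PCIE_PHY_TRSV0_POWER));
	return 0;
}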
static void exynos_pcie_init_phy(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
/* de-assert phy reset */
exynos_pcie_deassert_phy_reset(pp);
+ /* power on phy */
+ exynos_pcie_power_on_phy(pp);
+
/* initialize phy */
exynos_pcie_init_phy(pp);
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
PCIE_PHY_PLL_LOCKED);
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
}
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
+
dev_err(pp->dev, "PCIe Link Fail\n");
return -EINVAL;
}
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ dw_handle_msi_irq(pp);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ dw_pcie_msi_init(pp);
+
+ /* enable MSI interrupt */
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+ val |= IRQ_MSI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+ return;
+}
+
static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
{
exynos_pcie_enable_irq_pulse(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ exynos_pcie_msi_init(pp);
+
return;
}
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
return ret;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (!pp->msi_irq) {
+ dev_err(&pdev->dev, "failed to get msi irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+ }
+ }
+
pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac..8963017 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -11,8 +11,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -142,6 +145,204 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
return ret;
}
+static struct irq_chip dw_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+ unsigned long val;
+ int i, pos;
+
+ for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+ (u32 *)&val);
+ if (val) {
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+ generic_handle_irq(pp->msi_irq_start
+ + (i * 32) + pos);
+ pos++;
+ }
+ }
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+ }
+}
+
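dw_handle_msi_irq() walks each controller's 32-bit interrupt status word, calls generic_handle_irq() once per set bit, and then writes the value back to the status register to acknowledge the handled vectors. A self-contained sketch of that dispatch loop (the status word and handler below are stand-ins, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

static void handle_vector(int hwirq)
{
	printf("dispatch MSI vector %d\n", hwirq);
}

/* Dispatch one handler call per set bit, lowest bit first, then
 * return the value to write back to acknowledge the handled bits.
 */
static uint32_t dispatch_msi_status(int ctrl, uint32_t status)
{
	uint32_t pending = status;

	while (pending) {
		int pos = __builtin_ctz(pending);	/* index of lowest set bit */

		handle_vector(ctrl * 32 + pos);
		pending &= pending - 1;			/* clear that bit */
	}
	return status;
}

int main(void)
{
	uint32_t ack = dispatch_msi_status(0, 0x00000009);	/* vectors 0 and 3 pending */

	printf("write back 0x%08x to INTR0_STATUS\n", ack);
	return 0;
}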
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ virt_to_phys((void *)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
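dw_pcie_msi_init() reserves one page and programs its physical address into the controller's MSI address registers; dw_msi_setup_irq() later puts that same address, plus the vector position returned by assign_irq(), into the message that the endpoint device will write. A rough user-space model of that address/data pairing (the structs below are illustrative, not kernel types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct msi_target {
	uint64_t addr;		/* address programmed into MSI_ADDR_LO/HI */
};

struct msi_message {
	uint64_t address;	/* copied from the target address */
	uint32_t data;		/* the vector position handed out by the allocator */
};

int main(void)
{
	struct msi_target target;
	struct msi_message msg;
	void *page = malloc(4096);	/* stands in for __get_free_pages(GFP_KERNEL, 0) */

	target.addr = (uintptr_t)page;	/* the driver uses virt_to_phys() here */

	msg.address = target.addr;
	msg.data = 5;			/* e.g. the fifth MSI vector */
	printf("endpoint writes 0x%x to 0x%llx to raise vector %u\n",
	       msg.data, (unsigned long long)msg.address, msg.data);

	free(page);
	return 0;
}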
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+ int flag = 1;
+
+ do {
+ pos = find_next_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos);
+ /* If we have reached the end of the bitmap, there is no free slot. */
+ if (pos == MAX_MSI_IRQS)
+ return -ENOSPC;
+ /*
+ * Check whether this position is at the correct offset: msgvec is
+ * always a power of two, so pos0 must be aligned to msgvec bits.
+ */
+ if (pos % msgvec)
+ pos += msgvec - (pos % msgvec);
+ else
+ flag = 0;
+ } while (flag);
+
+ *pos0 = pos;
+ return 0;
+}
+
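find_valid_pos0() searches the in-use bitmap for a starting position aligned to the requested vector count, which is always a power of two; assign_irq() then verifies that the whole run is free. A self-contained sketch of that aligned-run search over a 32-entry bitmap (the uint32_t bitmap and helper name are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_VECS 32

/* Find the first position aligned to nvec (a power of two) at which
 * nvec consecutive bits of 'in_use' are clear.  Returns -1 if none.
 */
static int find_aligned_free_run(uint32_t in_use, int nvec)
{
	for (int pos = 0; pos + nvec <= MAX_VECS; pos += nvec) {
		uint32_t mask = ((nvec == 32) ? 0xffffffffu : ((1u << nvec) - 1)) << pos;

		if ((in_use & mask) == 0)
			return pos;
	}
	return -1;
}

int main(void)
{
	uint32_t in_use = 0x0000000f;	/* vectors 0-3 already allocated */

	printf("4 vectors -> start at %d\n", find_aligned_free_run(in_use, 4));
	printf("8 vectors -> start at %d\n", find_aligned_free_run(in_use, 8));
	return 0;
}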
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+ int res, bit, irq, pos0, pos1, i;
+ u32 val;
+ struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS);
+ if (pos0 % no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+ goto no_valid_irq;
+ }
+ if (no_irqs > 1) {
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ /* there must be no_irqs consecutive free bits */
+ while ((pos1 - pos0) < no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+ goto no_valid_irq;
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ }
+ }
+
+ irq = (pp->msi_irq_start + pos0);
+
+ if ((irq + no_irqs) > (pp->msi_irq_start + MAX_MSI_IRQS-1))
+ goto no_valid_irq;
+
+ i = 0;
+ while (i < no_irqs) {
+ set_bit(pos0 + i, pp->msi_irq_in_use);
+ irq_alloc_descs((irq + i), (irq + i), 1, 0);
+ irq_set_msi_desc(irq + i, desc);
+ /* Enable the corresponding interrupt in the MSI interrupt controller */
+ res = ((pos0 + i) / 32) * 12;
+ bit = (pos0 + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ i++;
+ }
+
+ *pos = pos0;
+ return irq;
+
+no_valid_irq:
+ *pos = pos0;
+ return -ENOSPC;
+}
+
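assign_irq() enables each vector it hands out by setting one bit in a per-controller enable register; the enable registers sit at a 12-byte stride from PCIE_MSI_INTR0_ENABLE, so vector pos lands at offset (pos / 32) * 12, bit pos % 32. A small sketch of that index arithmetic (the base offset below is a placeholder; the real value is defined in the driver):

#include <stdio.h>

#define PCIE_MSI_INTR0_ENABLE 0x828	/* illustrative base offset */

/* Map an MSI vector index to its enable-register offset and bit position. */
static void msi_vector_to_reg(int pos, int *reg_off, int *bit)
{
	*reg_off = PCIE_MSI_INTR0_ENABLE + (pos / 32) * 12;
	*bit = pos % 32;
}

int main(void)
{
	for (int pos = 0; pos < 96; pos += 31) {
		int off, bit;

		msi_vector_to_reg(pos, &off, &bit);
		printf("vector %2d -> enable reg 0x%03x, bit %d\n", pos, off, bit);
	}
	return 0;
}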
+static void clear_irq(unsigned int irq)
+{
+ int res, bit, val, pos;
+ struct irq_desc *desc;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+
+ /* get the port structure */
+ desc = irq_to_desc(irq);
+ msi = irq_desc_get_msi_desc(desc);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ if (!pp) {
+ BUG();
+ return;
+ }
+
+ pos = irq - pp->msi_irq_start;
+
+ irq_free_desc(irq);
+
+ clear_bit(pos, pp->msi_irq_in_use);
+
+ /* Disable corresponding interrupt on MSI interrupt controller */
+ res = (pos / 32) * 12;
+ bit = pos % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ int irq, pos, msgvec;
+ u16 msg_ctr;
+ struct msi_msg msg;
+ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
+ &msg_ctr);
+ msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (msgvec == 0)
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+ if (msgvec > 5)
+ msgvec = 0;
+
+ irq = assign_irq((1 << msgvec), desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+ msg_ctr |= msgvec << 4;
+ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ msg_ctr);
+ desc->msi_attrib.multiple = msgvec;
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+ msg.data = pos;
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
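dw_msi_setup_irq() derives the vector count from the device's MSI Message Control word: the QSIZE field (bits 6:4) is the enabled message count, falling back to the QMASK capability field (bits 3:1) when nothing is enabled yet, and anything above 2^5 = 32 vectors drops back to a single vector. A standalone sketch of that decode, using the standard PCI field masks:

#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_FLAGS_QMASK 0x000e	/* maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070	/* message queue size configured */

/* Return the number of MSI vectors to allocate for a Message Control word. */
static int msi_nvec_from_control(uint16_t msg_ctr)
{
	int msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;

	if (msgvec == 0)			/* nothing enabled yet: use capability */
		msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
	if (msgvec > 5)				/* 2^5 = 32 is the most supported here */
		msgvec = 0;

	return 1 << msgvec;
}

int main(void)
{
	printf("ctrl=0x0006 -> %d vectors\n", msi_nvec_from_control(0x0006));	/* capable of 8 */
	printf("ctrl=0x0020 -> %d vectors\n", msi_nvec_from_control(0x0020));	/* 4 enabled */
	return 0;
}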
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
+};
+
int dw_pcie_link_up(struct pcie_port *pp)
{
if (pp->ops->link_up)
@@ -150,6 +351,20 @@ int dw_pcie_link_up(struct pcie_port *pp)
return 0;
}
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = dw_pcie_msi_map,
+};
+
int __init dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
@@ -157,6 +372,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
struct of_pci_range_parser parser;
u32 val;
+ struct irq_domain *irq_domain;
+
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pp->dev, "missing ranges property\n");
return -EINVAL;
@@ -223,6 +440,18 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
return -EINVAL;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ pp->msi_irq_start = irq_find_mapping(irq_domain, 0);
+ }
+
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -485,10 +714,21 @@ int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return pp->irq;
}
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+ dw_pcie_msi_chip.dev = pp->dev;
+ bus->msi = &dw_pcie_msi_chip;
+ }
+}
+
static struct hw_pci dw_pci = {
.setup = dw_pcie_setup,
.scan = dw_pcie_scan_bus,
.map_irq = dw_pcie_map_irq,
+ .add_bus = dw_pcie_add_bus,
};
void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 133820f..faccbbf 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -20,6 +20,14 @@ struct pcie_port_info {
phys_addr_t mem_bus_addr;
};
+/*
+ * Up to 256 MSI IRQs are possible per controller, but keep it at 32 for
+ * now; more than 32 are unlikely to be needed.  If they are, increase
+ * this in multiples of 32.
+ */
+#define MAX_MSI_IRQS 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -38,6 +46,10 @@ struct pcie_port {
int irq;
u32 lanes;
struct pcie_host_ops *ops;
+ int msi_irq;
+ int msi_irq_start;
+ unsigned long msi_data;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
@@ -57,6 +69,8 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val);
int cfg_write(void __iomem *addr, int where, int size, u32 val);
int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);