summaryrefslogtreecommitdiffstats
path: root/sys/contrib/octeon-sdk/cvmx-pcie.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/contrib/octeon-sdk/cvmx-pcie.c')
-rw-r--r--sys/contrib/octeon-sdk/cvmx-pcie.c774
1 files changed, 658 insertions, 116 deletions
diff --git a/sys/contrib/octeon-sdk/cvmx-pcie.c b/sys/contrib/octeon-sdk/cvmx-pcie.c
index 21a9b87..8053737 100644
--- a/sys/contrib/octeon-sdk/cvmx-pcie.c
+++ b/sys/contrib/octeon-sdk/cvmx-pcie.c
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,21 +42,56 @@
+
/**
* @file
*
* Interface to PCIe as a host(RC) or target(EP)
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 52004 $<hr>
*/
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/cvmx-pcieepx-defs.h>
+#include <asm/octeon/cvmx-pciercx-defs.h>
+#include <asm/octeon/cvmx-pemx-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pescx-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+#include <asm/octeon/cvmx-error.h>
+#endif
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/cvmx-pcie.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-swap.h>
+#include <asm/octeon/cvmx-wqe.h>
+#else
#include "cvmx.h"
#include "cvmx-csr-db.h"
#include "cvmx-pcie.h"
#include "cvmx-sysinfo.h"
#include "cvmx-swap.h"
#include "cvmx-wqe.h"
+#include "cvmx-error.h"
#include "cvmx-helper-errata.h"
+#endif
+#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
+#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
+#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
+#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
/**
* Return the Core virtual base address for PCIe IO access. IOs are
@@ -142,8 +178,16 @@ static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
cvmx_pciercx_cfg030_t pciercx_cfg030;
pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
- pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
- pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ pciercx_cfg030.s.mps = MPS_CN5XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
+ }
+ else
+ {
+ pciercx_cfg030.s.mps = MPS_CN6XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
+ }
pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
@@ -153,15 +197,36 @@ static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
}
- /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
- /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
+ /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
cvmx_npei_ctl_status2_t npei_ctl_status2;
npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
- npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
- npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ if (pcie_port)
+ npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
+ else
+ npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
+
cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
}
+ else
+ {
+ /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_dpi_sli_prtx_cfg_t prt_cfg;
+ cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
+ prt_cfg.s.mps = MPS_CN6XXX;
+ prt_cfg.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
+ sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
+ }
/* ECRC Generation (PCIE*_CFG070[GE,CE]) */
{
@@ -202,9 +267,6 @@ static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
}
- /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */
- // FIXME: Anything needed here?
-
/* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
/* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
{
@@ -283,10 +345,9 @@ static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
}
}
-
/**
* @INTERNAL
- * Initialize a host mode PCIe link. This function takes a PCIe
+ * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
* port from reset to a link up state. Software can then begin
* configuring the rest of the link.
*
@@ -294,7 +355,7 @@ static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
*
* @return Zero on success
*/
-static int __cvmx_pcie_rc_initialize_link(int pcie_port)
+static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
uint64_t start_cycle;
cvmx_pescx_ctl_status_t pescx_ctl_status;
@@ -348,7 +409,7 @@ static int __cvmx_pcie_rc_initialize_link(int pcie_port)
start_cycle = cvmx_get_cycle();
do
{
- if (cvmx_get_cycle() - start_cycle > 2*cvmx_sysinfo_get()->cpu_clock_hz)
+ if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
{
cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
return -1;
@@ -357,6 +418,9 @@ static int __cvmx_pcie_rc_initialize_link(int pcie_port)
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while (pciercx_cfg032.s.dlla == 0);
+ /* Clear all pending errors */
+ cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
+
/* Update the Replay Time Limit. Empirically, some PCIe devices take a
little longer to respond than expected under load. As a workaround for
this we configure the Replay Time Limit to the value expected for a 512
@@ -385,15 +449,18 @@ static int __cvmx_pcie_rc_initialize_link(int pcie_port)
/**
- * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
+ * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
+ * the bus.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
-int cvmx_pcie_rc_initialize(int pcie_port)
+static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
{
int i;
+ int base;
+ uint64_t addr_swizzle;
cvmx_ciu_soft_prst_t ciu_soft_prst;
cvmx_pescx_bist_status_t pescx_bist_status;
cvmx_pescx_bist_status2_t pescx_bist_status2;
@@ -403,13 +470,14 @@ int cvmx_pcie_rc_initialize(int pcie_port)
cvmx_npei_dbg_data_t npei_dbg_data;
cvmx_pescx_ctl_status2_t pescx_ctl_status2;
cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_npei_bar1_indexx_t bar1_index;
retry:
/* Make sure we aren't trying to setup a target mode interface in host mode */
npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
{
- cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port0, but port0 is not in host mode\n");
+ cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
return -1;
}
@@ -554,9 +622,9 @@ retry:
__cvmx_pcie_rc_initialize_config_space(pcie_port);
/* Bring the link up */
- if (__cvmx_pcie_rc_initialize_link(pcie_port))
+ if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
{
- cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n");
+ cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
return -1;
}
@@ -597,10 +665,29 @@ retry:
/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
- /* Disable Octeon's BAR1. It isn't needed in RC mode since BAR2
- maps all of memory. BAR2 also maps 256MB-512MB into the 2nd
- 256MB of memory */
- cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1);
+ /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u32 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
+ base = pcie_port ? 16 : 0;
+
+ /* Big endian swizzle for 32-bit PEXP_NCB register. */
+#ifdef __MIPSEB__
+ addr_swizzle = 4;
+#else
+ addr_swizzle = 0;
+#endif
+ for (i = 0; i < 16; i++) {
+ cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
+ base++;
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
/* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
where they overlap. It also overlaps with the device addresses, so
@@ -722,6 +809,315 @@ retry:
/**
+ * @INTERNAL
+ * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
+{
+ uint64_t start_cycle;
+ cvmx_pemx_ctl_status_t pem_ctl_status;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_pciercx_cfg448_t pciercx_cfg448;
+
+ /* Bring up the link */
+ pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pem_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
+
+ /* Wait for the link to come up */
+ start_cycle = cvmx_get_cycle();
+ do
+ {
+ if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
+ return -1;
+ cvmx_wait(10000);
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ } while (pciercx_cfg032.s.dlla == 0);
+
+ /* Update the Replay Time Limit. Empirically, some PCIe devices take a
+ little longer to respond than expected under load. As a workaround for
+ this we configure the Replay Time Limit to the value expected for a 512
+ byte MPS instead of our actual 256 byte MPS. The numbers below are
+ directly from the PCIe spec table 3-4 */
+ pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw)
+ {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
+
+ return 0;
+}
+
+
+/**
+ * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
+ * the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
+{
+ int i;
+ cvmx_ciu_soft_prst_t ciu_soft_prst;
+ cvmx_mio_rst_ctlx_t mio_rst_ctl;
+ cvmx_pemx_bar_ctl_t pemx_bar_ctl;
+ cvmx_pemx_ctl_status_t pemx_ctl_status;
+ cvmx_pemx_bist_status_t pemx_bist_status;
+ cvmx_pemx_bist_status2_t pemx_bist_status2;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_pciercx_cfg515_t pciercx_cfg515;
+ cvmx_sli_ctl_portx_t sli_ctl_portx;
+ cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
+ cvmx_sli_mem_access_subidx_t mem_access_subid;
+ cvmx_mio_rst_ctlx_t mio_rst_ctlx;
+ cvmx_sriox_status_reg_t sriox_status_reg;
+ cvmx_pemx_bar1_indexx_t bar1_index;
+
+ /* Make sure this interface isn't SRIO */
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
+ if (sriox_status_reg.s.srio)
+ {
+ cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Make sure we aren't trying to setup a target mode interface in host mode */
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ if (!mio_rst_ctl.s.host_mode)
+ {
+ cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
+ return -1;
+ }
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ if (pcie_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+
+ /* Bring the PCIe out of reset */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /* After a chip reset the PCIe will also be in reset. If it isn't,
+ most likely someone is trying to init it again without a proper
+ PCIe reset */
+ if (ciu_soft_prst.s.soft_prst == 0)
+ {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ cvmx_wait_usec(2000);
+ }
+ if (pcie_port)
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ }
+ else
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+
+ /* Wait for PCIe reset to complete */
+ cvmx_wait_usec(1000);
+
+ /* Check and make sure PCIe came out of reset. If it doesn't the board
+ probably hasn't wired the clocks up and the interface should be
+ skipped */
+ mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ if (!mio_rst_ctlx.s.rst_done)
+ {
+ cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
+ if (pemx_bist_status.u64)
+ cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
+ pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
+ if (pemx_bist_status2.u64)
+ cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Enable gen2 speed selection */
+ pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
+ pciercx_cfg515.s.dsc = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
+ {
+ /* Some gen1 devices don't handle the gen 2 training correctly. Disable
+ gen2 and try again with only gen1 */
+ cvmx_pciercx_cfg031_t pciercx_cfg031;
+ pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
+ pciercx_cfg031.s.mls = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
+ {
+ cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
+ return -1;
+ }
+ }
+
+ /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
+ sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
+
+ /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
+ for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
+ {
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
+ mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
+ }
+
+ /* Disable the peer to peer forwarding register. This must be setup
+ by the OS after it enumerates the bus and assigns addresses to the
+ PCIe busses */
+ for (i=0; i<4; i++)
+ {
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
+
+ /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
+ where they overlap. It also overlaps with the device addresses, so
+ make sure the peer to peer forwarding is set right */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
+
+ /* Setup BAR2 attributes */
+ /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
+ /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
+ /* - WAIT_COM=0 will likely work for all applications. */
+ /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
+ pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
+ pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/
+ pemx_bar_ctl.s.bar2_enb = 1;
+ pemx_bar_ctl.s.bar2_esx = 1;
+ pemx_bar_ctl.s.bar2_cax = 0;
+ cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
+ sli_ctl_portx.s.ptlp_ro = 1;
+ sli_ctl_portx.s.ctlp_ro = 1;
+ sli_ctl_portx.s.wait_com = 0;
+ sli_ctl_portx.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
+
+ /* BAR1 follows BAR2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u64 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
+ for (i = 0; i < 16; i++) {
+ cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
+
+ /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
+ clock */
+ pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
+
+ /* Display the link status */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
+
+ return 0;
+}
+
+/**
+ * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_rc_initialize(int pcie_port)
+{
+ int result;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
+ else
+ result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
+ if (result == 0)
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
+#endif
+ return result;
+}
+
+
+/**
* Shutdown a PCIe port and put it in reset
*
* @param pcie_port PCIe port to shutdown
@@ -730,9 +1126,20 @@ retry:
*/
int cvmx_pcie_rc_shutdown(int pcie_port)
{
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
+ cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
+#endif
/* Wait for all pending operations to complete */
- if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
- cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
+ cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
+ }
+ else
+ {
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
+ cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
+ }
/* Force reset */
if (pcie_port)
@@ -918,12 +1325,24 @@ void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg,
*/
uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
{
- cvmx_pescx_cfg_rd_t pescx_cfg_rd;
- pescx_cfg_rd.u64 = 0;
- pescx_cfg_rd.s.addr = cfg_offset;
- cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
- pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
- return pescx_cfg_rd.s.data;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_pescx_cfg_rd_t pescx_cfg_rd;
+ pescx_cfg_rd.u64 = 0;
+ pescx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
+ pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
+ return pescx_cfg_rd.s.data;
+ }
+ else
+ {
+ cvmx_pemx_cfg_rd_t pemx_cfg_rd;
+ pemx_cfg_rd.u64 = 0;
+ pemx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
+ pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
+ return pemx_cfg_rd.s.data;
+ }
}
@@ -937,65 +1356,134 @@ uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
*/
void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
{
- cvmx_pescx_cfg_wr_t pescx_cfg_wr;
- pescx_cfg_wr.u64 = 0;
- pescx_cfg_wr.s.addr = cfg_offset;
- pescx_cfg_wr.s.data = val;
- cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_pescx_cfg_wr_t pescx_cfg_wr;
+ pescx_cfg_wr.u64 = 0;
+ pescx_cfg_wr.s.addr = cfg_offset;
+ pescx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
+ }
+ else
+ {
+ cvmx_pemx_cfg_wr_t pemx_cfg_wr;
+ pemx_cfg_wr.u64 = 0;
+ pemx_cfg_wr.s.addr = cfg_offset;
+ pemx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
+ }
}
/**
* Initialize a PCIe port for use in target(EP) mode.
*
+ * @param pcie_port PCIe port to initialize
+ *
* @return Zero on success
*/
-int cvmx_pcie_ep_initialize(void)
+int cvmx_pcie_ep_initialize(int pcie_port)
{
- int pcie_port = 0;
- cvmx_npei_ctl_status_t npei_ctl_status;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_ctl_status_t npei_ctl_status;
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if (npei_ctl_status.s.host_mode)
+ return -1;
+ }
+ else
+ {
+ cvmx_mio_rst_ctlx_t mio_rst_ctl;
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ if (mio_rst_ctl.s.host_mode)
+ return -1;
+ }
- npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
- if (npei_ctl_status.s.host_mode)
- return -1;
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ if (pcie_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
/* Enable bus master and memory */
- cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEP_CFG001, 0x6);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
/* Max Payload Size (PCIE*_CFG030[MPS]) */
/* Max Read Request Size (PCIE*_CFG030[MRRS]) */
/* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
/* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
{
- cvmx_pciercx_cfg030_t pciercx_cfg030;
- pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
- pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
- pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
- pciercx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
- pciercx_cfg030.s.ns_en = 1; /* Enable no snoop. */
- pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
- pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
- pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
- pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
- cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
+ cvmx_pcieepx_cfg030_t pcieepx_cfg030;
+ pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ pcieepx_cfg030.s.mps = MPS_CN5XXX;
+ pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
+ }
+ else
+ {
+ pcieepx_cfg030.s.mps = MPS_CN6XXX;
+ pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
+ }
+ pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
+ pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
+ pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
+ pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
+ pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
+ pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
}
- /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
- /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
+ /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
cvmx_npei_ctl_status2_t npei_ctl_status2;
npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
- npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
- npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
+ npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
}
+ else
+ {
+ /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_dpi_sli_prtx_cfg_t prt_cfg;
+ cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
+ prt_cfg.s.mps = MPS_CN6XXX;
+ prt_cfg.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
+ sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
+ }
/* Setup Mem access SubDID 12 to access Host memory */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
cvmx_npei_mem_access_subidx_t mem_access_subid;
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
- mem_access_subid.s.nmerge = 1; /* Merging is allowed in this window. */
+ mem_access_subid.s.nmerge = 1; /* Merging is not allowed in this window. */
mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
@@ -1005,6 +1493,19 @@ int cvmx_pcie_ep_initialize(void)
 mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
}
+ else
+ {
+ cvmx_sli_mem_access_subidx_t mem_access_subid;
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 0; /* Merging is allowed in this window. */
+ mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
+ }
return 0;
}
@@ -1020,43 +1521,84 @@ int cvmx_pcie_ep_initialize(void)
*/
void cvmx_pcie_wait_for_pending(int pcie_port)
{
- cvmx_npei_data_out_cnt_t npei_data_out_cnt;
- int a;
- int b;
- int c;
-
- /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
- description of how this code works */
- npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
- if (pcie_port)
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
- if (!npei_data_out_cnt.s.p1_fcnt)
- return;
- a = npei_data_out_cnt.s.p1_ucnt;
- b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
+ cvmx_npei_data_out_cnt_t npei_data_out_cnt;
+ int a;
+ int b;
+ int c;
+
+ /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
+ description of how this code works */
+ npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ if (pcie_port)
+ {
+ if (!npei_data_out_cnt.s.p1_fcnt)
+ return;
+ a = npei_data_out_cnt.s.p1_ucnt;
+ b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
+ }
+ else
+ {
+ if (!npei_data_out_cnt.s.p0_fcnt)
+ return;
+ a = npei_data_out_cnt.s.p0_ucnt;
+ b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
+ }
+
+ while (1)
+ {
+ npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
+ if (a<=b)
+ {
+ if ((c<a) || (c>b))
+ return;
+ }
+ else
+ {
+ if ((c>b) && (c<a))
+ return;
+ }
+ }
}
else
{
- if (!npei_data_out_cnt.s.p0_fcnt)
- return;
- a = npei_data_out_cnt.s.p0_ucnt;
- b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
- }
+ cvmx_sli_data_out_cnt_t sli_data_out_cnt;
+ int a;
+ int b;
+ int c;
- while (1)
- {
- npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
- c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
- if (a<=b)
+ sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
+ if (pcie_port)
{
- if ((c<a) || (c>b))
+ if (!sli_data_out_cnt.s.p1_fcnt)
return;
+ a = sli_data_out_cnt.s.p1_ucnt;
+ b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
}
else
{
- if ((c>b) && (c<a))
+ if (!sli_data_out_cnt.s.p0_fcnt)
return;
+ a = sli_data_out_cnt.s.p0_ucnt;
+ b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
+ }
+
+ while (1)
+ {
+ sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
+ c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
+ if (a<=b)
+ {
+ if ((c<a) || (c>b))
+ return;
+ }
+ else
+ {
+ if ((c>b) && (c<a))
+ return;
+ }
}
}
}
-
OpenPOWER on IntegriCloud