summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorluigi <luigi@FreeBSD.org>2012-02-10 21:03:04 +0000
committerluigi <luigi@FreeBSD.org>2012-02-10 21:03:04 +0000
commit4119b9cf7a5e3dd966371d685eda1010641fe34f (patch)
tree3063d4c50b218a2983c6311cbe50ce7d0c9b8c52
parentac654b06161d867de0b53cf65e24b08c59db3d01 (diff)
downloadFreeBSD-src-4119b9cf7a5e3dd966371d685eda1010641fe34f.zip
FreeBSD-src-4119b9cf7a5e3dd966371d685eda1010641fe34f.tar.gz
Add a driver for Emulex OneConnect ethernet cards (10 Gbit PCIe)
A manpage will come in a future commit. Submitted by: Naresh Raju Gottumukkala (emulex)
-rw-r--r--sys/conf/NOTES2
-rw-r--r--sys/conf/files6
-rw-r--r--sys/dev/oce/oce_hw.c588
-rw-r--r--sys/dev/oce/oce_hw.h3381
-rw-r--r--sys/dev/oce/oce_if.c2000
-rw-r--r--sys/dev/oce/oce_if.h1071
-rw-r--r--sys/dev/oce/oce_mbox.c1705
-rw-r--r--sys/dev/oce/oce_queue.c1213
-rw-r--r--sys/dev/oce/oce_sysctl.c1300
-rw-r--r--sys/dev/oce/oce_util.c270
-rw-r--r--sys/modules/Makefile1
-rw-r--r--sys/modules/oce/Makefile15
12 files changed, 11552 insertions, 0 deletions
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index af351fa..c61c0a1 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -1972,6 +1972,7 @@ device xmphy # XaQti XMAC II
# SMC EZ Card 1000 (SMC9462TX), D-Link DGE-500T, Asante FriendlyNet
# GigaNIX 1000TA and 1000TPC, the Addtron AEG320T, the Surecom
# EP-320G-TX and the Netgear GA622T.
+# oce: Emulex 10 Gbit adapters (OneConnect Ethernet)
# pcn: Support for PCI fast ethernet adapters based on the AMD Am79c97x
# PCnet-FAST, PCnet-FAST+, PCnet-FAST III, PCnet-PRO and PCnet-Home
# chipsets. These can also be handled by the le(4) driver if the
@@ -2112,6 +2113,7 @@ device ixgbe # Intel Pro/10Gbe PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device mxge # Myricom Myri-10G 10GbE NIC
device nxge # Neterion Xframe 10GbE Server/Storage Adapter
+device oce # Emulex 10 GbE (OneConnect Ethernet)
device ti # Alteon Networks Tigon I/II gigabit Ethernet
device txp # 3Com 3cR990 (``Typhoon'')
device vx # 3Com 3c590, 3c595 (``Vortex'')
diff --git a/sys/conf/files b/sys/conf/files
index b43b4a4..02642fb 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1068,6 +1068,12 @@ dev/e1000/e1000_mbx.c optional em | igb \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_osdep.c optional em | igb \
compile-with "${NORMAL_C} -I$S/dev/e1000"
+dev/oce/oce_hw.c optional oce pci
+dev/oce/oce_if.c optional oce pci
+dev/oce/oce_mbox.c optional oce pci
+dev/oce/oce_queue.c optional oce pci
+dev/oce/oce_sysctl.c optional oce pci
+dev/oce/oce_util.c optional oce pci
dev/et/if_et.c optional et
dev/en/if_en_pci.c optional en pci
dev/en/midway.c optional en
diff --git a/sys/dev/oce/oce_hw.c b/sys/dev/oce/oce_hw.c
new file mode 100644
index 0000000..e877f84
--- /dev/null
+++ b/sys/dev/oce/oce_hw.c
@@ -0,0 +1,588 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/* $FreeBSD$ */
+
+#include "oce_if.h"
+
+static int oce_POST(POCE_SOFTC sc);
+
+/**
+ * @brief Wait for firmware POST (power-on self test) to complete
+ * @param sc software handle to the device
+ * @returns 0 when the ARM firmware reports ready, ENXIO if POST
+ * reports an error or times out
+ */
+static int
+oce_POST(POCE_SOFTC sc)
+{
+ mpu_ep_semaphore_t post_status;
+ int tmo = 60000;
+
+ /* read semaphore CSR */
+ post_status.dw0 = OCE_READ_REG32(sc, csr, MPU_EP_SEMAPHORE(sc));
+
+ /* if host is ready then wait for fw ready else send POST */
+ if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) {
+ post_status.bits.stage = POST_STAGE_CHIP_RESET;
+ OCE_WRITE_REG32(sc, csr, MPU_EP_SEMAPHORE(sc), post_status.dw0);
+ }
+
+ /* wait for FW ready: 1ms per iteration, ~60s total */
+ for (;;) {
+ if (--tmo == 0)
+ break;
+
+ DELAY(1000);
+
+ post_status.dw0 = OCE_READ_REG32(sc, csr, MPU_EP_SEMAPHORE(sc));
+ if (post_status.bits.error) {
+ device_printf(sc->dev,
+ "POST failed: %x\n", post_status.dw0);
+ return ENXIO;
+ }
+ if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
+ return 0;
+ }
+
+ device_printf(sc->dev, "POST timed out: %x\n", post_status.dw0);
+
+ return ENXIO;
+}
+
+/**
+ * @brief Function for hardware initialization
+ * @param sc software handle to the device
+ * @returns 0 on success, error code otherwise
+ *
+ * Waits for firmware POST, creates the bootstrap mailbox, resets the
+ * function and then queries firmware version/configuration and the
+ * port MAC address. On any failure after the mailbox is allocated,
+ * its DMA memory is released before returning.
+ */
+int
+oce_hw_init(POCE_SOFTC sc)
+{
+ int rc = 0;
+
+ rc = oce_POST(sc);
+ if (rc)
+ return rc;
+
+ /* create the bootstrap mailbox */
+ rc = oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->bsmbx, 0);
+ if (rc) {
+ device_printf(sc->dev, "Mailbox alloc failed\n");
+ return rc;
+ }
+
+ rc = oce_reset_fun(sc);
+ if (rc)
+ goto error;
+
+
+ rc = oce_mbox_init(sc);
+ if (rc)
+ goto error;
+
+
+ rc = oce_get_fw_version(sc);
+ if (rc)
+ goto error;
+
+
+ rc = oce_get_fw_config(sc);
+ if (rc)
+ goto error;
+
+
+ /* Ethernet MAC addresses are 6 bytes long */
+ sc->macaddr.size_of_struct = 6;
+ rc = oce_read_mac_addr(sc, 0, 1, MAC_ADDRESS_TYPE_NETWORK,
+ &sc->macaddr);
+ if (rc)
+ goto error;
+
+ /* native mode is only queried on BE3 adapters */
+ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE3)) {
+ rc = oce_mbox_check_native_mode(sc);
+ if (rc)
+ goto error;
+ } else
+ sc->be3_native = 0;
+
+ return rc;
+
+error:
+ oce_dma_free(sc, &sc->bsmbx);
+ device_printf(sc->dev, "Hardware initialisation failed\n");
+ return rc;
+}
+
+
+
+/**
+ * @brief Releases the obtained pci resources
+ * @param sc software handle to the device
+ */
+void
+oce_hw_pci_free(POCE_SOFTC sc)
+{
+ int pci_cfg_barnum = 0;
+
+ /* BE2 keeps its device config registers in a different BAR */
+ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
+ pci_cfg_barnum = OCE_DEV_BE2_CFG_BAR;
+ else
+ pci_cfg_barnum = OCE_DEV_CFG_BAR;
+
+ /* release the device config region, if mapped */
+ if (sc->devcfg_res != NULL) {
+ bus_release_resource(sc->dev,
+ SYS_RES_MEMORY,
+ PCIR_BAR(pci_cfg_barnum), sc->devcfg_res);
+ sc->devcfg_res = (struct resource *)NULL;
+ sc->devcfg_btag = (bus_space_tag_t) 0;
+ sc->devcfg_bhandle = (bus_space_handle_t)0;
+ sc->devcfg_vhandle = (void *)NULL;
+ }
+
+ /* release the CSR region (BE only, see oce_hw_pci_alloc) */
+ if (sc->csr_res != NULL) {
+ bus_release_resource(sc->dev,
+ SYS_RES_MEMORY,
+ PCIR_BAR(OCE_PCI_CSR_BAR), sc->csr_res);
+ sc->csr_res = (struct resource *)NULL;
+ sc->csr_btag = (bus_space_tag_t)0;
+ sc->csr_bhandle = (bus_space_handle_t)0;
+ sc->csr_vhandle = (void *)NULL;
+ }
+
+ /* release the doorbell region (BE only) */
+ if (sc->db_res != NULL) {
+ bus_release_resource(sc->dev,
+ SYS_RES_MEMORY,
+ PCIR_BAR(OCE_PCI_DB_BAR), sc->db_res);
+ sc->db_res = (struct resource *)NULL;
+ sc->db_btag = (bus_space_tag_t)0;
+ sc->db_bhandle = (bus_space_handle_t)0;
+ sc->db_vhandle = (void *)NULL;
+ }
+}
+
+
+
+
+/**
+ * @brief Function to get the PCI capabilities
+ * @param sc software handle to the device
+ *
+ * Probes the PCI-X/PCIe/MSI/MSI-X capability list and records the
+ * results in sc->flags; also caches the PCIe link speed and width.
+ */
+static
+void oce_get_pci_capabilities(POCE_SOFTC sc)
+{
+ uint32_t val;
+
+ if (pci_find_extcap(sc->dev, PCIY_PCIX, &val) == 0) {
+ if (val != 0)
+ sc->flags |= OCE_FLAGS_PCIX;
+ }
+
+ if (pci_find_extcap(sc->dev, PCIY_EXPRESS, &val) == 0) {
+ if (val != 0) {
+ /* 0x12 is the Link Status register offset
+ within the PCI Express capability */
+ uint16_t link_status =
+ pci_read_config(sc->dev, val + 0x12, 2);
+
+ sc->flags |= OCE_FLAGS_PCIE;
+ sc->pcie_link_speed = link_status & 0xf;
+ sc->pcie_link_width = (link_status >> 4) & 0x3f;
+ }
+ }
+
+ if (pci_find_extcap(sc->dev, PCIY_MSI, &val) == 0) {
+ if (val != 0)
+ sc->flags |= OCE_FLAGS_MSI_CAPABLE;
+ }
+
+ if (pci_find_extcap(sc->dev, PCIY_MSIX, &val) == 0) {
+ if (val != 0) {
+ val = pci_msix_count(sc->dev);
+ sc->flags |= OCE_FLAGS_MSIX_CAPABLE;
+ }
+ }
+}
+
+/**
+ * @brief Allocate PCI resources.
+ *
+ * Maps the device config BAR, validates the SLI_INTF register
+ * (SLI-4 only is supported) and, on BE adapters, additionally maps
+ * the CSR and doorbell BARs. All mappings are undone on failure.
+ *
+ * @param sc software handle to the device
+ * @returns 0 if successful, or error
+ */
+int
+oce_hw_pci_alloc(POCE_SOFTC sc)
+{
+ int rr, pci_cfg_barnum = 0;
+ pci_sli_intf_t intf;
+
+ pci_enable_busmaster(sc->dev);
+
+ oce_get_pci_capabilities(sc);
+
+ sc->fn = pci_get_function(sc->dev);
+
+ /* setup the device config region */
+ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
+ pci_cfg_barnum = OCE_DEV_BE2_CFG_BAR;
+ else
+ pci_cfg_barnum = OCE_DEV_CFG_BAR;
+
+ rr = PCIR_BAR(pci_cfg_barnum);
+
+ /* non-BE (Lancer): explicitly request a 32KB window */
+ if (IS_BE(sc))
+ sc->devcfg_res = bus_alloc_resource_any(sc->dev,
+ SYS_RES_MEMORY, &rr,
+ RF_ACTIVE|RF_SHAREABLE);
+ else
+ sc->devcfg_res = bus_alloc_resource(sc->dev,
+ SYS_RES_MEMORY, &rr,
+ 0ul, ~0ul, 32768,
+ RF_ACTIVE|RF_SHAREABLE);
+
+ if (!sc->devcfg_res)
+ goto error;
+
+ sc->devcfg_btag = rman_get_bustag(sc->devcfg_res);
+ sc->devcfg_bhandle = rman_get_bushandle(sc->devcfg_res);
+ sc->devcfg_vhandle = rman_get_virtual(sc->devcfg_res);
+
+ /* Read the SLI_INTF register and determine whether we
+ * can use this port and its features
+ */
+ intf.dw0 = pci_read_config((sc)->dev,OCE_INTF_REG_OFFSET,4);
+
+ if (intf.bits.sli_valid != OCE_INTF_VALID_SIG)
+ goto error;
+
+ if (intf.bits.sli_rev != OCE_INTF_SLI_REV4) {
+ device_printf(sc->dev, "Adapter doesnt support SLI4\n");
+ goto error;
+ }
+
+ /* type-1 interfaces need mailbox commands byte-swapped */
+ if (intf.bits.sli_if_type == OCE_INTF_IF_TYPE_1)
+ sc->flags |= OCE_FLAGS_MBOX_ENDIAN_RQD;
+
+ if (intf.bits.sli_hint1 == OCE_INTF_FUNC_RESET_REQD)
+ sc->flags |= OCE_FLAGS_FUNCRESET_RQD;
+
+ if (intf.bits.sli_func_type == OCE_INTF_VIRT_FUNC)
+ sc->flags |= OCE_FLAGS_VIRTUAL_PORT;
+
+ /* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
+ if (IS_BE(sc)) {
+ /* set up CSR region */
+ rr = PCIR_BAR(OCE_PCI_CSR_BAR);
+ sc->csr_res = bus_alloc_resource_any(sc->dev,
+ SYS_RES_MEMORY, &rr, RF_ACTIVE|RF_SHAREABLE);
+ if (!sc->csr_res)
+ goto error;
+ sc->csr_btag = rman_get_bustag(sc->csr_res);
+ sc->csr_bhandle = rman_get_bushandle(sc->csr_res);
+ sc->csr_vhandle = rman_get_virtual(sc->csr_res);
+
+ /* set up DB doorbell region */
+ rr = PCIR_BAR(OCE_PCI_DB_BAR);
+ sc->db_res = bus_alloc_resource_any(sc->dev,
+ SYS_RES_MEMORY, &rr, RF_ACTIVE|RF_SHAREABLE);
+ if (!sc->db_res)
+ goto error;
+ sc->db_btag = rman_get_bustag(sc->db_res);
+ sc->db_bhandle = rman_get_bushandle(sc->db_res);
+ sc->db_vhandle = rman_get_virtual(sc->db_res);
+ }
+
+ return 0;
+
+error:
+ oce_hw_pci_free(sc);
+ return ENXIO;
+}
+
+
<body>
+/**
+ * @brief Function for device shutdown
+ * @param sc software handle to the device
+ *
+ * Tears down everything oce_hw_init/oce_hw_start set up, in reverse
+ * order: stats, interrupts, LRO, queues, the network interface,
+ * firmware state, interrupt and PCI resources, locks and finally the
+ * bootstrap mailbox DMA memory.
+ */
+void
+oce_hw_shutdown(POCE_SOFTC sc)
+{
+
+ oce_stats_free(sc);
+ /* disable hardware interrupts */
+ oce_hw_intr_disable(sc);
+ /* free LRO resources */
+ oce_free_lro(sc);
+ /* release all queues */
+ oce_queue_release_all(sc);
+ /* delete the network interface */
+ oce_delete_nw_interface(sc);
+ /* after fw clean we don't send any cmds to fw */
+ oce_fw_clean(sc);
+ /* release intr resources */
+ oce_intr_free(sc);
+ /* release PCI resources */
+ oce_hw_pci_free(sc);
+ /* free mbox specific resources */
+ LOCK_DESTROY(&sc->bmbx_lock);
+ LOCK_DESTROY(&sc->dev_lock);
+
+ oce_dma_free(sc, &sc->bsmbx);
+}
</body>
+
+
+/**
+ * @brief Function for creating nw interface.
+ * @param sc software handle to the device
+ * @returns 0 on success, error otherwise
+ *
+ * Creates the firmware-side network interface with the appropriate
+ * capability flags, then applies VLAN promiscuity, flow control and
+ * promiscuous settings. On failure after creation, the interface is
+ * deleted again.
+ */
+int
+oce_create_nw_interface(POCE_SOFTC sc)
+{
+ int rc;
+ uint32_t capab_flags;
+ uint32_t capab_en_flags;
+
+ /* interface capabilities to give device when creating interface */
+ capab_flags = OCE_CAPAB_FLAGS;
+
+ /* capabilities to enable by default (others set dynamically) */
+ capab_en_flags = OCE_CAPAB_ENABLE;
+
+ if (IS_XE201(sc)) {
+ /* LANCER A0 workaround: no L3/L4 error pass-through */
+ capab_en_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR;
+ capab_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR;
+ }
+
+ /* enable capabilities controlled via driver startup parameters */
+ if (sc->rss_enable)
+ capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;
+ else {
+ capab_en_flags &= ~MBX_RX_IFACE_FLAGS_RSS;
+ capab_flags &= ~MBX_RX_IFACE_FLAGS_RSS;
+ }
+
+ rc = oce_if_create(sc,
+ capab_flags,
+ capab_en_flags,
+ 0, &sc->macaddr.mac_addr[0], &sc->if_id);
+ if (rc)
+ return rc;
+
+ /* record the newly created interface */
+ atomic_inc_32(&sc->nifs);
+
+ sc->if_cap_flags = capab_en_flags;
+
+ /* Enable VLAN Promisc on HW */
+ rc = oce_config_vlan(sc, (uint8_t) sc->if_id, NULL, 0, 1, 1);
+ if (rc)
+ goto error;
+
+ /* set default flow control */
+ rc = oce_set_flow_control(sc, sc->flow_control);
+ if (rc)
+ goto error;
+
+ rc = oce_rxf_set_promiscuous(sc, sc->promisc);
+ if (rc)
+ goto error;
+
+ return rc;
+
+error:
+ oce_delete_nw_interface(sc);
+ return rc;
+
+}
+
+/**
+ * @brief Function to delete a nw interface.
+ * @param sc software handle to the device
+ */
+void
+oce_delete_nw_interface(POCE_SOFTC sc)
+{
+ /* currently only a single interface is implemented */
+ if (sc->nifs > 0) {
+ oce_if_del(sc, sc->if_id);
+ atomic_dec_32(&sc->nifs);
+ }
+}
+
+/**
+ * @brief Soft reset.
+ * @param sc software handle to the device
+ * @returns 0 on success, error otherwise
+ *
+ * Sets the cpu_reset bit in MPU_EP_CONTROL and then waits for POST
+ * to complete again.
+ */
+int
+oce_pci_soft_reset(POCE_SOFTC sc)
+{
+ int rc;
+ mpu_ep_control_t ctrl;
+
+ ctrl.dw0 = OCE_READ_REG32(sc, csr, MPU_EP_CONTROL);
+ ctrl.bits.cpu_reset = 1;
+ OCE_WRITE_REG32(sc, csr, MPU_EP_CONTROL, ctrl.dw0);
+ DELAY(50);
+ rc=oce_POST(sc);
+
+ return rc;
+}
+
+/**
+ * @brief Function for hardware start
+ * @param sc software handle to the device
+ * @returns 0 on success, error otherwise
+ *
+ * Queries the link state, reflects it into the ifnet, starts the
+ * mailbox queue and enables/arms interrupts.
+ */
+int
+oce_hw_start(POCE_SOFTC sc)
+{
+ struct link_status link = { 0 };
+ int rc = 0;
+
+ rc = oce_get_link_status(sc, &link);
+ if (rc)
+ /* NOTE(review): returns 1 rather than rc — confirm intended */
+ return 1;
+
+ if (link.logical_link_status == NTWK_LOGICAL_LINK_UP) {
+ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->link_status = NTWK_LOGICAL_LINK_UP;
+ if_link_state_change(sc->ifp, LINK_STATE_UP);
+ } else {
+ sc->ifp->if_drv_flags &=
+ ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->link_status = NTWK_LOGICAL_LINK_DOWN;
+ if_link_state_change(sc->ifp, LINK_STATE_DOWN);
+ }
+
+ /* valid codes are PHY_LINK_SPEED_10MBPS..10GBPS (1..4) */
+ if (link.mac_speed > 0 && link.mac_speed < 5)
+ sc->link_speed = link.mac_speed;
+ else
+ sc->link_speed = 0;
+
+ sc->qos_link_speed = (uint32_t )link.qos_link_speed * 10;
+
+ rc = oce_start_mq(sc->mq);
+
+ /* we need to get MCC async events.
+ So enable intrs and also arm first EQ
+ */
+ oce_hw_intr_enable(sc);
+ oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
+
+ return rc;
+}
+
+
+/**
+ * @brief Function for hardware enable interrupts.
+ * @param sc software handle to the device
+ */
+void
+oce_hw_intr_enable(POCE_SOFTC sc)
+{
+ uint32_t reg;
+
+ /* set the host-interrupt bit in PCICFG_INTR_CTRL */
+ reg = OCE_READ_REG32(sc, devcfg, PCICFG_INTR_CTRL);
+ reg |= HOSTINTR_MASK;
+ OCE_WRITE_REG32(sc, devcfg, PCICFG_INTR_CTRL, reg);
+
+}
+
+
+/**
+ * @brief Function for hardware disable interrupts
+ * @param sc software handle to the device
+ */
+void
+oce_hw_intr_disable(POCE_SOFTC sc)
+{
+ uint32_t reg;
+
+ /* clear the host-interrupt bit in PCICFG_INTR_CTRL */
+ reg = OCE_READ_REG32(sc, devcfg, PCICFG_INTR_CTRL);
+ reg &= ~HOSTINTR_MASK;
+ OCE_WRITE_REG32(sc, devcfg, PCICFG_INTR_CTRL, reg);
+}
+
+
+
+/**
+ * @brief Function for hardware update multicast filter
+ * @param sc software handle to the device
+ * @returns 0 on success, ENOMEM or mailbox error otherwise
+ *
+ * Copies the ifnet's multicast list into a mailbox request; if there
+ * are more addresses than the hardware filter can hold, falls back
+ * to multicast-promiscuous mode instead.
+ */
+int
+oce_hw_update_multicast(POCE_SOFTC sc)
+{
+ struct ifnet *ifp = sc->ifp;
+ struct ifmultiaddr *ifma;
+ struct mbx_set_common_iface_multicast *req = NULL;
+ OCE_DMA_MEM dma;
+ int rc = 0;
+
+ /* allocate DMA memory for the mailbox request */
+ if (oce_dma_alloc(sc, sizeof(struct mbx_set_common_iface_multicast),
+ &dma, 0))
+ return ENOMEM;
+
+ req = OCE_DMAPTR(&dma, struct mbx_set_common_iface_multicast);
+ bzero(req, sizeof(struct mbx_set_common_iface_multicast));
+
+#if __FreeBSD_version > 800000
+ IF_ADDR_LOCK(ifp);
+#endif
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+
+ if (req->params.req.num_mac == OCE_MAX_MC_FILTER_SIZE) {
+ /* More multicast addresses than the hardware
+ table can hold: enable multicast promiscuous
+ mode to accept all multicast packets
+ */
+ req->params.req.promiscuous = 1;
+ break;
+ }
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ &req->params.req.mac[req->params.req.num_mac],
+ ETH_ADDR_LEN);
+ req->params.req.num_mac = req->params.req.num_mac + 1;
+ }
+#if __FreeBSD_version > 800000
+IF_ADDR_UNLOCK(ifp);
+#endif
+ req->params.req.if_id = sc->if_id;
+ rc = oce_update_multicast(sc, &dma);
+ oce_dma_free(sc, &dma);
+ return rc;
+}
+
diff --git a/sys/dev/oce/oce_hw.h b/sys/dev/oce/oce_hw.h
new file mode 100644
index 0000000..6aee9fa
--- /dev/null
+++ b/sys/dev/oce/oce_hw.h
@@ -0,0 +1,3381 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/* $FreeBSD$ */
+
+#include <sys/types.h>
+
+#undef _BIG_ENDIAN /* TODO */
+#pragma pack(1)
+
+#define OC_CNA_GEN2 0x2
+#define OC_CNA_GEN3 0x3
+#define DEVID_TIGERSHARK 0x700
+#define DEVID_TOMCAT 0x710
+
+/* PCI CSR offsets */
+#define PCICFG_F1_CSR 0x0 /* F1 for NIC */
+#define PCICFG_SEMAPHORE 0xbc
+#define PCICFG_SOFT_RESET 0x5c
+#define PCICFG_UE_STATUS_HI_MASK 0xac
+#define PCICFG_UE_STATUS_LO_MASK 0xa8
+#define PCICFG_ONLINE0 0xb0
+#define PCICFG_ONLINE1 0xb4
+#define INTR_EN 0x20000000
+#define IMAGE_TRANSFER_SIZE (32 * 1024) /* 32K at a time */
+
+/* CSR register offsets */
+#define MPU_EP_CONTROL 0
+#define MPU_EP_SEMAPHORE_BE3 0xac
+#define MPU_EP_SEMAPHORE_XE201 0x400
+#define MPU_EP_SEMAPHORE(sc) \
+ ((IS_BE(sc)) ? MPU_EP_SEMAPHORE_BE3 : MPU_EP_SEMAPHORE_XE201)
+#define PCICFG_INTR_CTRL 0xfc
+#define HOSTINTR_MASK (1 << 29)
+#define HOSTINTR_PFUNC_SHIFT 26
+#define HOSTINTR_PFUNC_MASK 7
+
+/* POST status reg struct */
+#define POST_STAGE_POWER_ON_RESET 0x00
+#define POST_STAGE_AWAITING_HOST_RDY 0x01
+#define POST_STAGE_HOST_RDY 0x02
+#define POST_STAGE_CHIP_RESET 0x03
+#define POST_STAGE_ARMFW_READY 0xc000
+#define POST_STAGE_ARMFW_UE 0xf000
+
+/* DOORBELL registers */
+#define PD_RXULP_DB 0x0100
+#define PD_TXULP_DB 0x0060
+#define DB_RQ_ID_MASK 0x3FF
+
+#define PD_CQ_DB 0x0120
+#define PD_EQ_DB PD_CQ_DB
+#define PD_MPU_MBOX_DB 0x0160
+#define PD_MQ_DB 0x0140
+
+/* EQE completion types */
+#define EQ_MINOR_CODE_COMPLETION 0x00
+#define EQ_MINOR_CODE_OTHER 0x01
+#define EQ_MAJOR_CODE_COMPLETION 0x00
+
+/* Link Status field values */
+#define PHY_LINK_FAULT_NONE 0x0
+#define PHY_LINK_FAULT_LOCAL 0x01
+#define PHY_LINK_FAULT_REMOTE 0x02
+
+#define PHY_LINK_SPEED_ZERO 0x0 /* No link */
+#define PHY_LINK_SPEED_10MBPS 0x1 /* (10 Mbps) */
+#define PHY_LINK_SPEED_100MBPS 0x2 /* (100 Mbps) */
+#define PHY_LINK_SPEED_1GBPS 0x3 /* (1 Gbps) */
+#define PHY_LINK_SPEED_10GBPS 0x4 /* (10 Gbps) */
+
+#define PHY_LINK_DUPLEX_NONE 0x0
+#define PHY_LINK_DUPLEX_HALF 0x1
+#define PHY_LINK_DUPLEX_FULL 0x2
+
+#define NTWK_PORT_A 0x0 /* (Port A) */
+#define NTWK_PORT_B 0x1 /* (Port B) */
+
+#define PHY_LINK_SPEED_ZERO 0x0 /* (No link.) */
+#define PHY_LINK_SPEED_10MBPS 0x1 /* (10 Mbps) */
+#define PHY_LINK_SPEED_100MBPS 0x2 /* (100 Mbps) */
+#define PHY_LINK_SPEED_1GBPS 0x3 /* (1 Gbps) */
+#define PHY_LINK_SPEED_10GBPS 0x4 /* (10 Gbps) */
+
+/* Hardware Address types */
+#define MAC_ADDRESS_TYPE_STORAGE 0x0 /* (Storage MAC Address) */
+#define MAC_ADDRESS_TYPE_NETWORK 0x1 /* (Network MAC Address) */
+#define MAC_ADDRESS_TYPE_PD 0x2 /* (Protection Domain MAC Addr) */
+#define MAC_ADDRESS_TYPE_MANAGEMENT 0x3 /* (Management MAC Address) */
+#define MAC_ADDRESS_TYPE_FCOE 0x4 /* (FCoE MAC Address) */
+
+/* CREATE_IFACE capability and cap_en flags */
+#define MBX_RX_IFACE_FLAGS_RSS 0x4
+#define MBX_RX_IFACE_FLAGS_PROMISCUOUS 0x8
+#define MBX_RX_IFACE_FLAGS_BROADCAST 0x10
+#define MBX_RX_IFACE_FLAGS_UNTAGGED 0x20
+#define MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS 0x80
+#define MBX_RX_IFACE_FLAGS_VLAN 0x100
+#define MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS 0x200
+#define MBX_RX_IFACE_FLAGS_PASS_L2_ERR 0x400
+#define MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR 0x800
+#define MBX_RX_IFACE_FLAGS_MULTICAST 0x1000
+#define MBX_RX_IFACE_RX_FILTER_IF_MULTICAST_HASH 0x2000
+#define MBX_RX_IFACE_FLAGS_HDS 0x4000
+#define MBX_RX_IFACE_FLAGS_DIRECTED 0x8000
+#define MBX_RX_IFACE_FLAGS_VMQ 0x10000
+#define MBX_RX_IFACE_FLAGS_NETQ 0x20000
+#define MBX_RX_IFACE_FLAGS_QGROUPS 0x40000
+#define MBX_RX_IFACE_FLAGS_LSO 0x80000
+#define MBX_RX_IFACE_FLAGS_LRO 0x100000
+
+#define MQ_RING_CONTEXT_SIZE_16 0x5 /* (16 entries) */
+#define MQ_RING_CONTEXT_SIZE_32 0x6 /* (32 entries) */
+#define MQ_RING_CONTEXT_SIZE_64 0x7 /* (64 entries) */
+#define MQ_RING_CONTEXT_SIZE_128 0x8 /* (128 entries) */
+
+#define MBX_DB_READY_BIT 0x1
+#define MBX_DB_HI_BIT 0x2
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_LINK_UP 0x1
+#define ASYNC_EVENT_LINK_DOWN 0x0
+
+/* port link_status */
+#define ASYNC_EVENT_LOGICAL 0x02
+
+/* Logical Link Status */
+#define NTWK_LOGICAL_LINK_DOWN 0
+#define NTWK_LOGICAL_LINK_UP 1
+
+/* Rx filter bits */
+#define NTWK_RX_FILTER_IP_CKSUM 0x1
+#define NTWK_RX_FILTER_TCP_CKSUM 0x2
+#define NTWK_RX_FILTER_UDP_CKSUM 0x4
+#define NTWK_RX_FILTER_STRIP_CRC 0x8
+
+/* max SGE per mbx */
+#define MAX_MBX_SGE 19
+
+/* Max multicast filter size*/
+#define OCE_MAX_MC_FILTER_SIZE 64
+
+/* PCI SLI (Service Level Interface) capabilities register */
+#define OCE_INTF_REG_OFFSET 0x58
+#define OCE_INTF_VALID_SIG 6 /* register's signature */
+#define OCE_INTF_FUNC_RESET_REQD 1
+#define OCE_INTF_HINT1_NOHINT 0
+#define OCE_INTF_HINT1_SEMAINIT 1
+#define OCE_INTF_HINT1_STATCTRL 2
+#define OCE_INTF_IF_TYPE_0 0
+#define OCE_INTF_IF_TYPE_1 1
+#define OCE_INTF_IF_TYPE_2 2
+#define OCE_INTF_IF_TYPE_3 3
+#define OCE_INTF_SLI_REV3 3 /* not supported by driver */
+#define OCE_INTF_SLI_REV4 4 /* driver supports SLI-4 */
+#define OCE_INTF_PHYS_FUNC 0
+#define OCE_INTF_VIRT_FUNC 1
+#define OCE_INTF_FAMILY_BE2 0 /* not supported by driver */
+#define OCE_INTF_FAMILY_BE3 1 /* driver supports BE3 */
+#define OCE_INTF_FAMILY_A0_CHIP 0xA /* Lancer A0 chip (supported) */
+#define OCE_INTF_FAMILY_B0_CHIP 0xB /* Lancer B0 chip (future) */
+
+#define NIC_WQE_SIZE 16
+#define NIC_UNICAST 0x00
+#define NIC_MULTICAST 0x01
+#define NIC_BROADCAST 0x02
+
+#define NIC_HDS_NO_SPLIT 0x00
+#define NIC_HDS_SPLIT_L3PL 0x01
+#define NIC_HDS_SPLIT_L4PL 0x02
+
+#define NIC_WQ_TYPE_FORWARDING 0x01
+#define NIC_WQ_TYPE_STANDARD 0x02
+#define NIC_WQ_TYPE_LOW_LATENCY 0x04
+
+#define OCE_RESET_STATS 1
+#define OCE_RETAIN_STATS 0
+#define OCE_TXP_SW_SZ 48
+
+/*
+ * SLI_INTF register in PCI config space (read at OCE_INTF_REG_OFFSET);
+ * reports SLI revision, chip family, interface type and function type.
+ */
+typedef union pci_sli_intf_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t sli_valid:3;
+ uint32_t sli_hint2:5;
+ uint32_t sli_hint1:8;
+ uint32_t sli_if_type:4;
+ uint32_t sli_family:4;
+ uint32_t sli_rev:4;
+ uint32_t rsv0:3;
+ uint32_t sli_func_type:1;
+#else
+ uint32_t sli_func_type:1;
+ uint32_t rsv0:3;
+ uint32_t sli_rev:4;
+ uint32_t sli_family:4;
+ uint32_t sli_if_type:4;
+ uint32_t sli_hint1:8;
+ uint32_t sli_hint2:5;
+ uint32_t sli_valid:3;
+#endif
+ } bits;
+} pci_sli_intf_t;
+
+
+
+/* physical address structure to be used in MBX */
+struct phys_addr {
+ /* dw0 */
+ uint32_t lo;
+ /* dw1 */
+ uint32_t hi;
+};
+
+
+
+/* PCICFG_INTR_CTRL register; hostintr corresponds to HOSTINTR_MASK */
+typedef union pcicfg_intr_ctl_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t winselect:2;
+ uint32_t hostintr:1;
+ uint32_t pfnum:3;
+ uint32_t vf_cev_int_line_en:1;
+ uint32_t winaddr:23;
+ uint32_t membarwinen:1;
+#else
+ uint32_t membarwinen:1;
+ uint32_t winaddr:23;
+ uint32_t vf_cev_int_line_en:1;
+ uint32_t pfnum:3;
+ uint32_t hostintr:1;
+ uint32_t winselect:2;
+#endif
+ } bits;
+} pcicfg_intr_ctl_t;
+
+
+
+
+/* PCICFG_SEMAPHORE register layout */
+typedef union pcicfg_semaphore_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t rsvd:31;
+ uint32_t lock:1;
+#else
+ uint32_t lock:1;
+ uint32_t rsvd:31;
+#endif
+ } bits;
+} pcicfg_semaphore_t;
+
+
+
+
+/* PCICFG_SOFT_RESET register layout */
+typedef union pcicfg_soft_reset_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t nec_ll_rcvdetect:8;
+ uint32_t dbg_all_reqs_62_49:14;
+ uint32_t scratchpad0:1;
+ uint32_t exception_oe:1;
+ uint32_t soft_reset:1;
+ uint32_t rsvd0:7;
+#else
+ uint32_t rsvd0:7;
+ uint32_t soft_reset:1;
+ uint32_t exception_oe:1;
+ uint32_t scratchpad0:1;
+ uint32_t dbg_all_reqs_62_49:14;
+ uint32_t nec_ll_rcvdetect:8;
+#endif
+ } bits;
+} pcicfg_soft_reset_t;
+
+
+
+
+/* PCICFG_ONLINE1 register: per-hardware-block online status bits */
+typedef union pcicfg_online1_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t host8_online:1;
+ uint32_t host7_online:1;
+ uint32_t host6_online:1;
+ uint32_t host5_online:1;
+ uint32_t host4_online:1;
+ uint32_t host3_online:1;
+ uint32_t host2_online:1;
+ uint32_t ipc_online:1;
+ uint32_t arm_online:1;
+ uint32_t txp_online:1;
+ uint32_t xaui_online:1;
+ uint32_t rxpp_online:1;
+ uint32_t txpb_online:1;
+ uint32_t rr_online:1;
+ uint32_t pmem_online:1;
+ uint32_t pctl1_online:1;
+ uint32_t pctl0_online:1;
+ uint32_t pcs1online_online:1;
+ uint32_t mpu_iram_online:1;
+ uint32_t pcs0online_online:1;
+ uint32_t mgmt_mac_online:1;
+ uint32_t lpcmemhost_online:1;
+#else
+ uint32_t lpcmemhost_online:1;
+ uint32_t mgmt_mac_online:1;
+ uint32_t pcs0online_online:1;
+ uint32_t mpu_iram_online:1;
+ uint32_t pcs1online_online:1;
+ uint32_t pctl0_online:1;
+ uint32_t pctl1_online:1;
+ uint32_t pmem_online:1;
+ uint32_t rr_online:1;
+ uint32_t txpb_online:1;
+ uint32_t rxpp_online:1;
+ uint32_t xaui_online:1;
+ uint32_t txp_online:1;
+ uint32_t arm_online:1;
+ uint32_t ipc_online:1;
+ uint32_t host2_online:1;
+ uint32_t host3_online:1;
+ uint32_t host4_online:1;
+ uint32_t host5_online:1;
+ uint32_t host6_online:1;
+ uint32_t host7_online:1;
+ uint32_t host8_online:1;
+#endif
+ } bits;
+} pcicfg_online1_t;
+
+
+
+/*
+ * MPU_EP_SEMAPHORE CSR: firmware POST stage and error status,
+ * polled by oce_POST().
+ */
+typedef union mpu_ep_semaphore_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t error:1;
+ uint32_t backup_fw:1;
+ uint32_t iscsi_no_ip:1;
+ uint32_t iscsi_ip_conflict:1;
+ uint32_t option_rom_installed:1;
+ uint32_t iscsi_drv_loaded:1;
+ uint32_t rsvd0:10;
+ uint32_t stage:16;
+#else
+ uint32_t stage:16;
+ uint32_t rsvd0:10;
+ uint32_t iscsi_drv_loaded:1;
+ uint32_t option_rom_installed:1;
+ uint32_t iscsi_ip_conflict:1;
+ uint32_t iscsi_no_ip:1;
+ uint32_t backup_fw:1;
+ uint32_t error:1;
+#endif
+ } bits;
+} mpu_ep_semaphore_t;
+
+
+
+
+/*
+ * MPU_EP_CONTROL CSR: setting cpu_reset triggers a chip soft reset
+ * (see oce_pci_soft_reset()).
+ */
+typedef union mpu_ep_control_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t cpu_reset:1;
+ uint32_t rsvd1:15;
+ uint32_t ep_ram_init_status:1;
+ uint32_t rsvd0:12;
+ uint32_t m2_rxpbuf:1;
+ uint32_t m1_rxpbuf:1;
+ uint32_t m0_rxpbuf:1;
+#else
+ uint32_t m0_rxpbuf:1;
+ uint32_t m1_rxpbuf:1;
+ uint32_t m2_rxpbuf:1;
+ uint32_t rsvd0:12;
+ uint32_t ep_ram_init_status:1;
+ uint32_t rsvd1:15;
+ uint32_t cpu_reset:1;
+#endif
+ } bits;
+} mpu_ep_control_t;
+
+
+
+
+/* RX doorbell (written at offset PD_RXULP_DB) */
+typedef union pd_rxulp_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t num_posted:8;
+ uint32_t invalidate:1;
+ uint32_t rsvd1:13;
+ uint32_t qid:10;
+#else
+ uint32_t qid:10;
+ uint32_t rsvd1:13;
+ uint32_t invalidate:1;
+ uint32_t num_posted:8;
+#endif
+ } bits;
+} pd_rxulp_db_t;
+
+
+/* TX doorbell (written at offset PD_TXULP_DB) */
+typedef union pd_txulp_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t rsvd1:2;
+ uint32_t num_posted:14;
+ uint32_t rsvd0:6;
+ uint32_t qid:10;
+#else
+ uint32_t qid:10;
+ uint32_t rsvd0:6;
+ uint32_t num_posted:14;
+ uint32_t rsvd1:2;
+#endif
+ } bits;
+} pd_txulp_db_t;
+
+/* CQ doorbell (written at offset PD_CQ_DB) */
+typedef union cq_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t rsvd1:2;
+ uint32_t rearm:1;
+ uint32_t num_popped:13;
+ uint32_t rsvd0:5;
+ uint32_t event:1;
+ uint32_t qid:10;
+#else
+ uint32_t qid:10;
+ uint32_t event:1;
+ uint32_t rsvd0:5;
+ uint32_t num_popped:13;
+ uint32_t rearm:1;
+ uint32_t rsvd1:2;
+#endif
+ } bits;
+} cq_db_t;
+
+/* EQ doorbell (written at offset PD_EQ_DB, aliased to PD_CQ_DB) */
+typedef union eq_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t rsvd1:2;
+ uint32_t rearm:1;
+ uint32_t num_popped:13;
+ uint32_t rsvd0:5;
+ uint32_t event:1;
+ uint32_t clrint:1;
+ uint32_t qid:9;
+#else
+ uint32_t qid:9;
+ uint32_t clrint:1;
+ uint32_t event:1;
+ uint32_t rsvd0:5;
+ uint32_t num_popped:13;
+ uint32_t rearm:1;
+ uint32_t rsvd1:2;
+#endif
+ } bits;
+} eq_db_t;
+
+/* bootstrap mbox doorbell (written at offset PD_MPU_MBOX_DB) */
+typedef union pd_mpu_mbox_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t address:30;
+ uint32_t hi:1;
+ uint32_t ready:1;
+#else
+ uint32_t ready:1;
+ uint32_t hi:1;
+ uint32_t address:30;
+#endif
+ } bits;
+} pd_mpu_mbox_db_t;
+
+/* MQ ring doorbell (written at offset PD_MQ_DB) */
+typedef union pd_mq_db_u {
+ uint32_t dw0;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t rsvd1:2;
+ uint32_t num_posted:14;
+ uint32_t rsvd0:5;
+ uint32_t mq_id:11;
+#else
+ uint32_t mq_id:11;
+ uint32_t rsvd0:5;
+ uint32_t num_posted:14;
+ uint32_t rsvd1:2;
+#endif
+ } bits;
+} pd_mq_db_t;
+
+/*
+ * Event Queue Entry
+ */
+struct oce_eqe {
+ uint32_t evnt;
+};
+
+/* MQ scatter gather entry. Array of these make an SGL */
+struct oce_mq_sge {
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+ uint32_t length;
+};
+
+/*
+ * payload can contain an SGL or an embedded array of up to 59 dwords
+ */
+struct oce_mbx_payload {
+ union {
+ union {
+ struct oce_mq_sge sgl[MAX_MBX_SGE];
+ uint32_t embedded[59];
+ } u1;
+ uint32_t dw[59];
+ } u0;
+};
+
+/*
+ * MQ MBX structure: one mailbox command (header + payload)
+ */
+struct oce_mbx {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t special:8;
+ uint32_t rsvd1:16;
+ uint32_t sge_count:5;
+ uint32_t rsvd0:2;
+ uint32_t embedded:1;
+#else
+ uint32_t embedded:1;
+ uint32_t rsvd0:2;
+ uint32_t sge_count:5;
+ uint32_t rsvd1:16;
+ uint32_t special:8;
+#endif
+ } s;
+ uint32_t dw0;
+ } u0;
+
+ uint32_t payload_length;
+ uint32_t tag[2];
+ uint32_t rsvd2[1];
+ struct oce_mbx_payload payload;
+};
+
+/* completion queue entry for MQ */
+struct oce_mq_cqe {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw0 */
+ uint32_t extended_status:16;
+ uint32_t completion_status:16;
+ /* dw1 dw2 */
+ uint32_t mq_tag[2];
+ /* dw3 */
+ uint32_t valid:1;
+ uint32_t async_event:1;
+ uint32_t hpi_buffer_cmpl:1;
+ uint32_t completed:1;
+ uint32_t consumed:1;
+ uint32_t rsvd0:27;
+#else
+ /* dw0 */
+ uint32_t completion_status:16;
+ uint32_t extended_status:16;
+ /* dw1 dw2 */
+ uint32_t mq_tag[2];
+ /* dw3 */
+ uint32_t rsvd0:27;
+ uint32_t consumed:1;
+ uint32_t completed:1;
+ uint32_t hpi_buffer_cmpl:1;
+ uint32_t async_event:1;
+ uint32_t valid:1;
+#endif
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+
+/* Mailbox Completion Status Codes */
+enum MBX_COMPLETION_STATUS {
+ MBX_CQE_STATUS_SUCCESS = 0x00,
+ /* note: "PRIVILEDGES" misspelling is part of the established API */
+ MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 0x01,
+ MBX_CQE_STATUS_INVALID_PARAMETER = 0x02,
+ MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 0x03,
+ MBX_CQE_STATUS_QUEUE_FLUSHING = 0x04,
+ MBX_CQE_STATUS_DMA_FAILED = 0x05
+};
+
+/* asynchronous link-state event CQE (ASYNC_EVENT_CODE_LINK_STATE) */
+struct oce_async_cqe_link_state {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw0 */
+ uint8_t speed;
+ uint8_t duplex;
+ uint8_t link_status;
+ uint8_t phy_port;
+ /* dw1 */
+ uint16_t qos_link_speed;
+ uint8_t rsvd0;
+ uint8_t fault;
+ /* dw2 */
+ uint32_t event_tag;
+ /* dw3 */
+ uint32_t valid:1;
+ uint32_t async_event:1;
+ uint32_t rsvd2:6;
+ uint32_t event_type:8;
+ uint32_t event_code:8;
+ uint32_t rsvd1:8;
+#else
+ /* dw0 */
+ uint8_t phy_port;
+ uint8_t link_status;
+ uint8_t duplex;
+ uint8_t speed;
+ /* dw1 */
+ uint8_t fault;
+ uint8_t rsvd0;
+ uint16_t qos_link_speed;
+ /* dw2 */
+ uint32_t event_tag;
+ /* dw3 */
+ uint32_t rsvd1:8;
+ uint32_t event_code:8;
+ uint32_t event_type:8;
+ uint32_t rsvd2:6;
+ uint32_t async_event:1;
+ uint32_t valid:1;
+#endif
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+
+/* bootstrap mailbox: one MBX command followed by its completion entry */
+struct oce_bmbx {
+ struct oce_mbx mbx;
+ struct oce_mq_cqe cqe;
+};
+
+/* ---[ MBXs start here ]---------------------------------------------- */
+/* MBXs sub system codes */
+enum MBX_SUBSYSTEM_CODES {
+	MBX_SUBSYSTEM_RSVD = 0,
+	MBX_SUBSYSTEM_COMMON = 1,
+	MBX_SUBSYSTEM_COMMON_ISCSI = 2,
+	MBX_SUBSYSTEM_NIC = 3,
+	MBX_SUBSYSTEM_TOE = 4,
+	MBX_SUBSYSTEM_PXE_UNDI = 5,
+	MBX_SUBSYSTEM_ISCSI_INI = 6,
+	MBX_SUBSYSTEM_ISCSI_TGT = 7,
+	MBX_SUBSYSTEM_MILI_PTL = 8,
+	MBX_SUBSYSTEM_MILI_TMD = 9,
+	MBX_SUBSYSTEM_RDMA = 10,
+	MBX_SUBSYSTEM_LOWLEVEL = 11,
+	MBX_SUBSYSTEM_LRO = 13,
+	IOCBMBX_SUBSYSTEM_DCBX = 15,
+	IOCBMBX_SUBSYSTEM_DIAG = 16,
+	IOCBMBX_SUBSYSTEM_VENDOR = 17
+};
+
+/* common ioctl opcodes */
+enum COMMON_SUBSYSTEM_OPCODES {
+/* These opcodes are common to both networking and storage PCI functions
+ * They are used to reserve resources and configure CNA. These opcodes
+ * all use the MBX_SUBSYSTEM_COMMON subsystem code.
+ */
+	OPCODE_COMMON_QUERY_IFACE_MAC = 1,
+	OPCODE_COMMON_SET_IFACE_MAC = 2,
+	OPCODE_COMMON_SET_IFACE_MULTICAST = 3,
+	OPCODE_COMMON_CONFIG_IFACE_VLAN = 4,
+	OPCODE_COMMON_QUERY_LINK_CONFIG = 5,
+	OPCODE_COMMON_READ_FLASHROM = 6,
+	OPCODE_COMMON_WRITE_FLASHROM = 7,
+	OPCODE_COMMON_QUERY_MAX_MBX_BUFFER_SIZE = 8,
+	OPCODE_COMMON_CREATE_CQ = 12,
+	OPCODE_COMMON_CREATE_EQ = 13,
+	OPCODE_COMMON_CREATE_MQ = 21,
+	OPCODE_COMMON_GET_QOS = 27,
+	OPCODE_COMMON_SET_QOS = 28,
+	OPCODE_COMMON_READ_EPROM = 30,
+	OPCODE_COMMON_GET_CNTL_ATTRIBUTES = 32,
+	OPCODE_COMMON_NOP = 33,
+	OPCODE_COMMON_SET_IFACE_RX_FILTER = 34,
+	OPCODE_COMMON_GET_FW_VERSION = 35,
+	OPCODE_COMMON_SET_FLOW_CONTROL = 36,
+	OPCODE_COMMON_GET_FLOW_CONTROL = 37,
+	OPCODE_COMMON_SET_FRAME_SIZE = 39,
+	OPCODE_COMMON_MODIFY_EQ_DELAY = 41,
+	OPCODE_COMMON_CREATE_IFACE = 50,
+	OPCODE_COMMON_DESTROY_IFACE = 51,
+	OPCODE_COMMON_MODIFY_MSI_MESSAGES = 52,
+	OPCODE_COMMON_DESTROY_MQ = 53,
+	OPCODE_COMMON_DESTROY_CQ = 54,
+	OPCODE_COMMON_DESTROY_EQ = 55,
+	OPCODE_COMMON_UPLOAD_TCP = 56,
+	OPCODE_COMMON_SET_NTWK_LINK_SPEED = 57,
+	OPCODE_COMMON_QUERY_FIRMWARE_CONFIG = 58,
+	OPCODE_COMMON_ADD_IFACE_MAC = 59,
+	OPCODE_COMMON_DEL_IFACE_MAC = 60,
+	OPCODE_COMMON_FUNCTION_RESET = 61,
+	OPCODE_COMMON_SET_PHYSICAL_LINK_CONFIG = 62,
+	OPCODE_COMMON_GET_BOOT_CONFIG = 66,
+	OPCPDE_COMMON_SET_BOOT_CONFIG = 67,	/* sic: typo for "OPCODE", kept for source compat */
+	OPCODE_COMMON_SET_BEACON_CONFIG = 69,
+	OPCODE_COMMON_GET_BEACON_CONFIG = 70,
+	OPCODE_COMMON_GET_PHYSICAL_LINK_CONFIG = 71,
+	OPCODE_COMMON_GET_OEM_ATTRIBUTES = 76,
+	OPCODE_COMMON_GET_PORT_NAME = 77,
+	OPCODE_COMMON_GET_CONFIG_SIGNATURE = 78,
+	OPCODE_COMMON_SET_CONFIG_SIGNATURE = 79,
+	OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG = 80,
+	OPCODE_COMMON_GET_BE_CONFIGURATION_RESOURCES = 81,
+	OPCODE_COMMON_SET_BE_CONFIGURATION_RESOURCES = 82,
+	OPCODE_COMMON_GET_RESET_NEEDED = 84,
+	OPCODE_COMMON_GET_SERIAL_NUMBER = 85,
+	OPCODE_COMMON_GET_NCSI_CONFIG = 86,
+	OPCODE_COMMON_SET_NCSI_CONFIG = 87,
+	OPCODE_COMMON_CREATE_MQ_EXT = 90,
+	OPCODE_COMMON_SET_FUNCTION_PRIVILEGES = 100,
+	OPCODE_COMMON_SET_VF_PORT_TYPE = 101,
+	OPCODE_COMMON_GET_PHY_CONFIG = 102,
+	OPCODE_COMMON_SET_FUNCTIONAL_CAPS = 103,
+	OPCODE_COMMON_GET_ADAPTER_ID = 110,
+	OPCODE_COMMON_GET_UPGRADE_FEATURES = 111,
+	OPCODE_COMMON_GET_INSTALLED_FEATURES = 112,
+	OPCODE_COMMON_GET_AVAIL_PERSONALITIES = 113,
+	OPCODE_COMMON_GET_CONFIG_PERSONALITIES = 114,
+	OPCODE_COMMON_SEND_ACTIVATION = 115,
+	OPCODE_COMMON_RESET_LICENSES = 116,
+	OPCODE_COMMON_GET_CNTL_ADDL_ATTRIBUTES = 121,
+	OPCODE_COMMON_QUERY_TCB = 144,
+	OPCODE_COMMON_ADD_IFACE_QUEUE_FILTER = 145,
+	OPCODE_COMMON_DEL_IFACE_QUEUE_FILTER = 146,
+	OPCODE_COMMON_GET_IFACE_MAC_LIST = 147,
+	OPCODE_COMMON_SET_IFACE_MAC_LIST = 148,
+	OPCODE_COMMON_MODIFY_CQ = 149,
+	OPCODE_COMMON_GET_IFACE_VLAN_LIST = 150,
+	OPCODE_COMMON_SET_IFACE_VLAN_LIST = 151,
+	OPCODE_COMMON_GET_HSW_CONFIG = 152,
+	OPCODE_COMMON_SET_HSW_CONFIG = 153,
+	OPCODE_COMMON_GET_RESOURCE_EXTENT_INFO = 154,
+	OPCODE_COMMON_GET_ALLOCATED_RESOURCE_EXTENTS = 155,
+	OPCODE_COMMON_ALLOC_RESOURCE_EXTENTS = 156,
+	OPCODE_COMMON_DEALLOC_RESOURCE_EXTENTS = 157,
+	OPCODE_COMMON_SET_DIAG_REGISTERS = 158,
+	OPCODE_COMMON_GET_FUNCTION_CONFIG = 160,
+	OPCODE_COMMON_GET_PROFILE_CAPACITIES = 161,
+	OPCODE_COMMON_GET_MR_PROFILE_CAPACITIES = 162,
+	OPCODE_COMMON_SET_MR_PROFILE_CAPACITIES = 163,
+	OPCODE_COMMON_GET_PROFILE_CONFIG = 164,
+	OPCODE_COMMON_SET_PROFILE_CONFIG = 165,
+	OPCODE_COMMON_GET_PROFILE_LIST = 166,
+	OPCODE_COMMON_GET_ACTIVE_PROFILE = 167,
+	OPCODE_COMMON_SET_ACTIVE_PROFILE = 168,
+	OPCODE_COMMON_GET_FUNCTION_PRIVILEGES = 170,
+	OPCODE_COMMON_READ_OBJECT = 171,
+	OPCODE_COMMON_WRITE_OBJECT = 172
+};
+
+/* common ioctl header, shared by every mailbox command (request and response) */
+#define OCE_MBX_VER_V2	0x0002		/* Version V2 mailbox command */
+#define OCE_MBX_VER_V1	0x0001		/* Version V1 mailbox command */
+#define OCE_MBX_VER_V0	0x0000		/* Version V0 mailbox command */
+struct mbx_hdr {
+	union {
+		uint32_t dw[4];
+		struct {
+	#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint32_t domain:8;
+			uint32_t port_number:8;
+			uint32_t subsystem:8;
+			uint32_t opcode:8;
+			/* dw 1 */
+			uint32_t timeout;
+			/* dw 2 */
+			uint32_t request_length;
+			/* dw 3 */
+			uint32_t rsvd0:24;
+			uint32_t version:8;
+	#else
+			/* dw 0 */
+			uint32_t opcode:8;
+			uint32_t subsystem:8;
+			uint32_t port_number:8;
+			uint32_t domain:8;
+			/* dw 1 */
+			uint32_t timeout;
+			/* dw 2 */
+			uint32_t request_length;
+			/* dw 3 */
+			uint32_t version:8;
+			uint32_t rsvd0:24;
+	#endif
+		} req;
+		struct {
+	#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint32_t domain:8;
+			uint32_t rsvd0:8;
+			uint32_t subsystem:8;
+			uint32_t opcode:8;
+			/* dw 1 */
+			uint32_t rsvd1:16;
+			uint32_t additional_status:8;
+			uint32_t status:8;
+	#else
+			/* dw 0 */
+			uint32_t opcode:8;
+			uint32_t subsystem:8;
+			uint32_t rsvd0:8;
+			uint32_t domain:8;
+			/* dw 1 */
+			uint32_t status:8;
+			uint32_t additional_status:8;
+			uint32_t rsvd1:16;
+	#endif
+		uint32_t rsp_length;
+		uint32_t actual_rsp_length;
+		} rsp;
+	} u0;
+};
+#define	OCE_BMBX_RHDR_SZ 20
+#define	OCE_MBX_RRHDR_SZ sizeof (struct mbx_hdr)
+#define	OCE_MBX_ADDL_STATUS(_MHDR) ((_MHDR)->u0.rsp.additional_status)
+#define	OCE_MBX_STATUS(_MHDR) ((_MHDR)->u0.rsp.status)
+
+/* [05] OPCODE_COMMON_QUERY_LINK_CONFIG */
+struct mbx_query_common_link_config {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0;
+		} req;
+
+		struct {
+			/* dw 0 */
+			uint8_t physical_port;
+			uint8_t mac_duplex;
+			uint8_t mac_speed;
+			uint8_t mac_fault;
+			/* dw 1 */
+			uint8_t mgmt_mac_duplex;
+			uint8_t mgmt_mac_speed;
+			uint16_t qos_link_speed;
+			uint32_t logical_link_status;
+		} rsp;
+	} params;
+};
+
+/* [57] OPCODE_COMMON_SET_NTWK_LINK_SPEED */
+struct mbx_set_common_link_speed {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint8_t rsvd0;
+			uint8_t mac_speed;
+			uint8_t virtual_port;
+			uint8_t physical_port;
+#else
+			uint8_t physical_port;
+			uint8_t virtual_port;
+			uint8_t mac_speed;
+			uint8_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+
+		uint32_t dw;
+	} params;
+};
+
+/* length-prefixed MAC address as the firmware returns it */
+struct mac_address_format {
+	uint16_t size_of_struct;
+	uint8_t mac_addr[6];
+};
+
+/* [01] OPCODE_COMMON_QUERY_IFACE_MAC */
+struct mbx_query_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t if_id;
+			uint8_t permanent;
+			uint8_t type;
+#else
+			uint8_t type;
+			uint8_t permanent;
+			uint16_t if_id;
+#endif
+
+		} req;
+
+		struct {
+			struct mac_address_format mac;
+		} rsp;
+	} params;
+};
+
+/* [02] OPCODE_COMMON_SET_IFACE_MAC */
+struct mbx_set_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint16_t if_id;
+			uint8_t invalidate;
+			uint8_t type;
+#else
+			/* dw 0 */
+			uint8_t type;
+			uint8_t invalidate;
+			uint16_t if_id;
+#endif
+			/* dw 1 */
+			struct mac_address_format mac;
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+
+		uint32_t dw[2];
+	} params;
+};
+
+/* [03] OPCODE_COMMON_SET_IFACE_MULTICAST */
+struct mbx_set_common_iface_multicast {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			/* dw 0 */
+			/* NOTE(review): dw0 has no _BIG_ENDIAN variant,
+			 * unlike the sibling MBXs above -- confirm intended */
+			uint16_t num_mac;
+			uint8_t promiscuous;
+			uint8_t if_id;
+			/* dw 1-48 */
+			struct {
+				uint8_t byte[6];
+			} mac[32];
+
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+
+		uint32_t dw[49];
+	} params;
+};
+
+/* QinQ (double) VLAN tag pair */
+struct qinq_vlan {
+#ifdef _BIG_ENDIAN
+	uint16_t inner;
+	uint16_t outer;
+#else
+	uint16_t outer;
+	uint16_t inner;
+#endif
+};
+
+/* single 802.1Q VLAN tag */
+struct normal_vlan {
+	uint16_t vtag;
+};
+
+struct ntwk_if_vlan_tag {
+	union {
+		struct normal_vlan normal;
+		struct qinq_vlan qinq;
+	} u0;
+};
+
+/* [50] OPCODE_COMMON_CREATE_IFACE */
+struct mbx_create_common_iface {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t version;
+			uint32_t cap_flags;
+			uint32_t enable_flags;
+			uint8_t mac_addr[6];
+			uint8_t rsvd0;
+			uint8_t mac_invalid;
+			struct ntwk_if_vlan_tag vlan_tag;
+		} req;
+
+		struct {
+			uint32_t if_id;
+			uint32_t pmac_id;
+		} rsp;
+		uint32_t dw[4];
+	} params;
+};
+
+/* [51] OPCODE_COMMON_DESTROY_IFACE */
+struct mbx_destroy_common_iface {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t if_id;
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+
+		uint32_t dw;
+	} params;
+};
+
+/* event queue context structure (dw4..dw8 of the create request) */
+struct oce_eq_ctx {
+#ifdef _BIG_ENDIAN
+	uint32_t dw4rsvd1:16;
+	uint32_t num_pages:16;
+
+	uint32_t size:1;
+	uint32_t dw5rsvd2:1;
+	uint32_t valid:1;
+	uint32_t dw5rsvd1:29;
+
+	uint32_t armed:1;
+	uint32_t dw6rsvd2:2;
+	uint32_t count:3;
+	uint32_t dw6rsvd1:26;
+
+	uint32_t dw7rsvd2:9;
+	uint32_t delay_mult:10;
+	uint32_t dw7rsvd1:13;
+
+	uint32_t dw8rsvd1;
+#else
+	uint32_t num_pages:16;
+	uint32_t dw4rsvd1:16;
+
+	uint32_t dw5rsvd1:29;
+	uint32_t valid:1;
+	uint32_t dw5rsvd2:1;
+	uint32_t size:1;
+
+	uint32_t dw6rsvd1:26;
+	uint32_t count:3;
+	uint32_t dw6rsvd2:2;
+	uint32_t armed:1;
+
+	uint32_t dw7rsvd1:13;
+	uint32_t delay_mult:10;
+	uint32_t dw7rsvd2:9;
+
+	uint32_t dw8rsvd1;
+#endif
+};
+
+/* [13] OPCODE_COMMON_CREATE_EQ */
+struct mbx_create_common_eq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			struct oce_eq_ctx ctx;
+			struct phys_addr pages[8];
+		} req;
+
+		struct {
+			uint16_t eq_id;
+			uint16_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [55] OPCODE_COMMON_DESTROY_EQ */
+struct mbx_destroy_common_eq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t rsvd0;
+			uint16_t id;
+#else
+			uint16_t id;
+			uint16_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* SLI-4 CQ context - use version V0 for B3, version V2 for Lancer */
+typedef union oce_cq_ctx_u {
+	uint32_t dw[5];
+	struct {
+	#ifdef _BIG_ENDIAN
+		/* dw4 */
+		uint32_t dw4rsvd1:16;
+		uint32_t num_pages:16;
+		/* dw5 */
+		uint32_t eventable:1;
+		uint32_t dw5rsvd3:1;
+		uint32_t valid:1;
+		uint32_t count:2;
+		uint32_t dw5rsvd2:12;
+		uint32_t nodelay:1;
+		uint32_t coalesce_wm:2;
+		uint32_t dw5rsvd1:12;
+		/* dw6 */
+		uint32_t armed:1;
+		uint32_t dw6rsvd2:1;
+		uint32_t eq_id:8;
+		uint32_t dw6rsvd1:22;
+	#else
+		/* dw4 */
+		uint32_t num_pages:16;
+		uint32_t dw4rsvd1:16;
+		/* dw5 */
+		uint32_t dw5rsvd1:12;
+		uint32_t coalesce_wm:2;
+		uint32_t nodelay:1;
+		uint32_t dw5rsvd2:12;
+		uint32_t count:2;
+		uint32_t valid:1;
+		uint32_t dw5rsvd3:1;
+		uint32_t eventable:1;
+		/* dw6 */
+		uint32_t dw6rsvd1:22;
+		uint32_t eq_id:8;
+		uint32_t dw6rsvd2:1;
+		uint32_t armed:1;
+	#endif
+		/* dw7 */
+		uint32_t dw7rsvd1;
+		/* dw8 */
+		uint32_t dw8rsvd1;
+	} v0;
+	struct {
+	#ifdef _BIG_ENDIAN
+		/* dw4 */
+		uint32_t dw4rsvd1:8;
+		uint32_t page_size:8;
+		uint32_t num_pages:16;
+		/* dw5 */
+		uint32_t eventable:1;
+		uint32_t dw5rsvd3:1;
+		uint32_t valid:1;
+		uint32_t count:2;
+		uint32_t dw5rsvd2:11;
+		uint32_t autovalid:1;
+		uint32_t nodelay:1;
+		uint32_t coalesce_wm:2;
+		uint32_t dw5rsvd1:12;
+		/* dw6 */
+		uint32_t armed:1;
+		uint32_t dw6rsvd1:15;
+		uint32_t eq_id:16;
+		/* dw7 */
+		uint32_t dw7rsvd1:16;
+		uint32_t cqe_count:16;
+	#else
+		/* dw4 */
+		uint32_t num_pages:16;
+		uint32_t page_size:8;
+		uint32_t dw4rsvd1:8;
+		/* dw5 */
+		uint32_t dw5rsvd1:12;
+		uint32_t coalesce_wm:2;
+		uint32_t nodelay:1;
+		uint32_t autovalid:1;
+		uint32_t dw5rsvd2:11;
+		uint32_t count:2;
+		uint32_t valid:1;
+		uint32_t dw5rsvd3:1;
+		uint32_t eventable:1;
+		/* dw6 */
+		/* eq_id is 16 bits wide in the V2 context (was wrongly
+		 * declared :8, leaving dw6 with only 24 bits and shifting
+		 * the armed bit off position 31 on little-endian hosts;
+		 * must mirror the :16 width of the big-endian branch) */
+		uint32_t eq_id:16;
+		uint32_t dw6rsvd1:15;
+		uint32_t armed:1;
+		/* dw7 */
+		uint32_t cqe_count:16;
+		uint32_t dw7rsvd1:16;
+	#endif
+		/* dw8 */
+		uint32_t dw8rsvd1;
+	} v2;
+} oce_cq_ctx_t;
+
+/* [12] OPCODE_COMMON_CREATE_CQ */
+struct mbx_create_common_cq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			oce_cq_ctx_t cq_ctx;
+			struct phys_addr pages[4];
+		} req;
+
+		struct {
+			uint16_t cq_id;
+			uint16_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [54] OPCODE_COMMON_DESTROY_CQ */
+struct mbx_destroy_common_cq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t rsvd0;
+			uint16_t id;
+#else
+			uint16_t id;
+			uint16_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* MQ context (dw4..dw8 of the create request) */
+typedef union oce_mq_ctx_u {
+	uint32_t dw[5];
+	struct {
+	#ifdef _BIG_ENDIAN
+		/* dw4 */
+		uint32_t dw4rsvd1:16;
+		uint32_t num_pages:16;
+		/* dw5 */
+		uint32_t cq_id:10;
+		uint32_t dw5rsvd2:2;
+		uint32_t ring_size:4;
+		uint32_t dw5rsvd1:16;
+		/* dw6 */
+		uint32_t valid:1;
+		uint32_t dw6rsvd1:31;
+		/* dw7 */
+		uint32_t dw7rsvd1:21;
+		uint32_t async_cq_id:10;
+		uint32_t async_cq_valid:1;
+	#else
+		/* dw4 */
+		uint32_t num_pages:16;
+		uint32_t dw4rsvd1:16;
+		/* dw5 */
+		uint32_t dw5rsvd1:16;
+		uint32_t ring_size:4;
+		uint32_t dw5rsvd2:2;
+		uint32_t cq_id:10;
+		/* dw6 */
+		uint32_t dw6rsvd1:31;
+		uint32_t valid:1;
+		/* dw7 */
+		uint32_t async_cq_valid:1;
+		uint32_t async_cq_id:10;
+		uint32_t dw7rsvd1:21;
+	#endif
+		/* dw8 */
+		uint32_t dw8rsvd1;
+	} v0;
+} oce_mq_ctx_t;
+
+/**
+ * @brief [21] OPCODE_COMMON_CREATE_MQ
+ * A MQ must be at least 16 entries deep (corresponding to 1 page) and
+ * at most 128 entries deep (corresponding to 8 pages).
+ */
+struct mbx_create_common_mq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			oce_mq_ctx_t context;
+			struct phys_addr pages[8];
+		} req;
+
+		struct {
+			uint32_t mq_id:16;
+			uint32_t rsvd0:16;
+		} rsp;
+	} params;
+};
+
+/* [53] OPCODE_COMMON_DESTROY_MQ */
+struct mbx_destroy_common_mq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t rsvd0;
+			uint16_t id;
+#else
+			uint16_t id;
+			uint16_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [35] OPCODE_COMMON_GET_FW_VERSION */
+struct mbx_get_common_fw_version {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0;
+		} req;
+
+		struct {
+			uint8_t fw_ver_str[32];
+			uint8_t fw_on_flash_ver_str[32];
+		} rsp;
+	} params;
+};
+
+/* [52] OPCODE_COMMON_MODIFY_MSI_MESSAGES */
+struct mbx_common_cev_modify_msi_messages {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t num_msi_msgs;
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [36] OPCODE_COMMON_SET_FLOW_CONTROL */
+/* [37] OPCODE_COMMON_GET_FLOW_CONTROL */
+struct mbx_common_get_set_flow_control {
+	struct mbx_hdr hdr;
+#ifdef _BIG_ENDIAN
+	uint16_t tx_flow_control;
+	uint16_t rx_flow_control;
+#else
+	uint16_t rx_flow_control;
+	uint16_t tx_flow_control;
+#endif
+};
+
+enum e_flash_opcode {
+	MGMT_FLASHROM_OPCODE_FLASH = 1,
+	MGMT_FLASHROM_OPCODE_SAVE = 2
+};
+
+/* [06] OPCODE_COMMON_READ_FLASHROM */
+/* [07] OPCODE_COMMON_WRITE_FLASHROM */
+
+struct mbx_common_read_write_flashrom {
+	struct mbx_hdr hdr;
+	uint32_t flash_op_code;
+	uint32_t flash_op_type;
+	uint32_t data_buffer_size;
+	uint32_t data_offset;
+	uint8_t data_buffer[4];	/* + IMAGE_TRANSFER_SIZE */
+};
+
+struct oce_phy_info {
+	uint16_t phy_type;
+	uint16_t interface_type;
+	uint32_t misc_params;
+	uint16_t ext_phy_details;
+	uint16_t rsvd;
+	uint16_t auto_speeds_supported;
+	uint16_t fixed_speeds_supported;
+	uint32_t future_use[2];
+};
+
+/* [102] OPCODE_COMMON_GET_PHY_CONFIG */
+struct mbx_common_phy_info {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0[4];
+		} req;
+		struct {
+			struct oce_phy_info phy_info;
+		} rsp;
+	} params;
+};
+
+/* Lancer firmware */
+
+/* [172] OPCODE_COMMON_WRITE_OBJECT (Lancer flash programming) */
+struct mbx_lancer_common_write_object {
+	union {
+		struct {
+			struct mbx_hdr hdr;
+			uint32_t write_length: 24;
+			uint32_t rsvd: 7;
+			uint32_t eof: 1;
+			uint32_t write_offset;
+			uint8_t object_name[104];
+			uint32_t descriptor_count;
+			uint32_t buffer_length;
+			uint32_t address_lower;
+			uint32_t address_upper;
+		} req;
+		struct {
+			uint8_t opcode;
+			uint8_t subsystem;
+			uint8_t rsvd1[2];
+			uint8_t status;
+			uint8_t additional_status;
+			uint8_t rsvd2[2];
+			uint32_t response_length;
+			uint32_t actual_response_length;
+			uint32_t actual_write_length;
+		} rsp;
+	} params;
+};
+
+/**
+ * @brief MBX Common Query Firmware Config
+ * This command retrieves firmware configuration parameters and adapter
+ * resources available to the driver originating the request. The firmware
+ * configuration defines supported protocols by the installed adapter firmware.
+ * This includes which ULP processors support the specified protocols and
+ * the number of TCP connections allowed for that protocol.
+ */
+struct mbx_common_query_fw_config {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0[30];
+		} req;
+
+		struct {
+			uint32_t config_number;
+			uint32_t asic_revision;
+			uint32_t port_id;	/* used for stats retrieval */
+			uint32_t function_mode;
+			struct {
+
+				uint32_t ulp_mode;
+				uint32_t nic_wqid_base;
+				uint32_t nic_wq_tot;
+				uint32_t toe_wqid_base;
+				uint32_t toe_wq_tot;
+				uint32_t toe_rqid_base;
+				uint32_t toe_rqid_tot;
+				uint32_t toe_defrqid_base;
+				uint32_t toe_defrqid_count;
+				uint32_t lro_rqid_base;
+				uint32_t lro_rqid_tot;
+				uint32_t iscsi_icd_base;
+				uint32_t iscsi_icd_count;
+			} ulp[2];
+			uint32_t function_caps;
+			uint32_t cqid_base;
+			uint32_t cqid_tot;
+			uint32_t eqid_base;
+			uint32_t eqid_tot;
+		} rsp;
+	} params;
+};
+
+enum CQFW_CONFIG_NUMBER {
+	FCN_NIC_ISCSI_Initiator = 0x0,
+	FCN_ISCSI_Target = 0x3,
+	FCN_FCoE = 0x7,
+	FCN_ISCSI_Initiator_Target = 0x9,
+	FCN_NIC_RDMA_TOE = 0xA,
+	FCN_NIC_RDMA_FCoE = 0xB,
+	FCN_NIC_RDMA_iSCSI = 0xC,
+	FCN_NIC_iSCSI_FCoE = 0xD
+};
+
+/**
+ * @brief Function Capabilities
+ * This field contains the flags indicating the capabilities of
+ * the SLI Host's PCI function.
+ */
+enum CQFW_FUNCTION_CAPABILITIES {
+	FNC_UNCLASSIFIED_STATS = 0x1,
+	FNC_RSS = 0x2,
+	FNC_PROMISCUOUS = 0x4,
+	FNC_LEGACY_MODE = 0x8,
+	FNC_HDS = 0x4000,
+	FNC_VMQ = 0x10000,
+	FNC_NETQ = 0x20000,
+	FNC_QGROUPS = 0x40000,
+	FNC_LRO = 0x100000,
+	FNC_VLAN_OFFLOAD = 0x800000
+};
+
+enum CQFW_ULP_MODES_SUPPORTED {
+	ULP_TOE_MODE = 0x1,
+	ULP_NIC_MODE = 0x2,
+	ULP_RDMA_MODE = 0x4,
+	ULP_ISCSI_INI_MODE = 0x10,
+	ULP_ISCSI_TGT_MODE = 0x20,
+	ULP_FCOE_INI_MODE = 0x40,
+	ULP_FCOE_TGT_MODE = 0x80,
+	ULP_DAL_MODE = 0x100,
+	ULP_LRO_MODE = 0x200
+};
+
+/**
+ * @brief Function Modes Supported
+ * Valid function modes (or protocol-types) supported on the SLI-Host's
+ * PCIe function. This field is a logical OR of the following values:
+ */
+enum CQFW_FUNCTION_MODES_SUPPORTED {
+	FNM_TOE_MODE = 0x1,		/* TCP offload supported */
+	FNM_NIC_MODE = 0x2,		/* Raw Ethernet supported */
+	FNM_RDMA_MODE = 0x4,		/* RDMA protocol supported */
+	FNM_VM_MODE = 0x8,		/* Virtual Machines supported */
+	FNM_ISCSI_INI_MODE = 0x10,	/* iSCSI initiator supported */
+	FNM_ISCSI_TGT_MODE = 0x20,	/* iSCSI target plus initiator */
+	FNM_FCOE_INI_MODE = 0x40,	/* FCoE Initiator supported */
+	FNM_FCOE_TGT_MODE = 0x80,	/* FCoE target supported */
+	FNM_DAL_MODE = 0x100,		/* DAL supported */
+	FNM_LRO_MODE = 0x200,		/* LRO supported */
+	FNM_FLEX10_MODE = 0x400,	/* QinQ, FLEX-10 or VNIC */
+	FNM_NCSI_MODE = 0x800,		/* NCSI supported */
+	FNM_IPV6_MODE = 0x1000,		/* IPV6 stack enabled */
+	FNM_BE2_COMPAT_MODE = 0x2000,	/* BE2 compatibility (BE3 disable)*/
+	FNM_INVALID_MODE = 0x8000,	/* Invalid */
+	FNM_BE3_COMPAT_MODE = 0x10000,	/* BE3 features */
+	FNM_VNIC_MODE = 0x20000,	/* Set when IBM vNIC mode is set */
+	FNM_VNTAG_MODE = 0x40000, 	/* Set when VNTAG mode is set */
+	FNM_UMC_MODE = 0x80000,		/* Set when UMC mode is set */
+	FNM_UMC_DEF_EN = 0x100000,	/* Set when UMC Default is set */
+	FNM_ONE_GB_EN = 0x200000,	/* Set when 1GB Default is set */
+	FNM_VNIC_DEF_VALID = 0x400000,	/* Set when VNIC_DEF_EN is valid */
+	FNM_VNIC_DEF_EN = 0x800000	/* Set when VNIC Default enabled */
+};
+
+
+/* [04] OPCODE_COMMON_CONFIG_IFACE_VLAN */
+struct mbx_common_config_vlan {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint8_t num_vlans;
+			uint8_t untagged;
+			uint8_t promisc;
+			uint8_t if_id;
+#else
+			uint8_t if_id;
+			uint8_t promisc;
+			uint8_t untagged;
+			uint8_t num_vlans;
+#endif
+			union {
+				struct normal_vlan normal_vlans[64];
+				struct qinq_vlan qinq_vlans[32];
+			} tags;
+		} req;
+
+		struct {
+			uint32_t rsvd;
+		} rsp;
+	} params;
+};
+
+/* RX filter context shared by the set-rx-filter request and response */
+typedef struct iface_rx_filter_ctx {
+	uint32_t global_flags_mask;
+	uint32_t global_flags;
+	uint32_t iface_flags_mask;
+	uint32_t iface_flags;
+	uint32_t if_id;
+	#define IFACE_RX_NUM_MCAST_MAX		64
+	uint32_t num_mcast;
+	struct mbx_mcast_addr {
+		uint8_t byte[6];
+	} mac[IFACE_RX_NUM_MCAST_MAX];
+} iface_rx_filter_ctx_t;
+
+/* [34] OPCODE_COMMON_SET_IFACE_RX_FILTER */
+struct mbx_set_common_iface_rx_filter {
+	struct mbx_hdr hdr;
+	union {
+		iface_rx_filter_ctx_t req;
+		iface_rx_filter_ctx_t rsp;
+	} params;
+};
+
+/* [41] OPCODE_COMMON_MODIFY_EQ_DELAY */
+struct mbx_modify_common_eq_delay {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t num_eq;
+			struct {
+				uint32_t eq_id;
+				uint32_t phase;
+				uint32_t dm;
+			} delay[8];
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [59] OPCODE_COMMON_ADD_IFACE_MAC */
+struct mbx_add_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t if_id;
+			uint8_t mac_address[6];
+			uint8_t rsvd0[2];
+		} req;
+		struct {
+			uint32_t pmac_id;
+		} rsp;
+	} params;
+};
+
+/* [60] OPCODE_COMMON_DEL_IFACE_MAC */
+struct mbx_del_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t if_id;
+			uint32_t pmac_id;
+		} req;
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [8] OPCODE_COMMON_QUERY_MAX_MBX_BUFFER_SIZE */
+struct mbx_query_common_max_mbx_buffer_size {
+	struct mbx_hdr hdr;
+	struct {
+		uint32_t max_ioctl_bufsz;
+	} rsp;
+};
+
+/* [61] OPCODE_COMMON_FUNCTION_RESET */
+struct ioctl_common_function_reset {
+	struct mbx_hdr hdr;
+};
+
+/* [80] OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG */
+struct mbx_common_func_link_cfg {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t enable;
+		} req;
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+};
+
+/* [103] OPCODE_COMMON_SET_FUNCTIONAL_CAPS */
+#define CAP_SW_TIMESTAMPS	2
+#define CAP_BE3_NATIVE_ERX_API	4
+
+struct mbx_common_set_function_cap {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t valid_capability_flags;
+			uint32_t capability_flags;
+			uint8_t sbz[212];
+		} req;
+		struct {
+			uint32_t valid_capability_flags;
+			uint32_t capability_flags;
+			uint8_t sbz[212];
+		} rsp;
+	} params;
+};
+/* [18] OPCODE_LOWLEVEL_TEST_LOOPBACK */
+struct mbx_lowlevel_test_loopback_mode {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t loopback_type;
+			uint32_t num_pkts;
+			uint64_t pattern;
+			uint32_t src_port;
+			uint32_t dest_port;
+			uint32_t pkt_size;
+		}req;
+		struct {
+			uint32_t status;
+			uint32_t num_txfer;
+			uint32_t num_rx;
+			uint32_t miscomp_off;
+			uint32_t ticks_compl;
+		}rsp;
+	} params;
+};
+
+/* [19] OPCODE_LOWLEVEL_SET_LOOPBACK_MODE */
+struct mbx_lowlevel_set_loopback_mode {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint8_t src_port;
+			uint8_t dest_port;
+			uint8_t loopback_type;
+			uint8_t loopback_state;
+		} req;
+		struct {
+			uint8_t rsvd0[4];
+		} rsp;
+	} params;
+};
+
+/* on-disk firmware image (UFI) file header */
+struct flash_file_hdr {
+	uint8_t sign[52];
+	uint8_t ufi_version[4];
+	uint32_t file_len;
+	uint32_t cksum;
+	uint32_t antidote;
+	uint32_t num_imgs;
+	uint8_t build[24];
+	uint8_t rsvd[32];
+};
+
+/* per-image descriptor within the UFI file */
+struct image_hdr {
+	uint32_t imageid;
+	uint32_t imageoffset;
+	uint32_t imagelength;
+	uint32_t image_checksum;
+	uint8_t image_version[32];
+};
+
+struct flash_section_hdr {
+	uint32_t format_rev;
+	uint32_t cksum;
+	uint32_t antidote;
+	uint32_t num_images;
+	uint8_t id_string[128];
+	uint32_t rsvd[4];
+};
+
+struct flash_section_entry {
+	uint32_t type;
+	uint32_t offset;
+	uint32_t pad_size;
+	uint32_t image_size;
+	uint32_t cksum;
+	uint32_t entry_point;
+	uint32_t rsvd0;
+	uint32_t rsvd1;
+	uint8_t ver_data[32];
+};
+
+struct flash_sec_info {
+	uint8_t cookie[32];
+	struct flash_section_hdr fsec_hdr;
+	struct flash_section_entry fsec_entry[32];
+};
+
+
+enum LOWLEVEL_SUBSYSTEM_OPCODES {
+/* Opcodes used for lowlevel functions common to many subsystems.
+ * Some of these opcodes are used for diagnostic functions only.
+ * These opcodes use the MBX_SUBSYSTEM_LOWLEVEL subsystem code.
+ */
+	OPCODE_LOWLEVEL_TEST_LOOPBACK = 18,
+	OPCODE_LOWLEVEL_SET_LOOPBACK_MODE = 19,
+	OPCODE_LOWLEVEL_GET_LOOPBACK_MODE = 20
+};
+
+enum LLDP_SUBSYSTEM_OPCODES {
+/* Opcodes used for LLDP subsystem for configuring the LLDP state machines. */
+	OPCODE_LLDP_GET_CFG = 1,
+	OPCODE_LLDP_SET_CFG = 2,
+	OPCODE_LLDP_GET_STATS = 3
+};
+
+enum DCBX_SUBSYSTEM_OPCODES {
+/* Opcodes used for DCBX. */
+	OPCODE_DCBX_GET_CFG = 1,
+	OPCODE_DCBX_SET_CFG = 2,
+	OPCODE_DCBX_GET_MIB_INFO = 3,
+	OPCODE_DCBX_GET_DCBX_MODE = 4,
+	OPCODE_DCBX_SET_MODE = 5
+};
+
+enum DMTF_SUBSYSTEM_OPCODES {
+/* Opcodes used for DMTF subsystem. */
+	OPCODE_DMTF_EXEC_CLP_CMD = 1
+};
+
+enum DIAG_SUBSYSTEM_OPCODES {
+/* Opcodes used for diag functions common to many subsystems. */
+	OPCODE_DIAG_RUN_DMA_TEST = 1,
+	OPCODE_DIAG_RUN_MDIO_TEST = 2,
+	OPCODE_DIAG_RUN_NLB_TEST = 3,
+	OPCODE_DIAG_RUN_ARM_TIMER_TEST = 4,
+	OPCODE_DIAG_GET_MAC = 5
+};
+
+enum VENDOR_SUBSYSTEM_OPCODES {
+/* Opcodes used for Vendor subsystem. */
+	OPCODE_VENDOR_SLI = 1
+};
+
+/* Management Status Codes (mbx_hdr rsp.status values) */
+enum MGMT_STATUS_SUCCESS {
+	MGMT_SUCCESS = 0,
+	MGMT_FAILED = 1,
+	MGMT_ILLEGAL_REQUEST = 2,
+	MGMT_ILLEGAL_FIELD = 3,
+	MGMT_INSUFFICIENT_BUFFER = 4,
+	MGMT_UNAUTHORIZED_REQUEST = 5,
+	MGMT_INVALID_ISNS_ADDRESS = 10,
+	MGMT_INVALID_IPADDR = 11,
+	MGMT_INVALID_GATEWAY = 12,
+	MGMT_INVALID_SUBNETMASK = 13,
+	MGMT_INVALID_TARGET_IPADDR = 16,
+	MGMT_TGTTBL_FULL = 20,
+	MGMT_FLASHROM_SAVE_FAILED = 23,
+	MGMT_IOCTLHANDLE_ALLOC_FAILED = 27,
+	MGMT_INVALID_SESSION = 31,
+	MGMT_INVALID_CONNECTION = 32,
+	MGMT_BTL_PATH_EXCEEDS_OSM_LIMIT = 33,
+	MGMT_BTL_TGTID_EXCEEDS_OSM_LIMIT = 34,
+	MGMT_BTL_PATH_TGTID_OCCUPIED = 35,
+	MGMT_BTL_NO_FREE_SLOT_PATH = 36,
+	MGMT_BTL_NO_FREE_SLOT_TGTID = 37,
+	MGMT_POLL_IOCTL_TIMEOUT = 40,
+	MGMT_ERROR_ACITISCSI = 41,
+	MGMT_BUFFER_SIZE_EXCEED_OSM_OR_OS_LIMIT = 43,
+	MGMT_REBOOT_REQUIRED = 44,
+	MGMT_INSUFFICIENT_TIMEOUT = 45,
+	MGMT_IPADDR_NOT_SET = 46,
+	MGMT_IPADDR_DUP_DETECTED = 47,
+	MGMT_CANT_REMOVE_LAST_CONNECTION = 48,
+	MGMT_TARGET_BUSY = 49,
+	MGMT_TGT_ERR_LISTEN_SOCKET = 50,
+	MGMT_TGT_ERR_BIND_SOCKET = 51,
+	MGMT_TGT_ERR_NO_SOCKET = 52,
+	MGMT_TGT_ERR_ISNS_COMM_FAILED = 55,
+	MGMT_CANNOT_DELETE_BOOT_TARGET = 56,
+	MGMT_TGT_PORTAL_MODE_IN_LISTEN = 57,
+	MGMT_FCF_IN_USE = 58 ,
+	MGMT_NO_CQE = 59,
+	MGMT_TARGET_NOT_FOUND = 65,
+	MGMT_NOT_SUPPORTED = 66,
+	MGMT_NO_FCF_RECORDS = 67,
+	MGMT_FEATURE_NOT_SUPPORTED = 68,
+	MGMT_VPD_FUNCTION_OUT_OF_RANGE = 69,
+	MGMT_VPD_FUNCTION_TYPE_INCORRECT = 70,
+	MGMT_INVALID_NON_EMBEDDED_WRB = 71,
+	MGMT_OOR = 100,
+	MGMT_INVALID_PD = 101,
+	MGMT_STATUS_PD_INUSE = 102,
+	MGMT_INVALID_CQ = 103,
+	MGMT_INVALID_QP = 104,
+	MGMT_INVALID_STAG = 105,
+	MGMT_ORD_EXCEEDS = 106,
+	MGMT_IRD_EXCEEDS = 107,
+	MGMT_SENDQ_WQE_EXCEEDS = 108,
+	MGMT_RECVQ_RQE_EXCEEDS = 109,
+	MGMT_SGE_SEND_EXCEEDS = 110,
+	MGMT_SGE_WRITE_EXCEEDS = 111,
+	MGMT_SGE_RECV_EXCEEDS = 112,
+	MGMT_INVALID_STATE_CHANGE = 113,
+	MGMT_MW_BOUND = 114,
+	MGMT_INVALID_VA = 115,
+	MGMT_INVALID_LENGTH = 116,
+	MGMT_INVALID_FBO = 117,
+	MGMT_INVALID_ACC_RIGHTS = 118,
+	MGMT_INVALID_PBE_SIZE = 119,
+	MGMT_INVALID_PBL_ENTRY = 120,
+	MGMT_INVALID_PBL_OFFSET = 121,
+	MGMT_ADDR_NON_EXIST = 122,
+	MGMT_INVALID_VLANID = 123,
+	MGMT_INVALID_MTU = 124,
+	MGMT_INVALID_BACKLOG = 125,
+	MGMT_CONNECTION_INPROGRESS = 126,
+	MGMT_INVALID_RQE_SIZE = 127,
+	MGMT_INVALID_RQE_ENTRY = 128
+};
+
+/* Additional Management Status Codes (mbx_hdr rsp.additional_status values) */
+enum MGMT_ADDI_STATUS {
+	MGMT_ADDI_NO_STATUS = 0,
+	MGMT_ADDI_INVALID_IPTYPE = 1,
+	MGMT_ADDI_TARGET_HANDLE_NOT_FOUND = 9,
+	MGMT_ADDI_SESSION_HANDLE_NOT_FOUND = 10,
+	MGMT_ADDI_CONNECTION_HANDLE_NOT_FOUND = 11,
+	MGMT_ADDI_ACTIVE_SESSIONS_PRESENT = 16,
+	MGMT_ADDI_SESSION_ALREADY_OPENED = 17,
+	MGMT_ADDI_SESSION_ALREADY_CLOSED = 18,
+	MGMT_ADDI_DEST_HOST_UNREACHABLE = 19,
+	MGMT_ADDI_LOGIN_IN_PROGRESS = 20,
+	MGMT_ADDI_TCP_CONNECT_FAILED = 21,
+	MGMT_ADDI_INSUFFICIENT_RESOURCES = 22,
+	MGMT_ADDI_LINK_DOWN = 23,
+	MGMT_ADDI_DHCP_ERROR = 24,
+	MGMT_ADDI_CONNECTION_OFFLOADED = 25,
+	MGMT_ADDI_CONNECTION_NOT_OFFLOADED = 26,
+	MGMT_ADDI_CONNECTION_UPLOAD_IN_PROGRESS = 27,
+	MGMT_ADDI_REQUEST_REJECTED = 28,
+	MGMT_ADDI_INVALID_SUBSYSTEM = 29,
+	MGMT_ADDI_INVALID_OPCODE = 30,
+	MGMT_ADDI_INVALID_MAXCONNECTION_PARAM = 31,
+	MGMT_ADDI_INVALID_KEY = 32,
+	MGMT_ADDI_INVALID_DOMAIN = 35,
+	MGMT_ADDI_LOGIN_INITIATOR_ERROR = 43,
+	MGMT_ADDI_LOGIN_AUTHENTICATION_ERROR = 44,
+	MGMT_ADDI_LOGIN_AUTHORIZATION_ERROR = 45,
+	MGMT_ADDI_LOGIN_NOT_FOUND = 46,
+	MGMT_ADDI_LOGIN_TARGET_REMOVED = 47,
+	MGMT_ADDI_LOGIN_UNSUPPORTED_VERSION = 48,
+	MGMT_ADDI_LOGIN_TOO_MANY_CONNECTIONS = 49,
+	MGMT_ADDI_LOGIN_MISSING_PARAMETER = 50,
+	MGMT_ADDI_LOGIN_NO_SESSION_SPANNING = 51,
+	MGMT_ADDI_LOGIN_SESSION_TYPE_NOT_SUPPORTED = 52,
+	MGMT_ADDI_LOGIN_SESSION_DOES_NOT_EXIST = 53,
+	MGMT_ADDI_LOGIN_INVALID_DURING_LOGIN = 54,
+	MGMT_ADDI_LOGIN_TARGET_ERROR = 55,
+	MGMT_ADDI_LOGIN_SERVICE_UNAVAILABLE = 56,
+	MGMT_ADDI_LOGIN_OUT_OF_RESOURCES = 57,
+	MGMT_ADDI_SAME_CHAP_SECRET = 58,
+	MGMT_ADDI_INVALID_SECRET_LENGTH = 59,
+	MGMT_ADDI_DUPLICATE_ENTRY = 60,
+	MGMT_ADDI_SETTINGS_MODIFIED_REBOOT_REQD = 63,
+	MGMT_ADDI_INVALID_EXTENDED_TIMEOUT = 64,
+	MGMT_ADDI_INVALID_INTERFACE_HANDLE = 65,
+	MGMT_ADDI_ERR_VLAN_ON_DEF_INTERFACE = 66,
+	MGMT_ADDI_INTERFACE_DOES_NOT_EXIST = 67,
+	MGMT_ADDI_INTERFACE_ALREADY_EXISTS = 68,
+	MGMT_ADDI_INVALID_VLAN_RANGE = 69,
+	MGMT_ADDI_ERR_SET_VLAN = 70,
+	MGMT_ADDI_ERR_DEL_VLAN = 71,
+	MGMT_ADDI_CANNOT_DEL_DEF_INTERFACE = 72,
+	MGMT_ADDI_DHCP_REQ_ALREADY_PENDING = 73,
+	MGMT_ADDI_TOO_MANY_INTERFACES = 74,
+	MGMT_ADDI_INVALID_REQUEST = 75
+};
+
+enum NIC_SUBSYSTEM_OPCODES {
+/**
+ * @brief NIC Subsystem Opcodes (see Network SLI-4 manual >= Rev4, v21-2)
+ * These opcodes are used for configuring the Ethernet interfaces.
+ * These opcodes all use the MBX_SUBSYSTEM_NIC subsystem code.
+ */
+	NIC_CONFIG_RSS = 1,
+	NIC_CONFIG_ACPI = 2,
+	NIC_CONFIG_PROMISCUOUS = 3,
+	NIC_GET_STATS = 4,
+	NIC_CREATE_WQ = 7,
+	NIC_CREATE_RQ = 8,
+	NIC_DELETE_WQ = 9,
+	NIC_DELETE_RQ = 10,
+	NIC_CONFIG_ACPI_WOL_MAGIC = 12,
+	NIC_GET_NETWORK_STATS = 13,
+	NIC_CREATE_HDS_RQ = 16,
+	NIC_DELETE_HDS_RQ = 17,
+	NIC_GET_PPORT_STATS = 18,
+	NIC_GET_VPORT_STATS = 19,
+	NIC_GET_QUEUE_STATS = 20
+};
+
+/* Hash option flags for RSS enable */
+enum RSS_ENABLE_FLAGS {
+	RSS_ENABLE_NONE		= 0x0,	/* (No RSS) */
+	RSS_ENABLE_IPV4		= 0x1,	/* (IPV4 HASH enabled ) */
+	RSS_ENABLE_TCP_IPV4	= 0x2,	/* (TCP IPV4 Hash enabled) */
+	RSS_ENABLE_IPV6		= 0x4,	/* (IPV6 HASH enabled) */
+	RSS_ENABLE_TCP_IPV6	= 0x8	/* (TCP IPV6 HASH */
+};
+#define RSS_ENABLE (RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4)
+#define RSS_DISABLE RSS_ENABLE_NONE
+
+/* NIC header WQE: bit-field layout given twice, for big- and little-endian hosts */
+struct oce_nic_hdr_wqe {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw0 */
+ uint32_t rsvd0;
+
+ /* dw1 */
+ uint32_t last_seg_udp_len:14;
+ uint32_t rsvd1:18;
+
+ /* dw2 */
+ uint32_t lso_mss:14;
+ uint32_t num_wqe:5;
+ uint32_t rsvd4:2;
+ uint32_t vlan:1;
+ uint32_t lso:1;
+ uint32_t tcpcs:1;
+ uint32_t udpcs:1;
+ uint32_t ipcs:1;
+ uint32_t rsvd3:1;
+ uint32_t rsvd2:1;
+ uint32_t forward:1;
+ uint32_t crc:1;
+ uint32_t event:1;
+ uint32_t complete:1;
+
+ /* dw3 */
+ uint32_t vlan_tag:16;
+ uint32_t total_length:16;
+#else
+ /* dw0 */
+ uint32_t rsvd0;
+
+ /* dw1 */
+ uint32_t rsvd1:18;
+ uint32_t last_seg_udp_len:14;
+
+ /* dw2 */
+ uint32_t complete:1;
+ uint32_t event:1;
+ uint32_t crc:1;
+ uint32_t forward:1;
+ uint32_t rsvd2:1;
+ uint32_t rsvd3:1;
+ uint32_t ipcs:1;
+ uint32_t udpcs:1;
+ uint32_t tcpcs:1;
+ uint32_t lso:1;
+ uint32_t vlan:1;
+ uint32_t rsvd4:2;
+ uint32_t num_wqe:5;
+ uint32_t lso_mss:14;
+
+ /* dw3 */
+ uint32_t total_length:16;
+ uint32_t vlan_tag:16;
+#endif
+ } s;
+ uint32_t dw[4]; /* raw dword view of the same 16 bytes */
+ } u0;
+};
+
+/* NIC fragment WQE: one DMA fragment (64-bit address split hi/lo, plus length) */
+struct oce_nic_frag_wqe {
+ union {
+ struct {
+ /* dw0 */
+ uint32_t frag_pa_hi;
+ /* dw1 */
+ uint32_t frag_pa_lo;
+ /* dw2 */
+ uint32_t rsvd0;
+ uint32_t frag_len; /* dw3 */
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+
+/* Ethernet Tx Completion Descriptor */
+struct oce_nic_tx_cqe {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t status:4;
+ uint32_t rsvd0:8;
+ uint32_t port:2;
+ uint32_t ct:2;
+ uint32_t wqe_index:16;
+
+ /* dw 1 */
+ uint32_t rsvd1:5;
+ uint32_t cast_enc:2;
+ uint32_t lso:1;
+ uint32_t nwh_bytes:8;
+ uint32_t user_bytes:16;
+
+ /* dw 2 */
+ uint32_t rsvd2;
+
+ /* dw 3 */
+ uint32_t valid:1;
+ uint32_t rsvd3:4;
+ uint32_t wq_id:11;
+ uint32_t num_pkts:16;
+#else
+ /* dw 0 */
+ uint32_t wqe_index:16;
+ uint32_t ct:2;
+ uint32_t port:2;
+ uint32_t rsvd0:8;
+ uint32_t status:4;
+
+ /* dw 1 */
+ uint32_t user_bytes:16;
+ uint32_t nwh_bytes:8;
+ uint32_t lso:1;
+ uint32_t cast_enc:2;
+ uint32_t rsvd1:5;
+ /* dw 2 */
+ uint32_t rsvd2;
+
+ /* dw 3 */
+ uint32_t num_pkts:16;
+ uint32_t wq_id:11;
+ uint32_t rsvd3:4;
+ uint32_t valid:1;
+#endif
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+#define WQ_CQE_VALID(_cqe) ((_cqe)->u0.dw[3]) /* dw3 contains the valid bit */
+#define WQ_CQE_INVALIDATE(_cqe) ((_cqe)->u0.dw[3] = 0)
+
+/* Receive Queue Entry (RQE): 64-bit buffer physical address, split hi/lo */
+struct oce_nic_rqe {
+ union {
+ struct {
+ uint32_t frag_pa_hi;
+ uint32_t frag_pa_lo;
+ } s;
+ uint32_t dw[2];
+ } u0;
+};
+
+/* NIC Receive CQE (v0 layout; see oce_nic_rx_cqe_v1 for the v1 layout) */
+struct oce_nic_rx_cqe {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t ip_options:1;
+ uint32_t port:1;
+ uint32_t pkt_size:14;
+ uint32_t vlan_tag:16;
+
+ /* dw 1 */
+ uint32_t num_fragments:3;
+ uint32_t switched:1;
+ uint32_t ct:2;
+ uint32_t frag_index:10;
+ uint32_t rsvd0:1;
+ uint32_t vlan_tag_present:1;
+ uint32_t mac_dst:6;
+ uint32_t ip_ver:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t udpframe:1;
+ uint32_t tcpframe:1;
+ uint32_t ipframe:1;
+ uint32_t rss_hp:1;
+ uint32_t error:1;
+
+ /* dw 2 */
+ uint32_t valid:1;
+ uint32_t hds_type:2;
+ uint32_t lro_pkt:1;
+ uint32_t rsvd4:1;
+ uint32_t hds_hdr_size:12;
+ uint32_t hds_hdr_frag_index:10;
+ uint32_t rss_bank:1;
+ uint32_t qnq:1;
+ uint32_t pkt_type:2;
+ uint32_t rss_flush:1;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#else
+ /* dw 0 */
+ uint32_t vlan_tag:16;
+ uint32_t pkt_size:14;
+ uint32_t port:1;
+ uint32_t ip_options:1;
+ /* dw 1 */
+ uint32_t error:1;
+ uint32_t rss_hp:1;
+ uint32_t ipframe:1;
+ uint32_t tcpframe:1;
+ uint32_t udpframe:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_ver:1;
+ uint32_t mac_dst:6;
+ uint32_t vlan_tag_present:1;
+ uint32_t rsvd0:1;
+ uint32_t frag_index:10;
+ uint32_t ct:2;
+ uint32_t switched:1;
+ uint32_t num_fragments:3;
+
+ /* dw 2 */
+ uint32_t rss_flush:1;
+ uint32_t pkt_type:2;
+ uint32_t qnq:1;
+ uint32_t rss_bank:1;
+ uint32_t hds_hdr_frag_index:10;
+ uint32_t hds_hdr_size:12;
+ uint32_t rsvd4:1;
+ uint32_t lro_pkt:1;
+ uint32_t hds_type:2;
+ uint32_t valid:1;
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#endif
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+/* NIC Receive CQE_v1 */
+struct oce_nic_rx_cqe_v1 {
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t ip_options:1;
+ uint32_t vlan_tag_present:1;
+ uint32_t pkt_size:14;
+ uint32_t vlan_tag:16;
+
+ /* dw 1 */
+ uint32_t num_fragments:3;
+ uint32_t switched:1;
+ uint32_t ct:2;
+ uint32_t frag_index:10;
+ uint32_t rsvd0:1;
+ uint32_t mac_dst:7;
+ uint32_t ip_ver:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t udpframe:1;
+ uint32_t tcpframe:1;
+ uint32_t ipframe:1;
+ uint32_t rss_hp:1;
+ uint32_t error:1;
+
+ /* dw 2 */
+ uint32_t valid:1;
+ uint32_t rsvd4:13;
+ uint32_t hds_hdr_size:2; /* width was missing; 2 matches the little-endian layout */
+ uint32_t hds_hdr_frag_index:8;
+ uint32_t vlantag:1;
+ uint32_t port:2;
+ uint32_t rss_bank:1;
+ uint32_t qnq:1;
+ uint32_t pkt_type:2;
+ uint32_t rss_flush:1;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#else
+ /* dw 0 */
+ uint32_t vlan_tag:16;
+ uint32_t pkt_size:14;
+ uint32_t vlan_tag_present:1;
+ uint32_t ip_options:1;
+ /* dw 1 */
+ uint32_t error:1;
+ uint32_t rss_hp:1;
+ uint32_t ipframe:1;
+ uint32_t tcpframe:1;
+ uint32_t udpframe:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_ver:1;
+ uint32_t mac_dst:7;
+ uint32_t rsvd0:1;
+ uint32_t frag_index:10;
+ uint32_t ct:2;
+ uint32_t switched:1;
+ uint32_t num_fragments:3;
+
+ /* dw 2 */
+ uint32_t rss_flush:1;
+ uint32_t pkt_type:2;
+ uint32_t qnq:1;
+ uint32_t rss_bank:1;
+ uint32_t port:2;
+ uint32_t vlantag:1;
+ uint32_t hds_hdr_frag_index:8;
+ uint32_t hds_hdr_size:2;
+ uint32_t rsvd4:13;
+ uint32_t valid:1;
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#endif
+ } s;
+ uint32_t dw[4];
+ } u0;
+};
+
+#define RQ_CQE_VALID_MASK 0x80
+#define RQ_CQE_VALID(_cqe) ((_cqe)->u0.dw[2]) /* dw2 contains the valid bit */
+#define RQ_CQE_INVALIDATE(_cqe) ((_cqe)->u0.dw[2] = 0)
+
+struct mbx_config_nic_promiscuous { /* [03] NIC_CONFIG_PROMISCUOUS */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint16_t rsvd0;
+ uint8_t port1_promisc;
+ uint8_t port0_promisc;
+#else
+ uint8_t port0_promisc;
+ uint8_t port1_promisc;
+ uint16_t rsvd0;
+#endif
+ } req;
+
+ struct {
+ uint32_t rsvd0;
+ } rsp;
+ } params;
+};
+
+typedef union oce_wq_ctx_u { /* NIC_CREATE_WQ context (dw4-dw20 of the mailbox); v0 and v1 layouts */
+ uint32_t dw[17];
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint32_t dw4rsvd2:8;
+ uint32_t nic_wq_type:8;
+ uint32_t dw4rsvd1:8;
+ uint32_t num_pages:8;
+ /* dw5 */
+ uint32_t dw5rsvd2:12;
+ uint32_t wq_size:4;
+ uint32_t dw5rsvd1:16;
+ /* dw6 */
+ uint32_t valid:1;
+ uint32_t dw6rsvd1:31;
+ /* dw7 */
+ uint32_t dw7rsvd1:16;
+ uint32_t cq_id:16;
+#else
+ /* dw4 */
+ uint32_t num_pages:8;
+#if 0
+ uint32_t dw4rsvd1:8;
+#else
+/* PSP: this workaround is not documented: fill 0x01 for ulp_mask */
+ uint32_t ulp_mask:8;
+#endif
+ uint32_t nic_wq_type:8;
+ uint32_t dw4rsvd2:8;
+ /* dw5 */
+ uint32_t dw5rsvd1:16;
+ uint32_t wq_size:4;
+ uint32_t dw5rsvd2:12;
+ /* dw6 */
+ uint32_t dw6rsvd1:31;
+ uint32_t valid:1;
+ /* dw7 */
+ uint32_t cq_id:16;
+ uint32_t dw7rsvd1:16;
+#endif
+ /* dw8 - dw20 */
+ uint32_t dw8_20rsvd1[13];
+ } v0;
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint32_t dw4rsvd2:8;
+ uint32_t nic_wq_type:8;
+ uint32_t dw4rsvd1:8;
+ uint32_t num_pages:8;
+ /* dw5 */
+ uint32_t dw5rsvd2:12;
+ uint32_t wq_size:4;
+ uint32_t iface_id:16; /* v1 adds the interface id where v0 had dw5rsvd1 */
+ /* dw6 */
+ uint32_t valid:1;
+ uint32_t dw6rsvd1:31;
+ /* dw7 */
+ uint32_t dw7rsvd1:16;
+ uint32_t cq_id:16;
+#else
+ /* dw4 */
+ uint32_t num_pages:8;
+ uint32_t dw4rsvd1:8;
+ uint32_t nic_wq_type:8;
+ uint32_t dw4rsvd2:8;
+ /* dw5 */
+ uint32_t iface_id:16;
+ uint32_t wq_size:4;
+ uint32_t dw5rsvd2:12;
+ /* dw6 */
+ uint32_t dw6rsvd1:31;
+ uint32_t valid:1;
+ /* dw7 */
+ uint32_t cq_id:16;
+ uint32_t dw7rsvd1:16;
+#endif
+ /* dw8 - dw20 */
+ uint32_t dw8_20rsvd1[13];
+ } v1;
+} oce_wq_ctx_t;
+
+/**
+ * @brief [07] NIC_CREATE_WQ
+ * @note
+ * Lancer requires an InterfaceID to be specified with every WQ. This
+ * is the basis for NIC IOV where the Interface maps to a vPort and maps
+ * to both Tx and Rx sides.
+ */
+#define OCE_WQ_TYPE_FORWARDING 0x1 /* wq forwards pkts to TOE */
+#define OCE_WQ_TYPE_STANDARD 0x2 /* wq sends network pkts */
+struct mbx_create_nic_wq { /* [07] NIC_CREATE_WQ */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint8_t num_pages;
+ uint8_t ulp_num;
+ uint16_t nic_wq_type; /* OCE_WQ_TYPE_* */
+ uint16_t if_id;
+ uint8_t wq_size;
+ uint8_t rsvd1;
+ uint32_t rsvd2;
+ uint16_t cq_id;
+ uint16_t rsvd3;
+ uint32_t rsvd4[13];
+ struct phys_addr pages[8];
+
+ } req;
+
+ struct {
+ uint16_t wq_id;
+ uint16_t rid;
+ uint32_t db_offset;
+ uint8_t tc_id;
+ uint8_t rsvd0[3];
+ } rsp;
+ } params;
+};
+
+/* [09] NIC_DELETE_WQ */
+struct mbx_delete_nic_wq {
+ /* dw0 - dw3 */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint16_t rsvd0;
+ uint16_t wq_id;
+#else
+ /* dw4 */
+ uint16_t wq_id;
+ uint16_t rsvd0;
+#endif
+ } req;
+ struct {
+ uint32_t rsvd0;
+ } rsp;
+ } params;
+};
+
+
+
+struct mbx_create_nic_rq { /* [08] NIC_CREATE_RQ */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint16_t cq_id;
+ uint8_t frag_size;
+ uint8_t num_pages;
+ struct phys_addr pages[2];
+ uint32_t if_id;
+ uint16_t max_frame_size;
+ uint16_t page_size;
+ uint32_t is_rss_queue;
+ } req;
+
+ struct {
+ uint16_t rq_id;
+ uint8_t rss_cpuid;
+ uint8_t rsvd0;
+ } rsp;
+
+ } params;
+};
+
+
+
+/* [10] NIC_DELETE_RQ */
+struct mbx_delete_nic_rq {
+ /* dw0 - dw3 */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint16_t bypass_flush;
+ uint16_t rq_id;
+#else
+ /* dw4 */
+ uint16_t rq_id;
+ uint16_t bypass_flush;
+#endif
+ } req;
+
+ struct {
+ /* dw4 */
+ uint32_t rsvd0;
+ } rsp;
+ } params;
+};
+
+
+
+
+struct oce_port_rxf_stats_v0 { /* per-port RXF counters, v0 layout (dwords 0-65) */
+ uint32_t rx_bytes_lsd; /* dword 0*/
+ uint32_t rx_bytes_msd; /* dword 1*/
+ uint32_t rx_total_frames; /* dword 2*/
+ uint32_t rx_unicast_frames; /* dword 3*/
+ uint32_t rx_multicast_frames; /* dword 4*/
+ uint32_t rx_broadcast_frames; /* dword 5*/
+ uint32_t rx_crc_errors; /* dword 6*/
+ uint32_t rx_alignment_symbol_errors; /* dword 7*/
+ uint32_t rx_pause_frames; /* dword 8*/
+ uint32_t rx_control_frames; /* dword 9*/
+ uint32_t rx_in_range_errors; /* dword 10*/
+ uint32_t rx_out_range_errors; /* dword 11*/
+ uint32_t rx_frame_too_long; /* dword 12*/
+ uint32_t rx_address_match_errors; /* dword 13*/
+ uint32_t rx_vlan_mismatch; /* dword 14*/
+ uint32_t rx_dropped_too_small; /* dword 15*/
+ uint32_t rx_dropped_too_short; /* dword 16*/
+ uint32_t rx_dropped_header_too_small; /* dword 17*/
+ uint32_t rx_dropped_tcp_length; /* dword 18*/
+ uint32_t rx_dropped_runt; /* dword 19*/
+ uint32_t rx_64_byte_packets; /* dword 20*/
+ uint32_t rx_65_127_byte_packets; /* dword 21*/
+ uint32_t rx_128_256_byte_packets; /* dword 22*/
+ uint32_t rx_256_511_byte_packets; /* dword 23*/
+ uint32_t rx_512_1023_byte_packets; /* dword 24*/
+ uint32_t rx_1024_1518_byte_packets; /* dword 25*/
+ uint32_t rx_1519_2047_byte_packets; /* dword 26*/
+ uint32_t rx_2048_4095_byte_packets; /* dword 27*/
+ uint32_t rx_4096_8191_byte_packets; /* dword 28*/
+ uint32_t rx_8192_9216_byte_packets; /* dword 29*/
+ uint32_t rx_ip_checksum_errs; /* dword 30*/
+ uint32_t rx_tcp_checksum_errs; /* dword 31*/
+ uint32_t rx_udp_checksum_errs; /* dword 32*/
+ uint32_t rx_non_rss_packets; /* dword 33*/
+ uint32_t rx_ipv4_packets; /* dword 34*/
+ uint32_t rx_ipv6_packets; /* dword 35*/
+ uint32_t rx_ipv4_bytes_lsd; /* dword 36*/
+ uint32_t rx_ipv4_bytes_msd; /* dword 37*/
+ uint32_t rx_ipv6_bytes_lsd; /* dword 38*/
+ uint32_t rx_ipv6_bytes_msd; /* dword 39*/
+ uint32_t rx_chute1_packets; /* dword 40*/
+ uint32_t rx_chute2_packets; /* dword 41*/
+ uint32_t rx_chute3_packets; /* dword 42*/
+ uint32_t rx_management_packets; /* dword 43*/
+ uint32_t rx_switched_unicast_packets; /* dword 44*/
+ uint32_t rx_switched_multicast_packets; /* dword 45*/
+ uint32_t rx_switched_broadcast_packets; /* dword 46*/
+ uint32_t tx_bytes_lsd; /* dword 47*/
+ uint32_t tx_bytes_msd; /* dword 48*/
+ uint32_t tx_unicastframes; /* dword 49*/
+ uint32_t tx_multicastframes; /* dword 50*/
+ uint32_t tx_broadcastframes; /* dword 51*/
+ uint32_t tx_pauseframes; /* dword 52*/
+ uint32_t tx_controlframes; /* dword 53*/
+ uint32_t tx_64_byte_packets; /* dword 54*/
+ uint32_t tx_65_127_byte_packets; /* dword 55*/
+ uint32_t tx_128_256_byte_packets; /* dword 56*/
+ uint32_t tx_256_511_byte_packets; /* dword 57*/
+ uint32_t tx_512_1023_byte_packets; /* dword 58*/
+ uint32_t tx_1024_1518_byte_packets; /* dword 59*/
+ uint32_t tx_1519_2047_byte_packets; /* dword 60*/
+ uint32_t tx_2048_4095_byte_packets; /* dword 61*/
+ uint32_t tx_4096_8191_byte_packets; /* dword 62*/
+ uint32_t tx_8192_9216_byte_packets; /* dword 63*/
+ uint32_t rxpp_fifo_overflow_drop; /* dword 64*/
+ uint32_t rx_input_fifo_overflow_drop; /* dword 65*/
+};
+
+
+struct oce_rxf_stats_v0 { /* v0 RXF stats block: two ports plus global counters */
+ struct oce_port_rxf_stats_v0 port[2];
+ uint32_t rx_drops_no_pbuf; /* dword 132*/
+ uint32_t rx_drops_no_txpb; /* dword 133*/
+ uint32_t rx_drops_no_erx_descr; /* dword 134*/
+ uint32_t rx_drops_no_tpre_descr; /* dword 135*/
+ uint32_t management_rx_port_packets; /* dword 136*/
+ uint32_t management_rx_port_bytes; /* dword 137*/
+ uint32_t management_rx_port_pause_frames;/* dword 138*/
+ uint32_t management_rx_port_errors; /* dword 139*/
+ uint32_t management_tx_port_packets; /* dword 140*/
+ uint32_t management_tx_port_bytes; /* dword 141*/
+ uint32_t management_tx_port_pause; /* dword 142*/
+ uint32_t management_rx_port_rxfifo_overflow; /* dword 143*/
+ uint32_t rx_drops_too_many_frags; /* dword 144*/
+ uint32_t rx_drops_invalid_ring; /* dword 145*/
+ uint32_t forwarded_packets; /* dword 146*/
+ uint32_t rx_drops_mtu; /* dword 147*/
+ uint32_t rsvd0[7];
+ uint32_t port0_jabber_events;
+ uint32_t port1_jabber_events;
+ uint32_t rsvd1[6];
+};
+
+struct oce_port_rxf_stats_v1 { /* per-port RXF counters, v1 layout */
+ uint32_t rsvd0[12];
+ uint32_t rx_crc_errors;
+ uint32_t rx_alignment_symbol_errors;
+ uint32_t rx_pause_frames;
+ uint32_t rx_priority_pause_frames;
+ uint32_t rx_control_frames;
+ uint32_t rx_in_range_errors;
+ uint32_t rx_out_range_errors;
+ uint32_t rx_frame_too_long;
+ uint32_t rx_address_match_errors;
+ uint32_t rx_dropped_too_small;
+ uint32_t rx_dropped_too_short;
+ uint32_t rx_dropped_header_too_small;
+ uint32_t rx_dropped_tcp_length;
+ uint32_t rx_dropped_runt;
+ uint32_t rsvd1[10];
+ uint32_t rx_ip_checksum_errs;
+ uint32_t rx_tcp_checksum_errs;
+ uint32_t rx_udp_checksum_errs;
+ uint32_t rsvd2[7];
+ uint32_t rx_switched_unicast_packets;
+ uint32_t rx_switched_multicast_packets;
+ uint32_t rx_switched_broadcast_packets;
+ uint32_t rsvd3[3];
+ uint32_t tx_pauseframes;
+ uint32_t tx_priority_pauseframes;
+ uint32_t tx_controlframes;
+ uint32_t rsvd4[10];
+ uint32_t rxpp_fifo_overflow_drop;
+ uint32_t rx_input_fifo_overflow_drop;
+ uint32_t pmem_fifo_overflow_drop;
+ uint32_t jabber_events;
+ uint32_t rsvd5[3];
+};
+
+
+struct oce_rxf_stats_v1 { /* v1 RXF stats block: four ports plus global counters */
+ struct oce_port_rxf_stats_v1 port[4];
+ uint32_t rsvd0[2];
+ uint32_t rx_drops_no_pbuf;
+ uint32_t rx_drops_no_txpb;
+ uint32_t rx_drops_no_erx_descr;
+ uint32_t rx_drops_no_tpre_descr;
+ uint32_t rsvd1[6];
+ uint32_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_ring;
+ uint32_t forwarded_packets;
+ uint32_t rx_drops_mtu;
+ uint32_t rsvd2[14];
+};
+
+struct oce_erx_stats_v1 { /* ERX drop counters, v1 (68 entries) */
+ uint32_t rx_drops_no_fragments[68];
+ uint32_t rsvd[4];
+};
+
+
+struct oce_erx_stats_v0 { /* ERX drop counters, v0 (44 entries) */
+ uint32_t rx_drops_no_fragments[44];
+ uint32_t rsvd[4];
+};
+
+struct oce_pmem_stats { /* packet-memory counters */
+ uint32_t eth_red_drops;
+ uint32_t rsvd[5];
+};
+
+struct oce_hw_stats_v1 { /* aggregate hw stats, v1 response layout */
+ struct oce_rxf_stats_v1 rxf;
+ uint32_t rsvd0[OCE_TXP_SW_SZ];
+ struct oce_erx_stats_v1 erx;
+ struct oce_pmem_stats pmem;
+ uint32_t rsvd1[18];
+};
+
+struct oce_hw_stats_v0 { /* aggregate hw stats, v0 response layout */
+ struct oce_rxf_stats_v0 rxf;
+ uint32_t rsvd[48];
+ struct oce_erx_stats_v0 erx;
+ struct oce_pmem_stats pmem;
+};
+
+struct mbx_get_nic_stats_v0 { /* [04] NIC_GET_STATS, v0 response layout */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint32_t rsvd0;
+ } req;
+
+ union {
+ struct oce_hw_stats_v0 stats;
+ } rsp;
+ } params;
+};
+
+struct mbx_get_nic_stats { /* [04] NIC_GET_STATS, v1 response layout */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint32_t rsvd0;
+ } req;
+
+ struct {
+ struct oce_hw_stats_v1 stats;
+ } rsp;
+ } params;
+};
+
+
+/* [18(0x12)] NIC_GET_PPORT_STATS */
+struct pport_stats { /* mix of 64- and 32-bit counters; word offsets listed in enum OCE_PPORT_STATS */
+ uint64_t tx_pkts;
+ uint64_t tx_unicast_pkts;
+ uint64_t tx_multicast_pkts;
+ uint64_t tx_broadcast_pkts;
+ uint64_t tx_bytes;
+ uint64_t tx_unicast_bytes;
+ uint64_t tx_multicast_bytes;
+ uint64_t tx_broadcast_bytes;
+ uint64_t tx_discards;
+ uint64_t tx_errors;
+ uint64_t tx_pause_frames;
+ uint64_t tx_pause_on_frames;
+ uint64_t tx_pause_off_frames;
+ uint64_t tx_internal_mac_errors;
+ uint64_t tx_control_frames;
+ uint64_t tx_pkts_64_bytes;
+ uint64_t tx_pkts_65_to_127_bytes;
+ uint64_t tx_pkts_128_to_255_bytes;
+ uint64_t tx_pkts_256_to_511_bytes;
+ uint64_t tx_pkts_512_to_1023_bytes;
+ uint64_t tx_pkts_1024_to_1518_bytes;
+ uint64_t tx_pkts_1519_to_2047_bytes;
+ uint64_t tx_pkts_2048_to_4095_bytes;
+ uint64_t tx_pkts_4096_to_8191_bytes;
+ uint64_t tx_pkts_8192_to_9216_bytes;
+ uint64_t tx_lso_pkts;
+ uint64_t rx_pkts;
+ uint64_t rx_unicast_pkts;
+ uint64_t rx_multicast_pkts;
+ uint64_t rx_broadcast_pkts;
+ uint64_t rx_bytes;
+ uint64_t rx_unicast_bytes;
+ uint64_t rx_multicast_bytes;
+ uint64_t rx_broadcast_bytes;
+ uint32_t rx_unknown_protos;
+ uint32_t reserved_word69;
+ uint64_t rx_discards;
+ uint64_t rx_errors;
+ uint64_t rx_crc_errors;
+ uint64_t rx_alignment_errors;
+ uint64_t rx_symbol_errors;
+ uint64_t rx_pause_frames;
+ uint64_t rx_pause_on_frames;
+ uint64_t rx_pause_off_frames;
+ uint64_t rx_frames_too_long;
+ uint64_t rx_internal_mac_errors;
+ uint32_t rx_undersize_pkts;
+ uint32_t rx_oversize_pkts;
+ uint32_t rx_fragment_pkts;
+ uint32_t rx_jabbers;
+ uint64_t rx_control_frames;
+ uint64_t rx_control_frames_unknown_opcode;
+ uint32_t rx_in_range_errors;
+ uint32_t rx_out_of_range_errors;
+ uint32_t rx_address_match_errors;
+ uint32_t rx_vlan_mismatch_errors;
+ uint32_t rx_dropped_too_small;
+ uint32_t rx_dropped_too_short;
+ uint32_t rx_dropped_header_too_small;
+ uint32_t rx_dropped_invalid_tcp_length;
+ uint32_t rx_dropped_runt;
+ uint32_t rx_ip_checksum_errors;
+ uint32_t rx_tcp_checksum_errors;
+ uint32_t rx_udp_checksum_errors;
+ uint32_t rx_non_rss_pkts;
+ uint64_t reserved_word111;
+ uint64_t rx_ipv4_pkts;
+ uint64_t rx_ipv6_pkts;
+ uint64_t rx_ipv4_bytes;
+ uint64_t rx_ipv6_bytes;
+ uint64_t rx_nic_pkts;
+ uint64_t rx_tcp_pkts;
+ uint64_t rx_iscsi_pkts;
+ uint64_t rx_management_pkts;
+ uint64_t rx_switched_unicast_pkts;
+ uint64_t rx_switched_multicast_pkts;
+ uint64_t rx_switched_broadcast_pkts;
+ uint64_t num_forwards;
+ uint32_t rx_fifo_overflow;
+ uint32_t rx_input_fifo_overflow;
+ uint64_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_queue;
+ uint32_t reserved_word141;
+ uint64_t rx_drops_mtu;
+ uint64_t rx_pkts_64_bytes;
+ uint64_t rx_pkts_65_to_127_bytes;
+ uint64_t rx_pkts_128_to_255_bytes;
+ uint64_t rx_pkts_256_to_511_bytes;
+ uint64_t rx_pkts_512_to_1023_bytes;
+ uint64_t rx_pkts_1024_to_1518_bytes;
+ uint64_t rx_pkts_1519_to_2047_bytes;
+ uint64_t rx_pkts_2048_to_4095_bytes;
+ uint64_t rx_pkts_4096_to_8191_bytes;
+ uint64_t rx_pkts_8192_to_9216_bytes;
+};
+
+struct mbx_get_pport_stats { /* [18] NIC_GET_PPORT_STATS mailbox */
+ /* dw0 - dw3 */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ /* dw4 */
+#ifdef _BIG_ENDIAN
+ uint32_t reset_stats:8;
+ uint32_t rsvd0:8;
+ uint32_t port_number:16;
+#else
+ uint32_t port_number:16;
+ uint32_t rsvd0:8;
+ uint32_t reset_stats:8;
+#endif
+ } req;
+
+ union {
+ struct pport_stats pps;
+ uint32_t pport_stats[164 - 4 + 1]; /* response dwords 4..164 (PPORT_N_WORDS == 164) */
+ } rsp;
+ } params;
+};
+
+/* [19(0x13)] NIC_GET_VPORT_STATS */
+struct vport_stats { /* all counters 64-bit; word offsets listed in enum OCE_VPORT_STATS */
+ uint64_t tx_pkts;
+ uint64_t tx_unicast_pkts;
+ uint64_t tx_multicast_pkts;
+ uint64_t tx_broadcast_pkts;
+ uint64_t tx_bytes;
+ uint64_t tx_unicast_bytes;
+ uint64_t tx_multicast_bytes;
+ uint64_t tx_broadcast_bytes;
+ uint64_t tx_discards;
+ uint64_t tx_errors;
+ uint64_t tx_pkts_64_bytes;
+ uint64_t tx_pkts_65_to_127_bytes;
+ uint64_t tx_pkts_128_to_255_bytes;
+ uint64_t tx_pkts_256_to_511_bytes;
+ uint64_t tx_pkts_512_to_1023_bytes;
+ uint64_t tx_pkts_1024_to_1518_bytes;
+ uint64_t tx_pkts_1519_to_9699_bytes;
+ uint64_t tx_pkts_over_9699_bytes;
+ uint64_t rx_pkts;
+ uint64_t rx_unicast_pkts;
+ uint64_t rx_multicast_pkts;
+ uint64_t rx_broadcast_pkts;
+ uint64_t rx_bytes;
+ uint64_t rx_unicast_bytes;
+ uint64_t rx_multicast_bytes;
+ uint64_t rx_broadcast_bytes;
+ uint64_t rx_discards;
+ uint64_t rx_errors;
+ uint64_t rx_pkts_64_bytes;
+ uint64_t rx_pkts_65_to_127_bytes;
+ uint64_t rx_pkts_128_to_255_bytes;
+ uint64_t rx_pkts_256_to_511_bytes;
+ uint64_t rx_pkts_512_to_1023_bytes;
+ uint64_t rx_pkts_1024_to_1518_bytes;
+ uint64_t rx_pkts_1519_to_9699_bytes;
+ uint64_t rx_pkts_gt_9699_bytes;
+};
+struct mbx_get_vport_stats { /* [19] NIC_GET_VPORT_STATS mailbox */
+ /* dw0 - dw3 */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ /* dw4 */
+#ifdef _BIG_ENDIAN
+ uint32_t reset_stats:8;
+ uint32_t rsvd0:8;
+ uint32_t vport_number:16;
+#else
+ uint32_t vport_number:16;
+ uint32_t rsvd0:8;
+ uint32_t reset_stats:8;
+#endif
+ } req;
+
+ union {
+ struct vport_stats vps;
+ uint32_t vport_stats[75 - 4 + 1]; /* response dwords 4..75 */
+ } rsp;
+ } params;
+};
+
+/**
+ * @brief [20(0x14)] NIC_GET_QUEUE_STATS
+ * The significant difference between vPort and Queue statistics is
+ * the packet byte counters.
+ */
+struct queue_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t drops;
+ uint64_t buffer_errors; /* rsvd when tx */
+};
+
+#define QUEUE_TYPE_WQ 0
+#define QUEUE_TYPE_RQ 1
+#define QUEUE_TYPE_HDS_RQ 1 /* same as RQ */
+
+struct mbx_get_queue_stats { /* [20] NIC_GET_QUEUE_STATS mailbox */
+ /* dw0 - dw3 */
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ /* dw4 */
+#ifdef _BIG_ENDIAN
+ uint32_t reset_stats:8;
+ uint32_t queue_type:8;
+ uint32_t queue_id:16;
+#else
+ uint32_t queue_id:16;
+ uint32_t queue_type:8; /* QUEUE_TYPE_* */
+ uint32_t reset_stats:8;
+#endif
+ } req;
+
+ union {
+ struct queue_stats qs;
+ uint32_t queue_stats[13 - 4 + 1]; /* response dwords 4..13 */
+ } rsp;
+ } params;
+};
+
+
+/* [01] NIC_CONFIG_RSS */
+#define OCE_HASH_TBL_SZ 10
+#define OCE_CPU_TBL_SZ 128
+#define OCE_FLUSH 1 /* RSS flush completion per CQ port */
+struct mbx_config_nic_rss {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t if_id;
+ uint16_t cpu_tbl_sz_log2;
+ uint16_t enable_rss; /* RSS_ENABLE_FLAGS bitmask */
+ uint32_t hash[OCE_HASH_TBL_SZ];
+ uint8_t cputable[OCE_CPU_TBL_SZ];
+ uint8_t rsvd[3];
+ uint8_t flush;
+#else
+ uint32_t if_id;
+ uint16_t enable_rss;
+ uint16_t cpu_tbl_sz_log2;
+ uint32_t hash[OCE_HASH_TBL_SZ];
+ uint8_t cputable[OCE_CPU_TBL_SZ];
+ uint8_t flush;
+ uint8_t rsvd[3];
+#endif
+ } req;
+ struct {
+ uint8_t rsvd[3];
+ uint8_t rss_bank;
+ } rsp;
+ } params;
+};
+
+
+#pragma pack() /* end of packed hw/mailbox structure region */
+
+
+typedef uint32_t oce_stat_t; /* statistic counter */
+
+enum OCE_RXF_PORT_STATS { /* word indices into struct oce_port_rxf_stats_v0 */
+ RXF_RX_BYTES_LSD,
+ RXF_RX_BYTES_MSD,
+ RXF_RX_TOTAL_FRAMES,
+ RXF_RX_UNICAST_FRAMES,
+ RXF_RX_MULTICAST_FRAMES,
+ RXF_RX_BROADCAST_FRAMES,
+ RXF_RX_CRC_ERRORS,
+ RXF_RX_ALIGNMENT_SYMBOL_ERRORS,
+ RXF_RX_PAUSE_FRAMES,
+ RXF_RX_CONTROL_FRAMES,
+ RXF_RX_IN_RANGE_ERRORS,
+ RXF_RX_OUT_RANGE_ERRORS,
+ RXF_RX_FRAME_TOO_LONG,
+ RXF_RX_ADDRESS_MATCH_ERRORS,
+ RXF_RX_VLAN_MISMATCH,
+ RXF_RX_DROPPED_TOO_SMALL,
+ RXF_RX_DROPPED_TOO_SHORT,
+ RXF_RX_DROPPED_HEADER_TOO_SMALL,
+ RXF_RX_DROPPED_TCP_LENGTH,
+ RXF_RX_DROPPED_RUNT,
+ RXF_RX_64_BYTE_PACKETS,
+ RXF_RX_65_127_BYTE_PACKETS,
+ RXF_RX_128_256_BYTE_PACKETS,
+ RXF_RX_256_511_BYTE_PACKETS,
+ RXF_RX_512_1023_BYTE_PACKETS,
+ RXF_RX_1024_1518_BYTE_PACKETS,
+ RXF_RX_1519_2047_BYTE_PACKETS,
+ RXF_RX_2048_4095_BYTE_PACKETS,
+ RXF_RX_4096_8191_BYTE_PACKETS,
+ RXF_RX_8192_9216_BYTE_PACKETS,
+ RXF_RX_IP_CHECKSUM_ERRS,
+ RXF_RX_TCP_CHECKSUM_ERRS,
+ RXF_RX_UDP_CHECKSUM_ERRS,
+ RXF_RX_NON_RSS_PACKETS,
+ RXF_RX_IPV4_PACKETS,
+ RXF_RX_IPV6_PACKETS,
+ RXF_RX_IPV4_BYTES_LSD,
+ RXF_RX_IPV4_BYTES_MSD,
+ RXF_RX_IPV6_BYTES_LSD,
+ RXF_RX_IPV6_BYTES_MSD,
+ RXF_RX_CHUTE1_PACKETS,
+ RXF_RX_CHUTE2_PACKETS,
+ RXF_RX_CHUTE3_PACKETS,
+ RXF_RX_MANAGEMENT_PACKETS,
+ RXF_RX_SWITCHED_UNICAST_PACKETS,
+ RXF_RX_SWITCHED_MULTICAST_PACKETS,
+ RXF_RX_SWITCHED_BROADCAST_PACKETS,
+ RXF_TX_BYTES_LSD,
+ RXF_TX_BYTES_MSD,
+ RXF_TX_UNICAST_FRAMES,
+ RXF_TX_MULTICAST_FRAMES,
+ RXF_TX_BROADCAST_FRAMES,
+ RXF_TX_PAUSE_FRAMES,
+ RXF_TX_CONTROL_FRAMES,
+ RXF_TX_64_BYTE_PACKETS,
+ RXF_TX_65_127_BYTE_PACKETS,
+ RXF_TX_128_256_BYTE_PACKETS,
+ RXF_TX_256_511_BYTE_PACKETS,
+ RXF_TX_512_1023_BYTE_PACKETS,
+ RXF_TX_1024_1518_BYTE_PACKETS,
+ RXF_TX_1519_2047_BYTE_PACKETS,
+ RXF_TX_2048_4095_BYTE_PACKETS,
+ RXF_TX_4096_8191_BYTE_PACKETS,
+ RXF_TX_8192_9216_BYTE_PACKETS,
+ RXF_RX_FIFO_OVERFLOW,
+ RXF_RX_INPUT_FIFO_OVERFLOW,
+ RXF_PORT_STATS_N_WORDS
+};
+
+enum OCE_RXF_ADDL_STATS { /* word indices for the global counters after port[] in oce_rxf_stats_v0 */
+ RXF_RX_DROPS_NO_PBUF,
+ RXF_RX_DROPS_NO_TXPB,
+ RXF_RX_DROPS_NO_ERX_DESCR,
+ RXF_RX_DROPS_NO_TPRE_DESCR,
+ RXF_MANAGEMENT_RX_PORT_PACKETS,
+ RXF_MANAGEMENT_RX_PORT_BYTES,
+ RXF_MANAGEMENT_RX_PORT_PAUSE_FRAMES,
+ RXF_MANAGEMENT_RX_PORT_ERRORS,
+ RXF_MANAGEMENT_TX_PORT_PACKETS,
+ RXF_MANAGEMENT_TX_PORT_BYTES,
+ RXF_MANAGEMENT_TX_PORT_PAUSE,
+ RXF_MANAGEMENT_RX_PORT_RXFIFO_OVERFLOW,
+ RXF_RX_DROPS_TOO_MANY_FRAGS,
+ RXF_RX_DROPS_INVALID_RING,
+ RXF_FORWARDED_PACKETS,
+ RXF_RX_DROPS_MTU,
+ RXF_ADDL_STATS_N_WORDS
+};
+
+enum OCE_TX_CHUTE_PORT_STATS { /* TX chute counter word indices */
+ CTPT_XMT_IPV4_PKTS,
+ CTPT_XMT_IPV4_LSD,
+ CTPT_XMT_IPV4_MSD,
+ CTPT_XMT_IPV6_PKTS,
+ CTPT_XMT_IPV6_LSD,
+ CTPT_XMT_IPV6_MSD,
+ CTPT_REXMT_IPV4_PKTs,
+ CTPT_REXMT_IPV4_LSD,
+ CTPT_REXMT_IPV4_MSD,
+ CTPT_REXMT_IPV6_PKTs,
+ CTPT_REXMT_IPV6_LSD,
+ CTPT_REXMT_IPV6_MSD,
+ CTPT_N_WORDS /* trailing comma dropped: C89-clean, consistent with the other enums */
+};
+
+enum OCE_RX_ERR_STATS { /* indices into oce_erx_stats rx_drops_no_fragments[] plus debug words */
+ RX_DROPS_NO_FRAGMENTS_0,
+ RX_DROPS_NO_FRAGMENTS_1,
+ RX_DROPS_NO_FRAGMENTS_2,
+ RX_DROPS_NO_FRAGMENTS_3,
+ RX_DROPS_NO_FRAGMENTS_4,
+ RX_DROPS_NO_FRAGMENTS_5,
+ RX_DROPS_NO_FRAGMENTS_6,
+ RX_DROPS_NO_FRAGMENTS_7,
+ RX_DROPS_NO_FRAGMENTS_8,
+ RX_DROPS_NO_FRAGMENTS_9,
+ RX_DROPS_NO_FRAGMENTS_10,
+ RX_DROPS_NO_FRAGMENTS_11,
+ RX_DROPS_NO_FRAGMENTS_12,
+ RX_DROPS_NO_FRAGMENTS_13,
+ RX_DROPS_NO_FRAGMENTS_14,
+ RX_DROPS_NO_FRAGMENTS_15,
+ RX_DROPS_NO_FRAGMENTS_16,
+ RX_DROPS_NO_FRAGMENTS_17,
+ RX_DROPS_NO_FRAGMENTS_18,
+ RX_DROPS_NO_FRAGMENTS_19,
+ RX_DROPS_NO_FRAGMENTS_20,
+ RX_DROPS_NO_FRAGMENTS_21,
+ RX_DROPS_NO_FRAGMENTS_22,
+ RX_DROPS_NO_FRAGMENTS_23,
+ RX_DROPS_NO_FRAGMENTS_24,
+ RX_DROPS_NO_FRAGMENTS_25,
+ RX_DROPS_NO_FRAGMENTS_26,
+ RX_DROPS_NO_FRAGMENTS_27,
+ RX_DROPS_NO_FRAGMENTS_28,
+ RX_DROPS_NO_FRAGMENTS_29,
+ RX_DROPS_NO_FRAGMENTS_30,
+ RX_DROPS_NO_FRAGMENTS_31,
+ RX_DROPS_NO_FRAGMENTS_32,
+ RX_DROPS_NO_FRAGMENTS_33,
+ RX_DROPS_NO_FRAGMENTS_34,
+ RX_DROPS_NO_FRAGMENTS_35,
+ RX_DROPS_NO_FRAGMENTS_36,
+ RX_DROPS_NO_FRAGMENTS_37,
+ RX_DROPS_NO_FRAGMENTS_38,
+ RX_DROPS_NO_FRAGMENTS_39,
+ RX_DROPS_NO_FRAGMENTS_40,
+ RX_DROPS_NO_FRAGMENTS_41,
+ RX_DROPS_NO_FRAGMENTS_42,
+ RX_DROPS_NO_FRAGMENTS_43,
+ RX_DEBUG_WDMA_SENT_HOLD,
+ RX_DEBUG_WDMA_PBFREE_SENT_HOLD,
+ RX_DEBUG_WDMA_0B_PBFREE_SENT_HOLD,
+ RX_DEBUG_PMEM_PBUF_DEALLOC,
+ RX_ERRORS_N_WORDS
+};
+
+enum OCE_PMEM_ERR_STATS { /* PMEM drop counter word indices */
+ PMEM_ETH_RED_DROPS,
+ PMEM_LRO_RED_DROPS,
+ PMEM_ULP0_RED_DROPS,
+ PMEM_ULP1_RED_DROPS,
+ PMEM_GLOBAL_RED_DROPS,
+ PMEM_ERRORS_N_WORDS
+};
+
+/**
+ * @brief Statistics for a given Physical Port
+ * These satisfy all the required BE2 statistics and also the
+ * following MIB objects:
+ *
+ * RFC 2863 - The Interfaces Group MIB
+ * RFC 2819 - Remote Network Monitoring Management Information Base (RMON)
+ * RFC 3635 - Managed Objects for the Ethernet-like Interface Types
+ * RFC 4502 - Remote Network Monitoring Mgmt Information Base Ver-2 (RMON2)
+ *
+ */
+enum OCE_PPORT_STATS { /* 64-bit counters occupy two words, hence the stride-2 indices */
+ PPORT_TX_PKTS = 0,
+ PPORT_TX_UNICAST_PKTS = 2,
+ PPORT_TX_MULTICAST_PKTS = 4,
+ PPORT_TX_BROADCAST_PKTS = 6,
+ PPORT_TX_BYTES = 8,
+ PPORT_TX_UNICAST_BYTES = 10,
+ PPORT_TX_MULTICAST_BYTES = 12,
+ PPORT_TX_BROADCAST_BYTES = 14,
+ PPORT_TX_DISCARDS = 16,
+ PPORT_TX_ERRORS = 18,
+ PPORT_TX_PAUSE_FRAMES = 20,
+ PPORT_TX_PAUSE_ON_FRAMES = 22,
+ PPORT_TX_PAUSE_OFF_FRAMES = 24,
+ PPORT_TX_INTERNAL_MAC_ERRORS = 26,
+ PPORT_TX_CONTROL_FRAMES = 28,
+ PPORT_TX_PKTS_64_BYTES = 30,
+ PPORT_TX_PKTS_65_TO_127_BYTES = 32,
+ PPORT_TX_PKTS_128_TO_255_BYTES = 34,
+ PPORT_TX_PKTS_256_TO_511_BYTES = 36,
+ PPORT_TX_PKTS_512_TO_1023_BYTES = 38,
+ PPORT_TX_PKTS_1024_TO_1518_BYTES = 40,
+ PPORT_TX_PKTS_1519_TO_2047_BYTES = 42,
+ PPORT_TX_PKTS_2048_TO_4095_BYTES = 44,
+ PPORT_TX_PKTS_4096_TO_8191_BYTES = 46,
+ PPORT_TX_PKTS_8192_TO_9216_BYTES = 48,
+ PPORT_TX_LSO_PKTS = 50,
+ PPORT_RX_PKTS = 52,
+ PPORT_RX_UNICAST_PKTS = 54,
+ PPORT_RX_MULTICAST_PKTS = 56,
+ PPORT_RX_BROADCAST_PKTS = 58,
+ PPORT_RX_BYTES = 60,
+ PPORT_RX_UNICAST_BYTES = 62,
+ PPORT_RX_MULTICAST_BYTES = 64,
+ PPORT_RX_BROADCAST_BYTES = 66,
+ PPORT_RX_UNKNOWN_PROTOS = 68, /* 32-bit counters below step by 1 */
+ PPORT_RESERVED_WORD69 = 69,
+ PPORT_RX_DISCARDS = 70,
+ PPORT_RX_ERRORS = 72,
+ PPORT_RX_CRC_ERRORS = 74,
+ PPORT_RX_ALIGNMENT_ERRORS = 76,
+ PPORT_RX_SYMBOL_ERRORS = 78,
+ PPORT_RX_PAUSE_FRAMES = 80,
+ PPORT_RX_PAUSE_ON_FRAMES = 82,
+ PPORT_RX_PAUSE_OFF_FRAMES = 84,
+ PPORT_RX_FRAMES_TOO_LONG = 86,
+ PPORT_RX_INTERNAL_MAC_ERRORS = 88,
+ PPORT_RX_UNDERSIZE_PKTS = 90,
+ PPORT_RX_OVERSIZE_PKTS = 91,
+ PPORT_RX_FRAGMENT_PKTS = 92,
+ PPORT_RX_JABBERS = 93,
+ PPORT_RX_CONTROL_FRAMES = 94,
+ PPORT_RX_CONTROL_FRAMES_UNK_OPCODE = 96,
+ PPORT_RX_IN_RANGE_ERRORS = 98,
+ PPORT_RX_OUT_OF_RANGE_ERRORS = 99,
+ PPORT_RX_ADDRESS_MATCH_ERRORS = 100,
+ PPORT_RX_VLAN_MISMATCH_ERRORS = 101,
+ PPORT_RX_DROPPED_TOO_SMALL = 102,
+ PPORT_RX_DROPPED_TOO_SHORT = 103,
+ PPORT_RX_DROPPED_HEADER_TOO_SMALL = 104,
+ PPORT_RX_DROPPED_INVALID_TCP_LENGTH = 105,
+ PPORT_RX_DROPPED_RUNT = 106,
+ PPORT_RX_IP_CHECKSUM_ERRORS = 107,
+ PPORT_RX_TCP_CHECKSUM_ERRORS = 108,
+ PPORT_RX_UDP_CHECKSUM_ERRORS = 109,
+ PPORT_RX_NON_RSS_PKTS = 110,
+ PPORT_RESERVED_WORD111 = 111,
+ PPORT_RX_IPV4_PKTS = 112,
+ PPORT_RX_IPV6_PKTS = 114,
+ PPORT_RX_IPV4_BYTES = 116,
+ PPORT_RX_IPV6_BYTES = 118,
+ PPORT_RX_NIC_PKTS = 120,
+ PPORT_RX_TCP_PKTS = 122,
+ PPORT_RX_ISCSI_PKTS = 124,
+ PPORT_RX_MANAGEMENT_PKTS = 126,
+ PPORT_RX_SWITCHED_UNICAST_PKTS = 128,
+ PPORT_RX_SWITCHED_MULTICAST_PKTS = 130,
+ PPORT_RX_SWITCHED_BROADCAST_PKTS = 132,
+ PPORT_NUM_FORWARDS = 134,
+ PPORT_RX_FIFO_OVERFLOW = 136,
+ PPORT_RX_INPUT_FIFO_OVERFLOW = 137,
+ PPORT_RX_DROPS_TOO_MANY_FRAGS = 138,
+ PPORT_RX_DROPS_INVALID_QUEUE = 140,
+ PPORT_RESERVED_WORD141 = 141,
+ PPORT_RX_DROPS_MTU = 142,
+ PPORT_RX_PKTS_64_BYTES = 144,
+ PPORT_RX_PKTS_65_TO_127_BYTES = 146,
+ PPORT_RX_PKTS_128_TO_255_BYTES = 148,
+ PPORT_RX_PKTS_256_TO_511_BYTES = 150,
+ PPORT_RX_PKTS_512_TO_1023_BYTES = 152,
+ PPORT_RX_PKTS_1024_TO_1518_BYTES = 154,
+ PPORT_RX_PKTS_1519_TO_2047_BYTES = 156,
+ PPORT_RX_PKTS_2048_TO_4095_BYTES = 158,
+ PPORT_RX_PKTS_4096_TO_8191_BYTES = 160,
+ PPORT_RX_PKTS_8192_TO_9216_BYTES = 162,
+ PPORT_N_WORDS = 164
+};
+
+/**
+ * @brief Statistics for a given Virtual Port (vPort)
+ * The following describes the vPort statistics satisfying
+ * requirements of Linux/VMWare netdev statistics and
+ * Microsoft Windows Statistics along with other Operating Systems.
+ */
+enum OCE_VPORT_STATS { /* stride-2 indices: every vPort counter is 64-bit (two words) */
+ VPORT_TX_PKTS = 0,
+ VPORT_TX_UNICAST_PKTS = 2,
+ VPORT_TX_MULTICAST_PKTS = 4,
+ VPORT_TX_BROADCAST_PKTS = 6,
+ VPORT_TX_BYTES = 8,
+ VPORT_TX_UNICAST_BYTES = 10,
+ VPORT_TX_MULTICAST_BYTES = 12,
+ VPORT_TX_BROADCAST_BYTES = 14,
+ VPORT_TX_DISCARDS = 16,
+ VPORT_TX_ERRORS = 18,
+ VPORT_TX_PKTS_64_BYTES = 20,
+ VPORT_TX_PKTS_65_TO_127_BYTES = 22,
+ VPORT_TX_PKTS_128_TO_255_BYTES = 24,
+ VPORT_TX_PKTS_256_TO_511_BYTES = 26,
+ VPORT_TX_PKTS_512_TO_1023_BYTEs = 28,
+ VPORT_TX_PKTS_1024_TO_1518_BYTEs = 30,
+ VPORT_TX_PKTS_1519_TO_9699_BYTEs = 32,
+ VPORT_TX_PKTS_OVER_9699_BYTES = 34,
+ VPORT_RX_PKTS = 36,
+ VPORT_RX_UNICAST_PKTS = 38,
+ VPORT_RX_MULTICAST_PKTS = 40,
+ VPORT_RX_BROADCAST_PKTS = 42,
+ VPORT_RX_BYTES = 44,
+ VPORT_RX_UNICAST_BYTES = 46,
+ VPORT_RX_MULTICAST_BYTES = 48,
+ VPORT_RX_BROADCAST_BYTES = 50,
+ VPORT_RX_DISCARDS = 52,
+ VPORT_RX_ERRORS = 54,
+ VPORT_RX_PKTS_64_BYTES = 56,
+ VPORT_RX_PKTS_65_TO_127_BYTES = 58,
+ VPORT_RX_PKTS_128_TO_255_BYTES = 60,
+ VPORT_RX_PKTS_256_TO_511_BYTES = 62,
+ VPORT_RX_PKTS_512_TO_1023_BYTEs = 64,
+ VPORT_RX_PKTS_1024_TO_1518_BYTEs = 66,
+ VPORT_RX_PKTS_1519_TO_9699_BYTEs = 68,
+ VPORT_RX_PKTS_OVER_9699_BYTES = 70,
+ VPORT_N_WORDS = 72
+};
+
+/**
+ * @brief Statistics for a given queue (NIC WQ, RQ, or HDS RQ)
+ * This set satisfies requirements of VMQare NetQueue and Microsoft VMQ
+ */
+enum OCE_QUEUE_TX_STATS { /* stride-2 indices: each queue counter is 64-bit (two words) */
+ QUEUE_TX_PKTS = 0,
+ QUEUE_TX_BYTES = 2,
+ QUEUE_TX_ERRORS = 4,
+ QUEUE_TX_DROPS = 6,
+ QUEUE_TX_N_WORDS = 8
+};
+
+enum OCE_QUEUE_RX_STATS { /* as above, plus the RX-only buffer_errors counter */
+ QUEUE_RX_PKTS = 0,
+ QUEUE_RX_BYTES = 2,
+ QUEUE_RX_ERRORS = 4,
+ QUEUE_RX_DROPS = 6,
+ QUEUE_RX_BUFFER_ERRORS = 8,
+ QUEUE_RX_N_WORDS = 10
+};
+
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
new file mode 100644
index 0000000..c400507
--- /dev/null
+++ b/sys/dev/oce/oce_if.c
@@ -0,0 +1,2000 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+/* $FreeBSD$ */
+
+#include "oce_if.h"
+
+
+/* Driver entry points prototypes */
+static int oce_probe(device_t dev);
+static int oce_attach(device_t dev);
+static int oce_detach(device_t dev);
+static int oce_shutdown(device_t dev);
+static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
+static void oce_init(void *xsc);
+static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
+static void oce_multiq_flush(struct ifnet *ifp);
+
+/* Driver interrupt routines prototypes */
+static void oce_intr(void *arg, int pending);
+static int oce_setup_intr(POCE_SOFTC sc);
+static int oce_fast_isr(void *arg);
+static int oce_alloc_intr(POCE_SOFTC sc, int vector,
+ void (*isr) (void *arg, int pending));
+
+/* Media callbacks prototypes */
+static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
+static int oce_media_change(struct ifnet *ifp);
+
+/* Transmit routines prototypes */
+static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
+static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
+static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
+ uint32_t status);
+static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp,
+ uint16_t *mss);
+static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
+ struct oce_wq *wq);
+
+/* Receive routines prototypes */
+static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
+static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
+static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
+static void oce_rx_flush_lro(struct oce_rq *rq);
+static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
+ struct oce_nic_rx_cqe *cqe);
+
+/* Helper function prototypes in this file */
+static int oce_attach_ifp(POCE_SOFTC sc);
+static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
+static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
+static int oce_vid_config(POCE_SOFTC sc);
+static void oce_mac_addr_set(POCE_SOFTC sc);
+static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
+static void oce_local_timer(void *arg);
+static int oce_init_lro(POCE_SOFTC sc);
+static void oce_if_deactivate(POCE_SOFTC sc);
+static void oce_if_activate(POCE_SOFTC sc);
+static void setup_max_queues_want(POCE_SOFTC sc);
+static void update_queues_got(POCE_SOFTC sc);
+
+/* newbus method table: maps generic device events to driver entry points */
+static device_method_t oce_dispatch[] = {
+	DEVMETHOD(device_probe, oce_probe),
+	DEVMETHOD(device_attach, oce_attach),
+	DEVMETHOD(device_detach, oce_detach),
+	DEVMETHOD(device_shutdown, oce_shutdown),
+	{0, 0}
+};
+
+static driver_t oce_driver = {
+	"oce",
+	oce_dispatch,
+	sizeof(OCE_SOFTC)
+};
+static devclass_t oce_devclass;
+
+
+/* Register the driver on the PCI bus and declare its dependencies */
+DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
+MODULE_DEPEND(oce, pci, 1, 1, 1);
+MODULE_DEPEND(oce, ether, 1, 1, 1);
+MODULE_VERSION(oce, 1);
+
+
+/* global vars */
+const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
+
+/* Module capabilities and parameters (overridable via loader tunables) */
+uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
+uint32_t oce_enable_rss = OCE_MODCAP_RSS;
+
+
+TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
+TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
+
+
+/* Supported devices table: each entry packs (vendor << 16) | device,
+ * so the element type is uint32_t -- size the table accordingly. */
+static uint32_t supportedDevices[] = {
+	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
+	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
+	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
+	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
+	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
+};
+
+
+
+
+/*****************************************************************************
+ * Driver entry points functions *
+ *****************************************************************************/
+
+/*
+ * Device probe: match the PCI vendor/device pair against the supported
+ * devices table, set the device description, and record the ASIC family
+ * in the softc flags.  Returns BUS_PROBE_DEFAULT on a match, ENXIO
+ * otherwise.
+ */
+static int
+oce_probe(device_t dev)
+{
+	uint16_t vendor;
+	uint16_t device;
+	int i;
+	char str[80];
+	POCE_SOFTC sc;
+
+	sc = device_get_softc(dev);
+	bzero(sc, sizeof(OCE_SOFTC));
+	sc->dev = dev;
+
+	vendor = pci_get_vendor(dev);
+	device = pci_get_device(dev);
+
+	/* Divide by the element size: the table holds uint32_t entries,
+	 * so dividing by sizeof(uint16_t) would walk twice past the end
+	 * of the array (out-of-bounds reads). */
+	for (i = 0; i < (sizeof(supportedDevices) /
+	    sizeof(supportedDevices[0])); i++) {
+		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
+			if (device == (supportedDevices[i] & 0xffff)) {
+				/* snprintf: never overflow str even if the
+				 * revision string grows. */
+				snprintf(str, sizeof(str), "%s:%s",
+					"Emulex CNA NIC function",
+					component_revision);
+				device_set_desc_copy(dev, str);
+
+				switch (device) {
+				case PCI_PRODUCT_BE2:
+					sc->flags |= OCE_FLAGS_BE2;
+					break;
+				case PCI_PRODUCT_BE3:
+					sc->flags |= OCE_FLAGS_BE3;
+					break;
+				case PCI_PRODUCT_XE201:
+				case PCI_PRODUCT_XE201_VF:
+					sc->flags |= OCE_FLAGS_XE201;
+					break;
+				default:
+					return ENXIO;
+				}
+				return BUS_PROBE_DEFAULT;
+			}
+		}
+	}
+
+	return ENXIO;
+}
+
+
+/*
+ * Device attach: allocate PCI resources, initialize the hardware,
+ * interrupts, queues, ifnet, LRO and stats, then start the periodic
+ * timer.  On any failure, unwind everything acquired so far via the
+ * reverse-ordered goto cleanup chain.  Returns 0 on success.
+ */
+static int
+oce_attach(device_t dev)
+{
+	POCE_SOFTC sc;
+	int rc = 0;
+
+	sc = device_get_softc(dev);
+
+	rc = oce_hw_pci_alloc(sc);
+	if (rc)
+		return rc;
+
+	sc->rss_enable 	 = oce_enable_rss;
+	sc->tx_ring_size = OCE_TX_RING_SIZE;
+	sc->rx_ring_size = OCE_RX_RING_SIZE;
+	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
+	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
+	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
+
+	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
+	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
+
+	/* initialise the hardware */
+	rc = oce_hw_init(sc);
+	if (rc)
+		goto pci_res_free;
+
+	setup_max_queues_want(sc);
+
+	rc = oce_setup_intr(sc);
+	if (rc)
+		goto mbox_free;
+
+	rc = oce_queue_init_all(sc);
+	if (rc)
+		goto intr_free;
+
+	rc = oce_attach_ifp(sc);
+	if (rc)
+		goto queues_free;
+
+	rc = oce_init_lro(sc);
+	if (rc)
+		goto ifp_free;
+
+	/* was "goto lro_free;;" -- stray double semicolon removed */
+	rc = oce_hw_start(sc);
+	if (rc)
+		goto lro_free;
+
+	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
+	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
+
+	rc = oce_stats_init(sc);
+	if (rc)
+		goto vlan_free;
+
+	oce_add_sysctls(sc);
+
+	/* NOTE(review): callout_reset() returns whether a pending callout
+	 * was cancelled, not an error code; treating non-zero as failure
+	 * looks wrong, though at attach time no callout is pending so it
+	 * always returns 0 here -- confirm against callout(9). */
+	callout_init(&sc->timer, CALLOUT_MPSAFE);
+	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
+	if (rc)
+		goto stats_free;
+
+	return 0;
+
+stats_free:
+	callout_drain(&sc->timer);
+	oce_stats_free(sc);
+vlan_free:
+	if (sc->vlan_attach)
+		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
+	if (sc->vlan_detach)
+		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
+	oce_hw_intr_disable(sc);
+lro_free:
+	oce_free_lro(sc);
+ifp_free:
+	ether_ifdetach(sc->ifp);
+	if_free(sc->ifp);
+queues_free:
+	oce_queue_release_all(sc);
+intr_free:
+	oce_intr_free(sc);
+mbox_free:
+	oce_dma_free(sc, &sc->bsmbx);
+pci_res_free:
+	oce_hw_pci_free(sc);
+	LOCK_DESTROY(&sc->dev_lock);
+	LOCK_DESTROY(&sc->bmbx_lock);
+	return rc;
+
+}
+
+
+/*
+ * Device detach: quiesce the interface under the device lock, drain the
+ * timer, unhook VLAN event handlers, detach the ifnet and shut the
+ * hardware down.  Always returns 0.
+ */
+static int
+oce_detach(device_t dev)
+{
+	POCE_SOFTC sc = device_get_softc(dev);
+
+	LOCK(&sc->dev_lock);
+
+	oce_if_deactivate(sc);
+
+	UNLOCK(&sc->dev_lock);
+
+	/* must be outside the lock: drain sleeps until the callout ends */
+	callout_drain(&sc->timer);
+
+	if (sc->vlan_attach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
+	if (sc->vlan_detach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
+
+	ether_ifdetach(sc->ifp);
+
+	if_free(sc->ifp);
+
+	/* releases queues, interrupts and PCI resources -- TODO confirm
+	 * it also destroys dev_lock/bmbx_lock created in oce_attach() */
+	oce_hw_shutdown(sc);
+
+	bus_generic_detach(dev);
+
+	return 0;
+}
+
+
+/* System shutdown is handled identically to a detach. */
+static int
+oce_shutdown(device_t dev)
+{
+	return (oce_detach(dev));
+}
+
+
+/*
+ * Interface ioctl handler.  Most generic requests are forwarded to
+ * ether_ioctl(); the driver handles media, MTU, flags (up/down and
+ * promiscuous), multicast updates, capability toggles and the private
+ * passthrough ioctl itself.  Returns 0 or an errno value.
+ */
+static int
+oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+	struct ifreq *ifr = (struct ifreq *)data;
+	POCE_SOFTC sc = ifp->if_softc;
+	int rc = 0;
+	uint32_t u;	/* bitmask of capability bits being toggled */
+
+	switch (command) {
+	case SIOCGIFPSRCADDR_IN6:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCGIFPSRCADDR:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCGIFSTATUS:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCGIFMEDIA:
+		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+		break;
+
+	case SIOCSIFMEDIA:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCGIFGENERIC:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCGETMIFCNT_IN6:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+
+	case SIOCSIFMTU:
+		/* NOTE(review): only updates the soft MTU; no hardware
+		 * reprogramming or reinit is performed here -- confirm
+		 * the chip accepts frames up to OCE_MAX_MTU unconditionally */
+		if (ifr->ifr_mtu > OCE_MAX_MTU)
+			rc = EINVAL;
+		else
+			ifp->if_mtu = ifr->ifr_mtu;
+		break;
+
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;	
+				oce_init(sc);
+			}
+			device_printf(sc->dev, "Interface Up\n");	
+		} else {
+			LOCK(&sc->dev_lock);
+
+			sc->ifp->if_drv_flags &=
+			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+			oce_if_deactivate(sc);
+
+			UNLOCK(&sc->dev_lock);
+
+			device_printf(sc->dev, "Interface Down\n");
+		}
+
+		/* sync hardware promiscuous mode with the IFF_PROMISC flag */
+		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
+			sc->promisc = TRUE;
+			oce_rxf_set_promiscuous(sc, sc->promisc);
+		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
+			sc->promisc = FALSE;
+			oce_rxf_set_promiscuous(sc, sc->promisc);
+		}
+
+		break;
+
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		rc = oce_hw_update_multicast(sc);
+		if (rc)
+			device_printf(sc->dev,
+				"Update multicast address failed\n");
+		break;
+
+	case SIOCSIFCAP:
+		u = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+		if (u & IFCAP_TXCSUM) {
+			ifp->if_capenable ^= IFCAP_TXCSUM;
+			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+
+			/* TSO requires TX checksum offload; drop TSO when
+			 * txcsum is being disabled */
+			if (IFCAP_TSO & ifp->if_capenable &&
+			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
+				ifp->if_capenable &= ~IFCAP_TSO;
+				ifp->if_hwassist &= ~CSUM_TSO;
+				if_printf(ifp,
+					 "TSO disabled due to -txcsum.\n");
+			}
+		}
+
+		if (u & IFCAP_RXCSUM)
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+
+		if (u & IFCAP_TSO4) {
+			ifp->if_capenable ^= IFCAP_TSO4;
+
+			if (IFCAP_TSO & ifp->if_capenable) {
+				if (IFCAP_TXCSUM & ifp->if_capenable)
+					ifp->if_hwassist |= CSUM_TSO;
+				else {
+					ifp->if_capenable &= ~IFCAP_TSO;
+					ifp->if_hwassist &= ~CSUM_TSO;
+					if_printf(ifp,
+					    "Enable txcsum first.\n");
+					rc = EAGAIN;
+				}
+			} else
+				ifp->if_hwassist &= ~CSUM_TSO;
+		}
+
+		if (u & IFCAP_VLAN_HWTAGGING)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+		if (u & IFCAP_VLAN_HWFILTER) {
+			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+			oce_vid_config(sc);
+		}
+
+		if (u & IFCAP_LRO)
+			ifp->if_capenable ^= IFCAP_LRO;
+
+		break;
+
+	case SIOCGPRIVATE_0:
+		/* vendor passthrough (mailbox commands from userland tools) */
+		rc = oce_handle_passthrough(ifp, data);
+		break;
+	default:
+		rc = ether_ioctl(ifp, command, data);
+		break;
+	}
+
+	return rc;
+}
+
+
+/*
+ * (Re)initialize the interface: under the device lock, tear the
+ * interface down and bring it back up if it is administratively UP.
+ */
+static void
+oce_init(void *arg)
+{
+	POCE_SOFTC sc = arg;
+
+	LOCK(&sc->dev_lock);
+
+	if (sc->ifp->if_flags & IFF_UP) {
+		oce_if_deactivate(sc);
+		oce_if_activate(sc);
+	}
+
+	UNLOCK(&sc->dev_lock);
+
+}
+
+
+/*
+ * if_transmit entry point for multiqueue TX.  Selects a work queue from
+ * the mbuf's flow id (falling back to queue 0 without one), transmits
+ * directly if the queue lock is uncontended, otherwise enqueues the
+ * mbuf on the queue's buf_ring for the TX task to pick up later.
+ */
+static int
+oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+	struct oce_wq *wq = NULL;
+	int queue_index = 0;
+	int status = 0;
+
+	if ((m->m_flags & M_FLOWID) != 0)
+		queue_index = m->m_pkthdr.flowid % sc->nwqs;
+	
+	wq = sc->wq[queue_index];
+
+	if (TRY_LOCK(&wq->tx_lock)) {
+		status = oce_multiq_transmit(ifp, m, wq);
+		UNLOCK(&wq->tx_lock);
+	} else {
+		status = drbr_enqueue(ifp, wq->br, m);
+	}
+	return status;
+
+}
+
+
+/*
+ * if_qflush entry point: free every mbuf queued on each work queue's
+ * buf_ring, then flush the default ifnet send queue.
+ */
+static void
+oce_multiq_flush(struct ifnet *ifp)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+	struct mbuf     *m;
+	int i = 0;
+
+	for (i = 0; i < sc->nwqs; i++) {
+		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
+			m_freem(m);
+	}
+	if_qflush(ifp);
+}
+
+
+
+/*****************************************************************************
+ * Driver interrupt routines functions *
+ *****************************************************************************/
+
+/*
+ * Deferred interrupt handler (runs in taskqueue context, enqueued by
+ * oce_fast_isr).  Consumes all pending event queue entries, dispatches
+ * the completion queue handlers attached to this EQ, then re-arms the
+ * CQs and finally the EQ.
+ */
+static void
+oce_intr(void *arg, int pending)
+{
+
+	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
+	POCE_SOFTC sc = ii->sc;
+	struct oce_eq *eq = ii->eq;
+	struct oce_eqe *eqe;
+	struct oce_cq *cq = NULL;
+	int i, num_eqes = 0;
+
+
+	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
+				 BUS_DMASYNC_POSTWRITE);
+	do {
+		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
+		if (eqe->evnt == 0)
+			break;
+		/* clear the valid bit so the entry is not seen again */
+		eqe->evnt = 0;
+		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
+					BUS_DMASYNC_POSTWRITE);
+		RING_GET(eq->ring, 1);
+		num_eqes++;
+
+	} while (TRUE);
+	
+	if (!num_eqes)
+		goto eq_arm; /* Spurious */
+
+	/* Clear EQ entries, but don't arm */
+	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
+
+	/* Process TX, RX and MCC. But don't arm CQ */
+	for (i = 0; i < eq->cq_valid; i++) {
+		cq = eq->cq[i];
+		(*cq->cq_handler)(cq->cb_arg);
+	}
+
+	/* Arm all cqs connected to this EQ */
+	for (i = 0; i < eq->cq_valid; i++) {
+		cq = eq->cq[i];
+		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
+	}
+
+eq_arm:
+	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+	return;
+}
+
+
+/*
+ * Allocate interrupt vectors: try MSI-X with one vector per RX/TX queue
+ * (when RSS is enabled), falling back to a single legacy INTx vector.
+ * The queue counts are then rescaled to match the vectors obtained.
+ * Returns 0 on success; frees any partial allocation on failure.
+ */
+static int
+oce_setup_intr(POCE_SOFTC sc)
+{
+	int rc = 0, use_intx = 0;
+	int vector = 0, req_vectors = 0;
+
+	/* nrqs includes the default RQ, which shares vector 0 */
+	if (sc->rss_enable)
+		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
+	else
+		req_vectors = 1;
+
+	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
+		sc->intr_count = req_vectors;
+		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
+		if (rc != 0) {
+			use_intx = 1;
+			pci_release_msi(sc->dev);
+		} else
+			sc->flags |= OCE_FLAGS_USING_MSIX;
+	} else
+		use_intx = 1;
+
+	if (use_intx)
+		sc->intr_count = 1;
+
+	/* Scale number of queues based on intr we got */
+	update_queues_got(sc);
+
+	if (use_intx) {
+		device_printf(sc->dev, "Using legacy interrupt\n");
+		rc = oce_alloc_intr(sc, vector, oce_intr);
+		if (rc)
+			goto error;		
+	} else {
+		for (; vector < sc->intr_count; vector++) {
+			rc = oce_alloc_intr(sc, vector, oce_intr);
+			if (rc)
+				goto error;
+		}
+	}
+
+	return 0;
+error:
+	oce_intr_free(sc);
+	return rc;
+}
+
+
+/*
+ * Interrupt filter (runs in primary interrupt context): mask further
+ * events on this EQ, then hand the real work off to the per-vector
+ * taskqueue.  Returns FILTER_STRAY for vectors with no EQ attached.
+ */
+static int
+oce_fast_isr(void *arg)
+{
+	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
+	POCE_SOFTC sc = ii->sc;
+
+	if (ii->eq == NULL)
+		return FILTER_STRAY;
+
+	/* clear interrupt: arm=FALSE, clearint=TRUE */
+	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
+
+	taskqueue_enqueue_fast(ii->tq, &ii->task);
+
+	return FILTER_HANDLED;
+}
+
+
+/*
+ * Allocate one interrupt vector: reserve the IRQ resource, create a
+ * fast taskqueue running the supplied deferred handler, and hook up
+ * oce_fast_isr as the filter.  Returns 0 or an errno value; on failure
+ * the IRQ resource is released (full teardown happens in
+ * oce_intr_free()).
+ */
+static int
+oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
+{
+	POCE_INTR_INFO ii = &sc->intrs[vector];
+	int rc = 0, rr;
+
+	if (vector >= OCE_MAX_EQ)
+		return (EINVAL);
+
+	/* Set the resource id for the interrupt.
+	 * MSIx is vector + 1 for the resource id,
+	 * INTx is 0 for the resource id.
+	 */
+	if (sc->flags & OCE_FLAGS_USING_MSIX)
+		rr = vector + 1;
+	else
+		rr = 0;
+	ii->intr_res = bus_alloc_resource_any(sc->dev,
+					      SYS_RES_IRQ,
+					      &rr, RF_ACTIVE|RF_SHAREABLE);
+	ii->irq_rr = rr;
+	if (ii->intr_res == NULL) {
+		device_printf(sc->dev,
+			  "Could not allocate interrupt\n");
+		rc = ENXIO;
+		return rc;
+	}
+
+	TASK_INIT(&ii->task, 0, isr, ii);
+	ii->vector = vector;
+	/* snprintf: bound the write to the task_name buffer */
+	snprintf(ii->task_name, sizeof(ii->task_name),
+		 "oce_task[%d]", ii->vector);
+	ii->tq = taskqueue_create_fast(ii->task_name,
+			M_NOWAIT,
+			taskqueue_thread_enqueue,
+			&ii->tq);
+	/* taskqueue_create_fast() can fail with M_NOWAIT; the original
+	 * code dereferenced the result unconditionally */
+	if (ii->tq == NULL) {
+		device_printf(sc->dev,
+			  "Could not create taskqueue\n");
+		bus_release_resource(sc->dev, SYS_RES_IRQ,
+				     ii->irq_rr, ii->intr_res);
+		ii->intr_res = NULL;
+		return ENOMEM;
+	}
+	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
+			device_get_nameunit(sc->dev));
+
+	ii->sc = sc;
+	rc = bus_setup_intr(sc->dev,
+			ii->intr_res,
+			INTR_TYPE_NET,
+			oce_fast_isr, NULL, ii, &ii->tag);
+	return rc;
+
+}
+
+
+/*
+ * Release every interrupt vector allocated by oce_alloc_intr():
+ * teardown handlers, free taskqueues, release IRQ resources, and give
+ * back MSI-X vectors if they were in use.  Safe on partially-allocated
+ * state (each field is NULL-checked).
+ */
+void
+oce_intr_free(POCE_SOFTC sc)
+{
+	int i = 0;
+	
+	for (i = 0; i < sc->intr_count; i++) {
+		
+		if (sc->intrs[i].tag != NULL)
+			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
+						sc->intrs[i].tag);
+		if (sc->intrs[i].tq != NULL)
+			taskqueue_free(sc->intrs[i].tq);
+		
+		if (sc->intrs[i].intr_res != NULL)
+			bus_release_resource(sc->dev, SYS_RES_IRQ,
+						sc->intrs[i].irq_rr,
+						sc->intrs[i].intr_res);
+		sc->intrs[i].tag = NULL;
+		sc->intrs[i].intr_res = NULL;
+	}
+
+	if (sc->flags & OCE_FLAGS_USING_MSIX)
+		pci_release_msi(sc->dev);
+
+}
+
+
+
+/******************************************************************************
+* Media callbacks functions *
+******************************************************************************/
+
+/*
+ * ifmedia status callback: report link validity/activity and translate
+ * the firmware link-speed code into an ifmedia type, caching the speed
+ * in Mbps in the softc.  Returns early (media left as bare IFM_ETHER)
+ * when the link is down.
+ */
+static void
+oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
+{
+	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
+
+
+	req->ifm_status = IFM_AVALID;
+	req->ifm_active = IFM_ETHER;
+	
+	if (sc->link_status == 1)
+		req->ifm_status |= IFM_ACTIVE;
+	else 
+		return;
+	
+	/* speed codes 1..4 come from the firmware link event; an unknown
+	 * code leaves ifm_active/speed unchanged */
+	switch (sc->link_speed) {
+	case 1: /* 10 Mbps */
+		req->ifm_active |= IFM_10_T | IFM_FDX;
+		sc->speed = 10;
+		break;
+	case 2: /* 100 Mbps */
+		req->ifm_active |= IFM_100_TX | IFM_FDX;
+		sc->speed = 100;
+		break;
+	case 3: /* 1 Gbps */
+		req->ifm_active |= IFM_1000_T | IFM_FDX;
+		sc->speed = 1000;
+		break;
+	case 4: /* 10 Gbps */
+		req->ifm_active |= IFM_10G_SR | IFM_FDX;
+		sc->speed = 10000;
+		break;
+	}
+	
+	return;
+}
+
+
+/*
+ * ifmedia change callback: media is fixed on this hardware, so this is
+ * a no-op that always succeeds.  Declared static at the top of the
+ * file; the definition was missing the qualifier -- made consistent.
+ */
+static int
+oce_media_change(struct ifnet *ifp)
+{
+	return 0;
+}
+
+
+
+
+/*****************************************************************************
+ * Transmit routines functions *
+ *****************************************************************************/
+
+/*
+ * Transmit one mbuf chain on work queue wq_index: DMA-map the chain
+ * (defragmenting once on EFBIG), build the NIC header WQE plus one
+ * fragment WQE per DMA segment (plus a zeroed dummy WQE on BE3 to keep
+ * the WQE count even), then ring the TX doorbell.  Returns 0 on
+ * success, EBUSY when the ring or packet array is full (mbuf NOT
+ * freed, caller may requeue), ENOMEM on transient mapping shortage,
+ * or another errno after freeing the mbuf and NULLing *mpp.
+ */
+static int
+oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
+{
+	int rc = 0, i, retry_cnt = 0;
+	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
+	struct mbuf *m, *m_temp;
+	struct oce_wq *wq = sc->wq[wq_index];
+	struct oce_packet_desc *pd;
+	uint32_t out;
+	struct oce_nic_hdr_wqe *nichdr;
+	struct oce_nic_frag_wqe *nicfrag;
+	int num_wqes;
+	uint32_t reg_value;
+	uint16_t mss = 0;
+
+	m = *mpp;
+	if (!m)
+		return EINVAL;
+
+	if (!(m->m_flags & M_PKTHDR)) {
+		rc = ENXIO;
+		goto free_ret;
+	}
+
+	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+		/* consolidate packet buffers for TSO/LSO segment offload */
+		m = oce_tso_setup(sc, mpp, &mss);
+		if (m == NULL) {
+			rc = ENXIO;
+			goto free_ret;
+		}
+	}
+
+	/* reserve a packet descriptor slot; full when out would catch up
+	 * with packets_in */
+	out = wq->packets_out + 1;
+	if (out == OCE_WQ_PACKET_ARRAY_SIZE)
+		out = 0;
+	if (out == wq->packets_in)
+		return EBUSY;
+
+	pd = &wq->pckts[wq->packets_out];
+retry:
+	rc = bus_dmamap_load_mbuf_sg(wq->tag,
+				     pd->map,
+				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
+	if (rc == 0) {
+		num_wqes = pd->nsegs + 1;
+		if (IS_BE(sc)) {
+			/*Dummy required only for BE3.*/
+			if (num_wqes & 1)
+				num_wqes++;
+		}
+		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
+			bus_dmamap_unload(wq->tag, pd->map);
+			return EBUSY;
+		}
+
+		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
+		pd->mbuf = m;
+		wq->packets_out = out;
+
+		/* header WQE: checksum/TSO/VLAN offload flags */
+		nichdr =
+		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
+		nichdr->u0.dw[0] = 0;
+		nichdr->u0.dw[1] = 0;
+		nichdr->u0.dw[2] = 0;
+		nichdr->u0.dw[3] = 0;
+
+		nichdr->u0.s.complete = 1;
+		nichdr->u0.s.event = 1;
+		nichdr->u0.s.crc = 1;
+		nichdr->u0.s.forward = 0;
+		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
+		nichdr->u0.s.udpcs =
+		    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
+		nichdr->u0.s.tcpcs =
+		    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
+		nichdr->u0.s.num_wqe = num_wqes;
+		nichdr->u0.s.total_length = m->m_pkthdr.len;
+		if (m->m_flags & M_VLANTAG) {
+			nichdr->u0.s.vlan = 1; /*Vlan present*/
+			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
+		}
+		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+			if (m->m_pkthdr.tso_segsz) {
+				nichdr->u0.s.lso = 1;
+				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
+			}
+			if (!IS_BE(sc))
+				nichdr->u0.s.ipcs = 1;
+		}
+
+		RING_PUT(wq->ring, 1);
+		wq->ring->num_used++;
+
+		/* one fragment WQE per DMA segment */
+		for (i = 0; i < pd->nsegs; i++) {
+			nicfrag =
+			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
+						      struct oce_nic_frag_wqe);
+			nicfrag->u0.s.rsvd0 = 0;
+			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
+			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
+			nicfrag->u0.s.frag_len = segs[i].ds_len;
+			pd->wqe_idx = wq->ring->pidx;
+			RING_PUT(wq->ring, 1);
+			wq->ring->num_used++;
+		}
+		/* BE3 even-count padding: one zeroed dummy WQE */
+		if (num_wqes > (pd->nsegs + 1)) {
+			nicfrag =
+			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
+						      struct oce_nic_frag_wqe);
+			nicfrag->u0.dw[0] = 0;
+			nicfrag->u0.dw[1] = 0;
+			nicfrag->u0.dw[2] = 0;
+			nicfrag->u0.dw[3] = 0;
+			pd->wqe_idx = wq->ring->pidx;
+			RING_PUT(wq->ring, 1);
+			wq->ring->num_used++;
+			pd->nsegs++;
+		}
+
+		sc->ifp->if_opackets++;
+		wq->tx_stats.tx_reqs++;
+		wq->tx_stats.tx_wrbs += num_wqes;
+		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
+		wq->tx_stats.tx_pkts++;
+
+		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
+				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+		/* ring the doorbell: WQE count in the high half, queue id low */
+		reg_value = (num_wqes << 16) | wq->wq_id;
+		OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
+
+	} else if (rc == EFBIG)	{
+		/* too many segments: defragment once, then give up */
+		if (retry_cnt == 0) {
+			m_temp = m_defrag(m, M_DONTWAIT);
+			if (m_temp == NULL)
+				goto free_ret;
+			m = m_temp;
+			*mpp = m_temp;
+			retry_cnt = retry_cnt + 1;
+			goto retry;
+		} else
+			goto free_ret;
+	} else if (rc == ENOMEM)
+		return rc;
+	else
+		goto free_ret;
+	
+	return 0;
+
+free_ret:
+	m_freem(*mpp);
+	*mpp = NULL;
+	return rc;
+}
+
+
+/*
+ * Reclaim one transmitted packet: advance the consumer index, return
+ * its WQEs to the ring, unmap and free the mbuf, and restart TX if the
+ * interface was flow-controlled and the ring has drained below half.
+ * Called from oce_wq_handler with the wq tx_lock held.
+ */
+static void
+oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
+{
+	uint32_t in;
+	struct oce_packet_desc *pd;
+	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
+	struct mbuf *m;
+
+	if (wq->packets_out == wq->packets_in)
+		device_printf(sc->dev, "WQ transmit descriptor missing\n");
+
+	in = wq->packets_in + 1;
+	if (in == OCE_WQ_PACKET_ARRAY_SIZE)
+		in = 0;
+
+	pd = &wq->pckts[wq->packets_in];
+	wq->packets_in = in;
+	/* +1 accounts for the header WQE (and dummy, folded into nsegs) */
+	wq->ring->num_used -= (pd->nsegs + 1);
+	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(wq->tag, pd->map);
+
+	m = pd->mbuf;
+	m_freem(m);
+	pd->mbuf = NULL;
+
+	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
+		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
+			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
+			oce_tx_restart(sc, wq);	
+		}
+	}
+}
+
+
+/*
+ * Kick the TX task to drain packets queued while the ring was full.
+ * No-op unless the interface is running and there is pending work.
+ */
+static void
+oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
+{
+
+	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
+		return;
+
+#if __FreeBSD_version >= 800000
+	if (!drbr_empty(sc->ifp, wq->br))
+#else
+	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
+#endif
+		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
+
+}
+
+
+/*
+ * Prepare an mbuf chain for TSO: make it writable (dup if shared),
+ * locate the TCP header behind the (possibly VLAN-tagged) Ethernet and
+ * IPv4/IPv6 headers, and pull all headers into the first mbuf.
+ * Returns the (possibly replaced) mbuf, updating *mpp, or NULL on
+ * failure.  On the m_pullup() failure path *mpp is set to NULL because
+ * m_pullup() frees the chain itself -- the caller's error path calls
+ * m_freem(*mpp), which previously caused a double free (m_freem(NULL)
+ * is a no-op).  On other NULL returns the chain is left intact for the
+ * caller to free.
+ */
+static struct mbuf *
+oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp, uint16_t *mss)
+{
+	struct mbuf *m;
+	struct ip *ip;
+	struct ip6_hdr *ip6;
+	struct ether_vlan_header *eh;
+	struct tcphdr *th;
+	uint16_t etype;
+	int total_len = 0, ehdrlen = 0;
+	
+	m = *mpp;
+	*mss = m->m_pkthdr.tso_segsz;
+
+	if (M_WRITABLE(m) == 0) {
+		m = m_dup(*mpp, M_DONTWAIT);
+		if (!m)
+			return NULL;
+		m_freem(*mpp);
+		*mpp = m;
+	}
+
+	eh = mtod(m, struct ether_vlan_header *);
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		etype = ntohs(eh->evl_proto);
+		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+	} else {
+		etype = ntohs(eh->evl_encap_proto);
+		ehdrlen = ETHER_HDR_LEN;
+	}
+
+
+	switch (etype) {
+	case ETHERTYPE_IP:
+		ip = (struct ip *)(m->m_data + ehdrlen);
+		if (ip->ip_p != IPPROTO_TCP)
+			return NULL;
+		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+
+		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
+		break;
+	case ETHERTYPE_IPV6:
+		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
+		if (ip6->ip6_nxt != IPPROTO_TCP)
+			return NULL;
+		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
+
+		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
+		break;
+	default:
+		return NULL;
+	}
+
+	m = m_pullup(m, total_len);
+	if (!m) {
+		/* m_pullup() freed the chain; clear the caller's reference
+		 * so its m_freem(*mpp) cannot free it a second time */
+		*mpp = NULL;
+		return NULL;
+	}
+	*mpp = m;
+	return m;
+	
+}
+
+
+/*
+ * Deferred TX task (enqueued by oce_tx_restart): drain the work
+ * queue's buf_ring via oce_multiq_transmit on FreeBSD 8+, or restart
+ * the legacy if_snd path on older releases.  Skips silently if the
+ * tx_lock is contended -- the lock holder will drain the ring.
+ */
+void
+oce_tx_task(void *arg, int npending)
+{
+	struct oce_wq *wq = arg;
+	POCE_SOFTC sc = wq->parent;
+	struct ifnet *ifp = sc->ifp;
+	int rc = 0;
+
+#if __FreeBSD_version >= 800000
+	if (TRY_LOCK(&wq->tx_lock)) {
+		rc = oce_multiq_transmit(ifp, NULL, wq);
+		if (rc) {
+			device_printf(sc->dev,
+			 "TX[%d] restart failed\n", wq->queue_index);
+		}
+		UNLOCK(&wq->tx_lock);
+	}
+#else
+	oce_start(ifp);
+#endif
+
+}
+
+
+/*
+ * Legacy if_start entry point: dequeue packets from the default send
+ * queue and transmit them on work queue 0, re-prepending and setting
+ * OACTIVE when the ring fills.  Each successfully queued packet is
+ * tapped to BPF.
+ */
+void
+oce_start(struct ifnet *ifp)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+	struct mbuf *m;
+	int rc = 0;
+
+	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+			IFF_DRV_RUNNING)
+		return;
+
+	do {
+		IF_DEQUEUE(&sc->ifp->if_snd, m);
+		if (m == NULL)
+			break;
+		/* oce_start always uses default TX queue 0 */
+		LOCK(&sc->wq[0]->tx_lock);
+		rc = oce_tx(sc, &m, 0);
+		UNLOCK(&sc->wq[0]->tx_lock);
+		if (rc) {
+			/* m may be NULL here: oce_tx frees it on hard errors */
+			if (m != NULL) {
+				sc->wq[0]->tx_stats.tx_stops ++;
+				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+				IFQ_DRV_PREPEND(&ifp->if_snd, m);
+				m = NULL;
+			}
+			break;
+		}
+		if (m != NULL)
+			ETHER_BPF_MTAP(ifp, m);
+
+	} while (1);
+
+	return;
+}
+
+
+/* Handle the Completion Queue for transmit: reclaim completed packets
+ * and re-arm the CQ with the number of entries consumed.  A non-zero
+ * dw[3] marks a valid CQE; it is zeroed after processing so the entry
+ * is not seen again.  Always returns 0. */
+uint16_t
+oce_wq_handler(void *arg)
+{
+	struct oce_wq *wq = (struct oce_wq *)arg;
+	POCE_SOFTC sc = wq->parent;
+	struct oce_cq *cq = wq->cq;
+	struct oce_nic_tx_cqe *cqe;
+	int num_cqes = 0;
+
+	LOCK(&wq->tx_lock);
+	bus_dmamap_sync(cq->ring->dma.tag,
+			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+	while (cqe->u0.dw[3]) {
+		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
+
+		/* sync the WQ consumer index to the completed WQE */
+		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
+		if (wq->ring->cidx >= wq->ring->num_items)
+			wq->ring->cidx -= wq->ring->num_items;
+
+		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
+		wq->tx_stats.tx_compl++;
+		cqe->u0.dw[3] = 0;
+		RING_GET(cq->ring, 1);
+		bus_dmamap_sync(cq->ring->dma.tag,
+				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+		cqe =
+		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+		num_cqes++;
+	}
+
+	if (num_cqes)
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+	UNLOCK(&wq->tx_lock);
+
+	return 0;
+}
+
+
+/*
+ * Transmit on a specific work queue (caller holds wq->tx_lock).  With
+ * m == NULL this just drains the queue's buf_ring (TX task restart
+ * path); otherwise m is sent directly or enqueued behind existing
+ * backlog to preserve ordering.  On a full ring the current packet is
+ * re-enqueued and OACTIVE is set.  Returns 0 or a drbr errno.
+ */
+static int
+oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+	int status = 0, queue_index = 0;
+	struct mbuf *next = NULL;
+	struct buf_ring *br = NULL;
+
+	br  = wq->br;
+	queue_index = wq->queue_index;
+
+	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+		IFF_DRV_RUNNING) {
+		if (m != NULL)
+			status = drbr_enqueue(ifp, br, m);
+		return status;
+	}
+
+	if (m == NULL)
+		next = drbr_dequeue(ifp, br);		
+	else if (drbr_needs_enqueue(ifp, br)) {
+		if ((status = drbr_enqueue(ifp, br, m)) != 0)
+			return status;
+		next = drbr_dequeue(ifp, br);
+	} else
+		next = m;
+
+	while (next != NULL) {
+		if (oce_tx(sc, &next, queue_index)) {
+			/* next may be NULL: oce_tx frees it on hard errors */
+			if (next != NULL) {
+				wq->tx_stats.tx_stops ++;
+				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+				status = drbr_enqueue(ifp, br, next);
+			}  
+			break;
+		}
+		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+		ETHER_BPF_MTAP(ifp, next);
+		next = drbr_dequeue(ifp, br);
+	}
+
+	return status;
+}
+
+
+
+
+/*****************************************************************************
+ * Receive routines functions *
+ *****************************************************************************/
+
+/*
+ * Process one RX completion: assemble the packet's fragments into an
+ * mbuf chain, fill in checksum-offload results and VLAN tag, then hand
+ * the packet to LRO (when eligible) or directly to the stack, updating
+ * per-queue statistics.  A zero-length completion is a Lancer
+ * partial-DMA artifact and is discarded.
+ */
+static void
+oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
+{
+	uint32_t out;
+	struct oce_packet_desc *pd;
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+	int i, len, frag_len;
+	struct mbuf *m = NULL, *tail = NULL;
+	uint16_t vtag;
+
+	len = cqe->u0.s.pkt_size;
+	vtag = cqe->u0.s.vlan_tag;
+	if (!len) {
+		/*partial DMA workaround for Lancer*/
+		oce_discard_rx_comp(rq, cqe);
+		goto exit;
+	}
+
+	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
+
+		if (rq->packets_out == rq->packets_in) {
+			/* NOTE(review): message says "transmit" but this is
+			 * the receive ring -- misleading log text */
+			device_printf(sc->dev,
+				  "RQ transmit descriptor missing\n");
+		}
+		out = rq->packets_out + 1;
+		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
+			out = 0;
+		pd = &rq->pckts[rq->packets_out];
+		rq->packets_out = out;
+
+		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(rq->tag, pd->map);
+		rq->pending--;
+
+		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
+		pd->mbuf->m_len = frag_len;
+
+		if (tail != NULL) {
+			/* additional fragments */
+			pd->mbuf->m_flags &= ~M_PKTHDR;
+			tail->m_next = pd->mbuf;
+			tail = pd->mbuf;
+		} else {
+			/* first fragment, fill out much of the packet header */
+			pd->mbuf->m_pkthdr.len = len;
+			pd->mbuf->m_pkthdr.csum_flags = 0;
+			if (IF_CSUM_ENABLED(sc)) {
+				if (cqe->u0.s.l4_cksum_pass) {
+					pd->mbuf->m_pkthdr.csum_flags |=
+					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+					pd->mbuf->m_pkthdr.csum_data = 0xffff;
+				}
+				if (cqe->u0.s.ip_cksum_pass) {
+					if (!cqe->u0.s.ip_ver) { //IPV4 
+						pd->mbuf->m_pkthdr.csum_flags |=
+						(CSUM_IP_CHECKED|CSUM_IP_VALID);
+					}
+				}
+			}
+			m = tail = pd->mbuf;
+		}
+		pd->mbuf = NULL;
+		len -= frag_len;
+	}
+
+	if (m) {
+		if (!oce_cqe_portid_valid(sc, cqe)) {
+			 m_freem(m);
+			 goto exit;
+		} 
+
+		m->m_pkthdr.rcvif = sc->ifp;
+#if __FreeBSD_version >= 800000
+		m->m_pkthdr.flowid = rq->queue_index;
+		m->m_flags |= M_FLOWID;
+#endif
+		/* Determine whether a VLAN tag is present in the CQE */
+		if (oce_cqe_vtp_valid(sc, cqe)) { 
+			if (sc->function_mode & FNM_FLEX10_MODE) {
+				/* FLEX10 */
+				if (cqe->u0.s.qnq) {
+					/* If QnQ is not set, neglect VLAN */
+					if (IS_BE(sc))
+						m->m_pkthdr.ether_vtag =
+							BSWAP_16(vtag);
+					else
+						m->m_pkthdr.ether_vtag = vtag;
+					m->m_flags |= M_VLANTAG;
+				}
+			} else {
+				if (IS_BE(sc))
+					m->m_pkthdr.ether_vtag = BSWAP_16(vtag);
+				else
+					m->m_pkthdr.ether_vtag = vtag;
+				m->m_flags |= M_VLANTAG;
+			}
+		}
+
+		sc->ifp->if_ipackets++;
+		/* Try to queue to LRO */
+		if (IF_LRO_ENABLED(sc) &&
+		    !(m->m_flags & M_VLANTAG) &&
+		    (cqe->u0.s.ip_cksum_pass) &&
+		    (cqe->u0.s.l4_cksum_pass) &&
+		    (!cqe->u0.s.ip_ver)       &&
+		    (rq->lro.lro_cnt != 0)) {
+
+			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
+				rq->lro_pkts_queued ++;		
+				goto post_done;
+			}
+			/* If LRO posting fails then try to post to STACK */
+		}
+
+		(*sc->ifp->if_input) (sc->ifp, m);
+post_done:
+		/* Update rx stats per queue */
+		rq->rx_stats.rx_pkts++;
+		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
+		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
+		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
+			rq->rx_stats.rx_mcast_pkts++;
+		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
+			rq->rx_stats.rx_ucast_pkts++;
+	}
+exit:
+	return;
+}
+
+
+/*
+ * Drop an RX completion: unmap and free every fragment buffer it
+ * references without passing anything to the stack.  On Lancer A0 an
+ * errored completion over-reports the fragment count by one, so it is
+ * adjusted before the walk.
+ */
+static void
+oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+{
+	uint32_t out, i = 0;
+	struct oce_packet_desc *pd;
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+	int num_frags = cqe->u0.s.num_fragments;
+
+	if (IS_XE201(sc) && cqe->u0.s.error) {
+		/* Lancer A0 workaround
+		* num_frags will be 1 more than actual in case of error
+		*/
+		if (num_frags)
+			num_frags -= 1;
+	}
+	for (i = 0; i < num_frags; i++) {
+		if (rq->packets_out == rq->packets_in) {
+			device_printf(sc->dev,
+				"RQ transmit descriptor missing\n");
+		}
+		out = rq->packets_out + 1;
+		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
+			out = 0;
+		pd = &rq->pckts[rq->packets_out];
+		rq->packets_out = out;
+
+		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(rq->tag, pd->map);
+		rq->pending--;
+		m_freem(pd->mbuf);
+	}
+
+}
+
+
+/*
+ * Report whether the RX completion carries a VLAN tag.  BE3 native
+ * mode uses the v1 CQE layout, so the bit must be read through the
+ * matching structure view.
+ */
+static int
+oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
+{
+	if (sc->be3_native)
+		return ((struct oce_nic_rx_cqe_v1 *)cqe)->u0.s.vlan_tag_present;
+
+	return cqe->u0.s.vlan_tag_present;
+}
+
+
+/*
+ * Check that the RX completion belongs to this function's port.  Only
+ * BE3 in native mode reports a port id in the CQE (v1 layout); for BE3
+ * legacy and Lancer the check always succeeds.  Returns 1 when the
+ * packet should be accepted, 0 otherwise.
+ */
+static int
+oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
+{
+	struct oce_nic_rx_cqe_v1 *cqe_v1;
+
+	if (!(sc->be3_native && IS_BE(sc)))
+		return 1;
+
+	cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
+	return (sc->port_id == cqe_v1->u0.s.port);
+}
+
+
+/* Flush every active LRO session on this RQ so that held packets are
+ * delivered to the stack.  Called from oce_rq_handler periodically and
+ * at the end of each completion pass.
+ */
+static void
+oce_rx_flush_lro(struct oce_rq *rq)
+{
+	struct lro_ctrl *lro = &rq->lro;
+	struct lro_entry *queued;
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+
+	if (!IF_LRO_ENABLED(sc))
+		return;
+
+	/* Pop and flush each queued session in turn. */
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+	rq->lro_pkts_queued = 0;
+
+	return;
+}
+
+
+/* Set up one LRO control block per receive queue and bind it to our
+ * ifnet.  Returns 0 on success or the tcp_lro_init() error on the
+ * first failure.
+ */
+static int
+oce_init_lro(POCE_SOFTC sc)
+{
+	int qidx, status;
+
+	for (qidx = 0; qidx < sc->nrqs; qidx++) {
+		struct lro_ctrl *ctrl = &sc->rq[qidx]->lro;
+
+		status = tcp_lro_init(ctrl);
+		if (status != 0) {
+			device_printf(sc->dev, "LRO init failed\n");
+			return status;
+		}
+		ctrl->ifp = sc->ifp;
+	}
+
+	return 0;
+}
+
+
+/* Release the per-RQ LRO state allocated by oce_init_lro().  The
+ * former NULL check was removed: &sc->rq[i]->lro is the address of an
+ * embedded struct member and can never be NULL.
+ */
+void
+oce_free_lro(POCE_SOFTC sc)
+{
+	int i = 0;
+
+	for (i = 0; i < sc->nrqs; i++)
+		tcp_lro_free(&sc->rq[i]->lro);
+}
+
+
+/* Refill empty RQ slots with fresh mbuf clusters and notify hardware
+ * of the newly posted buffers via the RXULP doorbell (in chunks of at
+ * most OCE_MAX_RQ_POSTS).  A partial or zero post is not an error;
+ * always returns 0.
+ */
+int
+oce_alloc_rx_bufs(struct oce_rq *rq, int count)
+{
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+	int i, in, rc;
+	struct oce_packet_desc *pd;
+	bus_dma_segment_t segs[6];
+	int nsegs, added = 0;
+	struct oce_nic_rqe *rqe;
+	pd_rxulp_db_t rxdb_reg;
+
+
+	for (i = 0; i < count; i++) {
+		in = rq->packets_in + 1;
+		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
+			in = 0;
+		if (in == rq->packets_out)
+			break;	/* no more room */
+
+		pd = &rq->pckts[rq->packets_in];
+		pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+		if (pd->mbuf == NULL)
+			break;
+
+		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
+		rc = bus_dmamap_load_mbuf_sg(rq->tag,
+					     pd->map,
+					     pd->mbuf,
+					     segs, &nsegs, BUS_DMA_NOWAIT);
+		if (rc) {
+			m_free(pd->mbuf);
+			break;
+		}
+
+		if (nsegs != 1) {
+			/* A single cluster should always map to one
+			 * segment.  NOTE(review): on this path the mbuf
+			 * remains loaded in pd->map and pd is reused on
+			 * the next iteration — looks like a leak; confirm
+			 * whether this path is reachable.
+			 */
+			i--;
+			continue;
+		}
+
+		rq->packets_in = in;
+		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
+
+		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
+		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
+		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
+		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
+		RING_PUT(rq->ring, 1);
+		added++;
+		rq->pending++;
+	}
+	if (added != 0) {
+		/* rxdb_reg is an uninitialized stack union and only some
+		 * bit-fields are assigned below, so clear the register
+		 * image before each doorbell write to avoid ringing the
+		 * hardware with garbage bits.
+		 */
+		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
+			DELAY(1);
+			rxdb_reg.dw0 = 0;
+			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
+			rxdb_reg.bits.qid = rq->rq_id;
+			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+			added -= OCE_MAX_RQ_POSTS;
+		}
+		if (added > 0) {
+			DELAY(1);
+			rxdb_reg.dw0 = 0;
+			rxdb_reg.bits.qid = rq->rq_id;
+			rxdb_reg.bits.num_posted = added;
+			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+		}
+	}
+
+	return 0;
+}
+
+
+/* Handle the Completion Queue for receive */
+uint16_t
+oce_rq_handler(void *arg)
+{
+	struct oce_rq *rq = (struct oce_rq *)arg;
+	struct oce_cq *cq = rq->cq;
+	POCE_SOFTC sc = rq->parent;
+	struct oce_nic_rx_cqe *cqe;
+	int num_cqes = 0, rq_buffers_used = 0;
+
+
+	LOCK(&rq->rx_lock);
+	/* NOTE(review): POSTWRITE used before reading device-written
+	 * CQEs — POSTREAD would be expected; confirm intent.
+	 */
+	bus_dmamap_sync(cq->ring->dma.tag,
+			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+	/* A nonzero dw[2] marks a valid (unconsumed) completion. */
+	while (cqe->u0.dw[2]) {
+		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
+
+		RING_GET(rq->ring, 1);
+		if (cqe->u0.s.error == 0) {
+			oce_rx(rq, cqe->u0.s.frag_index, cqe);
+		} else {
+			rq->rx_stats.rxcp_err++;
+			sc->ifp->if_ierrors++;
+			if (IS_XE201(sc))
+				/* Lancer A0 no buffer workaround */
+				oce_discard_rx_comp(rq, cqe);
+			else
+				/* Post L3/L4 errors to stack.*/
+				oce_rx(rq, cqe->u0.s.frag_index, cqe);
+
+		}
+		rq->rx_stats.rx_compl++;
+		/* Clear the valid dword so this CQE is not re-processed. */
+		cqe->u0.dw[2] = 0;
+
+		/* Flush LRO periodically so sessions don't grow unbounded. */
+		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
+			oce_rx_flush_lro(rq);
+		}
+
+		RING_GET(cq->ring, 1);
+		bus_dmamap_sync(cq->ring->dma.tag,
+				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+		cqe =
+		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+		num_cqes++;
+		/* Bound work per invocation (smaller budget on Lancer). */
+		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
+			break;
+	}
+	if (IF_LRO_ENABLED(sc))
+		oce_rx_flush_lro(rq);
+
+	if (num_cqes) {
+		/* Ack the consumed CQEs, then replenish RX buffers. */
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
+		if (rq_buffers_used > 1)
+			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
+	}
+
+	UNLOCK(&rq->rx_lock);
+
+	return 0;
+
+}
+
+
+
+
+/*****************************************************************************
+ * Helper function prototypes in this file *
+ *****************************************************************************/
+
+/* Allocate and configure the ifnet for this adapter: media (autoselect
+ * only; real link state comes from firmware async events handled in
+ * oce_mq_handler), ioctl/start/init callbacks, checksum/TSO/LRO
+ * capabilities, and ether_ifattach with the factory MAC address.
+ * Returns 0 on success or ENOMEM if if_alloc fails.
+ */
+static int
+oce_attach_ifp(POCE_SOFTC sc)
+{
+
+	sc->ifp = if_alloc(IFT_ETHER);
+	if (!sc->ifp)
+		return ENOMEM;
+
+	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
+	sc->ifp->if_ioctl = oce_ioctl;
+	sc->ifp->if_start = oce_start;
+	sc->ifp->if_init = oce_init;
+	sc->ifp->if_mtu = ETHERMTU;
+	sc->ifp->if_softc = sc;
+#if __FreeBSD_version >= 800000
+	/* Multiqueue transmit path (if_transmit) where available. */
+	sc->ifp->if_transmit = oce_multiq_start;
+	sc->ifp->if_qflush = oce_multiq_flush;
+#endif
+
+	if_initname(sc->ifp,
+		    device_get_name(sc->dev), device_get_unit(sc->dev));
+
+	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
+	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
+	IFQ_SET_READY(&sc->ifp->if_snd);
+
+	/* Offloads: IP/TCP/UDP checksum plus TSO. */
+	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
+	sc->ifp->if_hwassist |= CSUM_TSO;
+	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
+
+	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
+	sc->ifp->if_capabilities |= IFCAP_TSO;
+	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
+	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+	sc->ifp->if_capabilities |= IFCAP_LRO;
+
+	/* All advertised capabilities are enabled by default. */
+	sc->ifp->if_capenable = sc->ifp->if_capabilities;
+	sc->ifp->if_baudrate = IF_Mbps(10000ULL);
+
+	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
+
+	return 0;
+}
+
+
+/* VLAN registration callback: record the new tag in the shadow table
+ * and reprogram the hardware VLAN filter.
+ */
+static void
+oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+
+	/* Ignore events for other interfaces and out-of-range tags. */
+	if (ifp->if_softc != arg || vtag == 0 || vtag > 4095)
+		return;
+
+	sc->vlan_tag[vtag] = 1;
+	sc->vlans_added++;
+	oce_vid_config(sc);
+}
+
+
+/* VLAN unregistration callback: clear the tag from the shadow table
+ * and reprogram the hardware VLAN filter.
+ */
+static void
+oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+
+	/* Ignore events for other interfaces and out-of-range tags. */
+	if (ifp->if_softc != arg || vtag == 0 || vtag > 4095)
+		return;
+
+	sc->vlan_tag[vtag] = 0;
+	sc->vlans_added--;
+	oce_vid_config(sc);
+}
+
+
+/*
+ * A max of 64 vlans can be configured in BE. If the user configures
+ * more, place the card in vlan promiscuous mode.
+ */
+static int
+oce_vid_config(POCE_SOFTC sc)
+{
+	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
+	uint16_t ntags = 0, i;
+	int status = 0;
+
+	/* Use the exact-match filter only when it can hold every
+	 * configured tag and HW VLAN filtering is enabled on the ifnet;
+	 * otherwise fall through to VLAN-promiscuous mode below.
+	 */
+	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
+			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
+		/* Gather the set tags from the shadow table. */
+		for (i = 0; i < MAX_VLANS; i++) {
+			if (sc->vlan_tag[i]) {
+				vtags[ntags].vtag = i;
+				ntags++;
+			}
+		}
+		if (ntags)
+			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
+						vtags, ntags, 1, 0);
+	} else
+		/* Too many tags (or HW filter off): vlan promiscuous. */
+		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
+					NULL, 0, 1, 1);
+	return status;
+}
+
+
+/* Sync the hardware MAC with the ifnet link-level address.  If the
+ * administrator changed the lladdr, program the new address (add new
+ * pmac first, then delete the old one so the port is never without a
+ * valid MAC) and cache it in sc->macaddr.  Called from the 1-second
+ * timer, so changes are picked up asynchronously.
+ */
+static void
+oce_mac_addr_set(POCE_SOFTC sc)
+{
+	uint32_t old_pmac_id = sc->pmac_id;
+	int status = 0;
+
+
+	/* No-op when the cached address already matches the ifnet. */
+	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
+			 sc->macaddr.size_of_struct);
+	if (!status)
+		return;
+
+	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
+					sc->if_id, &sc->pmac_id);
+	if (!status) {
+		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
+		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
+				 sc->macaddr.size_of_struct);
+	}
+	if (status)
+		device_printf(sc->dev, "Failed update macaddress\n");
+
+}
+
+
+/* Pass-through ioctl: forward a raw firmware mailbox command supplied
+ * by a userland management tool.  The user buffer starts with the
+ * IOCTL_COOKIE magic, followed by an mbx_hdr and its payload; the
+ * whole request is copied into DMA memory, executed, and the response
+ * copied back over the same user buffer.  Returns 0 or an errno.
+ */
+static int
+oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
+{
+	POCE_SOFTC sc = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)data;
+	int rc = ENXIO;
+	char cookie[32] = {0};
+	void *priv_data = (void *)ifr->ifr_data;
+	void *ioctl_ptr;
+	uint32_t req_size;
+	struct mbx_hdr req;
+	OCE_DMA_MEM dma_mem;
+
+
+	/* Validate the magic cookie before trusting anything else. */
+	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
+		return EFAULT;
+
+	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
+		return EINVAL;
+
+	/* Peek at the mailbox header to learn the request length. */
+	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
+	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
+		return EFAULT;
+
+	/* Bound the user-supplied length before allocating. */
+	req_size = le32toh(req.u0.req.request_length);
+	if (req_size > 65536)
+		return EINVAL;
+
+	req_size += sizeof(struct mbx_hdr);
+	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
+	if (rc)
+		return ENOMEM;
+
+	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
+		rc = EFAULT;
+		goto dma_free;
+	}
+
+	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
+	if (rc) {
+		rc = EIO;
+		goto dma_free;
+	}
+
+	/* Return the firmware response in place of the request. */
+	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
+		rc = EFAULT;
+
+dma_free:
+	oce_dma_free(sc, &dma_mem);
+	return rc;
+
+}
+
+
+/* Once-a-second housekeeping callout: refresh NIC and queue stats,
+ * sync the MAC address with the ifnet, kick any stalled TX queues,
+ * and re-arm itself.
+ */
+static void
+oce_local_timer(void *arg)
+{
+	POCE_SOFTC sc = arg;
+	int qidx;
+
+	oce_refresh_nic_stats(sc);
+	oce_refresh_queue_stats(sc);
+	oce_mac_addr_set(sc);
+
+	/* TX watchdog */
+	for (qidx = 0; qidx < sc->nwqs; qidx++)
+		oce_tx_restart(sc, sc->wq[qidx]);
+
+	/* schedule the next tick */
+	callout_reset(&sc->timer, hz, oce_local_timer, sc);
+}
+
+
+/* Quiesce the interface: mark it down, wait briefly for in-flight TX
+ * to complete, disable and drain interrupts, tear down RX in hardware,
+ * and drain stale EQ/CQ entries.  Interrupts are re-enabled at the end
+ * so MCC async events (e.g. link state) keep arriving.
+ */
+static void
+oce_if_deactivate(POCE_SOFTC sc)
+{
+	int i, mtime = 0;
+	int wait_req = 0;
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	struct oce_eq *eq;
+
+	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+	/*Wait for max of 400ms for TX completions to be done */
+	while (mtime < 400) {
+		wait_req = 0;
+		for_all_wq_queues(sc, wq, i) {
+			if (wq->ring->num_used) {
+				wait_req = 1;
+				DELAY(1);
+				break;
+			}
+		}
+		mtime += 1;
+		if (!wait_req)
+			break;
+	}
+
+	/* Stop intrs and finish any bottom halves pending */
+	oce_hw_intr_disable(sc);
+
+	/* Wait for any in-progress interrupt taskqueue handlers. */
+	for (i = 0; i < sc->intr_count; i++) {
+		if (sc->intrs[i].tq != NULL) {
+			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
+		}
+	}
+
+	/* Delete RX queue in card with flush param */
+	oce_stop_rx(sc);
+
+	/* Invalidate any pending cq and eq entries*/
+	for_all_evnt_queues(sc, eq, i)
+		oce_drain_eq(eq);
+	for_all_rq_queues(sc, rq, i)
+		oce_drain_rq_cq(rq);
+	for_all_wq_queues(sc, wq, i)
+		oce_drain_wq_cq(wq);
+
+	/* But still we need to get MCC aync events.
+	   So enable intrs and also arm first EQ
+	 */
+	oce_hw_intr_enable(sc);
+	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
+
+	DELAY(10);
+}
+
+
+/* Bring the interface up: mark it running, recreate RX in hardware,
+ * restart each RQ and WQ, arm all event queues, and enable interrupts.
+ * Start failures are reported but not propagated (void return).
+ */
+static void
+oce_if_activate(POCE_SOFTC sc)
+{
+	struct oce_eq *eq;
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	int i, rc = 0;
+
+	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+	/* Keep interrupts off while the queues are being restarted. */
+	oce_hw_intr_disable(sc);
+
+	oce_start_rx(sc);
+
+	for_all_rq_queues(sc, rq, i) {
+		rc = oce_start_rq(rq);
+		if (rc)
+			device_printf(sc->dev, "Unable to start RX\n");
+	}
+
+	for_all_wq_queues(sc, wq, i) {
+		rc = oce_start_wq(wq);
+		if (rc)
+			device_printf(sc->dev, "Unable to start TX\n");
+	}
+
+
+	/* Arm every EQ so completions generate interrupts again. */
+	for_all_evnt_queues(sc, eq, i)
+		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+
+	oce_hw_intr_enable(sc);
+
+}
+
+/* Handle the Completion Queue for the Mailbox/Async notifications.
+ * Fix: the link_status/speed fields of the async CQE are only valid
+ * for ASYNC_EVENT_CODE_LINK_STATE events, so the event code is now
+ * checked BEFORE interpreting them — previously any async event was
+ * treated as a link change.
+ */
+uint16_t
+oce_mq_handler(void *arg)
+{
+	struct oce_mq *mq = (struct oce_mq *)arg;
+	POCE_SOFTC sc = mq->parent;
+	struct oce_cq *cq = mq->cq;
+	int num_cqes = 0;
+	struct oce_mq_cqe *cqe;
+	struct oce_async_cqe_link_state *acqe;
+
+	bus_dmamap_sync(cq->ring->dma.tag,
+			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+	/* A nonzero dw[3] marks a valid (unconsumed) completion. */
+	while (cqe->u0.dw[3]) {
+		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
+		if (cqe->u0.s.async_event) {
+			acqe = (struct oce_async_cqe_link_state *)cqe;
+			if (acqe->u0.s.event_code ==
+					ASYNC_EVENT_CODE_LINK_STATE) {
+				/* Propagate link state to the ifnet and
+				 * cache the reported speeds.
+				 */
+				if ((acqe->u0.s.link_status &
+				    ~ASYNC_EVENT_LOGICAL) ==
+				    ASYNC_EVENT_LINK_UP) {
+					sc->link_status =
+					    ASYNC_EVENT_LINK_UP;
+					if_link_state_change(sc->ifp,
+					    LINK_STATE_UP);
+				} else {
+					sc->link_status =
+					    ASYNC_EVENT_LINK_DOWN;
+					if_link_state_change(sc->ifp,
+					    LINK_STATE_DOWN);
+				}
+				sc->link_speed = acqe->u0.s.speed;
+				sc->qos_link_speed =
+				    (uint32_t )acqe->u0.s.qos_link_speed * 10;
+			}
+		}
+		/* Clear the valid dword and advance both rings. */
+		cqe->u0.dw[3] = 0;
+		RING_GET(cq->ring, 1);
+		RING_GET(mq->ring, 1);
+		bus_dmamap_sync(cq->ring->dma.tag,
+				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+		num_cqes++;
+	}
+
+	if (num_cqes)
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+
+	return 0;
+}
+
+
+/* Decide how many RQs/WQs we would like before interrupt allocation:
+ * a single queue pair for Flex-10, no-RSS, or BE2; otherwise one RSS
+ * RQ per CPU (capped by the hardware RSS limit) plus the default RQ,
+ * with a matching number of WQs.  update_queues_got() later trims
+ * these to what interrupt allocation actually delivered.
+ */
+static void
+setup_max_queues_want(POCE_SOFTC sc)
+{
+	int max_rss = 0;
+
+	/* Check if it is a FLEX machine. If so, don't use RSS */
+	if ((sc->function_mode & FNM_FLEX10_MODE) ||
+	    (!sc->rss_enable) ||
+	    (sc->flags & OCE_FLAGS_BE2)) {
+		sc->nrqs = 1;
+		sc->nwqs = 1;
+		sc->rss_enable = 0;
+	} else {
+		/* For multiq, our design is to have TX rings equal to
+		   RSS rings. So that we can pair up one RSS ring and TX
+		   to a single intr, which improves CPU cache efficiency.
+		 */
+		if (IS_BE(sc) && (!sc->be3_native))
+			max_rss = OCE_LEGACY_MODE_RSS;
+		else
+			max_rss = OCE_MAX_RSS;
+
+		sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
+		sc->nwqs = MIN(OCE_NCPUS, max_rss);
+
+		/* Hardware issue. Turn off multi TX for be2 */
+		if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
+			sc->nwqs = 1;
+
+	}
+
+}
+
+
+/* Trim the queue counts to what interrupt allocation actually gave
+ * us: with RSS, one RQ/WQ pair per vector plus the default RQ (single
+ * WQ on BE2 due to a hardware issue); without RSS, one pair total.
+ */
+static void
+update_queues_got(POCE_SOFTC sc)
+{
+	if (!sc->rss_enable) {
+		sc->nrqs = 1;
+		sc->nwqs = 1;
+		return;
+	}
+
+	sc->nrqs = sc->intr_count + 1;
+	sc->nwqs = (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2)) ?
+	    1 : sc->intr_count;
+}
+
diff --git a/sys/dev/oce/oce_if.h b/sys/dev/oce/oce_if.h
new file mode 100644
index 0000000..b08865d
--- /dev/null
+++ b/sys/dev/oce/oce_if.h
@@ -0,0 +1,1071 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+/* $FreeBSD$ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/module.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/mbuf.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sockopt.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/random.h>
+#include <sys/firmware.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+#include <net/if_dl.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet6/in6_var.h>
+#include <netinet6/ip6_mroute.h>
+
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+#include <netinet/sctp.h>
+#include <netinet/tcp_lro.h>
+
+#include <machine/bus.h>
+
+#include "oce_hw.h"
+
+#define COMPONENT_REVISION "4.2.116.0"
+
+/* OCE devices supported by this driver */
+#define PCI_VENDOR_EMULEX 0x10df /* Emulex */
+#define PCI_VENDOR_SERVERENGINES 0x19a2 /* ServerEngines (BE) */
+#define PCI_PRODUCT_BE2 0x0700 /* BE2 network adapter */
+#define PCI_PRODUCT_BE3 0x0710 /* BE3 network adapter */
+#define PCI_PRODUCT_XE201 0xe220 /* XE201 network adapter */
+#define PCI_PRODUCT_XE201_VF 0xe228 /* XE201 with VF in Lancer */
+
+#define IS_BE(sc) (((sc->flags & OCE_FLAGS_BE3) | \
+ (sc->flags & OCE_FLAGS_BE2))? 1:0)
+#define IS_XE201(sc) ((sc->flags & OCE_FLAGS_XE201) ? 1:0)
+#define HAS_A0_CHIP(sc) ((sc->flags & OCE_FLAGS_HAS_A0_CHIP) ? 1:0)
+
+
+/* proportion Service Level Interface queues */
+#define OCE_MAX_UNITS 2
+#define OCE_MAX_PPORT OCE_MAX_UNITS
+#define OCE_MAX_VPORT OCE_MAX_UNITS
+
+extern int mp_ncpus; /* system's total active cpu cores */
+#define OCE_NCPUS mp_ncpus
+#define OCE_MAX_RSS 8 /* This should be powers of 2. Like 2,4,8 & 16 */
+#define OCE_LEGACY_MODE_RSS 4 /* For BE3 Legacy mode*/
+
+#define OCE_MIN_RQ 1
+#define OCE_MIN_WQ 1
+
+#define OCE_MAX_RQ OCE_MAX_RSS + 1 /* one default queue */
+#define OCE_MAX_WQ 8
+
+#define OCE_MAX_EQ 32
+#define OCE_MAX_CQ OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
+#define OCE_MAX_CQ_EQ 8 /* Max CQ that can attached to an EQ */
+
+#define OCE_DEFAULT_WQ_EQD 16
+#define OCE_MAX_PACKET_Q 16
+#define OCE_RQ_BUF_SIZE 2048
+#define OCE_LSO_MAX_SIZE (64 * 1024)
+#define LONG_TIMEOUT 30
+#define OCE_MAX_JUMBO_FRAME_SIZE 16360
+#define OCE_MAX_MTU (OCE_MAX_JUMBO_FRAME_SIZE - \
+ ETHER_VLAN_ENCAP_LEN - \
+ ETHER_HDR_LEN)
+
+#define OCE_MAX_TX_ELEMENTS 29
+#define OCE_MAX_TX_DESC 1024
+#define OCE_MAX_TX_SIZE 65535
+#define OCE_MAX_RX_SIZE 4096
+#define OCE_MAX_RQ_POSTS 255
+#define OCE_DEFAULT_PROMISCUOUS 0
+
+
+#define RSS_ENABLE_IPV4 0x1
+#define RSS_ENABLE_TCP_IPV4 0x2
+#define RSS_ENABLE_IPV6 0x4
+#define RSS_ENABLE_TCP_IPV6 0x8
+
+
+/* flow control definitions */
+#define OCE_FC_NONE 0x00000000
+#define OCE_FC_TX 0x00000001
+#define OCE_FC_RX 0x00000002
+#define OCE_DEFAULT_FLOW_CONTROL (OCE_FC_TX | OCE_FC_RX)
+
+
+/* Interface capabilities to give device when creating interface */
+#define OCE_CAPAB_FLAGS (MBX_RX_IFACE_FLAGS_BROADCAST | \
+ MBX_RX_IFACE_FLAGS_UNTAGGED | \
+ MBX_RX_IFACE_FLAGS_PROMISCUOUS | \
+ MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS | \
+ MBX_RX_IFACE_FLAGS_RSS | \
+ MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR)
+
+/* Interface capabilities to enable by default (others set dynamically) */
+#define OCE_CAPAB_ENABLE (MBX_RX_IFACE_FLAGS_BROADCAST | \
+ MBX_RX_IFACE_FLAGS_UNTAGGED | \
+ MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR)
+
+#define OCE_IF_HWASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP)
+#define OCE_IF_CAPABILITIES (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
+ IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \
+ IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | \
+ IFCAP_VLAN_MTU)
+#define OCE_IF_HWASSIST_NONE 0
+#define OCE_IF_CAPABILITIES_NONE 0
+
+
+#define ETH_ADDR_LEN 6
+#define MAX_VLANFILTER_SIZE 64
+#define MAX_VLANS 4096
+
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#define BSWAP_8(x) ((x) & 0xff)
+#define BSWAP_16(x) ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
+#define BSWAP_32(x) ((BSWAP_16(x) << 16) | \
+ BSWAP_16((x) >> 16))
+#define BSWAP_64(x) ((BSWAP_32(x) << 32) | \
+ BSWAP_32((x) >> 32))
+
+#define for_all_wq_queues(sc, wq, i) \
+ for (i = 0, wq = sc->wq[0]; i < sc->nwqs; i++, wq = sc->wq[i])
+#define for_all_rq_queues(sc, rq, i) \
+ for (i = 0, rq = sc->rq[0]; i < sc->nrqs; i++, rq = sc->rq[i])
+#define for_all_evnt_queues(sc, eq, i) \
+ for (i = 0, eq = sc->eq[0]; i < sc->neqs; i++, eq = sc->eq[i])
+#define for_all_cq_queues(sc, cq, i) \
+ for (i = 0, cq = sc->cq[0]; i < sc->ncqs; i++, cq = sc->cq[i])
+
+
+/* Flash specific */
+#define IOCTL_COOKIE "SERVERENGINES CORP"
+#define MAX_FLASH_COMP 32
+
+#define IMG_ISCSI 160
+#define IMG_REDBOOT 224
+#define IMG_BIOS 34
+#define IMG_PXEBIOS 32
+#define IMG_FCOEBIOS 33
+#define IMG_ISCSI_BAK 176
+#define IMG_FCOE 162
+#define IMG_FCOE_BAK 178
+#define IMG_NCSI 16
+#define IMG_PHY 192
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+#define FLASHROM_OPER_FLASH_PHY 9
+#define FLASHROM_OPER_SAVE_PHY 10
+#define TN_8022 13
+
+enum {
+ PHY_TYPE_CX4_10GB = 0,
+ PHY_TYPE_XFP_10GB,
+ PHY_TYPE_SFP_1GB,
+ PHY_TYPE_SFP_PLUS_10GB,
+ PHY_TYPE_KR_10GB,
+ PHY_TYPE_KX4_10GB,
+ PHY_TYPE_BASET_10GB,
+ PHY_TYPE_BASET_1GB,
+ PHY_TYPE_BASEX_1GB,
+ PHY_TYPE_SGMII,
+ PHY_TYPE_DISABLED = 255
+};
+
+/**
+ * @brief Define and hold all necessary info for a single interrupt
+ */
+#define OCE_MAX_MSI 32 /* Message Signaled Interrupts */
+#define OCE_MAX_MSIX 2048 /* PCI Express MSI Interrrupts */
+
+typedef struct oce_intr_info {
+ void *tag; /* cookie returned by bus_setup_intr */
+ struct resource *intr_res; /* PCI resource container */
+ int irq_rr; /* resource id for the interrupt */
+ struct oce_softc *sc; /* pointer to the parent soft c */
+ struct oce_eq *eq; /* pointer to the connected EQ */
+ struct taskqueue *tq; /* Associated task queue */
+ struct task task; /* task queue task */
+ char task_name[32]; /* task name */
+ int vector; /* interrupt vector number */
+} OCE_INTR_INFO, *POCE_INTR_INFO;
+
+
+/* Ring related */
+#define GET_Q_NEXT(_START, _STEP, _END) \
+ (((_START) + (_STEP)) < (_END) ? ((_START) + (_STEP)) \
+ : (((_START) + (_STEP)) - (_END)))
+
+#define DBUF_PA(obj) ((obj)->addr)
+#define DBUF_VA(obj) ((obj)->ptr)
+#define DBUF_TAG(obj) ((obj)->tag)
+#define DBUF_MAP(obj) ((obj)->map)
+#define DBUF_SYNC(obj, flags) \
+ (void) bus_dmamap_sync(DBUF_TAG(obj), DBUF_MAP(obj), (flags))
+
+#define RING_NUM_PENDING(ring) ring->num_used
+#define RING_FULL(ring) (ring->num_used == ring->num_items)
+#define RING_EMPTY(ring) (ring->num_used == 0)
+#define RING_NUM_FREE(ring) \
+ (uint32_t)(ring->num_items - ring->num_used)
+#define RING_GET(ring, n) \
+ ring->cidx = GET_Q_NEXT(ring->cidx, n, ring->num_items)
+#define RING_PUT(ring, n) \
+ ring->pidx = GET_Q_NEXT(ring->pidx, n, ring->num_items)
+
+#define RING_GET_CONSUMER_ITEM_VA(ring, type) \
+ (void*)((type *)DBUF_VA(&ring->dma) + ring->cidx)
+#define RING_GET_CONSUMER_ITEM_PA(ring, type) \
+ (uint64_t)(((type *)DBUF_PA(ring->dbuf)) + ring->cidx)
+#define RING_GET_PRODUCER_ITEM_VA(ring, type) \
+ (void *)(((type *)DBUF_VA(&ring->dma)) + ring->pidx)
+#define RING_GET_PRODUCER_ITEM_PA(ring, type) \
+ (uint64_t)(((type *)DBUF_PA(ring->dbuf)) + ring->pidx)
+
+#define OCE_DMAPTR(o, c) ((c *)(o)->ptr)
+
+/* Per-slot bookkeeping for a buffer posted to a TX or RX ring. */
+struct oce_packet_desc {
+	struct mbuf *mbuf;	/* mbuf (chain) owning the data */
+	bus_dmamap_t map;	/* DMA map the mbuf is loaded into */
+	int nsegs;		/* number of DMA segments mapped */
+	uint32_t wqe_idx;	/* first WQE index used — presumably TX-only; verify */
+};
+
+/* A single bus_dma allocation: tag/map plus its KVA and bus address. */
+typedef struct oce_dma_mem {
+	bus_dma_tag_t tag;
+	bus_dmamap_t map;
+	void *ptr;		/* kernel virtual address */
+	bus_addr_t paddr;	/* bus (DMA) address */
+} OCE_DMA_MEM, *POCE_DMA_MEM;
+
+/* Generic descriptor ring shared by EQ/CQ/MQ/WQ/RQ; manipulated via
+ * the RING_GET/RING_PUT and RING_GET_*_ITEM_* macros above.
+ */
+typedef struct oce_ring_buffer_s {
+	uint16_t cidx;	/* Get ptr (consumer index) */
+	uint16_t pidx;	/* Put Ptr (producer index) */
+	size_t item_size;	/* size of one descriptor */
+	size_t num_items;	/* ring capacity in descriptors */
+	uint32_t num_used;
+	OCE_DMA_MEM dma;	/* backing DMA memory */
+} oce_ring_buffer_t;
+
+/* Stats */
+#define OCE_UNICAST_PACKET 0
+#define OCE_MULTICAST_PACKET 1
+#define OCE_BROADCAST_PACKET 2
+#define OCE_RSVD_PACKET 3
+
+struct oce_rx_stats {
+ /* Total Receive Stats*/
+ uint64_t t_rx_pkts;
+ uint64_t t_rx_bytes;
+ uint32_t t_rx_frags;
+ uint32_t t_rx_mcast_pkts;
+ uint32_t t_rx_ucast_pkts;
+ uint32_t t_rxcp_errs;
+};
+struct oce_tx_stats {
+ /*Total Transmit Stats */
+ uint64_t t_tx_pkts;
+ uint64_t t_tx_bytes;
+ uint32_t t_tx_reqs;
+ uint32_t t_tx_stops;
+ uint32_t t_tx_wrbs;
+ uint32_t t_tx_compl;
+ uint32_t t_ipv6_ext_hdr_tx_drop;
+};
+
+struct oce_be_stats {
+ uint8_t be_on_die_temperature;
+ uint32_t be_tx_events;
+ uint32_t eth_red_drops;
+ uint32_t rx_drops_no_pbuf;
+ uint32_t rx_drops_no_txpb;
+ uint32_t rx_drops_no_erx_descr;
+ uint32_t rx_drops_no_tpre_descr;
+ uint32_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_ring;
+ uint32_t forwarded_packets;
+ uint32_t rx_drops_mtu;
+ uint32_t rx_crc_errors;
+ uint32_t rx_alignment_symbol_errors;
+ uint32_t rx_pause_frames;
+ uint32_t rx_priority_pause_frames;
+ uint32_t rx_control_frames;
+ uint32_t rx_in_range_errors;
+ uint32_t rx_out_range_errors;
+ uint32_t rx_frame_too_long;
+ uint32_t rx_address_match_errors;
+ uint32_t rx_dropped_too_small;
+ uint32_t rx_dropped_too_short;
+ uint32_t rx_dropped_header_too_small;
+ uint32_t rx_dropped_tcp_length;
+ uint32_t rx_dropped_runt;
+ uint32_t rx_ip_checksum_errs;
+ uint32_t rx_tcp_checksum_errs;
+ uint32_t rx_udp_checksum_errs;
+ uint32_t rx_switched_unicast_packets;
+ uint32_t rx_switched_multicast_packets;
+ uint32_t rx_switched_broadcast_packets;
+ uint32_t tx_pauseframes;
+ uint32_t tx_priority_pauseframes;
+ uint32_t tx_controlframes;
+ uint32_t rxpp_fifo_overflow_drop;
+ uint32_t rx_input_fifo_overflow_drop;
+ uint32_t pmem_fifo_overflow_drop;
+ uint32_t jabber_events;
+};
+
+struct oce_xe201_stats {
+ uint64_t tx_pkts;
+ uint64_t tx_unicast_pkts;
+ uint64_t tx_multicast_pkts;
+ uint64_t tx_broadcast_pkts;
+ uint64_t tx_bytes;
+ uint64_t tx_unicast_bytes;
+ uint64_t tx_multicast_bytes;
+ uint64_t tx_broadcast_bytes;
+ uint64_t tx_discards;
+ uint64_t tx_errors;
+ uint64_t tx_pause_frames;
+ uint64_t tx_pause_on_frames;
+ uint64_t tx_pause_off_frames;
+ uint64_t tx_internal_mac_errors;
+ uint64_t tx_control_frames;
+ uint64_t tx_pkts_64_bytes;
+ uint64_t tx_pkts_65_to_127_bytes;
+ uint64_t tx_pkts_128_to_255_bytes;
+ uint64_t tx_pkts_256_to_511_bytes;
+ uint64_t tx_pkts_512_to_1023_bytes;
+ uint64_t tx_pkts_1024_to_1518_bytes;
+ uint64_t tx_pkts_1519_to_2047_bytes;
+ uint64_t tx_pkts_2048_to_4095_bytes;
+ uint64_t tx_pkts_4096_to_8191_bytes;
+ uint64_t tx_pkts_8192_to_9216_bytes;
+ uint64_t tx_lso_pkts;
+ uint64_t rx_pkts;
+ uint64_t rx_unicast_pkts;
+ uint64_t rx_multicast_pkts;
+ uint64_t rx_broadcast_pkts;
+ uint64_t rx_bytes;
+ uint64_t rx_unicast_bytes;
+ uint64_t rx_multicast_bytes;
+ uint64_t rx_broadcast_bytes;
+ uint32_t rx_unknown_protos;
+ uint64_t rx_discards;
+ uint64_t rx_errors;
+ uint64_t rx_crc_errors;
+ uint64_t rx_alignment_errors;
+ uint64_t rx_symbol_errors;
+ uint64_t rx_pause_frames;
+ uint64_t rx_pause_on_frames;
+ uint64_t rx_pause_off_frames;
+ uint64_t rx_frames_too_long;
+ uint64_t rx_internal_mac_errors;
+ uint32_t rx_undersize_pkts;
+ uint32_t rx_oversize_pkts;
+ uint32_t rx_fragment_pkts;
+ uint32_t rx_jabbers;
+ uint64_t rx_control_frames;
+ uint64_t rx_control_frames_unknown_opcode;
+ uint32_t rx_in_range_errors;
+ uint32_t rx_out_of_range_errors;
+ uint32_t rx_address_match_errors;
+ uint32_t rx_vlan_mismatch_errors;
+ uint32_t rx_dropped_too_small;
+ uint32_t rx_dropped_too_short;
+ uint32_t rx_dropped_header_too_small;
+ uint32_t rx_dropped_invalid_tcp_length;
+ uint32_t rx_dropped_runt;
+ uint32_t rx_ip_checksum_errors;
+ uint32_t rx_tcp_checksum_errors;
+ uint32_t rx_udp_checksum_errors;
+ uint32_t rx_non_rss_pkts;
+ uint64_t rx_ipv4_pkts;
+ uint64_t rx_ipv6_pkts;
+ uint64_t rx_ipv4_bytes;
+ uint64_t rx_ipv6_bytes;
+ uint64_t rx_nic_pkts;
+ uint64_t rx_tcp_pkts;
+ uint64_t rx_iscsi_pkts;
+ uint64_t rx_management_pkts;
+ uint64_t rx_switched_unicast_pkts;
+ uint64_t rx_switched_multicast_pkts;
+ uint64_t rx_switched_broadcast_pkts;
+ uint64_t num_forwards;
+ uint32_t rx_fifo_overflow;
+ uint32_t rx_input_fifo_overflow;
+ uint64_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_queue;
+ uint64_t rx_drops_mtu;
+ uint64_t rx_pkts_64_bytes;
+ uint64_t rx_pkts_65_to_127_bytes;
+ uint64_t rx_pkts_128_to_255_bytes;
+ uint64_t rx_pkts_256_to_511_bytes;
+ uint64_t rx_pkts_512_to_1023_bytes;
+ uint64_t rx_pkts_1024_to_1518_bytes;
+ uint64_t rx_pkts_1519_to_2047_bytes;
+ uint64_t rx_pkts_2048_to_4095_bytes;
+ uint64_t rx_pkts_4096_to_8191_bytes;
+ uint64_t rx_pkts_8192_to_9216_bytes;
+};
+
+struct oce_drv_stats {
+ struct oce_rx_stats rx;
+ struct oce_tx_stats tx;
+ union {
+ struct oce_be_stats be;
+ struct oce_xe201_stats xe201;
+ } u0;
+};
+
+
+
+#define MAX_LOCK_DESC_LEN 32
+struct oce_lock {
+ struct mtx mutex;
+ char name[MAX_LOCK_DESC_LEN+1];
+};
+#define OCE_LOCK struct oce_lock
+
+#define LOCK_CREATE(lock, desc) { \
+ strncpy((lock)->name, (desc), MAX_LOCK_DESC_LEN); \
+ (lock)->name[MAX_LOCK_DESC_LEN] = '\0'; \
+ mtx_init(&(lock)->mutex, (lock)->name, MTX_NETWORK_LOCK, MTX_DEF); \
+}
+#define LOCK_DESTROY(lock) \
+ if (mtx_initialized(&(lock)->mutex))\
+ mtx_destroy(&(lock)->mutex)
+#define TRY_LOCK(lock) mtx_trylock(&(lock)->mutex)
+#define LOCK(lock) mtx_lock(&(lock)->mutex)
+#define LOCKED(lock) mtx_owned(&(lock)->mutex)
+#define UNLOCK(lock) mtx_unlock(&(lock)->mutex)
+
+#define DEFAULT_MQ_MBOX_TIMEOUT (5 * 1000 * 1000)
+#define MBX_READY_TIMEOUT (1 * 1000 * 1000)
+#define DEFAULT_DRAIN_TIME 200
+#define MBX_TIMEOUT_SEC 5
+#define STAT_TIMEOUT 2000000
+
+/* size of the packet descriptor array in a transmit queue */
+#define OCE_TX_RING_SIZE 2048
+#define OCE_RX_RING_SIZE 1024
+#define OCE_WQ_PACKET_ARRAY_SIZE (OCE_TX_RING_SIZE/2)
+#define OCE_RQ_PACKET_ARRAY_SIZE (OCE_RX_RING_SIZE)
+
+struct oce_dev;
+
+enum eq_len {
+ EQ_LEN_256 = 256,
+ EQ_LEN_512 = 512,
+ EQ_LEN_1024 = 1024,
+ EQ_LEN_2048 = 2048,
+ EQ_LEN_4096 = 4096
+};
+
+enum eqe_size {
+ EQE_SIZE_4 = 4,
+ EQE_SIZE_16 = 16
+};
+
+enum qtype {
+ QTYPE_EQ,
+ QTYPE_MQ,
+ QTYPE_WQ,
+ QTYPE_RQ,
+ QTYPE_CQ,
+ QTYPE_RSS
+};
+
+typedef enum qstate_e {
+ QDELETED = 0x0,
+ QCREATED = 0x1
+} qstate_t;
+
+struct eq_config {
+ enum eq_len q_len;
+ enum eqe_size item_size;
+ uint32_t q_vector_num;
+ uint8_t min_eqd;
+ uint8_t max_eqd;
+ uint8_t cur_eqd;
+ uint8_t pad;
+};
+
+/* Event queue: the hardware EQ ring plus the CQs attached to it. */
+struct oce_eq {
+	uint32_t eq_id;		/* EQ id assigned by firmware */
+	void *parent;		/* back-pointer to parent (softc) */
+	void *cb_context;
+	oce_ring_buffer_t *ring;	/* EQE ring memory */
+	uint32_t ref_count;
+	qstate_t qstate;	/* QCREATED/QDELETED */
+	struct oce_cq *cq[OCE_MAX_CQ_EQ];	/* attached CQs */
+	int cq_valid;		/* presumably count of valid cq[] entries — verify */
+	struct eq_config eq_cfg;
+	int vector;		/* interrupt vector index */
+};
+
+enum cq_len {
+ CQ_LEN_256 = 256,
+ CQ_LEN_512 = 512,
+ CQ_LEN_1024 = 1024
+};
+
+struct cq_config {
+ enum cq_len q_len;
+ uint32_t item_size;
+ boolean_t is_eventable;
+ boolean_t sol_eventable;
+ boolean_t nodelay;
+ uint16_t dma_coalescing;
+};
+
+typedef uint16_t(*cq_handler_t) (void *arg1);
+
+/* Completion queue: delivers completions to cq_handler via its EQ. */
+struct oce_cq {
+	uint32_t cq_id;		/* CQ id assigned by firmware */
+	void *parent;		/* back-pointer to parent (softc) */
+	struct oce_eq *eq;	/* EQ this CQ is attached to */
+	cq_handler_t cq_handler;	/* completion callback */
+	void *cb_arg;		/* argument passed to cq_handler */
+	oce_ring_buffer_t *ring;	/* CQE ring memory */
+	qstate_t qstate;	/* QCREATED/QDELETED */
+	struct cq_config cq_cfg;
+	uint32_t ref_count;
+};
+
+
+struct mq_config {
+ uint32_t eqd;
+ uint8_t q_len;
+ uint8_t pad[3];
+};
+
+
+/* Mailbox queue for firmware commands and async notifications
+ * (serviced by oce_mq_handler).
+ */
+struct oce_mq {
+	void *parent;		/* back-pointer to parent (softc) */
+	oce_ring_buffer_t *ring;	/* MQE ring memory */
+	uint32_t mq_id;		/* MQ id assigned by firmware */
+	struct oce_cq *cq;	/* completion CQ */
+	struct oce_cq *async_cq;	/* CQ for async events */
+	uint32_t mq_free;
+	qstate_t qstate;	/* QCREATED/QDELETED */
+	struct mq_config cfg;
+};
+
+struct oce_mbx_ctx {
+ struct oce_mbx *mbx;
+ void (*cb) (void *ctx);
+ void *cb_ctx;
+};
+
+struct wq_config {
+ uint8_t wq_type;
+ uint16_t buf_size;
+ uint8_t pad[1];
+ uint32_t q_len;
+ uint16_t pd_id;
+ uint16_t pci_fn_num;
+ uint32_t eqd; /* interrupt delay */
+ uint32_t nbufs;
+ uint32_t nhdl;
+};
+
+struct oce_tx_queue_stats {
+ uint64_t tx_pkts;
+ uint64_t tx_bytes;
+ uint32_t tx_reqs;
+ uint32_t tx_stops; /* number of times TX Q was stopped */
+ uint32_t tx_wrbs;
+ uint32_t tx_compl;
+ uint32_t tx_rate;
+ uint32_t ipv6_ext_hdr_tx_drop;
+};
+
+/* Transmit (work) queue state: WQE ring, per-slot packet descriptors,
+ * and the buf_ring/task used by the multiqueue transmit path.
+ */
+struct oce_wq {
+	OCE_LOCK tx_lock;	/* serializes TX on this queue */
+	void *parent;		/* back-pointer to parent (softc) */
+	oce_ring_buffer_t *ring;	/* WQE ring memory */
+	struct oce_cq *cq; 	/* TX completion CQ */
+	bus_dma_tag_t tag;	/* DMA tag for packet buffers */
+	struct oce_packet_desc pckts[OCE_WQ_PACKET_ARRAY_SIZE];
+	uint32_t packets_in;	/* producer index into pckts[] */
+	uint32_t packets_out;	/* consumer index into pckts[] */
+	uint32_t wqm_used;
+	boolean_t resched;
+	uint32_t wq_free;
+	uint32_t tx_deferd;
+	uint32_t pkt_drops;
+	qstate_t qstate;	/* QCREATED/QDELETED */
+	uint16_t wq_id;		/* WQ id assigned by firmware */
+	struct wq_config cfg;
+	int queue_index;
+	struct oce_tx_queue_stats tx_stats;
+	struct buf_ring *br;	/* software TX ring (if_transmit path) */
+	struct task txtask;	/* deferred transmit task */
+};
+
/* Creation-time configuration of a receive queue. */
struct rq_config {
	uint32_t q_len;		/* ring depth, in entries */
	uint32_t frag_size;	/* size of each RX fragment buffer */
	uint32_t mtu;		/* MTU the queue was created with */
	uint32_t if_id;		/* owning interface id */
	uint32_t is_rss_queue;	/* non-zero if this RQ receives RSS traffic */
	uint32_t eqd;		/* event/interrupt delay */
	uint32_t nbufs;
};
+
/* Per-RX-queue software statistics (see oce_refresh_queue_stats). */
struct oce_rx_queue_stats {
	uint32_t rx_post_fail;	/* failures posting new RX buffers */
	uint32_t rx_ucast_pkts;	/* unicast packets received */
	uint32_t rx_compl;	/* RX completions processed */
	uint64_t rx_bytes;	/* bytes received */
	uint64_t rx_bytes_prev;	/* previous snapshot, for rate computation */
	uint64_t rx_pkts;	/* packets received */
	uint32_t rx_rate;	/* computed receive rate */
	uint32_t rx_mcast_pkts;	/* multicast packets received */
	uint32_t rxcp_err;	/* completions flagged in error */
	uint32_t rx_frags;	/* RX fragments consumed */
	uint32_t prev_rx_frags;	/* previous snapshot, for rate computation */
	uint32_t rx_fps;	/* computed fragments per second */
};
+
+
/* Software state for one receive queue. */
struct oce_rq {
	struct rq_config cfg;	/* parameters used at creation */
	uint32_t rq_id;		/* RQ id assigned by the firmware */
	int queue_index;	/* index of this RQ within sc->rq[] */
	uint32_t rss_cpuid;	/* RSS CPU id used in the indirection table */
	void *parent;		/* back pointer to the softc */
	oce_ring_buffer_t *ring;	/* DMA ring of receive entries */
	struct oce_cq *cq;	/* RX completion queue */
	void *pad1;
	bus_dma_tag_t tag;	/* DMA tag for receive buffer maps */
	struct oce_packet_desc pckts[OCE_RQ_PACKET_ARRAY_SIZE];	/* posted buffers */
	uint32_t packets_in;	/* pckts[] insertion index */
	uint32_t packets_out;	/* pckts[] removal index */
	uint32_t pending;	/* buffers posted and not yet returned */
#ifdef notdef
	struct mbuf *head;
	struct mbuf *tail;
	int fragsleft;
#endif
	qstate_t qstate;	/* queue lifecycle state */
	OCE_LOCK rx_lock;	/* serializes receive processing */
	struct oce_rx_queue_stats rx_stats;	/* per-queue counters */
	struct lro_ctrl lro;	/* software LRO state */
	int lro_pkts_queued;	/* packets currently held by LRO */

};
+
/* Decoded reply of a link status query (see oce_get_link_status). */
struct link_status {
	uint8_t physical_port;
	uint8_t mac_duplex;
	uint8_t mac_speed;
	uint8_t mac_fault;
	uint8_t mgmt_mac_duplex;
	uint8_t mgmt_mac_speed;
	uint16_t qos_link_speed;	/* QoS-limited speed -- confirm units */
	uint32_t logical_link_status;
};
+
+
+
+#define OCE_FLAGS_PCIX 0x00000001
+#define OCE_FLAGS_PCIE 0x00000002
+#define OCE_FLAGS_MSI_CAPABLE 0x00000004
+#define OCE_FLAGS_MSIX_CAPABLE 0x00000008
+#define OCE_FLAGS_USING_MSI 0x00000010
+#define OCE_FLAGS_USING_MSIX 0x00000020
+#define OCE_FLAGS_FUNCRESET_RQD 0x00000040
+#define OCE_FLAGS_VIRTUAL_PORT 0x00000080
+#define OCE_FLAGS_MBOX_ENDIAN_RQD 0x00000100
+#define OCE_FLAGS_BE3 0x00000200
+#define OCE_FLAGS_XE201 0x00000400
+#define OCE_FLAGS_BE2 0x00000800
+
+#define OCE_DEV_BE2_CFG_BAR 1
+#define OCE_DEV_CFG_BAR 0
+#define OCE_PCI_CSR_BAR 2
+#define OCE_PCI_DB_BAR 4
+
/* Per-adapter software state. */
typedef struct oce_softc {
	device_t dev;			/* newbus device handle */
	OCE_LOCK dev_lock;		/* adapter-wide lock */

	uint32_t flags;			/* OCE_FLAGS_* */

	uint32_t pcie_link_speed;
	uint32_t pcie_link_width;

	uint8_t fn;	/* PCI function number */

	/* Device-config BAR mapping (the only BAR used on Lancer). */
	struct resource *devcfg_res;
	bus_space_tag_t devcfg_btag;
	bus_space_handle_t devcfg_bhandle;
	void *devcfg_vhandle;

	/* CSR BAR mapping (BE-family adapters). */
	struct resource *csr_res;
	bus_space_tag_t csr_btag;
	bus_space_handle_t csr_bhandle;
	void *csr_vhandle;

	/* Doorbell BAR mapping (BE-family adapters). */
	struct resource *db_res;
	bus_space_tag_t db_btag;
	bus_space_handle_t db_bhandle;
	void *db_vhandle;

	OCE_INTR_INFO intrs[OCE_MAX_EQ];	/* one vector per EQ */
	int intr_count;

	struct ifnet *ifp;		/* network interface */

	struct ifmedia media;
	uint8_t link_status;
	uint8_t link_speed;
	uint8_t duplex;
	uint32_t qos_link_speed;
	uint32_t speed;

	char fw_version[32];		/* firmware version string */
	struct mac_address_format macaddr;

	/* Bootstrap mailbox DMA memory and its serializing lock. */
	OCE_DMA_MEM bsmbx;
	OCE_LOCK bmbx_lock;

	/* Attributes reported by QUERY_FIRMWARE_CONFIG. */
	uint32_t config_number;
	uint32_t asic_revision;
	uint32_t port_id;
	uint32_t function_mode;
	uint32_t function_caps;
	uint32_t max_tx_rings;
	uint32_t max_rx_rings;

	struct oce_wq *wq[OCE_MAX_WQ];	/* TX work queues */
	struct oce_rq *rq[OCE_MAX_RQ];	/* RX work queues */
	struct oce_cq *cq[OCE_MAX_CQ];	/* Completion queues */
	struct oce_eq *eq[OCE_MAX_EQ];	/* Event queues */
	struct oce_mq *mq;		/* Mailbox queue */

	/* Counts of queues actually created. */
	uint32_t neqs;
	uint32_t ncqs;
	uint32_t nrqs;
	uint32_t nwqs;

	uint32_t tx_ring_size;
	uint32_t rx_ring_size;
	uint32_t rq_frag_size;
	uint32_t rss_enable;

	uint32_t if_id;	/* interface ID */
	uint32_t nifs;	/* number of adapter interfaces, 0 or 1 */
	uint32_t pmac_id;	/* PMAC id */

	uint32_t if_cap_flags;

	uint32_t flow_control;		/* OCE_FC_TX / OCE_FC_RX */
	uint32_t promisc;
	/*Vlan Filtering related */
	eventhandler_tag vlan_attach;
	eventhandler_tag vlan_detach;
	uint16_t vlans_added;
	uint8_t vlan_tag[MAX_VLANS];
	/*stats */
	OCE_DMA_MEM stats_mem;		/* DMA buffer for firmware stats */
	struct oce_drv_stats oce_stats_info;
	struct callout timer;		/* periodic housekeeping timer */
	int8_t be3_native;		/* BE3 native-mode flag */

} OCE_SOFTC, *POCE_SOFTC;
+
+
+
+/**************************************************
+ * BUS memory read/write macros
+ * BE3: accesses three BAR spaces (CFG, CSR, DB)
+ * Lancer: accesses one BAR space (CFG)
+ **************************************************/
+#define OCE_READ_REG32(sc, space, o) \
+ ((IS_BE(sc)) ? (bus_space_read_4((sc)->space##_btag, \
+ (sc)->space##_bhandle,o)) \
+ : (bus_space_read_4((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o)))
+#define OCE_READ_REG16(sc, space, o) \
+ ((IS_BE(sc)) ? (bus_space_read_2((sc)->space##_btag, \
+ (sc)->space##_bhandle,o)) \
+ : (bus_space_read_2((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o)))
+#define OCE_READ_REG8(sc, space, o) \
+ ((IS_BE(sc)) ? (bus_space_read_1((sc)->space##_btag, \
+ (sc)->space##_bhandle,o)) \
+ : (bus_space_read_1((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o)))
+
+#define OCE_WRITE_REG32(sc, space, o, v) \
+ ((IS_BE(sc)) ? (bus_space_write_4((sc)->space##_btag, \
+ (sc)->space##_bhandle,o,v)) \
+ : (bus_space_write_4((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o,v)))
+#define OCE_WRITE_REG16(sc, space, o, v) \
+ ((IS_BE(sc)) ? (bus_space_write_2((sc)->space##_btag, \
+ (sc)->space##_bhandle,o,v)) \
+ : (bus_space_write_2((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o,v)))
+#define OCE_WRITE_REG8(sc, space, o, v) \
+ ((IS_BE(sc)) ? (bus_space_write_1((sc)->space##_btag, \
+ (sc)->space##_bhandle,o,v)) \
+ : (bus_space_write_1((sc)->devcfg_btag, \
+ (sc)->devcfg_bhandle,o,v)))
+
+
+/***********************************************************
+ * DMA memory functions
+ ***********************************************************/
+#define oce_dma_sync(d, f) bus_dmamap_sync((d)->tag, (d)->map, f)
+int oce_dma_alloc(POCE_SOFTC sc, bus_size_t size, POCE_DMA_MEM dma, int flags);
+void oce_dma_free(POCE_SOFTC sc, POCE_DMA_MEM dma);
+void oce_dma_map_addr(void *arg, bus_dma_segment_t * segs, int nseg, int error);
+void oce_destroy_ring_buffer(POCE_SOFTC sc, oce_ring_buffer_t *ring);
+oce_ring_buffer_t *oce_create_ring_buffer(POCE_SOFTC sc,
+ uint32_t q_len, uint32_t num_entries);
+/************************************************************
+ * oce_hw_xxx functions
+ ************************************************************/
+int oce_clear_rx_buf(struct oce_rq *rq);
+int oce_hw_pci_alloc(POCE_SOFTC sc);
+int oce_hw_init(POCE_SOFTC sc);
+int oce_hw_start(POCE_SOFTC sc);
+int oce_create_nw_interface(POCE_SOFTC sc);
+int oce_pci_soft_reset(POCE_SOFTC sc);
+int oce_hw_update_multicast(POCE_SOFTC sc);
+void oce_delete_nw_interface(POCE_SOFTC sc);
+void oce_hw_shutdown(POCE_SOFTC sc);
+void oce_hw_intr_enable(POCE_SOFTC sc);
+void oce_hw_intr_disable(POCE_SOFTC sc);
+void oce_hw_pci_free(POCE_SOFTC sc);
+
+/***********************************************************
+ * oce_queue_xxx functions
+ ***********************************************************/
+int oce_queue_init_all(POCE_SOFTC sc);
+int oce_start_rq(struct oce_rq *rq);
+int oce_start_wq(struct oce_wq *wq);
+int oce_start_mq(struct oce_mq *mq);
+int oce_start_rx(POCE_SOFTC sc);
+void oce_arm_eq(POCE_SOFTC sc,
+ int16_t qid, int npopped, uint32_t rearm, uint32_t clearint);
+void oce_queue_release_all(POCE_SOFTC sc);
+void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm);
+void oce_drain_eq(struct oce_eq *eq);
+void oce_drain_mq_cq(void *arg);
+void oce_drain_rq_cq(struct oce_rq *rq);
+void oce_drain_wq_cq(struct oce_wq *wq);
+
+uint32_t oce_page_list(oce_ring_buffer_t *ring, struct phys_addr *pa_list);
+
+/***********************************************************
+ * cleanup functions
+ ***********************************************************/
+void oce_free_lro(POCE_SOFTC sc);
+void oce_stop_rx(POCE_SOFTC sc);
+void oce_intr_free(POCE_SOFTC sc);
+void oce_free_posted_rxbuf(struct oce_rq *rq);
+
+
+/************************************************************
+ * Mailbox functions
+ ************************************************************/
+int oce_fw_clean(POCE_SOFTC sc);
+int oce_reset_fun(POCE_SOFTC sc);
+int oce_mbox_init(POCE_SOFTC sc);
+int oce_mbox_dispatch(POCE_SOFTC sc, uint32_t tmo_sec);
+int oce_get_fw_version(POCE_SOFTC sc);
+int oce_read_mac_addr(POCE_SOFTC sc, uint32_t if_id, uint8_t perm,
+ uint8_t type, struct mac_address_format *mac);
+int oce_get_fw_config(POCE_SOFTC sc);
+int oce_if_create(POCE_SOFTC sc, uint32_t cap_flags, uint32_t en_flags,
+ uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id);
+int oce_if_del(POCE_SOFTC sc, uint32_t if_id);
+int oce_config_vlan(POCE_SOFTC sc, uint32_t if_id,
+ struct normal_vlan *vtag_arr, uint8_t vtag_cnt,
+ uint32_t untagged, uint32_t enable_promisc);
+int oce_set_flow_control(POCE_SOFTC sc, uint32_t flow_control);
+int oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss);
+int oce_rxf_set_promiscuous(POCE_SOFTC sc, uint32_t enable);
+int oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl);
+int oce_get_link_status(POCE_SOFTC sc, struct link_status *link);
+int oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
+int oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
+int oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
+ uint32_t reset_stats);
+int oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
+ uint32_t req_size, uint32_t reset_stats);
+int oce_update_multicast(POCE_SOFTC sc, POCE_DMA_MEM pdma_mem);
+int oce_pass_through_mbox(POCE_SOFTC sc, POCE_DMA_MEM dma_mem, uint32_t req_size);
+int oce_mbox_macaddr_del(POCE_SOFTC sc, uint32_t if_id, uint32_t pmac_id);
+int oce_mbox_macaddr_add(POCE_SOFTC sc, uint8_t *mac_addr,
+ uint32_t if_id, uint32_t *pmac_id);
+int oce_mbox_cmd_test_loopback(POCE_SOFTC sc, uint32_t port_num,
+ uint32_t loopback_type, uint32_t pkt_size, uint32_t num_pkts,
+ uint64_t pattern);
+
+int oce_mbox_cmd_set_loopback(POCE_SOFTC sc, uint8_t port_num,
+ uint8_t loopback_type, uint8_t enable);
+
+int oce_mbox_check_native_mode(POCE_SOFTC sc);
+int oce_mbox_post(POCE_SOFTC sc,
+ struct oce_mbx *mbx, struct oce_mbx_ctx *mbxctx);
+int oce_mbox_write_flashrom(POCE_SOFTC sc, uint32_t optype,uint32_t opcode,
+ POCE_DMA_MEM pdma_mem, uint32_t num_bytes);
+int oce_mbox_lancer_write_flashrom(POCE_SOFTC sc, uint32_t data_size,
+ uint32_t data_offset,POCE_DMA_MEM pdma_mem,
+ uint32_t *written_data, uint32_t *additional_status);
+
+int oce_mbox_get_flashrom_crc(POCE_SOFTC sc, uint8_t *flash_crc,
+ uint32_t offset, uint32_t optype);
+int oce_mbox_get_phy_info(POCE_SOFTC sc, struct oce_phy_info *phy_info);
+int oce_mbox_create_rq(struct oce_rq *rq);
+int oce_mbox_create_wq(struct oce_wq *wq);
+int oce_mbox_create_eq(struct oce_eq *eq);
+int oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce,
+ uint32_t is_eventable);
+void mbx_common_req_hdr_init(struct mbx_hdr *hdr,
+ uint8_t dom,
+ uint8_t port,
+ uint8_t subsys,
+ uint8_t opcode,
+ uint32_t timeout, uint32_t pyld_len,
+ uint8_t version);
+
+
+uint16_t oce_mq_handler(void *arg);
+
+/************************************************************
+ * Transmit functions
+ ************************************************************/
+uint16_t oce_wq_handler(void *arg);
+void oce_start(struct ifnet *ifp);
+void oce_tx_task(void *arg, int npending);
+
+/************************************************************
+ * Receive functions
+ ************************************************************/
+int oce_alloc_rx_bufs(struct oce_rq *rq, int count);
+uint16_t oce_rq_handler(void *arg);
+
+
+/* Sysctl functions */
+void oce_add_sysctls(POCE_SOFTC sc);
+void oce_refresh_queue_stats(POCE_SOFTC sc);
+int oce_refresh_nic_stats(POCE_SOFTC sc);
+int oce_stats_init(POCE_SOFTC sc);
+void oce_stats_free(POCE_SOFTC sc);
+
+/* Capabilities */
+#define OCE_MODCAP_RSS 1
+#define OCE_MAX_RSP_HANDLED 64
+extern uint32_t oce_max_rsp_handled; /* max responses */
+
+#define OCE_MAC_LOOPBACK 0x0
+#define OCE_PHY_LOOPBACK 0x1
+#define OCE_ONE_PORT_EXT_LOOPBACK 0x2
+#define OCE_NO_LOOPBACK 0xff
+
+#define atomic_inc_32(x) atomic_add_32(x, 1)
+#define atomic_dec_32(x) atomic_subtract_32(x, 1)
+
/*
 * Little-endian conversion and 64-bit address split helpers.
 * DW_SWAP is intentionally a no-op (byte swapping handled elsewhere).
 * Note: ADDR_LO previously expanded with a trailing ';', which made it
 * unusable inside an expression (e.g. as a function argument); fixed.
 * IS_ALIGNED arguments are fully parenthesized to avoid expansion bugs.
 */
#define LE_64(x) htole64(x)
#define LE_32(x) htole32(x)
#define LE_16(x) htole16(x)
#define DW_SWAP(x, l)
#define IS_ALIGNED(x,a) (((x) % (a)) == 0)
#define ADDR_HI(x) ((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffff))
+
+#define IF_LRO_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_LRO) ? 1:0)
+#define IF_LSO_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_TSO4) ? 1:0)
+#define IF_CSUM_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_HWCSUM) ? 1:0)
+
#define OCE_LOG2(x) (oce_highbit(x))
/*
 * Return log2(x) when x is an exact power of two; return 0 for any
 * other value (including 0 and values with multiple bits set).
 */
static inline uint32_t oce_highbit(uint32_t x)
{
	uint32_t bit = 0;

	/* Reject 0 and anything with more than one bit set. */
	if (x == 0 || (x & (x - 1)) != 0)
		return 0;

	while (x >>= 1)
		bit++;

	return bit;
}
+
diff --git a/sys/dev/oce/oce_mbox.c b/sys/dev/oce/oce_mbox.c
new file mode 100644
index 0000000..7ca3d41
--- /dev/null
+++ b/sys/dev/oce/oce_mbox.c
@@ -0,0 +1,1705 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+
+/* $FreeBSD$ */
+
+
+#include "oce_if.h"
+
+
+/**
+ * @brief Reset (firmware) common function
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_reset_fun(POCE_SOFTC sc)
+{
+ struct oce_mbx *mbx;
+ struct oce_bmbx *mb;
+ struct ioctl_common_function_reset *fwcmd;
+ int rc = 0;
+
+ if (sc->flags & OCE_FLAGS_FUNCRESET_RQD) {
+ mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ mbx = &mb->mbx;
+ bzero(mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct ioctl_common_function_reset *)&mbx->payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET,
+ 10, /* MBX_TIMEOUT_SEC */
+ sizeof(struct
+ ioctl_common_function_reset),
+ OCE_MBX_VER_V0);
+
+ mbx->u0.s.embedded = 1;
+ mbx->payload_length =
+ sizeof(struct ioctl_common_function_reset);
+
+ rc = oce_mbox_dispatch(sc, 2);
+ }
+
+ return rc;
+}
+
+
+/**
+ * @brief This funtions tells firmware we are
+ * done with commands.
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_fw_clean(POCE_SOFTC sc)
+{
+ struct oce_bmbx *mbx;
+ uint8_t *ptr;
+ int ret = 0;
+
+ mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ ptr = (uint8_t *) &mbx->mbx;
+
+ /* Endian Signature */
+ *ptr++ = 0xff;
+ *ptr++ = 0xaa;
+ *ptr++ = 0xbb;
+ *ptr++ = 0xff;
+ *ptr++ = 0xff;
+ *ptr++ = 0xcc;
+ *ptr++ = 0xdd;
+ *ptr = 0xff;
+
+ ret = oce_mbox_dispatch(sc, 2);
+
+ return ret;
+}
+
+
+/**
+ * @brief Mailbox wait
+ * @param sc software handle to the device
+ * @param tmo_sec timeout in seconds
+ */
+static int
+oce_mbox_wait(POCE_SOFTC sc, uint32_t tmo_sec)
+{
+ tmo_sec *= 10000;
+ pd_mpu_mbox_db_t mbox_db;
+
+ for (;;) {
+ if (tmo_sec != 0) {
+ if (--tmo_sec == 0)
+ break;
+ }
+
+ mbox_db.dw0 = OCE_READ_REG32(sc, db, PD_MPU_MBOX_DB);
+
+ if (mbox_db.bits.ready)
+ return 0;
+
+ DELAY(100);
+ }
+
+ device_printf(sc->dev, "Mailbox timed out\n");
+
+ return ETIMEDOUT;
+}
+
+
+
+/**
+ * @brief Mailbox dispatch
+ * @param sc software handle to the device
+ * @param tmo_sec timeout in seconds
+ */
+int
+oce_mbox_dispatch(POCE_SOFTC sc, uint32_t tmo_sec)
+{
+ pd_mpu_mbox_db_t mbox_db;
+ uint32_t pa;
+ int rc;
+
+ oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_PREWRITE);
+ pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 34);
+ bzero(&mbox_db, sizeof(pd_mpu_mbox_db_t));
+ mbox_db.bits.ready = 0;
+ mbox_db.bits.hi = 1;
+ mbox_db.bits.address = pa;
+
+ rc = oce_mbox_wait(sc, tmo_sec);
+ if (rc == 0) {
+ OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0);
+
+ pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 4) & 0x3fffffff;
+ mbox_db.bits.ready = 0;
+ mbox_db.bits.hi = 0;
+ mbox_db.bits.address = pa;
+
+ rc = oce_mbox_wait(sc, tmo_sec);
+
+ if (rc == 0) {
+ OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0);
+
+ rc = oce_mbox_wait(sc, tmo_sec);
+
+ oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_POSTWRITE);
+ }
+ }
+
+ return rc;
+}
+
+
+
+/**
+ * @brief Mailbox common request header initialization
+ * @param hdr mailbox header
+ * @param dom domain
+ * @param port port
+ * @param subsys subsystem
+ * @param opcode opcode
+ * @param timeout timeout
+ * @param pyld_len payload length
+ */
+void
+mbx_common_req_hdr_init(struct mbx_hdr *hdr,
+ uint8_t dom, uint8_t port,
+ uint8_t subsys, uint8_t opcode,
+ uint32_t timeout, uint32_t pyld_len,
+ uint8_t version)
+{
+ hdr->u0.req.opcode = opcode;
+ hdr->u0.req.subsystem = subsys;
+ hdr->u0.req.port_number = port;
+ hdr->u0.req.domain = dom;
+
+ hdr->u0.req.timeout = timeout;
+ hdr->u0.req.request_length = pyld_len - sizeof(struct mbx_hdr);
+ hdr->u0.req.version = version;
+}
+
+
+
+/**
+ * @brief Function to initialize the hw with host endian information
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_mbox_init(POCE_SOFTC sc)
+{
+ struct oce_bmbx *mbx;
+ uint8_t *ptr;
+ int ret = 0;
+
+ if (sc->flags & OCE_FLAGS_MBOX_ENDIAN_RQD) {
+ mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ ptr = (uint8_t *) &mbx->mbx;
+
+ /* Endian Signature */
+ *ptr++ = 0xff;
+ *ptr++ = 0x12;
+ *ptr++ = 0x34;
+ *ptr++ = 0xff;
+ *ptr++ = 0xff;
+ *ptr++ = 0x56;
+ *ptr++ = 0x78;
+ *ptr = 0xff;
+
+ ret = oce_mbox_dispatch(sc, 0);
+ }
+
+ return ret;
+}
+
+
+/**
+ * @brief Function to get the firmware version
+ * @param sc software handle to the device
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_get_fw_version(POCE_SOFTC sc)
+{
+ struct oce_mbx mbx;
+ struct mbx_get_common_fw_version *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_get_common_fw_version *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_get_common_fw_version),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_get_common_fw_version);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ bcopy(fwcmd->params.rsp.fw_ver_str, sc->fw_version, 32);
+
+ return 0;
+}
+
+
+/**
+ * @brief Function to post a MBX to the mbox
+ * @param sc software handle to the device
+ * @param mbx pointer to the MBX to send
+ * @param mbxctx pointer to the mbx context structure
+ * @returns 0 on success, error on failure
+ */
+int
+oce_mbox_post(POCE_SOFTC sc, struct oce_mbx *mbx, struct oce_mbx_ctx *mbxctx)
+{
+ struct oce_mbx *mb_mbx = NULL;
+ struct oce_mq_cqe *mb_cqe = NULL;
+ struct oce_bmbx *mb = NULL;
+ int rc = 0;
+ uint32_t tmo = 0;
+ uint32_t cstatus = 0;
+ uint32_t xstatus = 0;
+
+ LOCK(&sc->bmbx_lock);
+
+ mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ mb_mbx = &mb->mbx;
+
+ /* get the tmo */
+ tmo = mbx->tag[0];
+ mbx->tag[0] = 0;
+
+ /* copy mbx into mbox */
+ bcopy(mbx, mb_mbx, sizeof(struct oce_mbx));
+
+ /* now dispatch */
+ rc = oce_mbox_dispatch(sc, tmo);
+ if (rc == 0) {
+ /*
+ * the command completed successfully. Now get the
+ * completion queue entry
+ */
+ mb_cqe = &mb->cqe;
+ DW_SWAP(u32ptr(&mb_cqe->u0.dw[0]), sizeof(struct oce_mq_cqe));
+
+ /* copy mbox mbx back */
+ bcopy(mb_mbx, mbx, sizeof(struct oce_mbx));
+
+ /* pick up the mailbox status */
+ cstatus = mb_cqe->u0.s.completion_status;
+ xstatus = mb_cqe->u0.s.extended_status;
+
+ /*
+ * store the mbx context in the cqe tag section so that
+ * the upper layer handling the cqe can associate the mbx
+ * with the response
+ */
+ if (cstatus == 0 && mbxctx) {
+ /* save context */
+ mbxctx->mbx = mb_mbx;
+ bcopy(&mbxctx, mb_cqe->u0.s.mq_tag,
+ sizeof(struct oce_mbx_ctx *));
+ }
+ }
+
+ UNLOCK(&sc->bmbx_lock);
+
+ return rc;
+}
+
+/**
+ * @brief Function to read the mac address associated with an interface
+ * @param sc software handle to the device
+ * @param if_id interface id to read the address from
+ * @param perm set to 1 if reading the factory mac address.
+ * In this case if_id is ignored
+ * @param type type of the mac address, whether network or storage
+ * @param[out] mac [OUTPUT] pointer to a buffer containing the
+ * mac address when the command succeeds.
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_read_mac_addr(POCE_SOFTC sc, uint32_t if_id,
+ uint8_t perm, uint8_t type, struct mac_address_format *mac)
+{
+ struct oce_mbx mbx;
+ struct mbx_query_common_iface_mac *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_query_common_iface_mac *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_IFACE_MAC,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_query_common_iface_mac),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.permanent = perm;
+ if (!perm)
+ fwcmd->params.req.if_id = (uint16_t) if_id;
+ else
+ fwcmd->params.req.if_id = 0;
+
+ fwcmd->params.req.type = type;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_query_common_iface_mac);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ /* copy the mac addres in the output parameter */
+ mac->size_of_struct = fwcmd->params.rsp.mac.size_of_struct;
+ bcopy(&fwcmd->params.rsp.mac.mac_addr[0], &mac->mac_addr[0],
+ mac->size_of_struct);
+
+ return 0;
+}
+
+/**
+ * @brief Function to query the fw attributes from the hw
+ * @param sc software handle to the device
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_get_fw_config(POCE_SOFTC sc)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_query_fw_config *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_common_query_fw_config *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_query_fw_config),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_query_fw_config);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ DW_SWAP(u32ptr(fwcmd), sizeof(struct mbx_common_query_fw_config));
+
+ sc->config_number = fwcmd->params.rsp.config_number;
+ sc->asic_revision = fwcmd->params.rsp.asic_revision;
+ sc->port_id = fwcmd->params.rsp.port_id;
+ sc->function_mode = fwcmd->params.rsp.function_mode;
+ sc->function_caps = fwcmd->params.rsp.function_caps;
+
+ if (fwcmd->params.rsp.ulp[0].ulp_mode & ULP_NIC_MODE) {
+ sc->max_tx_rings = fwcmd->params.rsp.ulp[0].nic_wq_tot;
+ sc->max_rx_rings = fwcmd->params.rsp.ulp[0].lro_rqid_tot;
+ } else {
+ sc->max_tx_rings = fwcmd->params.rsp.ulp[1].nic_wq_tot;
+ sc->max_rx_rings = fwcmd->params.rsp.ulp[1].lro_rqid_tot;
+ }
+
+ return 0;
+
+}
+
+/**
+ *
+ * @brief function to create a device interface
+ * @param sc software handle to the device
+ * @param cap_flags capability flags
+ * @param en_flags enable capability flags
+ * @param vlan_tag optional vlan tag to associate with the if
+ * @param mac_addr pointer to a buffer containing the mac address
+ * @param[out] if_id [OUTPUT] pointer to an integer to hold the ID of the
+ interface created
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_if_create(POCE_SOFTC sc,
+ uint32_t cap_flags,
+ uint32_t en_flags,
+ uint16_t vlan_tag,
+ uint8_t *mac_addr,
+ uint32_t *if_id)
+{
+ struct oce_mbx mbx;
+ struct mbx_create_common_iface *fwcmd;
+ int rc = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_create_common_iface *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CREATE_IFACE,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_create_common_iface),
+ OCE_MBX_VER_V0);
+ DW_SWAP(u32ptr(&fwcmd->hdr), sizeof(struct mbx_hdr));
+
+ fwcmd->params.req.version = 0;
+ fwcmd->params.req.cap_flags = LE_32(cap_flags);
+ fwcmd->params.req.enable_flags = LE_32(en_flags);
+ if (mac_addr != NULL) {
+ bcopy(mac_addr, &fwcmd->params.req.mac_addr[0], 6);
+ fwcmd->params.req.vlan_tag.u0.normal.vtag = LE_16(vlan_tag);
+ fwcmd->params.req.mac_invalid = 0;
+ } else {
+ fwcmd->params.req.mac_invalid = 1;
+ }
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_create_common_iface);
+ DW_SWAP(u32ptr(&mbx), OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ if (rc)
+ return rc;
+
+ *if_id = LE_32(fwcmd->params.rsp.if_id);
+
+ if (mac_addr != NULL)
+ sc->pmac_id = LE_32(fwcmd->params.rsp.pmac_id);
+
+ return 0;
+}
+
+/**
+ * @brief Function to delete an interface
+ * @param sc software handle to the device
+ * @param if_id ID of the interface to delete
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_if_del(POCE_SOFTC sc, uint32_t if_id)
+{
+ struct oce_mbx mbx;
+ struct mbx_destroy_common_iface *fwcmd;
+ int rc = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_destroy_common_iface *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_DESTROY_IFACE,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_destroy_common_iface),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.if_id = if_id;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_destroy_common_iface);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ return rc;
+}
+
+/**
+ * @brief Function to send the mbx command to configure vlan
+ * @param sc software handle to the device
+ * @param if_id interface identifier index
+ * @param vtag_arr array of vlan tags
+ * @param vtag_cnt number of elements in array
+ * @param untagged boolean TRUE/FLASE
+ * @param enable_promisc flag to enable/disable VLAN promiscuous mode
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_config_vlan(POCE_SOFTC sc,
+ uint32_t if_id,
+ struct normal_vlan *vtag_arr,
+ uint8_t vtag_cnt, uint32_t untagged, uint32_t enable_promisc)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_config_vlan *fwcmd;
+ int rc;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+ fwcmd = (struct mbx_common_config_vlan *)&mbx.payload;
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CONFIG_IFACE_VLAN,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_config_vlan),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.if_id = (uint8_t) if_id;
+ fwcmd->params.req.promisc = (uint8_t) enable_promisc;
+ fwcmd->params.req.untagged = (uint8_t) untagged;
+ fwcmd->params.req.num_vlans = vtag_cnt;
+
+ if (!enable_promisc) {
+ bcopy(vtag_arr, fwcmd->params.req.tags.normal_vlans,
+ vtag_cnt * sizeof(struct normal_vlan));
+ }
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_config_vlan);
+ DW_SWAP(u32ptr(&mbx), (OCE_BMBX_RHDR_SZ + mbx.payload_length));
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ return rc;
+
+}
+
+/**
+ * @brief Function to set flow control capability in the hardware
+ * @param sc software handle to the device
+ * @param flow_control flow control flags to set
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_set_flow_control(POCE_SOFTC sc, uint32_t flow_control)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_get_set_flow_control *fwcmd =
+ (struct mbx_common_get_set_flow_control *)&mbx.payload;
+ int rc;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FLOW_CONTROL,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_get_set_flow_control),
+ OCE_MBX_VER_V0);
+
+ if (flow_control & OCE_FC_TX)
+ fwcmd->tx_flow_control = 1;
+
+ if (flow_control & OCE_FC_RX)
+ fwcmd->rx_flow_control = 1;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_get_set_flow_control);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ return rc;
+}
+
+/**
+ * @brief Initialize the RSS CPU indirection table
+ *
+ * The table is used to choose the queue to place the incomming packets.
+ * Incomming packets are hashed. The lowest bits in the hash result
+ * are used as the index into the CPU indirection table.
+ * Each entry in the table contains the RSS CPU-ID returned by the NIC
+ * create. Based on the CPU ID, the receive completion is routed to
+ * the corresponding RSS CQs. (Non-RSS packets are always completed
+ * on the default (0) CQ).
+ *
+ * @param sc software handle to the device
+ * @param *fwcmd pointer to the rss mbox command
+ * @returns none
+ */
+static int
+oce_rss_itbl_init(POCE_SOFTC sc, struct mbx_config_nic_rss *fwcmd)
+{
+ int i = 0, j = 0, rc = 0;
+ uint8_t *tbl = fwcmd->params.req.cputable;
+
+
+ for (j = 0; j < sc->nrqs; j++) {
+ if (sc->rq[j]->cfg.is_rss_queue) {
+ tbl[i] = sc->rq[j]->rss_cpuid;
+ i = i + 1;
+ }
+ }
+ if (i == 0) {
+ device_printf(sc->dev, "error: Invalid number of RSS RQ's\n");
+ rc = ENXIO;
+
+ }
+
+ /* fill log2 value indicating the size of the CPU table */
+ if (rc == 0)
+ fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(i));
+
+ return rc;
+}
+
+/**
+ * @brief Function to set flow control capability in the hardware
+ * @param sc software handle to the device
+ * @param if_id interface id to read the address from
+ * @param enable_rss 0=disable, RSS_ENABLE_xxx flags otherwise
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss)
+{
+ int rc;
+ struct oce_mbx mbx;
+ struct mbx_config_nic_rss *fwcmd =
+ (struct mbx_config_nic_rss *)&mbx.payload;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+ NIC_CONFIG_RSS,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_config_nic_rss),
+ OCE_MBX_VER_V0);
+ if (enable_rss)
+ fwcmd->params.req.enable_rss = (RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV4 |
+ RSS_ENABLE_IPV6 |
+ RSS_ENABLE_TCP_IPV6);
+ fwcmd->params.req.flush = OCE_FLUSH;
+ fwcmd->params.req.if_id = LE_32(if_id);
+
+ srandom(arc4random()); /* random entropy seed */
+ read_random(fwcmd->params.req.hash, sizeof(fwcmd->params.req.hash));
+
+ rc = oce_rss_itbl_init(sc, fwcmd);
+ if (rc == 0) {
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_config_nic_rss);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ }
+
+ return rc;
+}
+
+/**
+ * @brief RXF function to enable/disable device promiscuous mode
+ * @param sc software handle to the device
+ * @param enable enable/disable flag
+ * @returns 0 on success, EIO on failure
+ * @note
+ * The NIC_CONFIG_PROMISCUOUS command deprecated for Lancer.
+ * This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
+ */
+int
+oce_rxf_set_promiscuous(POCE_SOFTC sc, uint32_t enable)
+{
+ struct mbx_set_common_iface_rx_filter *fwcmd;
+ int sz = sizeof(struct mbx_set_common_iface_rx_filter);
+ iface_rx_filter_ctx_t *req;
+ OCE_DMA_MEM sgl;
+ int rc;
+
+ /* allocate mbx payload's dma scatter/gather memory */
+ rc = oce_dma_alloc(sc, sz, &sgl, 0);
+ if (rc)
+ return rc;
+
+ fwcmd = OCE_DMAPTR(&sgl, struct mbx_set_common_iface_rx_filter);
+
+ req = &fwcmd->params.req;
+ req->iface_flags_mask = MBX_RX_IFACE_FLAGS_PROMISCUOUS |
+ MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ if (enable) {
+ req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS |
+ MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ }
+ req->if_id = sc->if_id;
+
+ rc = oce_set_common_iface_rx_filter(sc, &sgl);
+ oce_dma_free(sc, &sgl);
+
+ return rc;
+}
+
+
+/**
+ * @brief Function modify and select rx filter options
+ * @param sc software handle to the device
+ * @param sgl scatter/gather DMA memory holding the request/response
+ * @returns 0 on success, error code on failure
+ */
+int
+oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl)
+{
+	struct oce_mbx mbx;
+	int mbx_sz = sizeof(struct mbx_set_common_iface_rx_filter);
+	struct mbx_set_common_iface_rx_filter *fwcmd;
+	int rc;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(sgl, struct mbx_set_common_iface_rx_filter);
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_SET_IFACE_RX_FILTER,
+				MBX_TIMEOUT_SEC,
+				mbx_sz,
+				OCE_MBX_VER_V0);
+
+	/* payload lives in external DMA memory, referenced via one SGE */
+	oce_dma_sync(sgl, BUS_DMASYNC_PREWRITE);
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(sgl->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(sgl->paddr);
+	mbx.payload.u0.u1.sgl[0].length = mbx_sz;
+	mbx.payload_length = mbx_sz;
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	return rc;
+}
+
+/**
+ * @brief Function to query the link status from the hardware
+ * @param sc software handle to the device
+ * @param[out] link pointer to the structure returning link attributes
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_get_link_status(POCE_SOFTC sc, struct link_status *link)
+{
+	struct oce_mbx mbx;
+	struct mbx_query_common_link_config *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_query_common_link_config *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_QUERY_LINK_CONFIG,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_query_common_link_config),
+				OCE_MBX_VER_V0);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_query_common_link_config);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+
+	if (rc) {
+		device_printf(sc->dev, "Could not get link speed: %d\n", rc);
+	} else {
+		/* interpret response; fix up endianness of the two
+		 * multi-byte fields the caller consumes */
+		bcopy(&fwcmd->params.rsp, link, sizeof(struct link_status));
+		link->logical_link_status = LE_32(link->logical_link_status);
+		link->qos_link_speed = LE_16(link->qos_link_speed);
+	}
+
+	return rc;
+}
+
+
+
+/**
+ * @brief Function to get NIC statistics (version 0 of the command,
+ *	  used on older controllers)
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA memory the statistics response lands in
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
+{
+	struct oce_mbx mbx;
+	struct mbx_get_nic_stats_v0 *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v0);
+	bzero(fwcmd, sizeof(struct mbx_get_nic_stats_v0));
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_GET_STATS,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_get_nic_stats_v0),
+				OCE_MBX_VER_V0);
+
+	/* stats too large for an embedded response; use scatter/gather */
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats_v0);
+
+	mbx.payload_length = sizeof(struct mbx_get_nic_stats_v0);
+
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+
+	if (rc) {
+		device_printf(sc->dev,
+			      "Could not get nic statistics: %d\n", rc);
+	}
+
+	return rc;
+}
+
+
+
+/**
+ * @brief Function to get NIC statistics
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA memory the statistics response lands in
+ * @returns 0 on success, EIO on failure
+ * @note command deprecated in Lancer
+ */
+int
+oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
+{
+	struct oce_mbx mbx;
+	struct mbx_get_nic_stats *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats);
+	bzero(fwcmd, sizeof(struct mbx_get_nic_stats));
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_GET_STATS,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_get_nic_stats),
+				OCE_MBX_VER_V1);
+
+
+	mbx.u0.s.embedded = 0;	/* stats too large for embedded mbx rsp */
+	mbx.u0.s.sge_count = 1; /* using scatter gather instead */
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats);
+
+	mbx.payload_length = sizeof(struct mbx_get_nic_stats);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+	if (rc) {
+		device_printf(sc->dev,
+			      "Could not get nic statistics: %d\n", rc);
+	}
+	return rc;
+}
+
+
+/**
+ * @brief Function to get pport (physical port) statistics
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA memory the statistics response lands in
+ * @param reset_stats non-zero resets the counters after reading
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
+				uint32_t reset_stats)
+{
+	struct oce_mbx mbx;
+	struct mbx_get_pport_stats *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_pport_stats);
+	bzero(fwcmd, sizeof(struct mbx_get_pport_stats));
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_GET_PPORT_STATS,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_get_pport_stats),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.reset_stats = reset_stats;
+	fwcmd->params.req.port_number = sc->if_id;
+
+	mbx.u0.s.embedded = 0;	/* stats too large for embedded mbx rsp */
+	mbx.u0.s.sge_count = 1; /* using scatter gather instead */
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_pport_stats);
+
+	mbx.payload_length = sizeof(struct mbx_get_pport_stats);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+
+	if (rc != 0) {
+		device_printf(sc->dev,
+			"Could not get physical port statistics: %d\n", rc);
+	}
+
+	return rc;
+}
+
+
+/**
+ * @brief Function to get vport (virtual port) statistics
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA memory the statistics response lands in
+ * @param req_size size of the statistics request
+ * @param reset_stats non-zero resets the counters after reading
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
+				uint32_t req_size, uint32_t reset_stats)
+{
+	struct oce_mbx mbx;
+	struct mbx_get_vport_stats *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_vport_stats);
+	bzero(fwcmd, sizeof(struct mbx_get_vport_stats));
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_GET_VPORT_STATS,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_get_vport_stats),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.reset_stats = reset_stats;
+	fwcmd->params.req.vport_number = sc->if_id;
+
+	mbx.u0.s.embedded = 0;	/* stats too large for embedded mbx rsp */
+	mbx.u0.s.sge_count = 1; /* using scatter gather instead */
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_vport_stats);
+
+	mbx.payload_length = sizeof(struct mbx_get_vport_stats);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+
+	if (rc != 0) {
+		/* fixed: message used to say "physical port" (copy-paste
+		 * from the pport variant); this is the vport command */
+		device_printf(sc->dev,
+			"Could not get virtual port statistics: %d\n", rc);
+	}
+
+	return rc;
+}
+
+
+/**
+ * @brief Function to update the multicast filter with
+ *	  values in dma_mem
+ * @param sc software handle to the device
+ * @param pdma_mem pointer to dma memory region holding the address list
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_update_multicast(POCE_SOFTC sc, POCE_DMA_MEM pdma_mem)
+{
+	struct oce_mbx mbx;
+	struct oce_mq_sge *sgl;
+	struct mbx_set_common_iface_multicast *req = NULL;
+	int rc = 0;
+
+	/* request header is built directly in the caller's DMA buffer */
+	req = OCE_DMAPTR(pdma_mem, struct mbx_set_common_iface_multicast);
+	mbx_common_req_hdr_init(&req->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_SET_IFACE_MULTICAST,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_set_common_iface_multicast),
+				OCE_MBX_VER_V0);
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	mbx.u0.s.embedded = 0; /*Non embeded*/
+	mbx.payload_length = sizeof(struct mbx_set_common_iface_multicast);
+	mbx.u0.s.sge_count = 1;
+	sgl = &mbx.payload.u0.u1.sgl[0];
+	sgl->pa_hi = htole32(upper_32_bits(pdma_mem->paddr));
+	sgl->pa_lo = htole32((pdma_mem->paddr) & 0xFFFFFFFF);
+	sgl->length = htole32(mbx.payload_length);
+
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+
+	return rc;
+}
+
+
+/**
+ * @brief Function to send passthrough Ioctls
+ * @param sc software handle to the device
+ * @param dma_mem pointer to dma memory region; the caller must have
+ *	  already built the complete firmware request in it
+ * @param req_size size of dma_mem
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_pass_through_mbox(POCE_SOFTC sc, POCE_DMA_MEM dma_mem, uint32_t req_size)
+{
+	struct oce_mbx mbx;
+	struct oce_mq_sge *sgl;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	mbx.u0.s.embedded = 0; /*Non embeded*/
+	mbx.payload_length = req_size;
+	mbx.u0.s.sge_count = 1;
+	sgl = &mbx.payload.u0.u1.sgl[0];
+	sgl->pa_hi = htole32(upper_32_bits(dma_mem->paddr));
+	sgl->pa_lo = htole32((dma_mem->paddr) & 0xFFFFFFFF);
+	sgl->length = htole32(req_size);
+
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	return rc;
+}
+
+
+/**
+ * @brief Add a MAC address to an interface
+ * @param sc software handle to the device
+ * @param mac_addr the 6-byte MAC address to add
+ * @param if_id interface id to add the address to
+ * @param[out] pmac_id firmware-assigned id for the added MAC
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_macaddr_add(POCE_SOFTC sc, uint8_t *mac_addr,
+		 uint32_t if_id, uint32_t *pmac_id)
+{
+	struct oce_mbx mbx;
+	struct mbx_add_common_iface_mac *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_add_common_iface_mac *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_ADD_IFACE_MAC,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_add_common_iface_mac),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.if_id = (uint16_t) if_id;
+	bcopy(mac_addr, fwcmd->params.req.mac_address, 6);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_add_common_iface_mac);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		return rc;
+
+	/* return the pmac_id the firmware assigned; needed to delete it */
+	*pmac_id = fwcmd->params.rsp.pmac_id;
+
+	return rc;
+}
+
+
+/**
+ * @brief Delete a MAC address previously added to an interface
+ * @param sc software handle to the device
+ * @param if_id interface id the address was added to
+ * @param pmac_id id returned by oce_mbox_macaddr_add()
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_macaddr_del(POCE_SOFTC sc, uint32_t if_id, uint32_t pmac_id)
+{
+	struct oce_mbx mbx;
+	struct mbx_del_common_iface_mac *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_del_common_iface_mac *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_DEL_IFACE_MAC,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_del_common_iface_mac),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.if_id = (uint16_t)if_id;
+	fwcmd->params.req.pmac_id = pmac_id;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_del_common_iface_mac);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	return rc;
+}
+
+
+
+/**
+ * @brief Query/set functional capabilities and record whether the
+ *	  controller supports the BE3 native ERX API
+ * @param sc software handle to the device; sc->be3_native is updated
+ * @returns always 0 (the command is allowed to fail, see below)
+ */
+int
+oce_mbox_check_native_mode(POCE_SOFTC sc)
+{
+	struct oce_mbx mbx;
+	struct mbx_common_set_function_cap *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_common_set_function_cap *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_common_set_function_cap),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
+							CAP_BE3_NATIVE_ERX_API;
+
+	fwcmd->params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_common_set_function_cap);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	/* the command can fail in legacy mode, so rc is deliberately
+	 * ignored and 0 is returned regardless */
+
+	sc->be3_native = fwcmd->params.rsp.capability_flags
+			& CAP_BE3_NATIVE_ERX_API;
+
+	return 0;
+}
+
+
+
+/**
+ * @brief Enable or disable a hardware loopback mode on a port
+ * @param sc software handle to the device
+ * @param port_num port used as both source and destination
+ * @param loopback_type type of loopback to configure
+ * @param enable non-zero enables, zero disables
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_mbox_cmd_set_loopback(POCE_SOFTC sc, uint8_t port_num,
+		uint8_t loopback_type, uint8_t enable)
+{
+	struct oce_mbx mbx;
+	struct mbx_lowlevel_set_loopback_mode *fwcmd;
+	int rc = 0;
+
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_lowlevel_set_loopback_mode *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_LOWLEVEL,
+				OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_lowlevel_set_loopback_mode),
+				OCE_MBX_VER_V0);
+
+	/* loop the port back onto itself */
+	fwcmd->params.req.src_port = port_num;
+	fwcmd->params.req.dest_port = port_num;
+	fwcmd->params.req.loopback_type = loopback_type;
+	fwcmd->params.req.loopback_state = enable;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_lowlevel_set_loopback_mode);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+
+	return rc;
+
+}
+
+/**
+ * @brief Run a loopback packet test on a port
+ * @param sc software handle to the device
+ * @param port_num port used as both source and destination
+ * @param loopback_type type of loopback being tested
+ * @param pkt_size size of each test packet
+ * @param num_pkts number of packets to send
+ * @param pattern data pattern carried in the packets
+ * @returns mailbox error code, or the firmware test status on success
+ */
+int
+oce_mbox_cmd_test_loopback(POCE_SOFTC sc, uint32_t port_num,
+	uint32_t loopback_type, uint32_t pkt_size, uint32_t num_pkts,
+	uint64_t pattern)
+{
+
+	struct oce_mbx mbx;
+	struct mbx_lowlevel_test_loopback_mode *fwcmd;
+	int rc = 0;
+
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_lowlevel_test_loopback_mode *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_LOWLEVEL,
+				OPCODE_LOWLEVEL_TEST_LOOPBACK,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_lowlevel_test_loopback_mode),
+				OCE_MBX_VER_V0);
+
+	fwcmd->params.req.pattern = pattern;
+	fwcmd->params.req.src_port = port_num;
+	fwcmd->params.req.dest_port = port_num;
+	fwcmd->params.req.pkt_size = pkt_size;
+	fwcmd->params.req.num_pkts = num_pkts;
+	fwcmd->params.req.loopback_type = loopback_type;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_lowlevel_test_loopback_mode);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		return rc;
+
+	return(fwcmd->params.rsp.status);
+}
+
+/**
+ * @brief Write a chunk of data to the FlashROM
+ * @param sc software handle to the device
+ * @param optype flash operation type
+ * @param opcode flash operation code
+ * @param pdma_mem DMA memory holding the request header plus the data
+ * @param num_bytes number of data bytes to write
+ * @returns 0 on success, a mailbox error or firmware status otherwise
+ */
+int
+oce_mbox_write_flashrom(POCE_SOFTC sc, uint32_t optype,uint32_t opcode,
+				POCE_DMA_MEM pdma_mem, uint32_t num_bytes)
+{
+
+	struct oce_mbx mbx;
+	struct oce_mq_sge *sgl = NULL;
+	struct mbx_common_read_write_flashrom *fwcmd = NULL;
+	int rc = 0, payload_len = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(pdma_mem, struct mbx_common_read_write_flashrom);
+	/* 32K of flash data follows the command structure in the payload */
+	payload_len = sizeof(struct mbx_common_read_write_flashrom) + 32*1024;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_WRITE_FLASHROM,
+				LONG_TIMEOUT,
+				payload_len,
+				OCE_MBX_VER_V0);
+
+	fwcmd->flash_op_type = optype;
+	fwcmd->flash_op_code = opcode;
+	fwcmd->data_buffer_size = num_bytes;
+
+	mbx.u0.s.embedded = 0; /*Non embeded*/
+	mbx.payload_length = payload_len;
+	mbx.u0.s.sge_count = 1;
+
+	sgl = &mbx.payload.u0.u1.sgl[0];
+	sgl->pa_hi = upper_32_bits(pdma_mem->paddr);
+	sgl->pa_lo = pdma_mem->paddr & 0xFFFFFFFF;
+	sgl->length = payload_len;
+
+	/* post the command (was missing: rc stayed 0 and the firmware
+	 * status was read for a command that had never been issued) */
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc) {
+		device_printf(sc->dev, "Write FlashROM mbox post failed\n");
+	} else {
+		rc = fwcmd->hdr.u0.rsp.status;
+	}
+
+	return rc;
+
+}
+
+/**
+ * @brief Read the 4-byte CRC of a FlashROM region
+ * @param sc software handle to the device
+ * @param[out] flash_crc 4-byte buffer receiving the CRC
+ * @param offset data offset of the CRC within the flash region
+ * @param optype flash operation type
+ * @returns 0 on success, a mailbox error or firmware status otherwise
+ */
+int
+oce_mbox_get_flashrom_crc(POCE_SOFTC sc, uint8_t *flash_crc,
+				uint32_t offset, uint32_t optype)
+{
+
+	int rc = 0, payload_len = 0;
+	struct oce_mbx mbx;
+	struct mbx_common_read_write_flashrom *fwcmd;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_common_read_write_flashrom *)&mbx.payload;
+
+	/* Firmware requires extra 4 bytes with this ioctl. Since there
+	   is enough room in the mbx payload it should be good enough
+	   Reference: Bug 14853
+	*/
+	payload_len = sizeof(struct mbx_common_read_write_flashrom) + 4;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_READ_FLASHROM,
+				MBX_TIMEOUT_SEC,
+				payload_len,
+				OCE_MBX_VER_V0);
+
+	fwcmd->flash_op_type = optype;
+	fwcmd->flash_op_code = FLASHROM_OPER_REPORT;
+	fwcmd->data_offset = offset;
+	fwcmd->data_buffer_size = 0x4;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = payload_len;
+
+	/* post the command */
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc) {
+		device_printf(sc->dev, "Read FlashROM CRC mbox post failed\n");
+	} else {
+		bcopy(fwcmd->data_buffer, flash_crc, 4);
+		rc = fwcmd->hdr.u0.rsp.status;
+	}
+	return rc;
+}
+
+/**
+ * @brief Query PHY configuration from the firmware
+ * @param sc software handle to the device
+ * @param[out] phy_info structure receiving the PHY attributes
+ * @returns 0 on success, a mailbox error or firmware status otherwise
+ */
+int
+oce_mbox_get_phy_info(POCE_SOFTC sc, struct oce_phy_info *phy_info)
+{
+
+	struct oce_mbx mbx;
+	struct mbx_common_phy_info *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_common_phy_info *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_GET_PHY_CONFIG,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_common_phy_info),
+				OCE_MBX_VER_V0);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_common_phy_info);
+
+	/* now post the command */
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc) {
+		device_printf(sc->dev, "Read PHY info mbox post failed\n");
+	} else {
+		/* copy out the individual response fields */
+		rc = fwcmd->hdr.u0.rsp.status;
+		phy_info->phy_type = fwcmd->params.rsp.phy_info.phy_type;
+		phy_info->interface_type =
+			fwcmd->params.rsp.phy_info.interface_type;
+		phy_info->auto_speeds_supported =
+			fwcmd->params.rsp.phy_info.auto_speeds_supported;
+		phy_info->fixed_speeds_supported =
+			fwcmd->params.rsp.phy_info.fixed_speeds_supported;
+		phy_info->misc_params =fwcmd->params.rsp.phy_info.misc_params;
+
+	}
+	return rc;
+
+}
+
+
+/**
+ * @brief Write a chunk of firmware image data on Lancer (XE201) adapters
+ * @param sc software handle to the device
+ * @param data_size number of bytes to write; 0 signals end-of-file
+ * @param data_offset offset of this chunk within the flash object
+ * @param pdma_mem DMA memory holding the data to write
+ * @param[out] written_data bytes the firmware actually wrote
+ * @param[out] additional_status extra firmware status information
+ * @returns 0 on success, a mailbox error or firmware status otherwise
+ */
+int
+oce_mbox_lancer_write_flashrom(POCE_SOFTC sc, uint32_t data_size,
+			uint32_t data_offset, POCE_DMA_MEM pdma_mem,
+			uint32_t *written_data, uint32_t *additional_status)
+{
+
+	struct oce_mbx mbx;
+	struct mbx_lancer_common_write_object *fwcmd = NULL;
+	int rc = 0, payload_len = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	payload_len = sizeof(struct mbx_lancer_common_write_object);
+
+	mbx.u0.s.embedded = 1;/* Embedded */
+	mbx.payload_length = payload_len;
+	fwcmd = (struct mbx_lancer_common_write_object *)&mbx.payload;
+
+	/* initialize the ioctl header */
+	mbx_common_req_hdr_init(&fwcmd->params.req.hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_WRITE_OBJECT,
+				LONG_TIMEOUT,
+				payload_len,
+				OCE_MBX_VER_V0);
+
+	/* a zero-length write marks the end of the image transfer */
+	fwcmd->params.req.write_length = data_size;
+	if (data_size == 0)
+		fwcmd->params.req.eof = 1;
+	else
+		fwcmd->params.req.eof = 0;
+
+	/* "/prg" is the fixed Lancer firmware object name */
+	strcpy(fwcmd->params.req.object_name, "/prg");
+	fwcmd->params.req.descriptor_count = 1;
+	fwcmd->params.req.write_offset = data_offset;
+	fwcmd->params.req.buffer_length = data_size;
+	fwcmd->params.req.address_lower = pdma_mem->paddr & 0xFFFFFFFF;
+	fwcmd->params.req.address_upper = upper_32_bits(pdma_mem->paddr);
+
+	/* post the command */
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc) {
+		device_printf(sc->dev,
+			      "Write Lancer FlashROM mbox post failed\n");
+	} else {
+		*written_data = fwcmd->params.rsp.actual_write_length;
+		*additional_status = fwcmd->params.rsp.additional_status;
+		rc = fwcmd->params.rsp.status;
+	}
+	return rc;
+
+}
+
+
+
+/**
+ * @brief Create a receive queue (RQ) in the hardware
+ * @param sc software handle to the device (via rq->parent)
+ * @param rq receive queue to create; rq_id/rss_cpuid are filled in
+ * @returns 0 on success, error code on failure
+ */
+int
+oce_mbox_create_rq(struct oce_rq *rq)
+{
+
+	struct oce_mbx mbx;
+	struct mbx_create_nic_rq *fwcmd;
+	POCE_SOFTC sc = rq->parent;
+	int rc, num_pages = 0;
+
+	/* nothing to do if the queue already exists */
+	if (rq->qstate == QCREATED)
+		return 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_CREATE_RQ, MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_create_nic_rq),
+				OCE_MBX_VER_V0);
+
+	/* oce_page_list will also prepare pages */
+	num_pages = oce_page_list(rq->ring, &fwcmd->params.req.pages[0]);
+
+	if (IS_XE201(sc)) {
+		/* XE201 takes the fragment size in 2K units and uses the
+		 * V1 request format */
+		fwcmd->params.req.frag_size = rq->cfg.frag_size/2048;
+		fwcmd->params.req.page_size = 1;
+		fwcmd->hdr.u0.req.version = OCE_MBX_VER_V1;
+	} else
+		fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
+	fwcmd->params.req.num_pages = num_pages;
+	fwcmd->params.req.cq_id = rq->cq->cq_id;
+	fwcmd->params.req.if_id = sc->if_id;
+	fwcmd->params.req.max_frame_size = rq->cfg.mtu;
+	fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_nic_rq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto error;
+
+	rq->rq_id = fwcmd->params.rsp.rq_id;
+	rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid;
+
+	return 0;
+error:
+	device_printf(sc->dev, "Mbox Create RQ failed\n");
+	return rc;
+
+}
+
+
+
+/**
+ * @brief Create a work (transmit) queue in the hardware
+ * @param wq work queue to create; wq->wq_id is filled in on success
+ * @returns 0 on success, error code on failure
+ */
+int
+oce_mbox_create_wq(struct oce_wq *wq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_nic_wq *fwcmd;
+	POCE_SOFTC sc = wq->parent;
+	int rc = 0, version, num_pages;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
+	/* XE201 requires the V1 request, which also carries if_id */
+	if (IS_XE201(sc)) {
+		version = OCE_MBX_VER_V1;
+		fwcmd->params.req.if_id = sc->if_id;
+	} else
+		version = OCE_MBX_VER_V0;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_NIC,
+				NIC_CREATE_WQ, MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_create_nic_wq),
+				version);
+
+	num_pages = oce_page_list(wq->ring, &fwcmd->params.req.pages[0]);
+
+	fwcmd->params.req.nic_wq_type = wq->cfg.wq_type;
+	fwcmd->params.req.num_pages = num_pages;
+	fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
+	fwcmd->params.req.cq_id = wq->cq->cq_id;
+	fwcmd->params.req.ulp_num = 1;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_nic_wq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto error;
+
+	wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
+
+	return 0;
+error:
+	device_printf(sc->dev, "Mbox Create WQ failed\n");
+	return rc;
+
+}
+
+
+
+/**
+ * @brief Create an event queue (EQ) in the hardware
+ * @param eq event queue to create; eq->eq_id is filled in on success
+ * @returns 0 on success, error code on failure
+ */
+int
+oce_mbox_create_eq(struct oce_eq *eq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_eq *fwcmd;
+	POCE_SOFTC sc = eq->parent;
+	int rc = 0;
+	uint32_t num_pages;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_CREATE_EQ, MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_create_common_eq),
+				OCE_MBX_VER_V0);
+
+	num_pages = oce_page_list(eq->ring, &fwcmd->params.req.pages[0]);
+	fwcmd->params.req.ctx.num_pages = num_pages;
+	fwcmd->params.req.ctx.valid = 1;
+	/* entry size: 0 encodes 4-byte entries, 1 encodes larger */
+	fwcmd->params.req.ctx.size = (eq->eq_cfg.item_size == 4) ? 0 : 1;
+	/* queue length encoded as log2 of the number of 256-entry units */
+	fwcmd->params.req.ctx.count = OCE_LOG2(eq->eq_cfg.q_len / 256);
+	fwcmd->params.req.ctx.armed = 0;
+	fwcmd->params.req.ctx.delay_mult = eq->eq_cfg.cur_eqd;
+
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_eq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto error;
+
+	eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
+
+	return 0;
+error:
+	device_printf(sc->dev, "Mbox Create EQ failed\n");
+	return rc;
+}
+
+
+
+/**
+ * @brief Create a completion queue (CQ) in the hardware
+ * @param cq completion queue to create; cq->cq_id is filled in
+ * @param ncoalesce coalescing watermark
+ * @param is_eventable non-zero if the CQ generates events on its EQ
+ * @returns 0 on success, error code on failure
+ */
+int
+oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce, uint32_t is_eventable)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_cq *fwcmd;
+	POCE_SOFTC sc = cq->parent;
+	uint8_t version;
+	oce_cq_ctx_t *ctx;
+	uint32_t num_pages, page_size;
+	int rc = 0;
+
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
+
+	/* XE201 uses the V2 context layout, older chips V0 */
+	if (IS_XE201(sc))
+		version = OCE_MBX_VER_V2;
+	else
+		version = OCE_MBX_VER_V0;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_CREATE_CQ,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_create_common_cq),
+				version);
+
+	ctx = &fwcmd->params.req.cq_ctx;
+
+	num_pages = oce_page_list(cq->ring, &fwcmd->params.req.pages[0]);
+	page_size = 1; /* 1 for 4K */
+
+	if (version == OCE_MBX_VER_V2) {
+		ctx->v2.num_pages = LE_16(num_pages);
+		ctx->v2.page_size = page_size;
+		ctx->v2.eventable = is_eventable;
+		ctx->v2.valid = 1;
+		ctx->v2.count = OCE_LOG2(cq->cq_cfg.q_len / 256);
+		ctx->v2.nodelay = cq->cq_cfg.nodelay;
+		ctx->v2.coalesce_wm = ncoalesce;
+		ctx->v2.armed = 0;
+		ctx->v2.eq_id = cq->eq->eq_id;
+		/* count==3 means the largest encoding; cap the explicit
+		 * entry count at the 4K-1 hardware limit */
+		if (ctx->v2.count == 3) {
+			if (cq->cq_cfg.q_len > (4*1024)-1)
+				ctx->v2.cqe_count = (4*1024)-1;
+			else
+				ctx->v2.cqe_count = cq->cq_cfg.q_len;
+		}
+	} else {
+		ctx->v0.num_pages = LE_16(num_pages);
+		ctx->v0.eventable = is_eventable;
+		ctx->v0.valid = 1;
+		ctx->v0.count = OCE_LOG2(cq->cq_cfg.q_len / 256);
+		ctx->v0.nodelay = cq->cq_cfg.nodelay;
+		ctx->v0.coalesce_wm = ncoalesce;
+		ctx->v0.armed = 0;
+		ctx->v0.eq_id = cq->eq->eq_id;
+	}
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_cq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto error;
+
+	cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
+
+	return 0;
+error:
+	device_printf(sc->dev, "Mbox Create CQ failed\n");
+	return rc;
+
+}
diff --git a/sys/dev/oce/oce_queue.c b/sys/dev/oce/oce_queue.c
new file mode 100644
index 0000000..c59dc50
--- /dev/null
+++ b/sys/dev/oce/oce_queue.c
@@ -0,0 +1,1213 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+
+/* $FreeBSD$ */
+
+
+#include "oce_if.h"
+
+/*****************************************************
+ * local queue functions
+ *****************************************************/
+
+static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
+ uint32_t q_len, uint32_t wq_type);
+static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
+static void oce_wq_free(struct oce_wq *wq);
+static void oce_wq_del(struct oce_wq *wq);
+static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
+ uint32_t q_len,
+ uint32_t frag_size,
+ uint32_t mtu, uint32_t rss);
+static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
+static void oce_rq_free(struct oce_rq *rq);
+static void oce_rq_del(struct oce_rq *rq);
+static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
+ uint32_t q_len,
+ uint32_t item_size,
+ uint32_t eq_delay,
+ uint32_t vector);
+static void oce_eq_del(struct oce_eq *eq);
+static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
+ struct oce_eq *eq, uint32_t q_len);
+static void oce_mq_free(struct oce_mq *mq);
+static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
+ *mbx, size_t req_size, enum qtype qtype);
+struct oce_cq *oce_cq_create(POCE_SOFTC sc,
+ struct oce_eq *eq,
+ uint32_t q_len,
+ uint32_t item_size,
+ uint32_t sol_event,
+ uint32_t is_eventable,
+ uint32_t nodelay, uint32_t ncoalesce);
+static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
+
+
+
+/**
+ * @brief Create and initialize all the queues on the board
+ * @param sc software handle to the device
+ * @returns 0 if successful, or error
+ **/
+int
+oce_queue_init_all(POCE_SOFTC sc)
+{
+	int rc = 0, i, vector;
+	struct oce_wq *wq;
+	struct oce_rq *rq;
+
+	/* alloc TX/RX queues */
+	for_all_wq_queues(sc, wq, i) {
+		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
+					 NIC_WQ_TYPE_STANDARD);
+		if (!sc->wq[i])
+			goto error;
+
+	}
+
+	for_all_rq_queues(sc, rq, i) {
+		/* rq 0 is the default (non-RSS) receive queue */
+		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
+					OCE_MAX_JUMBO_FRAME_SIZE,
+					(i == 0) ? 0 : sc->rss_enable);
+		if (!sc->rq[i])
+			goto error;
+	}
+
+	/* Create network interface on card */
+	if (oce_create_nw_interface(sc))
+		goto error;
+
+	/* create all of the event queues */
+	for (vector = 0; vector < sc->intr_count; vector++) {
+		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
+						0, vector);
+		if (!sc->eq[vector])
+			goto error;
+	}
+
+	/* create Tx, Rx and mcc queues */
+	for_all_wq_queues(sc, wq, i) {
+		/* NOTE(review): wq i is bound to eq[i]; assumes
+		 * intr_count >= number of WQs — confirm against attach path */
+		rc = oce_wq_create(wq, sc->eq[i]);
+		if (rc)
+			goto error;
+		wq->queue_index = i;
+		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
+	}
+
+	for_all_rq_queues(sc, rq, i) {
+		/* rq 0 and rq 1 share eq[0]; rq i>0 uses eq[i-1] */
+		rc = oce_rq_create(rq, sc->if_id,
+					sc->eq[(i == 0) ? 0:(i-1)]);
+		if (rc)
+			goto error;
+		rq->queue_index = i;
+	}
+
+	/* mailbox/MCC queue rides on eq[0] */
+	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
+	if (!sc->mq)
+		goto error;
+
+	return rc;
+
+error:
+	oce_queue_release_all(sc);
+	return 1;
+}
+
+
+
+/**
+ * @brief Releases all queues (RQs, WQs, the MQ and EQs) created for the device
+ * @param sc software handle to the device
+ */
+void
+oce_queue_release_all(POCE_SOFTC sc)
+{
+	int i = 0;
+	struct oce_wq *wq;
+	struct oce_rq *rq;
+	struct oce_eq *eq;
+
+	/* RQs first: they reference CQs which reference EQs */
+	for_all_rq_queues(sc, rq, i) {
+		if (rq) {
+			oce_rq_del(sc->rq[i]);
+			oce_rq_free(sc->rq[i]);
+		}
+	}
+
+	for_all_wq_queues(sc, wq, i) {
+		if (wq) {
+			oce_wq_del(sc->wq[i]);
+			oce_wq_free(sc->wq[i]);
+		}
+	}
+
+	if (sc->mq)
+		oce_mq_free(sc->mq);
+
+	/* EQs last, after every dependent CQ has been torn down */
+	for_all_evnt_queues(sc, eq, i) {
+		if (eq)
+			oce_eq_del(sc->eq[i]);
+	}
+}
+
+
+
+/**
+ * @brief Function to create a WQ for NIC Tx
+ * @param sc software handle to the device
+ * @param qlen number of entries in the queue
+ * @param wq_type work queue type
+ * @returns the pointer to the WQ created or NULL on failure
+ */
+static struct
+oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
+{
+	struct oce_wq *wq;
+	int rc = 0, i;
+
+	/* q_len must be min 256 and max 2k */
+	if (q_len < 256 || q_len > 2048) {
+		/* NOTE(review): message says "[256, 2000]" but the check
+		 * above allows up to 2048 — confirm intended bound */
+		device_printf(sc->dev,
+			  "Invalid q length. Must be "
+			  "[256, 2000]: 0x%x\n", q_len);
+		return NULL;
+	}
+
+	/* allocate wq */
+	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!wq)
+		return NULL;
+
+	/* Set the wq config */
+	wq->cfg.q_len = q_len;
+	wq->cfg.wq_type = (uint8_t) wq_type;
+	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
+	wq->cfg.nbufs = 2 * wq->cfg.q_len;
+	wq->cfg.nhdl = 2 * wq->cfg.q_len;
+
+	wq->parent = (void *)sc;
+
+	/* DMA tag used to map outgoing mbuf chains for this WQ */
+	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
+				1, 0,
+				BUS_SPACE_MAXADDR,
+				BUS_SPACE_MAXADDR,
+				NULL, NULL,
+				OCE_MAX_TX_SIZE,
+				OCE_MAX_TX_ELEMENTS,
+				PAGE_SIZE, 0, NULL, NULL, &wq->tag);
+
+	if (rc)
+		goto free_wq;
+
+
+	/* one DMA map per in-flight packet descriptor slot */
+	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
+		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
+		if (rc)
+			goto free_wq;
+	}
+
+	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
+	if (!wq->ring)
+		goto free_wq;
+
+
+	LOCK_CREATE(&wq->tx_lock, "TX_lock");
+
+#if __FreeBSD_version >= 800000
+	/* Allocate buf ring for multiqueue*/
+	wq->br = buf_ring_alloc(4096, M_DEVBUF,
+			M_WAITOK, &wq->tx_lock.mutex);
+	if (!wq->br)
+		goto free_wq;
+#endif
+	return wq;
+
+
+free_wq:
+	device_printf(sc->dev, "Create WQ failed\n");
+	/* oce_wq_free tolerates the partially-initialized wq */
+	oce_wq_free(wq);
+	return NULL;
+}
+
+
+
+/**
+ * @brief Frees the work queue
+ * @param wq pointer to work queue to free
+ */
+static void
+oce_wq_free(struct oce_wq *wq)
+{
+	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
+	int i;
+
+	/* make sure no tx task is still referencing this wq */
+	taskqueue_drain(taskqueue_swi, &wq->txtask);
+
+	if (wq->ring != NULL) {
+		oce_destroy_ring_buffer(sc, wq->ring);
+		wq->ring = NULL;
+	}
+
+	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
+		if (wq->pckts[i].map != NULL) {
+			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
+			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
+			wq->pckts[i].map = NULL;
+		}
+	}
+
+	if (wq->tag != NULL)
+		bus_dma_tag_destroy(wq->tag);
+	if (wq->br != NULL)
+		buf_ring_free(wq->br, M_DEVBUF);
+
+	LOCK_DESTROY(&wq->tx_lock);
+	free(wq, M_DEVBUF);
+}
+
+
+
+/**
+ * @brief Create a work queue
+ * @param wq pointer to work queue
+ * @param eq pointer to associated event queue
+ */
+static int
+oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
+{
+	POCE_SOFTC sc = wq->parent;
+	struct oce_cq *cq;
+	int rc = 0;
+
+	/* create the CQ */
+	cq = oce_cq_create(sc,
+			   eq,
+			   CQ_LEN_1024,
+			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
+	if (!cq)
+		return ENXIO;
+
+
+	wq->cq = cq;
+
+	/* create the WQ in firmware */
+	rc = oce_mbox_create_wq(wq);
+	if (rc)
+		goto error;
+
+	wq->qstate = QCREATED;
+	wq->wq_free = wq->cfg.q_len;
+	wq->ring->cidx = 0;
+	wq->ring->pidx = 0;
+
+	/* register the CQ with its EQ and hook the tx-completion handler */
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	cq->cb_arg = wq;
+	cq->cq_handler = oce_wq_handler;
+
+	return 0;
+
+error:
+	device_printf(sc->dev, "WQ create failed\n");
+	oce_wq_del(wq);
+	return rc;
+}
+
+
+
+
+/**
+ * @brief Delete a work queue
+ * @param wq pointer to work queue
+ */
+static void
+oce_wq_del(struct oce_wq *wq)
+{
+	struct oce_mbx mbx;
+	struct mbx_delete_nic_wq *fwcmd;
+	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
+
+	/* only tell firmware about queues it actually knows */
+	if (wq->qstate == QCREATED) {
+		bzero(&mbx, sizeof(struct oce_mbx));
+		/* now fill the command */
+		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
+		fwcmd->params.req.wq_id = wq->wq_id;
+		(void)oce_destroy_q(sc, &mbx,
+				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
+		wq->qstate = QDELETED;
+	}
+
+	if (wq->cq != NULL) {
+		oce_cq_del(sc, wq->cq);
+		wq->cq = NULL;
+	}
+}
+
+
+
+/**
+ * @brief function to allocate receive queue resources
+ * @param sc software handle to the device
+ * @param q_len length of receive queue
+ * @param frag_size size of an receive queue fragment
+ * @param mtu maximum transmission unit
+ * @param rss is-rss-queue flag
+ * @returns the pointer to the RQ created or NULL on failure
+ */
+static struct
+oce_rq *oce_rq_init(POCE_SOFTC sc,
+				  uint32_t q_len,
+				  uint32_t frag_size,
+				  uint32_t mtu, uint32_t rss)
+{
+	struct oce_rq *rq;
+	int rc = 0, i;
+
+	/* frag_size must be a power of two >= 2 for OCE_LOG2 to be valid */
+	if (OCE_LOG2(frag_size) <= 0)
+		return NULL;
+
+	if ((q_len == 0) || (q_len > 1024))
+		return NULL;
+
+	/* allocate the rq */
+	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!rq)
+		return NULL;
+
+
+	rq->cfg.q_len = q_len;
+	rq->cfg.frag_size = frag_size;
+	rq->cfg.mtu = mtu;
+	rq->cfg.eqd = 0;
+	rq->lro_pkts_queued = 0;
+	rq->cfg.is_rss_queue = rss;
+	rq->packets_in = 0;
+	rq->packets_out = 0;
+	rq->pending = 0;
+
+	rq->parent = (void *)sc;
+
+	/* single-segment DMA tag for receive buffers */
+	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
+			1, 0,
+			BUS_SPACE_MAXADDR,
+			BUS_SPACE_MAXADDR,
+			NULL, NULL,
+			OCE_MAX_RX_SIZE,
+			1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
+
+	if (rc)
+		goto free_rq;
+
+	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
+		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
+		if (rc)
+			goto free_rq;
+	}
+
+	/* create the ring buffer */
+	rq->ring = oce_create_ring_buffer(sc, q_len,
+			sizeof(struct oce_nic_rqe));
+	if (!rq->ring)
+		goto free_rq;
+
+	LOCK_CREATE(&rq->rx_lock, "RX_lock");
+
+	return rq;
+
+free_rq:
+	device_printf(sc->dev, "Create RQ failed\n");
+	/* oce_rq_free tolerates the partially-initialized rq */
+	oce_rq_free(rq);
+	return NULL;
+}
+
+
+
+
+/**
+ * @brief Free a receive queue
+ * @param rq pointer to receive queue
+ */
+static void
+oce_rq_free(struct oce_rq *rq)
+{
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+	int i = 0 ;
+
+	if (rq->ring != NULL) {
+		oce_destroy_ring_buffer(sc, rq->ring);
+		rq->ring = NULL;
+	}
+	/* release per-slot DMA maps and any mbufs still attached */
+	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
+		if (rq->pckts[i].map != NULL) {
+			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
+			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
+			rq->pckts[i].map = NULL;
+		}
+		if (rq->pckts[i].mbuf) {
+			m_free(rq->pckts[i].mbuf);
+			rq->pckts[i].mbuf = NULL;
+		}
+	}
+
+	if (rq->tag != NULL)
+		bus_dma_tag_destroy(rq->tag);
+
+	LOCK_DESTROY(&rq->rx_lock);
+	free(rq, M_DEVBUF);
+}
+
+
+
+
+/**
+ * @brief Create a receive queue
+ * @param rq receive queue
+ * @param if_id interface identifier index
+ * @param eq pointer to event queue
+ */
+static int
+oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
+{
+	POCE_SOFTC sc = rq->parent;
+	struct oce_cq *cq;
+
+	/* completion queue for this RQ, hung off the given EQ */
+	cq = oce_cq_create(sc,
+			   eq,
+			   CQ_LEN_1024,
+			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
+	if (!cq)
+		return ENXIO;
+
+	rq->cq = cq;
+	rq->cfg.if_id = if_id;
+
+	/* Dont create RQ here. Create in if_activate */
+	rq->qstate = 0;
+	rq->ring->cidx = 0;
+	rq->ring->pidx = 0;
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	cq->cb_arg = rq;
+	cq->cq_handler = oce_rq_handler;
+
+	return 0;
+
+}
+
+
+
+
+/**
+ * @brief Delete a receive queue
+ * @param rq receive queue
+ */
+static void
+oce_rq_del(struct oce_rq *rq)
+{
+	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+	struct oce_mbx mbx;
+	struct mbx_delete_nic_rq *fwcmd;
+
+	/* only issue the firmware delete if the RQ was actually created */
+	if (rq->qstate == QCREATED) {
+		bzero(&mbx, sizeof(mbx));
+
+		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+		fwcmd->params.req.rq_id = rq->rq_id;
+		(void)oce_destroy_q(sc, &mbx,
+				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+		rq->qstate = QDELETED;
+	}
+
+	if (rq->cq != NULL) {
+		oce_cq_del(sc, rq->cq);
+		rq->cq = NULL;
+	}
+}
+
+
+
+/**
+ * @brief function to create an event queue
+ * @param sc software handle to the device
+ * @param q_len length of event queue
+ * @param item_size size of an event queue item
+ * @param eq_delay event queue delay
+ * @retval eq success, pointer to event queue
+ * @retval NULL failure
+ */
+static struct
+oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
+				    uint32_t item_size,
+				    uint32_t eq_delay,
+				    uint32_t vector)
+{
+	struct oce_eq *eq;
+	int rc = 0;
+
+	/* allocate an eq */
+	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (eq == NULL)
+		return NULL;
+
+	eq->parent = (void *)sc;
+	/* 0xffff = "not yet created in firmware"; oce_eq_del keys off it */
+	eq->eq_id = 0xffff;
+	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
+	if (!eq->ring)
+		goto free_eq;
+
+	eq->eq_cfg.q_len = q_len;
+	eq->eq_cfg.item_size = item_size;
+	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
+
+	rc = oce_mbox_create_eq(eq);
+	if (rc)
+		goto free_eq;
+
+	/* bind this EQ to the next interrupt slot */
+	sc->intrs[sc->neqs++].eq = eq;
+
+	return eq;
+
+free_eq:
+	oce_eq_del(eq);
+	return NULL;
+}
+
+
+
+
+/**
+ * @brief Function to delete an event queue
+ * @param eq pointer to an event queue
+ */
+static void
+oce_eq_del(struct oce_eq *eq)
+{
+	struct oce_mbx mbx;
+	struct mbx_destroy_common_eq *fwcmd;
+	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
+
+	/* 0xffff means the EQ never made it into firmware; skip the mbox */
+	if (eq->eq_id != 0xffff) {
+		bzero(&mbx, sizeof(mbx));
+		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
+		fwcmd->params.req.id = eq->eq_id;
+		(void)oce_destroy_q(sc, &mbx,
+			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
+	}
+
+	if (eq->ring != NULL) {
+		oce_destroy_ring_buffer(sc, eq->ring);
+		eq->ring = NULL;
+	}
+
+	free(eq, M_DEVBUF);
+
+}
+
+
+
+
+/**
+ * @brief Function to create an MQ
+ * @param sc software handle to the device
+ * @param eq the EQ to associate with the MQ for event notification
+ * @param q_len the number of entries to create in the MQ
+ * @returns pointer to the created MQ, failure otherwise
+ */
+static struct oce_mq *
+oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_mq *fwcmd = NULL;
+	struct oce_mq *mq = NULL;
+	int rc = 0;
+	struct oce_cq *cq;
+	oce_mq_ctx_t *ctx;
+	uint32_t num_pages;
+	uint32_t page_size;
+	uint32_t version;
+
+
+	cq = oce_cq_create(sc, eq, CQ_LEN_256,
+			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
+	if (!cq)
+		return NULL;
+
+	/* allocate the mq */
+	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!mq) {
+		oce_cq_del(sc, cq);
+		/* NOTE(review): mq is NULL here and the error path calls
+		 * oce_mq_free(mq), which reads mq->parent before its own
+		 * NULL check — potential NULL dereference; verify */
+		goto error;
+	}
+
+	mq->parent = sc;
+
+	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
+	if (!mq->ring)
+		goto error;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
+	version = OCE_MBX_VER_V0;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+				MBX_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_CREATE_MQ,
+				MBX_TIMEOUT_SEC,
+				sizeof(struct mbx_create_common_mq),
+				version);
+
+	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
+	page_size = mq->ring->num_items * mq->ring->item_size;
+
+	ctx = &fwcmd->params.req.context;
+	ctx->v0.num_pages = num_pages;
+	ctx->v0.cq_id = cq->cq_id;
+	/* ring_size is encoded as log2(q_len)+1 per the MQ context layout */
+	ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
+	ctx->v0.valid = 1;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_mq);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto error;
+
+	/* hook the MQ to its CQ/EQ and mark it live */
+	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
+	mq->cq = cq;
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	mq->cq->eq = eq;
+	mq->cfg.q_len = (uint8_t) q_len;
+	mq->cfg.eqd = 0;
+	mq->qstate = QCREATED;
+
+	mq->cq->cb_arg = mq;
+	mq->cq->cq_handler = oce_mq_handler;
+
+	return mq;
+
+error:
+	device_printf(sc->dev, "MQ create failed\n");
+	oce_mq_free(mq);
+	mq = NULL;
+	return mq;
+}
+
+
+
+
+
+/**
+ * @brief Function to free a mailbox queue
+ * @param mq pointer to a mailbox queue
+ */
+static void
+oce_mq_free(struct oce_mq *mq)
+{
+	/* NOTE(review): mq->parent is dereferenced here, BEFORE the
+	 * !mq guard below — a NULL mq (as from oce_mq_create's early
+	 * error path) would fault. The guard should come first. */
+	POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
+	struct oce_mbx mbx;
+	struct mbx_destroy_common_mq *fwcmd;
+
+	if (!mq)
+		return;
+
+	if (mq->ring != NULL) {
+		oce_destroy_ring_buffer(sc, mq->ring);
+		mq->ring = NULL;
+		/* tell firmware to drop the MQ if it was ever created */
+		if (mq->qstate == QCREATED) {
+			bzero(&mbx, sizeof (struct oce_mbx));
+			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
+			fwcmd->params.req.id = mq->mq_id;
+			(void) oce_destroy_q(sc, &mbx,
+				sizeof (struct mbx_destroy_common_mq),
+				QTYPE_MQ);
+		}
+		mq->qstate = QDELETED;
+	}
+
+	if (mq->cq != NULL) {
+		oce_cq_del(sc, mq->cq);
+		mq->cq = NULL;
+	}
+
+	free(mq, M_DEVBUF);
+	mq = NULL;
+}
+
+
+
+/**
+ * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
+ * @param sc software handle to the device
+ * @param mbx mailbox command to send to the fw to delete the queue
+ * (mbx contains the queue information to delete)
+ * @param req_size the size of the mbx payload dependent on the qtype
+ * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
+ * @returns 0 on success, failure otherwise
+ */
+static int
+oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
+		enum qtype qtype)
+{
+	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
+	int opcode;
+	int subsys;
+	int rc = 0;
+
+	/* map the queue type to its destroy opcode and mailbox subsystem */
+	switch (qtype) {
+	case QTYPE_EQ:
+		opcode = OPCODE_COMMON_DESTROY_EQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_CQ:
+		opcode = OPCODE_COMMON_DESTROY_CQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_MQ:
+		opcode = OPCODE_COMMON_DESTROY_MQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_WQ:
+		opcode = NIC_DELETE_WQ;
+		subsys = MBX_SUBSYSTEM_NIC;
+		break;
+	case QTYPE_RQ:
+		opcode = NIC_DELETE_RQ;
+		subsys = MBX_SUBSYSTEM_NIC;
+		break;
+	default:
+		return EINVAL;
+	}
+
+	/* caller has already filled the queue id in the payload */
+	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
+				opcode, MBX_TIMEOUT_SEC, req_size,
+				OCE_MBX_VER_V0);
+
+	mbx->u0.s.embedded = 1;
+	mbx->payload_length = (uint32_t) req_size;
+	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, mbx, NULL);
+
+	if (rc != 0)
+		device_printf(sc->dev, "Failed to del q\n");
+
+	return rc;
+}
+
+
+
+/**
+ * @brief Function to create a completion queue
+ * @param sc software handle to the device
+ * @param eq optional eq to be associated with to the cq
+ * @param q_len length of completion queue
+ * @param item_size size of completion queue items
+ * @param sol_event command context event
+ * @param is_eventable event table
+ * @param nodelay no delay flag
+ * @param ncoalesce no coalescence flag
+ * @returns pointer to the cq created, NULL on failure
+ */
+struct oce_cq *
+oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
+			     uint32_t q_len,
+			     uint32_t item_size,
+			     uint32_t sol_event,
+			     uint32_t is_eventable,
+			     uint32_t nodelay, uint32_t ncoalesce)
+{
+	struct oce_cq *cq = NULL;
+	int rc = 0;
+
+	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!cq)
+		return NULL;
+
+	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
+	if (!cq->ring)
+		goto error;
+
+	cq->parent = sc;
+	cq->eq = eq;
+	cq->cq_cfg.q_len = q_len;
+	cq->cq_cfg.item_size = item_size;
+	cq->cq_cfg.nodelay = (uint8_t) nodelay;
+
+	/* create the CQ in firmware; fills in cq->cq_id on success */
+	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
+	if (rc)
+		goto error;
+
+	/* track the CQ at the softc level for teardown */
+	sc->cq[sc->ncqs++] = cq;
+
+	return cq;
+
+error:
+	device_printf(sc->dev, "CQ create failed\n");
+	oce_cq_del(sc, cq);
+	return NULL;
+}
+
+
+
+/**
+ * @brief Deletes the completion queue
+ * @param sc software handle to the device
+ * @param cq pointer to a completion queue
+ */
+static void
+oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
+{
+	struct oce_mbx mbx;
+	struct mbx_destroy_common_cq *fwcmd;
+
+	if (cq->ring != NULL) {
+
+		bzero(&mbx, sizeof(struct oce_mbx));
+		/* now fill the command */
+		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
+		fwcmd->params.req.id = cq->cq_id;
+		(void)oce_destroy_q(sc, &mbx,
+			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
+		/*NOW destroy the ring */
+		oce_destroy_ring_buffer(sc, cq->ring);
+		cq->ring = NULL;
+	}
+
+	free(cq, M_DEVBUF);
+	/* clears only the local copy; callers must drop their own pointer */
+	cq = NULL;
+}
+
+
+
+/**
+ * @brief Start a receive queue
+ * @param rq pointer to a receive queue
+ */
+int
+oce_start_rq(struct oce_rq *rq)
+{
+	int rc;
+
+	/* post a full ring of receive buffers to the hardware */
+	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
+
+	if (rc == 0)
+		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);  /* enable completions */
+	return rc;
+}
+
+
+
+/**
+ * @brief Start a work queue
+ * @param wq pointer to a work queue
+ */
+int
+oce_start_wq(struct oce_wq *wq)
+{
+	/* arm the tx CQ so transmit completions generate events */
+	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
+	return 0;
+}
+
+
+
+/**
+ * @brief Start a mailbox queue
+ * @param mq pointer to a mailbox queue
+ */
+int
+oce_start_mq(struct oce_mq *mq)
+{
+	/* arm the MQ's CQ so mailbox completions generate events */
+	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
+	return 0;
+}
+
+
+
+/**
+ * @brief Function to arm an EQ so that it can generate events
+ * @param sc software handle to the device
+ * @param qid id of the EQ returned by the fw at the time of creation
+ * @param npopped number of EQEs to arm
+ * @param rearm rearm bit enable/disable
+ * @param clearint bit to clear the interrupt condition because of which
+ * EQEs are generated
+ */
+void
+oce_arm_eq(POCE_SOFTC sc,
+	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
+{
+	eq_db_t eq_db = { 0 };
+
+	/* build the EQ doorbell: ack npopped entries, optionally re-arm
+	 * and clear the interrupt condition, then ring it */
+	eq_db.bits.rearm = rearm;
+	eq_db.bits.event = 1;
+	eq_db.bits.num_popped = npopped;
+	eq_db.bits.clrint = clearint;
+	eq_db.bits.qid = qid;
+	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
+
+}
+
+
+
+
+/**
+ * @brief Function to arm a CQ with CQEs
+ * @param sc software handle to the device
+ * @param qid id of the CQ returned by the fw at the time of creation
+ * @param npopped number of CQEs to arm
+ * @param rearm rearm bit enable/disable
+ */
+void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
+{
+	cq_db_t cq_db = { 0 };
+
+	/* build the CQ doorbell: ack npopped CQEs and optionally re-arm */
+	cq_db.bits.rearm = rearm;
+	cq_db.bits.num_popped = npopped;
+	cq_db.bits.event = 0;
+	cq_db.bits.qid = qid;
+	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
+
+}
+
+
+
+
+/*
+ * @brief function to cleanup the eqs used during stop
+ * @param eq pointer to event queue structure
+ * @returns nothing; consumed EQEs are acknowledged by re-arming the EQ
+ */
+void
+oce_drain_eq(struct oce_eq *eq)
+{
+
+	struct oce_eqe *eqe;
+	uint16_t num_eqe = 0;
+	POCE_SOFTC sc = eq->parent;
+
+	/* consume every valid EQE without dispatching handlers */
+	do {
+		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
+		if (eqe->evnt == 0)
+			break;
+		eqe->evnt = 0;
+		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
+					BUS_DMASYNC_POSTWRITE);
+		num_eqe++;
+		RING_GET(eq->ring, 1);
+
+	} while (TRUE);
+
+	/* ack the drained entries; no re-arm, clear the interrupt */
+	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
+
+}
+
+
+
+/**
+ * @brief Drain a WQ's completion queue without processing the completions
+ * @param wq pointer to the work queue whose CQ should be drained
+ */
+void
+oce_drain_wq_cq(struct oce_wq *wq)
+{
+	POCE_SOFTC sc = wq->parent;
+	struct oce_cq *cq = wq->cq;
+	struct oce_nic_tx_cqe *cqe;
+	int num_cqes = 0;
+
+	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
+				 BUS_DMASYNC_POSTWRITE);
+
+	/* dw[3]==0 marks an invalid/consumed tx CQE */
+	do {
+		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+		if (cqe->u0.dw[3] == 0)
+			break;
+		cqe->u0.dw[3] = 0;
+		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
+					 BUS_DMASYNC_POSTWRITE);
+		RING_GET(cq->ring, 1);
+		num_cqes++;
+
+	} while (TRUE);
+
+	/* ack drained completions without re-arming */
+	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+
+}
+
+
+/*
+ * @brief function to drain a MCQ and process its CQEs
+ * @param dev software handle to the device
+ * @param cq pointer to the cq to drain
+ * @returns nothing (currently an unimplemented stub)
+ */
+void
+oce_drain_mq_cq(void *arg)
+{
+	/* TODO: additional code. */
+	/* intentionally a no-op for now; MCC CQ draining not implemented */
+	return;
+}
+
+
+
+/**
+ * @brief function to drain a Receive queue's CQ
+ * @param arg pointer to the RQ to charge
+ * @return number of cqes processed
+ */
+void
+oce_drain_rq_cq(struct oce_rq *rq)
+{
+	struct oce_nic_rx_cqe *cqe;
+	uint16_t num_cqe = 0;
+	struct oce_cq *cq;
+	POCE_SOFTC sc;
+
+	sc = rq->parent;
+	cq = rq->cq;
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+	/* dequeue till you reach an invalid cqe */
+	while (RQ_CQE_VALID(cqe)) {
+		RQ_CQE_INVALIDATE(cqe);
+		RING_GET(cq->ring, 1);
+		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
+						struct oce_nic_rx_cqe);
+		num_cqe++;
+	}
+	/* ack drained completions without re-arming */
+	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
+
+	return;
+}
+
+
+/**
+ * @brief Release all receive buffers still posted to (unconsumed by) an RQ
+ * @param rq pointer to the receive queue whose pending buffers are freed
+ */
+void
+oce_free_posted_rxbuf(struct oce_rq *rq)
+{
+	struct oce_packet_desc *pd;
+
+	/* walk packets_out..packets_in circularly until nothing is pending */
+	while (rq->pending) {
+
+		pd = &rq->pckts[rq->packets_out];
+		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(rq->tag, pd->map);
+		if (pd->mbuf != NULL) {
+			m_freem(pd->mbuf);
+			pd->mbuf = NULL;
+		}
+
+		/* advance the consumer index with wrap-around */
+		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
+			rq->packets_out = 0;
+		else
+			rq->packets_out++;
+
+		rq->pending--;
+	}
+
+}
+
+/**
+ * @brief Stop all receive queues: delete them in firmware and free
+ *        any RX buffers still posted
+ * @param sc software handle to the device
+ */
+void
+oce_stop_rx(POCE_SOFTC sc)
+{
+	struct oce_mbx mbx;
+	struct mbx_delete_nic_rq *fwcmd;
+	struct oce_rq *rq;
+	int i = 0;
+
+	for_all_rq_queues(sc, rq, i) {
+		if (rq->qstate == QCREATED) {
+			/* Delete rxq in firmware */
+
+			bzero(&mbx, sizeof(mbx));
+			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+			fwcmd->params.req.rq_id = rq->rq_id;
+
+			(void)oce_destroy_q(sc, &mbx,
+				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+
+			rq->qstate = QDELETED;
+
+			DELAY(1);
+
+			/* Free posted RX buffers that are not used */
+			oce_free_posted_rxbuf(rq);
+
+		}
+	}
+}
+
+
+
+/**
+ * @brief (Re)create all receive queues in firmware and reset their state;
+ *        also programs RSS if enabled
+ * @param sc software handle to the device
+ * @returns 0 on success, error code otherwise
+ */
+int
+oce_start_rx(POCE_SOFTC sc)
+{
+	struct oce_rq *rq;
+	int rc = 0, i;
+
+	for_all_rq_queues(sc, rq, i) {
+		/* skip queues that are already live in firmware */
+		if (rq->qstate == QCREATED)
+			continue;
+		rc = oce_mbox_create_rq(rq);
+		if (rc)
+			goto error;
+		/* reset queue pointers */
+		rq->qstate = QCREATED;
+		rq->pending = 0;
+		rq->ring->cidx = 0;
+		rq->ring->pidx = 0;
+		rq->packets_in = 0;
+		rq->packets_out = 0;
+	}
+
+	DELAY(1);
+
+	/* RSS config */
+	if (sc->rss_enable) {
+		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
+		if (rc)
+			goto error;
+
+	}
+
+	return rc;
+error:
+	device_printf(sc->dev, "Start RX failed\n");
+	return rc;
+
+}
+
+
+
diff --git a/sys/dev/oce/oce_sysctl.c b/sys/dev/oce/oce_sysctl.c
new file mode 100644
index 0000000..e0c50dd
--- /dev/null
+++ b/sys/dev/oce/oce_sysctl.c
@@ -0,0 +1,1300 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+/* $FreeBSD$ */
+
+
+#include "oce_if.h"
+
+static void copy_stats_to_sc_xe201(POCE_SOFTC sc);
+static void copy_stats_to_sc_be3(POCE_SOFTC sc);
+static void copy_stats_to_sc_be2(POCE_SOFTC sc);
+static int oce_sysctl_loopback(SYSCTL_HANDLER_ARGS);
+static int oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw);
+static int oce_sys_fwupgrade(SYSCTL_HANDLER_ARGS);
+static int oce_be3_flashdata(POCE_SOFTC sc, const struct firmware
+ *fw, int num_imgs);
+static int oce_lancer_fwupgrade(POCE_SOFTC sc, const struct firmware *fw);
+static boolean_t oce_phy_flashing_required(POCE_SOFTC sc);
+static boolean_t oce_img_flashing_required(POCE_SOFTC sc, const char *p,
+ int img_optype, uint32_t img_offset,
+ uint32_t img_size, uint32_t hdrs_size);
+static void oce_add_stats_sysctls_be3(POCE_SOFTC sc,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *stats_node);
+static void oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *stats_node);
+
+extern char component_revision[32];
+
+
+/*
+ * Register the driver's sysctl tree: driver/firmware version strings,
+ * the max_rsp_handled tunable, link speed, the loop_back and fw_upgrade
+ * handler nodes, and the per-ASIC statistics subtree.
+ *
+ * Note: the SYSCTL_ADD_STRING/SYSCTL_ADD_INT macros set the CTLTYPE
+ * bits themselves, so only the access flags are passed here.  (The
+ * previous code OR-ed in CTLTYPE_INT, which is wrong for a string node
+ * and redundant for an int node.)
+ */
+void
+oce_add_sysctls(POCE_SOFTC sc)
+{
+
+	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+	struct sysctl_oid *tree = device_get_sysctl_tree(sc->dev);
+	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+	struct sysctl_oid *stats_node;
+
+	SYSCTL_ADD_STRING(ctx, child,
+			OID_AUTO, "component_revision",
+			CTLFLAG_RD,
+			component_revision,
+			sizeof(component_revision),
+			"EMULEX One-Connect device driver revision");
+
+	SYSCTL_ADD_STRING(ctx, child,
+			OID_AUTO, "firmware_version",
+			CTLFLAG_RD,
+			sc->fw_version,
+			sizeof(sc->fw_version),
+			"EMULEX One-Connect Firmware Version");
+
+	SYSCTL_ADD_INT(ctx, child,
+			OID_AUTO, "max_rsp_handled",
+			CTLFLAG_RW,
+			&oce_max_rsp_handled,
+			sizeof(oce_max_rsp_handled),
+			"Maximum receive frames handled per interrupt");
+
+	/* In Flex10 (partitioned) mode, report the per-function QOS
+	 * speed instead of the physical link speed. */
+	if (sc->function_mode & FNM_FLEX10_MODE)
+		SYSCTL_ADD_UINT(ctx, child,
+				OID_AUTO, "speed",
+				CTLFLAG_RD,
+				&sc->qos_link_speed,
+				0,"QOS Speed");
+	else
+		SYSCTL_ADD_UINT(ctx, child,
+				OID_AUTO, "speed",
+				CTLFLAG_RD,
+				&sc->speed,
+				0,"Link Speed");
+
+	/* PROC nodes need the CTLTYPE passed explicitly. */
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "loop_back",
+		CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
+		oce_sysctl_loopback, "I", "Loop Back Tests");
+
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fw_upgrade",
+		CTLTYPE_STRING | CTLFLAG_RW, (void *)sc, 0,
+		oce_sys_fwupgrade, "A", "Firmware ufi file");
+
+	stats_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
+				CTLFLAG_RD, NULL, "Ethernet Statistics");
+
+	if (IS_BE(sc))
+		oce_add_stats_sysctls_be3(sc, ctx, stats_node);
+	else
+		oce_add_stats_sysctls_xe201(sc, ctx, stats_node);
+
+
+}
+
+
+
+/*
+ * Run one firmware loopback test: enable the requested loopback mode,
+ * ask the firmware to send test frames (1500 bytes, 2 frames, pattern
+ * 0xabc), then restore the port to OCE_NO_LOOPBACK.
+ *
+ * Returns the status from the TEST_LOOPBACK mailbox command (0 on
+ * success).  The set/restore calls' return values are not checked.
+ */
+static uint32_t
+oce_loopback_test(struct oce_softc *sc, uint8_t loopback_type)
+{
+	uint32_t status = 0;
+
+	oce_mbox_cmd_set_loopback(sc, sc->if_id, loopback_type, 1);
+	status = oce_mbox_cmd_test_loopback(sc, sc->if_id, loopback_type,
+				1500, 2, 0xabc);
+	oce_mbox_cmd_set_loopback(sc, sc->if_id, OCE_NO_LOOPBACK, 1);
+
+	return status;
+}
+
+
+/*
+ * Sysctl handler for dev.oce.N.loop_back.  Writing 1 runs the MAC,
+ * PHY and external (one-port) loopback tests in sequence, printing
+ * pass/fail for each via device_printf.  Any other value is rejected
+ * with a usage message.  Always returns 0 once a new value was
+ * supplied; test failures are reported on the console only.
+ */
+static int
+oce_sysctl_loopback(SYSCTL_HANDLER_ARGS)
+{
+	int value = 0;
+	uint32_t status;
+	struct oce_softc *sc = (struct oce_softc *)arg1;
+
+	status = sysctl_handle_int(oidp, &value, 0, req);
+	/* read-only access or handler error: nothing more to do */
+	if (status || !req->newptr)
+		return status;
+
+	if (value != 1) {
+		device_printf(sc->dev,
+		    "Not a Valid value. Set to loop_back=1 to run tests\n");
+		return 0;
+	}
+
+	if ((status = oce_loopback_test(sc, OCE_MAC_LOOPBACK))) {
+		device_printf(sc->dev,
+		    "MAC Loopback Test = Failed (Error status = %d)\n",
+		     status);
+	} else
+		device_printf(sc->dev, "MAC Loopback Test = Success\n");
+
+	if ((status = oce_loopback_test(sc, OCE_PHY_LOOPBACK))) {
+		device_printf(sc->dev,
+		    "PHY Loopback Test = Failed (Error status = %d)\n",
+		     status);
+	} else
+		device_printf(sc->dev, "PHY Loopback Test = Success\n");
+
+	if ((status = oce_loopback_test(sc, OCE_ONE_PORT_EXT_LOOPBACK))) {
+		device_printf(sc->dev,
+		    "EXT Loopback Test = Failed (Error status = %d)\n",
+		     status);
+	} else
+		device_printf(sc->dev, "EXT Loopback Test = Success\n");
+
+	return 0;
+}
+
+
+/*
+ * Sysctl handler for dev.oce.N.fw_upgrade.  The written string names a
+ * firmware image registered with firmware(9) (i.e. loadable from
+ * /boot/modules).  Dispatches to the BE3 or Lancer flashing routine
+ * depending on the ASIC; BE2 flashing is not supported.
+ *
+ * Returns 0 on success, ENOENT if the image cannot be loaded, or a
+ * non-zero status from the flashing routine.
+ */
+static int
+oce_sys_fwupgrade(SYSCTL_HANDLER_ARGS)
+{
+	char ufiname[256] = {0};
+	uint32_t status = 1;
+	struct oce_softc *sc  = (struct oce_softc *)arg1;
+	const struct firmware *fw;
+
+	status = sysctl_handle_string(oidp, ufiname, sizeof(ufiname), req);
+	/* read-only access or handler error: nothing more to do */
+	if (status || !req->newptr)
+		return status;
+
+	fw = firmware_get(ufiname);
+	if (fw == NULL) {
+		device_printf(sc->dev, "Unable to get Firmware. "
+			"Make sure %s is copied to /boot/modules\n", ufiname);
+		return ENOENT;
+	}
+
+	if (IS_BE(sc)) {
+		if ((sc->flags & OCE_FLAGS_BE2)) {
+			device_printf(sc->dev,
+				"Flashing not supported for BE2 yet.\n");
+			status = 1;
+			goto done;
+		}
+		status = oce_be3_fwupgrade(sc, fw);
+	} else
+		status = oce_lancer_fwupgrade(sc, fw);
+done:
+	if (status) {
+		device_printf(sc->dev, "Firmware Upgrade failed\n");
+	} else {
+		device_printf(sc->dev, "Firmware Flashed successfully\n");
+	}
+
+	/* Release Firmware*/
+	firmware_put(fw, FIRMWARE_UNLOAD);
+
+	return status;
+}
+
+
+/*
+ * Validate a BE3 UFI image and flash it.  The file starts with a
+ * flash_file_hdr followed by num_imgs image_hdr entries; build[0] must
+ * be '3' for a BE3 image (the version string printed starts at
+ * build[2], skipping that "3." prefix -- presumably; confirm against
+ * the UFI format spec).  Flashing proceeds only when an image_hdr with
+ * imageid == 1 is present.
+ *
+ * Returns 0 on success (or when no imageid-1 entry exists), EINVAL for
+ * a non-BE3 image, or the status from oce_be3_flashdata().
+ */
+static int
+oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw)
+{
+	int rc = 0, num_imgs = 0, i = 0;
+	const struct flash_file_hdr *fhdr;
+	const struct image_hdr *img_ptr;
+
+	fhdr = (const struct flash_file_hdr *)fw->data;
+	if (fhdr->build[0] != '3') {
+		device_printf(sc->dev, "Invalid BE3 firmware image\n");
+		return EINVAL;
+	}
+	/* Display flash version */
+	device_printf(sc->dev, "Flashing Firmware %s\n", &fhdr->build[2]);
+
+	num_imgs = fhdr->num_imgs;
+	for (i = 0; i < num_imgs; i++) {
+		img_ptr = (const struct image_hdr *)((const char *)fw->data +
+				sizeof(struct flash_file_hdr) +
+				(i * sizeof(struct image_hdr)));
+		if (img_ptr->imageid == 1) {
+			rc = oce_be3_flashdata(sc, fw, num_imgs);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+
+/*
+ * Flash a BE3 UFI image section by section.
+ *
+ * The binary payload begins after the file header and the image_hdr
+ * table (bin_offset).  A flash_sec_info directory is located by
+ * scanning forward in 32-byte steps for the "*** SE FLASH DIRECTORY
+ * *** " cookie.  Each directory entry maps to a fixed (optype, size,
+ * start) region of the flash part; unknown types are skipped, and the
+ * redboot/PHY images are skipped when a CRC/PHY check says reflashing
+ * is unnecessary.  Data is pushed to the firmware in 32 KB chunks via
+ * WRITE_FLASHROM mailbox commands, using FLASHROM_OPER_SAVE for
+ * intermediate chunks and FLASHROM_OPER_FLASH for the final chunk of
+ * each region.  The magic sizes/offsets are the BE3 flash layout --
+ * TODO confirm against the Emulex flash layout documentation.
+ *
+ * Returns 0 on success, EINVAL for a corrupt image, ENOMEM on DMA
+ * allocation failure, EIO on a failed mailbox write, or 1 when a
+ * region would run past the end of the file.
+ */
+static int
+oce_be3_flashdata(POCE_SOFTC sc, const struct firmware *fw, int num_imgs)
+{
+	char cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+	const char *p = (const char *)fw->data;
+	const struct flash_sec_info *fsec = NULL;
+	struct mbx_common_read_write_flashrom *req;
+	int rc = 0, i, img_type, bin_offset = 0;
+	boolean_t skip_image;
+	uint32_t optype = 0, size = 0, start = 0, num_bytes = 0;
+	uint32_t opcode = 0;
+	OCE_DMA_MEM dma_mem;
+
+	/* Validate Cookie */
+	bin_offset = (sizeof(struct flash_file_hdr) +
+			(num_imgs * sizeof(struct image_hdr)));
+	p += bin_offset;
+	while (p < ((const char *)fw->data + fw->datasize)) {
+		fsec = (const struct flash_sec_info *)p;
+		if (!memcmp(cookie, fsec->cookie, sizeof(cookie)))
+			break;
+		fsec = NULL;
+		p += 32;
+	}
+
+	if (!fsec) {
+		device_printf(sc->dev,
+			"Invalid Cookie. Firmware image corrupted ?\n");
+		return EINVAL;
+	}
+
+	/* DMA buffer holds the mailbox request plus one 32 KB data chunk */
+	rc = oce_dma_alloc(sc, sizeof(struct mbx_common_read_write_flashrom)
+				+ 32*1024, &dma_mem, 0);
+	if (rc) {
+		device_printf(sc->dev,
+			"Memory allocation failure while flashing\n");
+		return ENOMEM;
+	}
+	req = OCE_DMAPTR(&dma_mem, struct mbx_common_read_write_flashrom);
+
+	for (i = 0; i < MAX_FLASH_COMP; i++) {
+
+		img_type = fsec->fsec_entry[i].type;
+		skip_image = FALSE;
+		/* Map section type to flash optype and fixed region */
+		switch (img_type) {
+		case IMG_ISCSI:
+			optype = 0;
+			size = 2097152;
+			start = 2097152;
+			break;
+		case IMG_REDBOOT:
+			optype = 1;
+			size = 1048576;
+			start = 262144;
+			/* reflash redboot only if its CRC differs */
+			if (!oce_img_flashing_required(sc, fw->data,
+						optype, start, size, bin_offset))
+				skip_image = TRUE;
+			break;
+		case IMG_BIOS:
+			optype = 2;
+			size = 524288;
+			start = 12582912;
+			break;
+		case IMG_PXEBIOS:
+			optype = 3;
+			size = 524288;
+			start = 13107200;
+			break;
+		case IMG_FCOEBIOS:
+			optype = 8;
+			size = 524288;
+			start = 13631488;
+			break;
+		case IMG_ISCSI_BAK:
+			optype = 9;
+			size = 2097152;
+			start = 4194304;
+			break;
+		case IMG_FCOE:
+			optype = 10;
+			size = 2097152;
+			start = 6291456;
+			break;
+		case IMG_FCOE_BAK:
+			optype = 11;
+			size = 2097152;
+			start = 8388608;
+			break;
+		case IMG_NCSI:
+			optype = 13;
+			size = 262144;
+			start = 15990784;
+			break;
+		case IMG_PHY:
+			optype = 99;
+			size = 262144;
+			start = 1310720;
+			/* only TN_8022 10GBase-T PHYs take this image */
+			if (!oce_phy_flashing_required(sc))
+				skip_image = TRUE;
+			break;
+		default:
+			skip_image = TRUE;
+			break;
+		}
+		if (skip_image)
+			continue;
+
+		p = fw->data;
+		p = p + bin_offset + start;
+		/* region must lie entirely within the file */
+		if ((p + size) > ((const char *)fw->data + fw->datasize)) {
+			rc = 1;
+			goto ret;
+		}
+
+		while (size) {
+
+			if (size > 32*1024)
+				num_bytes = 32*1024;
+			else
+				num_bytes = size;
+			size -= num_bytes;
+
+			/* last chunk of the region triggers the burn */
+			if (!size)
+				opcode = FLASHROM_OPER_FLASH;
+			else
+				opcode = FLASHROM_OPER_SAVE;
+
+			memcpy(req->data_buffer, p, num_bytes);
+			p += num_bytes;
+
+			rc = oce_mbox_write_flashrom(sc, optype, opcode,
+						&dma_mem, num_bytes);
+			if (rc) {
+				device_printf(sc->dev,
+					"cmd to write to flash rom failed.\n");
+				rc = EIO;
+				goto ret;
+			}
+			/* Leave the CPU for others for some time */
+			pause("yield", 10);
+
+		}
+	}
+ret:
+	oce_dma_free(sc, &dma_mem);
+	return rc;
+
+}
+
+
+/*
+ * Decide whether the PHY firmware section must be flashed.  Queries
+ * the PHY via the GET_PHY_INFO mailbox command; only a TN_8022 PHY on
+ * a 10GBase-T interface takes the PHY image.  On any mailbox failure
+ * the answer is FALSE (skip the PHY section).
+ */
+static boolean_t
+oce_phy_flashing_required(POCE_SOFTC sc)
+{
+	struct oce_phy_info phy_info;
+
+	if (oce_mbox_get_phy_info(sc, &phy_info) != 0)
+		return FALSE;
+
+	return ((phy_info.phy_type == TN_8022 &&
+	    phy_info.interface_type == PHY_TYPE_BASET_10GB) ? TRUE : FALSE);
+}
+
+
+/*
+ * Decide whether a flash image section needs to be rewritten by
+ * comparing the 4-byte CRC stored in the last 4 bytes of the image in
+ * the UFI file against the CRC the firmware reports for the
+ * corresponding flash region.  Returns TRUE (reflash) when the CRCs
+ * differ or when the firmware query fails; FALSE when they match.
+ */
+static boolean_t
+oce_img_flashing_required(POCE_SOFTC sc, const char *p,
+				int img_optype, uint32_t img_offset,
+				uint32_t img_size, uint32_t hdrs_size)
+{
+	uint32_t crc_offset;
+	uint8_t flashed_crc[4];
+	int status;
+
+	/* CRC occupies the final 4 bytes of the image in the file */
+	crc_offset = hdrs_size + img_offset + img_size - 4;
+
+	p += crc_offset;
+
+	status = oce_mbox_get_flashrom_crc(sc, flashed_crc,
+			(img_size - 4), img_optype);
+	if (status)
+		return TRUE; /* Something wrong. ReFlash */
+
+	/*update redboot only if crc does not match*/
+	if (bcmp(flashed_crc, p, 4))
+		return TRUE;
+	else
+		return FALSE;
+}
+
+
+/*
+ * Flash a Lancer (XE201) firmware image.  The image must be 4-byte
+ * aligned.  Data is copied through a 32 KB DMA buffer and written with
+ * repeated WRITE_OBJECT/flashrom mailbox commands; the firmware
+ * reports how much it consumed per call (data_written), which drives
+ * the file offset.  A final zero-length write commits the image.
+ *
+ * Returns 0 on success, EINVAL for a misaligned image, ENOMEM on DMA
+ * allocation failure, or EIO on a mailbox failure (add_status is
+ * printed for diagnosis).
+ */
+static int
+oce_lancer_fwupgrade(POCE_SOFTC sc, const struct firmware *fw)
+{
+
+	int rc = 0;
+	OCE_DMA_MEM dma_mem;
+	const uint8_t *data = NULL;
+	uint8_t *dest_image_ptr = NULL;
+	size_t size = 0;
+	uint32_t data_written = 0, chunk_size = 0;
+	uint32_t offset = 0, add_status = 0;
+
+	if (!IS_ALIGNED(fw->datasize, sizeof(uint32_t))) {
+		device_printf(sc->dev,
+			"Lancer FW image is not 4 byte aligned.");
+		return EINVAL;
+	}
+
+	rc = oce_dma_alloc(sc, 32*1024, &dma_mem, 0);
+	if (rc) {
+		device_printf(sc->dev,
+			"Memory allocation failure while flashing Lancer\n");
+		return ENOMEM;
+	}
+
+	size = fw->datasize;
+	data = fw->data;
+	dest_image_ptr = OCE_DMAPTR(&dma_mem, uint8_t);
+
+	while (size) {
+		chunk_size = MIN(size, (32*1024));
+
+		bcopy(data, dest_image_ptr, chunk_size);
+
+		rc = oce_mbox_lancer_write_flashrom(sc, chunk_size, offset,
+				&dma_mem, &data_written, &add_status);
+
+		if (rc)
+			break;
+
+		/* advance by what the firmware actually accepted */
+		size -= data_written;
+		data += data_written;
+		offset += data_written;
+		/* Leave the CPU for others for some time */
+		pause("yield", 10);
+
+	}
+
+	if (!rc)
+		/* Commit the firmware*/
+		rc = oce_mbox_lancer_write_flashrom(sc, 0, offset, &dma_mem,
+						&data_written, &add_status);
+	if (rc) {
+		device_printf(sc->dev, "Lancer firmware load error. "
+			"Addstatus = 0x%x, status = %d \n", add_status, rc);
+		rc = EIO;
+	}
+	oce_dma_free(sc, &dma_mem);
+	return rc;
+
+}
+
+
+/*
+ * Register the statistics sysctl subtree for BE3 adapters:
+ * stats.rx (totals + per-queue nodes + err subtree) and stats.tx
+ * (totals + per-queue nodes).  The counters live in
+ * sc->oce_stats_info and in each RQ/WQ's stats structure.
+ * (Fixes several typos in the user-visible description strings:
+ * "Fragements", "Recived", "Alignmnet".)
+ */
+static void
+oce_add_stats_sysctls_be3(POCE_SOFTC sc,
+				  struct sysctl_ctx_list *ctx,
+				  struct sysctl_oid *stats_node)
+{
+	struct sysctl_oid *rx_stats_node, *tx_stats_node;
+	struct sysctl_oid_list *rx_stat_list, *tx_stat_list;
+	struct sysctl_oid_list *queue_stats_list;
+	struct sysctl_oid *queue_stats_node;
+	struct oce_drv_stats *stats;
+	char prefix[32];
+	int i;
+
+	stats = &sc->oce_stats_info;
+
+	rx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(stats_node),
+					OID_AUTO,"rx", CTLFLAG_RD,
+					NULL, "RX Ethernet Statistics");
+	rx_stat_list = SYSCTL_CHILDREN(rx_stats_node);
+
+
+	SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_pkts,
+			"Total Received Packets");
+	SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_bytes",
+			CTLFLAG_RD, &stats->rx.t_rx_bytes,
+			"Total Received Bytes");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags",
+			CTLFLAG_RD, &stats->rx.t_rx_frags, 0,
+			"Total Received Fragments");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0,
+			"Total Received Multicast Packets");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_ucast_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_ucast_pkts, 0,
+			"Total Received Unicast Packets");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_rxcp_errs",
+			CTLFLAG_RD, &stats->rx.t_rxcp_errs, 0,
+			"Total Receive completion errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "pause_frames",
+			CTLFLAG_RD, &stats->u0.be.rx_pause_frames, 0,
+			"Pause Frames");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "priority_pause_frames",
+			CTLFLAG_RD, &stats->u0.be.rx_priority_pause_frames, 0,
+			"Priority Pause Frames");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "control_frames",
+			CTLFLAG_RD, &stats->u0.be.rx_control_frames, 0,
+			"Control Frames");
+
+	/* per-RQ counters under stats.rx.queueN */
+	for (i = 0; i < sc->nrqs; i++) {
+		sprintf(prefix, "queue%d",i);
+		queue_stats_node = SYSCTL_ADD_NODE(ctx,
+						SYSCTL_CHILDREN(rx_stats_node),
+						OID_AUTO, prefix, CTLFLAG_RD,
+						NULL, "Queue name");
+		queue_stats_list = SYSCTL_CHILDREN(queue_stats_node);
+
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_pkts",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts,
+			"Receive Packets");
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes,
+			"Received Bytes");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0,
+			"Received Fragments");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"rx_mcast_pkts", CTLFLAG_RD,
+			&sc->rq[i]->rx_stats.rx_mcast_pkts, 0,
+			"Received Multicast Packets");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"rx_ucast_pkts", CTLFLAG_RD,
+			&sc->rq[i]->rx_stats.rx_ucast_pkts, 0,
+			"Received Unicast Packets");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0,
+			"Received Completion Errors");
+
+	}
+
+	/* error counters under stats.rx.err */
+	rx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(rx_stats_node),
+					OID_AUTO, "err", CTLFLAG_RD,
+					NULL, "Receive Error Stats");
+	rx_stat_list = SYSCTL_CHILDREN(rx_stats_node);
+
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "crc_errs",
+			CTLFLAG_RD, &stats->u0.be.rx_crc_errors, 0,
+			"CRC Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "pbuf_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_drops_no_pbuf, 0,
+			"Drops due to pbuf full");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "erx_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_drops_no_erx_descr, 0,
+			"ERX Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "alignment_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_drops_too_many_frags, 0,
+			"RX Alignment Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_in_range_errors, 0,
+			"In Range Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "out_range_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_out_range_errors, 0,
+			"Out Range Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "frame_too_long",
+			CTLFLAG_RD, &stats->u0.be.rx_frame_too_long, 0,
+			"Frame Too Long");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "address_match_errors",
+			CTLFLAG_RD, &stats->u0.be.rx_address_match_errors, 0,
+			"Address Match Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_small",
+			CTLFLAG_RD, &stats->u0.be.rx_dropped_too_small, 0,
+			"Dropped Too Small");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_short",
+			CTLFLAG_RD, &stats->u0.be.rx_dropped_too_short, 0,
+			"Dropped Too Short");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO,
+			"dropped_header_too_small", CTLFLAG_RD,
+			&stats->u0.be.rx_dropped_header_too_small, 0,
+			"Dropped Header Too Small");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_tcp_length",
+			CTLFLAG_RD, &stats->u0.be.rx_dropped_tcp_length, 0,
+			"Dropped TCP Length");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_runt",
+			CTLFLAG_RD, &stats->u0.be.rx_dropped_runt, 0,
+			"Dropped runt");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "ip_checksum_errs",
+			CTLFLAG_RD, &stats->u0.be.rx_ip_checksum_errs, 0,
+			"IP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "tcp_checksum_errs",
+			CTLFLAG_RD, &stats->u0.be.rx_tcp_checksum_errs, 0,
+			"TCP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "udp_checksum_errs",
+			CTLFLAG_RD, &stats->u0.be.rx_udp_checksum_errs, 0,
+			"UDP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "fifo_overflow_drop",
+			CTLFLAG_RD, &stats->u0.be.rxpp_fifo_overflow_drop, 0,
+			"FIFO Overflow Drop");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO,
+			"input_fifo_overflow_drop", CTLFLAG_RD,
+			&stats->u0.be.rx_input_fifo_overflow_drop, 0,
+			"Input FIFO Overflow Drop");
+
+	tx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(stats_node), OID_AUTO,
+					"tx",CTLFLAG_RD, NULL,
+					"TX Ethernet Statistics");
+	tx_stat_list = SYSCTL_CHILDREN(tx_stats_node);
+
+	SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_pkts",
+			CTLFLAG_RD, &stats->tx.t_tx_pkts,
+			"Total Transmit Packets");
+	SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_bytes",
+			CTLFLAG_RD, &stats->tx.t_tx_bytes,
+			"Total Transmit Bytes");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_reqs",
+			CTLFLAG_RD, &stats->tx.t_tx_reqs, 0,
+			"Total Transmit Requests");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_stops",
+			CTLFLAG_RD, &stats->tx.t_tx_stops, 0,
+			"Total Transmit Stops");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_wrbs",
+			CTLFLAG_RD, &stats->tx.t_tx_wrbs, 0,
+			"Total Transmit WRB's");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_compl",
+			CTLFLAG_RD, &stats->tx.t_tx_compl, 0,
+			"Total Transmit Completions");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO,
+			"total_ipv6_ext_hdr_tx_drop", CTLFLAG_RD,
+			&stats->tx.t_ipv6_ext_hdr_tx_drop, 0,
+			"Total Transmit IPV6 Drops");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "pauseframes",
+			CTLFLAG_RD, &stats->u0.be.tx_pauseframes, 0,
+			"Pause Frames");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "priority_pauseframes",
+			CTLFLAG_RD, &stats->u0.be.tx_priority_pauseframes, 0,
+			"Priority Pauseframes");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "controlframes",
+			CTLFLAG_RD, &stats->u0.be.tx_controlframes, 0,
+			"Tx Control Frames");
+
+	/* per-WQ counters under stats.tx.queueN */
+	for (i = 0; i < sc->nwqs; i++) {
+		sprintf(prefix, "queue%d",i);
+		queue_stats_node = SYSCTL_ADD_NODE(ctx,
+						SYSCTL_CHILDREN(tx_stats_node),
+						OID_AUTO, prefix, CTLFLAG_RD,
+						NULL, "Queue name");
+		queue_stats_list = SYSCTL_CHILDREN(queue_stats_node);
+
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_pkts",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts,
+			"Transmit Packets");
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_bytes",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_bytes,
+			"Transmit Bytes");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_reqs",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0,
+			"Transmit Requests");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_stops",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_stops, 0,
+			"Transmit Stops");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_wrbs",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0,
+			"Transmit WRB's");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_compl",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_compl, 0,
+			"Transmit Completions");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"ipv6_ext_hdr_tx_drop",CTLFLAG_RD,
+			&sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop, 0,
+			"Transmit IPV6 Ext Header Drop");
+
+	}
+	return;
+}
+
+
+/*
+ * Register the statistics sysctl subtree for Lancer (XE201) adapters.
+ * Same tree layout as the BE3 variant (stats.rx with per-queue and err
+ * subtrees, stats.tx with per-queue subtrees), but the adapter-level
+ * counters come from the xe201 member of the stats union.
+ * (Fixes typos in the user-visible description strings: "Fragements",
+ * "Recived", "Alignmnet".)
+ */
+static void
+oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
+				  struct sysctl_ctx_list *ctx,
+				  struct sysctl_oid *stats_node)
+{
+	struct sysctl_oid *rx_stats_node, *tx_stats_node;
+	struct sysctl_oid_list *rx_stat_list, *tx_stat_list;
+	struct sysctl_oid_list *queue_stats_list;
+	struct sysctl_oid *queue_stats_node;
+	struct oce_drv_stats *stats;
+	char prefix[32];
+	int i;
+
+	stats = &sc->oce_stats_info;
+
+	rx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(stats_node),
+					OID_AUTO, "rx", CTLFLAG_RD,
+					NULL, "RX Ethernet Statistics");
+	rx_stat_list = SYSCTL_CHILDREN(rx_stats_node);
+
+
+	SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_pkts,
+			"Total Received Packets");
+	SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_bytes",
+			CTLFLAG_RD, &stats->rx.t_rx_bytes,
+			"Total Received Bytes");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags",
+			CTLFLAG_RD, &stats->rx.t_rx_frags, 0,
+			"Total Received Fragments");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0,
+			"Total Received Multicast Packets");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_ucast_pkts",
+			CTLFLAG_RD, &stats->rx.t_rx_ucast_pkts, 0,
+			"Total Received Unicast Packets");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_rxcp_errs",
+			CTLFLAG_RD, &stats->rx.t_rxcp_errs, 0,
+			"Total Receive completion errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "pause_frames",
+			CTLFLAG_RD, &stats->u0.xe201.rx_pause_frames, 0,
+			"Pause Frames");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "control_frames",
+			CTLFLAG_RD, &stats->u0.xe201.rx_control_frames, 0,
+			"Control Frames");
+
+	/* per-RQ counters under stats.rx.queueN */
+	for (i = 0; i < sc->nrqs; i++) {
+		sprintf(prefix, "queue%d",i);
+		queue_stats_node = SYSCTL_ADD_NODE(ctx,
+						SYSCTL_CHILDREN(rx_stats_node),
+						OID_AUTO, prefix, CTLFLAG_RD,
+						NULL, "Queue name");
+		queue_stats_list = SYSCTL_CHILDREN(queue_stats_node);
+
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_pkts",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts,
+			"Receive Packets");
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes,
+			"Received Bytes");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0,
+			"Received Fragments");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"rx_mcast_pkts", CTLFLAG_RD,
+			&sc->rq[i]->rx_stats.rx_mcast_pkts, 0,
+			"Received Multicast Packets");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"rx_ucast_pkts",CTLFLAG_RD,
+			&sc->rq[i]->rx_stats.rx_ucast_pkts, 0,
+			"Received Unicast Packets");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err",
+			CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0,
+			"Received Completion Errors");
+
+	}
+
+	/* error counters under stats.rx.err */
+	rx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(rx_stats_node),
+					OID_AUTO, "err", CTLFLAG_RD,
+					NULL, "Receive Error Stats");
+	rx_stat_list = SYSCTL_CHILDREN(rx_stats_node);
+
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "crc_errs",
+			CTLFLAG_RD, &stats->u0.xe201.rx_crc_errors, 0,
+			"CRC Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "alignment_errors",
+			CTLFLAG_RD, &stats->u0.xe201.rx_alignment_errors, 0,
+			"RX Alignment Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors",
+			CTLFLAG_RD, &stats->u0.xe201.rx_in_range_errors, 0,
+			"In Range Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "out_range_errors",
+			CTLFLAG_RD, &stats->u0.xe201.rx_out_of_range_errors, 0,
+			"Out Range Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "frame_too_long",
+			CTLFLAG_RD, &stats->u0.xe201.rx_frames_too_long, 0,
+			"Frame Too Long");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "address_match_errors",
+			CTLFLAG_RD, &stats->u0.xe201.rx_address_match_errors, 0,
+			"Address Match Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_small",
+			CTLFLAG_RD, &stats->u0.xe201.rx_dropped_too_small, 0,
+			"Dropped Too Small");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_short",
+			CTLFLAG_RD, &stats->u0.xe201.rx_dropped_too_short, 0,
+			"Dropped Too Short");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO,
+			"dropped_header_too_small", CTLFLAG_RD,
+			&stats->u0.xe201.rx_dropped_header_too_small, 0,
+			"Dropped Header Too Small");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO,
+			"dropped_tcp_length", CTLFLAG_RD,
+			&stats->u0.xe201.rx_dropped_invalid_tcp_length, 0,
+			"Dropped TCP Length");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_runt",
+			CTLFLAG_RD, &stats->u0.xe201.rx_dropped_runt, 0,
+			"Dropped runt");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "ip_checksum_errs",
+			CTLFLAG_RD, &stats->u0.xe201.rx_ip_checksum_errors, 0,
+			"IP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "tcp_checksum_errs",
+			CTLFLAG_RD, &stats->u0.xe201.rx_tcp_checksum_errors, 0,
+			"TCP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "udp_checksum_errs",
+			CTLFLAG_RD, &stats->u0.xe201.rx_udp_checksum_errors, 0,
+			"UDP Checksum Errors");
+	SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "input_fifo_overflow_drop",
+			CTLFLAG_RD, &stats->u0.xe201.rx_fifo_overflow, 0,
+			"Input FIFO Overflow Drop");
+
+	tx_stats_node = SYSCTL_ADD_NODE(ctx,
+					SYSCTL_CHILDREN(stats_node),
+					OID_AUTO, "tx", CTLFLAG_RD,
+					NULL, "TX Ethernet Statistics");
+	tx_stat_list = SYSCTL_CHILDREN(tx_stats_node);
+
+	SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_pkts",
+			CTLFLAG_RD, &stats->tx.t_tx_pkts,
+			"Total Transmit Packets");
+	SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_bytes",
+			CTLFLAG_RD, &stats->tx.t_tx_bytes,
+			"Total Transmit Bytes");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_reqs",
+			CTLFLAG_RD, &stats->tx.t_tx_reqs, 0,
+			"Total Transmit Requests");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_stops",
+			CTLFLAG_RD, &stats->tx.t_tx_stops, 0,
+			"Total Transmit Stops");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_wrbs",
+			CTLFLAG_RD, &stats->tx.t_tx_wrbs, 0,
+			"Total Transmit WRB's");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_compl",
+			CTLFLAG_RD, &stats->tx.t_tx_compl, 0,
+			"Total Transmit Completions");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO,
+			"total_ipv6_ext_hdr_tx_drop",
+			CTLFLAG_RD, &stats->tx.t_ipv6_ext_hdr_tx_drop, 0,
+			"Total Transmit IPV6 Drops");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "pauseframes",
+			CTLFLAG_RD, &stats->u0.xe201.tx_pause_frames, 0,
+			"Pause Frames");
+	SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "controlframes",
+			CTLFLAG_RD, &stats->u0.xe201.tx_control_frames, 0,
+			"Tx Control Frames");
+
+	/* per-WQ counters under stats.tx.queueN */
+	for (i = 0; i < sc->nwqs; i++) {
+		sprintf(prefix, "queue%d",i);
+		queue_stats_node = SYSCTL_ADD_NODE(ctx,
+						SYSCTL_CHILDREN(tx_stats_node),
+						OID_AUTO, prefix, CTLFLAG_RD,
+						NULL, "Queue name");
+		queue_stats_list = SYSCTL_CHILDREN(queue_stats_node);
+
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_pkts",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts,
+			"Transmit Packets");
+		SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_bytes",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_bytes,
+			"Transmit Bytes");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_reqs",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0,
+			"Transmit Requests");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_stops",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_stops, 0,
+			"Transmit Stops");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_wrbs",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0,
+			"Transmit WRB's");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_compl",
+			CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_compl, 0,
+			"Transmit Completions");
+		SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO,
+			"ipv6_ext_hdr_tx_drop", CTLFLAG_RD,
+			&sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop, 0,
+			"Transmit IPV6 Ext Header Drop");
+
+	}
+	return;
+}
+
+
+/*
+ * Recompute the driver-level RX and TX totals by zeroing the
+ * aggregate counters and summing the per-queue statistics of every
+ * receive and work queue.
+ */
+void
+oce_refresh_queue_stats(POCE_SOFTC sc)
+{
+	struct oce_drv_stats *total;
+	int q;
+
+	total = &sc->oce_stats_info;
+
+	/* RX totals: accumulate each RQ's counters */
+	bzero(&total->rx, sizeof(struct oce_rx_stats));
+	for (q = 0; q < sc->nrqs; q++) {
+		struct oce_rx_stats *rs = &sc->rq[q]->rx_stats;
+
+		total->rx.t_rx_pkts       += rs->rx_pkts;
+		total->rx.t_rx_bytes      += rs->rx_bytes;
+		total->rx.t_rx_frags      += rs->rx_frags;
+		total->rx.t_rx_mcast_pkts += rs->rx_mcast_pkts;
+		total->rx.t_rx_ucast_pkts += rs->rx_ucast_pkts;
+		total->rx.t_rxcp_errs     += rs->rxcp_err;
+	}
+
+	/* TX totals: accumulate each WQ's counters */
+	bzero(&total->tx, sizeof(struct oce_tx_stats));
+	for (q = 0; q < sc->nwqs; q++) {
+		struct oce_tx_stats *ts = &sc->wq[q]->tx_stats;
+
+		total->tx.t_tx_reqs  += ts->tx_reqs;
+		total->tx.t_tx_stops += ts->tx_stops;
+		total->tx.t_tx_wrbs  += ts->tx_wrbs;
+		total->tx.t_tx_compl += ts->tx_compl;
+		total->tx.t_tx_bytes += ts->tx_bytes;
+		total->tx.t_tx_pkts  += ts->tx_pkts;
+		total->tx.t_ipv6_ext_hdr_tx_drop += ts->ipv6_ext_hdr_tx_drop;
+	}
+}
+
+
+
+/*
+ * Copy the XE201 (Lancer) physical-port statistics returned by the
+ * GET_PPORT_STATS mailbox command from the shared DMA response buffer
+ * (sc->stats_mem) into the driver's stats block
+ * (sc->oce_stats_info.u0.xe201).  This is a plain field-for-field
+ * snapshot: values are overwritten, not accumulated.
+ */
+static void
+copy_stats_to_sc_xe201(POCE_SOFTC sc)
+{
+	struct oce_xe201_stats *adapter_stats;
+	struct mbx_get_pport_stats *nic_mbx;
+	struct pport_stats *port_stats;
+
+	/* Mailbox response lives in the DMA buffer shared with the HW */
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_pport_stats);
+	port_stats = &nic_mbx->params.rsp.pps;
+	adapter_stats = &sc->oce_stats_info.u0.xe201;
+
+	/* Transmit-side counters */
+	adapter_stats->tx_pkts = port_stats->tx_pkts;
+	adapter_stats->tx_unicast_pkts = port_stats->tx_unicast_pkts;
+	adapter_stats->tx_multicast_pkts = port_stats->tx_multicast_pkts;
+	adapter_stats->tx_broadcast_pkts = port_stats->tx_broadcast_pkts;
+	adapter_stats->tx_bytes = port_stats->tx_bytes;
+	adapter_stats->tx_unicast_bytes = port_stats->tx_unicast_bytes;
+	adapter_stats->tx_multicast_bytes = port_stats->tx_multicast_bytes;
+	adapter_stats->tx_broadcast_bytes = port_stats->tx_broadcast_bytes;
+	adapter_stats->tx_discards = port_stats->tx_discards;
+	adapter_stats->tx_errors = port_stats->tx_errors;
+	adapter_stats->tx_pause_frames = port_stats->tx_pause_frames;
+	adapter_stats->tx_pause_on_frames = port_stats->tx_pause_on_frames;
+	adapter_stats->tx_pause_off_frames = port_stats->tx_pause_off_frames;
+	adapter_stats->tx_internal_mac_errors =
+	    port_stats->tx_internal_mac_errors;
+	adapter_stats->tx_control_frames = port_stats->tx_control_frames;
+	/* TX frame-size histogram */
+	adapter_stats->tx_pkts_64_bytes = port_stats->tx_pkts_64_bytes;
+	adapter_stats->tx_pkts_65_to_127_bytes =
+	    port_stats->tx_pkts_65_to_127_bytes;
+	adapter_stats->tx_pkts_128_to_255_bytes =
+	    port_stats->tx_pkts_128_to_255_bytes;
+	adapter_stats->tx_pkts_256_to_511_bytes =
+	    port_stats->tx_pkts_256_to_511_bytes;
+	adapter_stats->tx_pkts_512_to_1023_bytes =
+	    port_stats->tx_pkts_512_to_1023_bytes;
+	adapter_stats->tx_pkts_1024_to_1518_bytes =
+	    port_stats->tx_pkts_1024_to_1518_bytes;
+	adapter_stats->tx_pkts_1519_to_2047_bytes =
+	    port_stats->tx_pkts_1519_to_2047_bytes;
+	adapter_stats->tx_pkts_2048_to_4095_bytes =
+	    port_stats->tx_pkts_2048_to_4095_bytes;
+	adapter_stats->tx_pkts_4096_to_8191_bytes =
+	    port_stats->tx_pkts_4096_to_8191_bytes;
+	adapter_stats->tx_pkts_8192_to_9216_bytes =
+	    port_stats->tx_pkts_8192_to_9216_bytes;
+	adapter_stats->tx_lso_pkts = port_stats->tx_lso_pkts;
+	/* Receive-side counters */
+	adapter_stats->rx_pkts = port_stats->rx_pkts;
+	adapter_stats->rx_unicast_pkts = port_stats->rx_unicast_pkts;
+	adapter_stats->rx_multicast_pkts = port_stats->rx_multicast_pkts;
+	adapter_stats->rx_broadcast_pkts = port_stats->rx_broadcast_pkts;
+	adapter_stats->rx_bytes = port_stats->rx_bytes;
+	adapter_stats->rx_unicast_bytes = port_stats->rx_unicast_bytes;
+	adapter_stats->rx_multicast_bytes = port_stats->rx_multicast_bytes;
+	adapter_stats->rx_broadcast_bytes = port_stats->rx_broadcast_bytes;
+	adapter_stats->rx_unknown_protos = port_stats->rx_unknown_protos;
+	adapter_stats->rx_discards = port_stats->rx_discards;
+	adapter_stats->rx_errors = port_stats->rx_errors;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_alignment_errors = port_stats->rx_alignment_errors;
+	adapter_stats->rx_symbol_errors = port_stats->rx_symbol_errors;
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_pause_on_frames = port_stats->rx_pause_on_frames;
+	adapter_stats->rx_pause_off_frames = port_stats->rx_pause_off_frames;
+	adapter_stats->rx_frames_too_long = port_stats->rx_frames_too_long;
+	adapter_stats->rx_internal_mac_errors =
+	    port_stats->rx_internal_mac_errors;
+	adapter_stats->rx_undersize_pkts = port_stats->rx_undersize_pkts;
+	adapter_stats->rx_oversize_pkts = port_stats->rx_oversize_pkts;
+	adapter_stats->rx_fragment_pkts = port_stats->rx_fragment_pkts;
+	adapter_stats->rx_jabbers = port_stats->rx_jabbers;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_control_frames_unknown_opcode =
+	    port_stats->rx_control_frames_unknown_opcode;
+	/* RX error / drop classification */
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_out_of_range_errors =
+	    port_stats->rx_out_of_range_errors;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_vlan_mismatch_errors =
+	    port_stats->rx_vlan_mismatch_errors;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_dropped_invalid_tcp_length =
+	    port_stats->rx_dropped_invalid_tcp_length;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errors =
+	    port_stats->rx_ip_checksum_errors;
+	adapter_stats->rx_tcp_checksum_errors =
+	    port_stats->rx_tcp_checksum_errors;
+	adapter_stats->rx_udp_checksum_errors =
+	    port_stats->rx_udp_checksum_errors;
+	adapter_stats->rx_non_rss_pkts = port_stats->rx_non_rss_pkts;
+	adapter_stats->rx_ipv4_pkts = port_stats->rx_ipv4_pkts;
+	adapter_stats->rx_ipv6_pkts = port_stats->rx_ipv6_pkts;
+	adapter_stats->rx_ipv4_bytes = port_stats->rx_ipv4_bytes;
+	adapter_stats->rx_ipv6_bytes = port_stats->rx_ipv6_bytes;
+	adapter_stats->rx_nic_pkts = port_stats->rx_nic_pkts;
+	adapter_stats->rx_tcp_pkts = port_stats->rx_tcp_pkts;
+	adapter_stats->rx_iscsi_pkts = port_stats->rx_iscsi_pkts;
+	adapter_stats->rx_management_pkts = port_stats->rx_management_pkts;
+	adapter_stats->rx_switched_unicast_pkts =
+	    port_stats->rx_switched_unicast_pkts;
+	adapter_stats->rx_switched_multicast_pkts =
+	    port_stats->rx_switched_multicast_pkts;
+	adapter_stats->rx_switched_broadcast_pkts =
+	    port_stats->rx_switched_broadcast_pkts;
+	adapter_stats->num_forwards = port_stats->num_forwards;
+	adapter_stats->rx_fifo_overflow = port_stats->rx_fifo_overflow;
+	adapter_stats->rx_input_fifo_overflow =
+	    port_stats->rx_input_fifo_overflow;
+	adapter_stats->rx_drops_too_many_frags =
+	    port_stats->rx_drops_too_many_frags;
+	adapter_stats->rx_drops_invalid_queue =
+	    port_stats->rx_drops_invalid_queue;
+	adapter_stats->rx_drops_mtu = port_stats->rx_drops_mtu;
+	/* RX frame-size histogram */
+	adapter_stats->rx_pkts_64_bytes = port_stats->rx_pkts_64_bytes;
+	adapter_stats->rx_pkts_65_to_127_bytes =
+	    port_stats->rx_pkts_65_to_127_bytes;
+	adapter_stats->rx_pkts_128_to_255_bytes =
+	    port_stats->rx_pkts_128_to_255_bytes;
+	adapter_stats->rx_pkts_256_to_511_bytes =
+	    port_stats->rx_pkts_256_to_511_bytes;
+	adapter_stats->rx_pkts_512_to_1023_bytes =
+	    port_stats->rx_pkts_512_to_1023_bytes;
+	adapter_stats->rx_pkts_1024_to_1518_bytes =
+	    port_stats->rx_pkts_1024_to_1518_bytes;
+	adapter_stats->rx_pkts_1519_to_2047_bytes =
+	    port_stats->rx_pkts_1519_to_2047_bytes;
+	adapter_stats->rx_pkts_2048_to_4095_bytes =
+	    port_stats->rx_pkts_2048_to_4095_bytes;
+	adapter_stats->rx_pkts_4096_to_8191_bytes =
+	    port_stats->rx_pkts_4096_to_8191_bytes;
+	adapter_stats->rx_pkts_8192_to_9216_bytes =
+	    port_stats->rx_pkts_8192_to_9216_bytes;
+}
+
+
+
<br>
+/*
+ * Copy BE2 statistics (v0 mailbox layout) from the DMA response buffer
+ * into sc->oce_stats_info.u0.be.  Per-port MAC/RXF counters are taken
+ * from the rxf.port[] slot selected by sc->port_id; chip-wide RXF drop
+ * counters and the PMEM red-drop counter are copied as-is.
+ */
+static void
+copy_stats_to_sc_be2(POCE_SOFTC sc)
+{
+	struct oce_be_stats *adapter_stats;
+	struct oce_pmem_stats *pmem;
+	struct oce_rxf_stats_v0 *rxf_stats;
+	struct oce_port_rxf_stats_v0 *port_stats;
+	struct mbx_get_nic_stats_v0 *nic_mbx;
+	uint32_t port = sc->port_id;
+
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v0);
+	pmem = &nic_mbx->params.rsp.stats.pmem;
+	rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+	port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+	adapter_stats = &sc->oce_stats_info.u0.be;
+
+
+	/* Update stats */
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	adapter_stats->rxpp_fifo_overflow_drop =
+	    port_stats->rxpp_fifo_overflow_drop;
+	adapter_stats->rx_dropped_tcp_length =
+	    port_stats->rx_dropped_tcp_length;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_input_fifo_overflow_drop =
+	    port_stats->rx_input_fifo_overflow_drop;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_alignment_symbol_errors =
+	    port_stats->rx_alignment_symbol_errors;
+	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+
+	/*
+	 * NOTE(review): the v0 layout keeps jabber counters per port at
+	 * the rxf level; the slot is chosen here by sc->if_id while the
+	 * port[] stats above are indexed by sc->port_id — confirm this
+	 * mismatch is intentional.
+	 */
+	if (sc->if_id)
+		adapter_stats->jabber_events = rxf_stats->port1_jabber_events;
+	else
+		adapter_stats->jabber_events = rxf_stats->port0_jabber_events;
+
+	/* Chip-wide RXF drop counters */
+	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	adapter_stats->rx_drops_no_tpre_descr =
+	    rxf_stats->rx_drops_no_tpre_descr;
+	adapter_stats->rx_drops_too_many_frags =
+	    rxf_stats->rx_drops_too_many_frags;
+	adapter_stats->eth_red_drops = pmem->eth_red_drops;
+}
+
+
+/*
+ * Copy BE3 statistics (v1 mailbox layout) from the DMA response buffer
+ * into sc->oce_stats_info.u0.be.  Unlike the v0/BE2 layout, v1 carries
+ * jabber_events inside the per-port block, so no if_id selection is
+ * needed here.
+ */
+static void
+copy_stats_to_sc_be3(POCE_SOFTC sc)
+{
+	struct oce_be_stats *adapter_stats;
+	struct oce_pmem_stats *pmem;
+	struct oce_rxf_stats_v1 *rxf_stats;
+	struct oce_port_rxf_stats_v1 *port_stats;
+	struct mbx_get_nic_stats *nic_mbx;
+	uint32_t port = sc->port_id;
+
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats);
+	pmem = &nic_mbx->params.rsp.stats.pmem;
+	rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+	port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+	adapter_stats = &sc->oce_stats_info.u0.be;
+
+	/* Update stats (per-port counters) */
+	adapter_stats->pmem_fifo_overflow_drop =
+	    port_stats->pmem_fifo_overflow_drop;
+	adapter_stats->rx_priority_pause_frames =
+	    port_stats->rx_priority_pause_frames;
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	adapter_stats->rx_dropped_tcp_length =
+	    port_stats->rx_dropped_tcp_length;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_input_fifo_overflow_drop =
+	    port_stats->rx_input_fifo_overflow_drop;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_alignment_symbol_errors =
+	    port_stats->rx_alignment_symbol_errors;
+	adapter_stats->rxpp_fifo_overflow_drop =
+	    port_stats->rxpp_fifo_overflow_drop;
+	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+	adapter_stats->jabber_events = port_stats->jabber_events;
+
+	/* Chip-wide RXF drop counters */
+	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	adapter_stats->rx_drops_no_tpre_descr =
+	    rxf_stats->rx_drops_no_tpre_descr;
+	adapter_stats->rx_drops_too_many_frags =
+	    rxf_stats->rx_drops_too_many_frags;
+
+	adapter_stats->eth_red_drops = pmem->eth_red_drops;
+}
+
+
+/*
+ * Allocate the DMA buffer used for the statistics mailbox commands,
+ * sized for the response variant matching the ASIC family: the v0
+ * layout for BE2, the v1 layout for BE3, and the pport-stats layout
+ * otherwise (XE201/Lancer).
+ * Returns 0 on success, or the error code from oce_dma_alloc().
+ */
+int
+oce_stats_init(POCE_SOFTC sc)
+{
+	int rc = 0, sz;
+
+	if (IS_BE(sc)) {
+		if (sc->flags & OCE_FLAGS_BE2)
+			sz = sizeof(struct mbx_get_nic_stats_v0);
+		else
+			sz = sizeof(struct mbx_get_nic_stats);
+	} else
+		sz = sizeof(struct mbx_get_pport_stats);
+
+	rc = oce_dma_alloc(sc, sz, &sc->stats_mem, 0);
+
+	return rc;
+}
+
+
+/*
+ * Release the statistics DMA buffer allocated by oce_stats_init().
+ * Safe to call even if allocation failed (oce_dma_free() handles a
+ * NULL tag).
+ */
+void
+oce_stats_free(POCE_SOFTC sc)
+{
+
+	oce_dma_free(sc, &sc->stats_mem);
+
+}
+
+
+/*
+ * Fetch the latest hardware statistics using the mailbox command that
+ * matches the ASIC family, and on success copy them into the softc
+ * stats block via the appropriate copy_stats_to_sc_*() helper.
+ * `reset' is always 0 here — presumably requesting that the hardware
+ * counters not be cleared by the pport query (confirm against the
+ * mailbox API).
+ * Returns 0 on success, or the mailbox error code.
+ */
+int
+oce_refresh_nic_stats(POCE_SOFTC sc)
+{
+	int rc = 0, reset = 0;
+
+	if (IS_BE(sc)) {
+		if (sc->flags & OCE_FLAGS_BE2) {
+			rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
+			if (!rc)
+				copy_stats_to_sc_be2(sc);
+		} else {
+			rc = oce_mbox_get_nic_stats(sc, &sc->stats_mem);
+			if (!rc)
+				copy_stats_to_sc_be3(sc);
+		}
+
+	} else {
+		rc = oce_mbox_get_pport_stats(sc, &sc->stats_mem, reset);
+		if (!rc)
+			copy_stats_to_sc_xe201(sc);
+	}
+
+	return rc;
+}
diff --git a/sys/dev/oce/oce_util.c b/sys/dev/oce/oce_util.c
new file mode 100644
index 0000000..93b13bd
--- /dev/null
+++ b/sys/dev/oce/oce_util.c
@@ -0,0 +1,270 @@
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+
+/* $FreeBSD$ */
+
+
+#include "oce_if.h"
+
+static void oce_dma_map_ring(void *arg,
+ bus_dma_segment_t *segs,
+ int nseg,
+ int error);
+
+/**
+ * @brief Allocate coherent DMA memory as a single contiguous segment
+ * @param sc software handle to the device
+ * @param size bus size
+ * @param dma dma memory area (zeroed and filled in on success)
+ * @param flags additional bus_dmamap_load() flags (OR'd with BUS_DMA_NOWAIT)
+ * @returns 0 on success, error otherwise
+ */
+int
+oce_dma_alloc(POCE_SOFTC sc, bus_size_t size, POCE_DMA_MEM dma, int flags)
+{
+	int rc;
+
+
+	memset(dma, 0, sizeof(OCE_DMA_MEM));
+
+	/* 8-byte aligned tag, exactly one segment of `size' bytes */
+	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
+				8, 0,
+				BUS_SPACE_MAXADDR,
+				BUS_SPACE_MAXADDR,
+				NULL, NULL,
+				size, 1, size, 0, NULL, NULL, &dma->tag);
+
+	if (rc == 0) {
+		rc = bus_dmamem_alloc(dma->tag,
+				      &dma->ptr,
+				      BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+				      &dma->map);
+	}
+
+	/*
+	 * The load callback (oce_dma_map_addr) writes the physical
+	 * address into dma->paddr; a value of 0 after the load means
+	 * the callback reported an error.
+	 */
+	dma->paddr = 0;
+	if (rc == 0) {
+		rc = bus_dmamap_load(dma->tag,
+				     dma->map,
+				     dma->ptr,
+				     size,
+				     oce_dma_map_addr,
+				     &dma->paddr, flags | BUS_DMA_NOWAIT);
+		if (dma->paddr == 0)
+			rc = ENXIO;
+	}
+
+	/* Unwind partial allocation; oce_dma_free() tolerates NULL fields */
+	if (rc != 0)
+		oce_dma_free(sc, dma);
+
+	return rc;
+}
+
+/**
+ * @brief Free DMA memory allocated by oce_dma_alloc()
+ * @param sc software handle to the device
+ * @param dma dma area to free; fields are NULLed so the call is
+ *            idempotent and safe on partially-initialized areas
+ */
+void
+oce_dma_free(POCE_SOFTC sc, POCE_DMA_MEM dma)
+{
+	/* Nothing was created if the tag is absent */
+	if (dma->tag == NULL)
+		return;
+
+	if (dma->map != NULL) {
+		/* Finish any in-flight DMA before unloading the map */
+		bus_dmamap_sync(dma->tag, dma->map,
+				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(dma->tag, dma->map);
+	}
+
+	if (dma->ptr != NULL) {
+		bus_dmamem_free(dma->tag, dma->ptr, dma->map);
+		dma->map = NULL;
+		dma->ptr = NULL;
+	}
+
+	bus_dma_tag_destroy(dma->tag);
+	dma->tag = NULL;
+
+	return;
+}
+
+
+
+/**
+ * @brief bus_dmamap_load() callback: record the segment's bus address
+ * @param arg physical address pointer (bus_addr_t *)
+ * @param segs dma memory segments; only the first segment is used,
+ *             matching the single-segment tag built in oce_dma_alloc()
+ * @param nseg number of dma memory segments
+ * @param error if error, zeroes the physical address so the caller
+ *              can detect the failure (see oce_dma_alloc())
+ */
+void
+oce_dma_map_addr(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+	bus_addr_t *paddr = arg;
+
+	if (error)
+		*paddr = 0;
+	else
+		*paddr = segs->ds_addr;
+}
+
+
+
+/**
+ * @brief Destroy a ring buffer created by oce_create_ring_buffer()
+ * @param sc software handle to the device
+ * @param ring ring buffer; its DMA area is released and the ring
+ *             structure itself is freed
+ */
+
+void
+oce_destroy_ring_buffer(POCE_SOFTC sc, oce_ring_buffer_t *ring)
+{
+	oce_dma_free(sc, &ring->dma);
+	free(ring, M_DEVBUF);
+}
+
+
+
+/*
+ * Create a ring buffer of q_len items of item_size bytes each, backed
+ * by page-aligned DMA-able memory (tag allows up to 8 segments of at
+ * most 4096 bytes each).  The DMA map is NOT loaded here: dma.paddr is
+ * left 0 and the map is loaded later by oce_page_list() when the page
+ * addresses are handed to the hardware.
+ * Returns the new ring, or NULL on allocation failure.
+ */
+oce_ring_buffer_t *
+oce_create_ring_buffer(POCE_SOFTC sc,
+		uint32_t q_len, uint32_t item_size)
+{
+	uint32_t size = q_len * item_size;
+	int rc;
+	oce_ring_buffer_t *ring;
+
+
+	ring = malloc(sizeof(oce_ring_buffer_t), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (ring == NULL)
+		return NULL;
+
+	ring->item_size = item_size;
+	ring->num_items = q_len;
+
+	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
+				4096, 0,
+				BUS_SPACE_MAXADDR,
+				BUS_SPACE_MAXADDR,
+				NULL, NULL,
+				size, 8, 4096, 0, NULL, NULL, &ring->dma.tag);
+	if (rc)
+		goto fail;
+
+
+	rc = bus_dmamem_alloc(ring->dma.tag,
+			      &ring->dma.ptr,
+			      BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+			      &ring->dma.map);
+	if (rc)
+		goto fail;
+
+	/* Present zeroed contents to the device before first use */
+	bzero(ring->dma.ptr, size);
+	bus_dmamap_sync(ring->dma.tag, ring->dma.map,
+			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	ring->dma.paddr = 0;
+
+	return ring;
+
+fail:
+	/* oce_dma_free() copes with whatever was (not) allocated above */
+	oce_dma_free(sc, &ring->dma);
+	free(ring, M_DEVBUF);
+	ring = NULL;
+	return NULL;
+}
+
+
+
+/*
+ * Scratch argument passed to the oce_dma_map_ring() load callback:
+ * the callback fills paddrs[0..num_entries-1] with the hi/lo halves
+ * of each DMA segment address, up to max_entries.
+ */
+struct _oce_dmamap_paddr_table {
+	uint32_t max_entries;	/* capacity of paddrs[] */
+	uint32_t num_entries;	/* entries actually filled in */
+	struct phys_addr *paddrs;	/* caller-provided address list */
+};
+
+
+
+/**
+ * @brief bus_dmamap_load() callback: fill the physical address table
+ * @param arg dma map physical address table pointer
+ * @param segs dma memory segments
+ * @param nseg number of dma memory segments
+ * @param error maps only if error is 0
+ *
+ * NOTE(review): if nseg exceeds max_entries the table is silently left
+ * untouched (num_entries stays at whatever the caller initialized it
+ * to), so the caller only sees an empty page list — confirm callers
+ * treat 0 entries as a failure.
+ */
+static void
+oce_dma_map_ring(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+	int i;
+	struct _oce_dmamap_paddr_table *dpt =
+	    (struct _oce_dmamap_paddr_table *)arg;
+
+	if (error == 0) {
+		if (nseg <= dpt->max_entries) {
+			/* Split each segment address into hi/lo 32-bit halves */
+			for (i = 0; i < nseg; i++) {
+				dpt->paddrs[i].lo = ADDR_LO(segs[i].ds_addr);
+				dpt->paddrs[i].hi = ADDR_HI(segs[i].ds_addr);
+			}
+			dpt->num_entries = nseg;
+		}
+	}
+}
+
+
+
+/**
+ * @brief Load the bus dma map for a ring buffer and collect its pages
+ * @param ring ring buffer pointer
+ * @param pa_list physical address list, filled with up to 8 entries
+ * @returns number of entries written to pa_list (0 on failure)
+ *
+ * NOTE(review): max_entries is hard-coded to 8, matching the
+ * nsegments limit of the DMA tag built in oce_create_ring_buffer();
+ * keep the two in sync.  The return value of bus_dmamap_load() is not
+ * checked — on failure the callback leaves num_entries at 0, which is
+ * what the caller receives.
+ */
+uint32_t
+oce_page_list(oce_ring_buffer_t *ring, struct phys_addr *pa_list)
+{
+	struct _oce_dmamap_paddr_table dpt;
+
+	dpt.max_entries = 8;
+	dpt.num_entries = 0;
+	dpt.paddrs = pa_list;
+
+	bus_dmamap_load(ring->dma.tag,
+			ring->dma.map,
+			ring->dma.ptr,
+			ring->item_size * ring->num_items,
+			oce_dma_map_ring, &dpt, BUS_DMA_NOWAIT);
+
+	return dpt.num_entries;
+}
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 5dde58a..0cf237f 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -240,6 +240,7 @@ SUBDIR= ${_3dfx} \
${_nwfs} \
 	${_nxge} \
+	oce \
 	${_opensolaris} \
${_padlock} \
patm \
${_pccard} \
diff --git a/sys/modules/oce/Makefile b/sys/modules/oce/Makefile
new file mode 100644
index 0000000..6603744
--- /dev/null
+++ b/sys/modules/oce/Makefile
@@ -0,0 +1,15 @@
+#
+# $FreeBSD$
+#
+
+.PATH: ${.CURDIR}/../../dev/oce
+KMOD = oce
+SRCS = oce_if.c oce_hw.c oce_mbox.c oce_util.c oce_queue.c oce_sysctl.c
+#SRCS += ${ofw_bus_if} bus_if.h device_if.h pci_if.h opt_inet.h opt_inet6.h
+
+CFLAGS+= -I${.CURDIR}/../../dev/oce -DSMP
+
+# uncomment for lock profiling statistics
+#CFLAGS += -DLOCK_PROFILING
+
+.include <bsd.kmod.mk>
OpenPOWER on IntegriCloud