author    sam <sam@FreeBSD.org>  2007-06-29 22:47:18 +0000
committer sam <sam@FreeBSD.org>  2007-06-29 22:47:18 +0000
commit    6698e7dea2102dc38ec391c1e3a13cdf853699af (patch)
tree      0de44f561153f2e88c7ca00be99c20dcb9f2dbcc /sys/dev/nxge
parent    e513d4fafe87e70a369660930e133040cd2bb31b (diff)
Neterion Xframe 10GbE Server/Storage adapter driver.
The nxge driver provides support for Neterion Xframe-I and Xframe-II adapters.
The driver supports TCP Segmentation Offload (TSO/LSO), Jumbo frames (5 buffer
mode), Header separation (2 and 3 Receive buffer modes), VLAN, and Promiscuous
mode.

Submitted by:	Neterion
Reviewed by:	rwatson
Approved by:	re (kensmith)
Diffstat (limited to 'sys/dev/nxge')
-rw-r--r--  sys/dev/nxge/if_nxge.c                       3415
-rw-r--r--  sys/dev/nxge/if_nxge.h                        287
-rw-r--r--  sys/dev/nxge/include/build-version.h            6
-rw-r--r--  sys/dev/nxge/include/version.h                 53
-rw-r--r--  sys/dev/nxge/include/xge-debug.h              568
-rw-r--r--  sys/dev/nxge/include/xge-defs.h               149
-rw-r--r--  sys/dev/nxge/include/xge-list.h               203
-rw-r--r--  sys/dev/nxge/include/xge-os-pal.h             138
-rw-r--r--  sys/dev/nxge/include/xge-os-template.h        614
-rw-r--r--  sys/dev/nxge/include/xge-queue.h              185
-rw-r--r--  sys/dev/nxge/include/xgehal-channel.h         507
-rw-r--r--  sys/dev/nxge/include/xgehal-config.h         1012
-rw-r--r--  sys/dev/nxge/include/xgehal-device.h         1036
-rw-r--r--  sys/dev/nxge/include/xgehal-driver.h          322
-rw-r--r--  sys/dev/nxge/include/xgehal-event.h            85
-rw-r--r--  sys/dev/nxge/include/xgehal-fifo.h            363
-rw-r--r--  sys/dev/nxge/include/xgehal-mgmt.h            228
-rw-r--r--  sys/dev/nxge/include/xgehal-mgmtaux.h          95
-rw-r--r--  sys/dev/nxge/include/xgehal-mm.h              174
-rw-r--r--  sys/dev/nxge/include/xgehal-regs.h           1377
-rw-r--r--  sys/dev/nxge/include/xgehal-ring.h            473
-rw-r--r--  sys/dev/nxge/include/xgehal-stats.h          1601
-rw-r--r--  sys/dev/nxge/include/xgehal-types.h           626
-rw-r--r--  sys/dev/nxge/include/xgehal.h                  53
-rw-r--r--  sys/dev/nxge/xge-osdep.h                      758
-rw-r--r--  sys/dev/nxge/xgehal/xge-queue.c               460
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-channel-fp.c       299
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-channel.c          759
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-config.c           761
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-device-fp.c       1432
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-device.c          7247
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-driver.c           300
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-fifo-fp.c         1175
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-fifo.c             568
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-mgmt.c            1772
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-mgmtaux.c         1731
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-mm.c               436
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-ring-fp.c          852
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-ring.c             669
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-stats.c           1019
-rw-r--r--  sys/dev/nxge/xgell-version.h                   48
41 files changed, 33856 insertions, 0 deletions
diff --git a/sys/dev/nxge/if_nxge.c b/sys/dev/nxge/if_nxge.c
new file mode 100644
index 0000000..6687b4b
--- /dev/null
+++ b/sys/dev/nxge/if_nxge.c
@@ -0,0 +1,3415 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * if_nxge.c
+ *
+ * FreeBSD-specific initialization and routines
+ */
+
+#include <dev/nxge/if_nxge.h>
+#include <dev/nxge/xge-osdep.h>
+#include <net/if_arp.h>
+#include <sys/types.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
+
+int copyright_print = 0;
+int hal_driver_init_count = 0;
+size_t size = sizeof(int);
+
+/******************************************
+ * xge_probe
+ * Parameters: Device structure
+ * Return: BUS_PROBE_DEFAULT/ENXIO
+ * Description: Probes for Xframe device
+ ******************************************/
+int
+xge_probe(device_t dev)
+{
+ int devid = pci_get_device(dev);
+ int vendorid = pci_get_vendor(dev);
+ int retValue = ENXIO;
+
+ ENTER_FUNCTION
+
+ if(vendorid == XGE_PCI_VENDOR_ID) {
+ if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
+ (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
+ if(!copyright_print) {
+ PRINT_COPYRIGHT;
+ copyright_print = 1;
+ }
+ device_set_desc_copy(dev,
+ "Neterion Xframe 10 Gigabit Ethernet Adapter");
+ retValue = BUS_PROBE_DEFAULT;
+ }
+ }
+
+ LEAVE_FUNCTION
+ return retValue;
+}
+
+/******************************************
+ * xge_init_params
+ * Parameters: HAL device configuration
+ * structure, device pointer
+ * Return: None
+ * Description: Sets parameter values in
+ * xge_hal_device_config_t structure
+ ******************************************/
+void
+xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
+{
+ int index, revision;
+ device_t checkdev;
+
+ ENTER_FUNCTION
+
+#define SAVE_PARAM(to, what, value) to.what = value;
+
+#define GET_PARAM(str_kenv, to, param, hardcode) { \
+ static int param##__LINE__; \
+ if(testenv(str_kenv) == 1) { \
+ getenv_int(str_kenv, &param##__LINE__); \
+ } \
+ else { \
+ param##__LINE__ = hardcode; \
+ } \
+ SAVE_PARAM(to, param, param##__LINE__); \
+}
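+
+/*
+ * Note on the macro above: because "##" suppresses argument expansion,
+ * param##__LINE__ pastes the literal token __LINE__ rather than the
+ * current line number; GET_PARAM("hw.xge.latency_timer", (*dconfig),
+ * latency_timer, ...) therefore declares "static int
+ * latency_timer__LINE__". Each expansion lives in its own block scope,
+ * so the names never collide and the tunable lookup still works.
+ */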
+
+#define GET_PARAM_MAC(str_kenv, param, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).mac), param, hardcode);
+
+#define GET_PARAM_FIFO(str_kenv, param, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).fifo), param, hardcode);
+
+#define GET_PARAM_FIFO_QUEUE(str_kenv, param, qindex, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex]), param, hardcode);
+
+#define GET_PARAM_FIFO_QUEUE_TTI(str_kenv, param, qindex, tindex, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex].tti[tindex]), \
+ param, hardcode);
+
+#define GET_PARAM_RING(str_kenv, param, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).ring), param, hardcode);
+
+#define GET_PARAM_RING_QUEUE(str_kenv, param, qindex, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex]), param, hardcode);
+
+#define GET_PARAM_RING_QUEUE_RTI(str_kenv, param, qindex, hardcode) \
+ GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex].rti), param, \
+ hardcode);
+
+ dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
+ dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
+ dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
+ dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
+ dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
+ dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
+
+ GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
+ XGE_DEFAULT_LATENCY_TIMER);
+ GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
+ XGE_DEFAULT_MAX_SPLITS_TRANS);
+ GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
+ XGE_DEFAULT_MMRB_COUNT);
+ GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
+ XGE_DEFAULT_SHARED_SPLITS);
+ GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
+ XGE_DEFAULT_ISR_POLLING_CNT);
+ GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
+ stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
+
+ GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
+ XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
+ GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
+ XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
+ GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
+ XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
+ GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
+ XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
+ GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
+ XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
+ GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
+ mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
+ GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
+ mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
+
+ GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
+ XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
+ GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
+ XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
+ GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
+ XGE_DEFAULT_FIFO_MAX_FRAGS);
+
+ GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, 0,
+ XGE_DEFAULT_FIFO_QUEUE_INTR);
+ GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, 0,
+ XGE_DEFAULT_FIFO_QUEUE_MAX);
+ GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial, 0,
+ XGE_DEFAULT_FIFO_QUEUE_INITIAL);
+
+ for (index = 0; index < XGE_HAL_MAX_FIFO_TTI_NUM; index++) {
+ dconfig->fifo.queue[0].tti[index].enabled = 1;
+ dconfig->fifo.queue[0].configured = 1;
+
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
+ urange_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
+ urange_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
+ urange_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
+ ufc_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
+ ufc_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
+ ufc_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
+ ufc_d, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ci_en",
+ timer_ci_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ac_en",
+ timer_ac_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
+ GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_val_us",
+ timer_val_us, 0, index,
+ XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
+ }
+
+ GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
+ XGE_DEFAULT_RING_MEMBLOCK_SIZE);
+
+ GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
+ XGE_DEFAULT_RING_STRIP_VLAN_TAG);
+
+ for (index = 0; index < XGE_HAL_MIN_RING_NUM; index++) {
+ dconfig->ring.queue[index].max_frm_len = XGE_HAL_RING_USE_MTU;
+ dconfig->ring.queue[index].priority = 0;
+ dconfig->ring.queue[index].configured = 1;
+ dconfig->ring.queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
+
+ GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, index,
+ XGE_DEFAULT_RING_QUEUE_MAX);
+ GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial, index,
+ XGE_DEFAULT_RING_QUEUE_INITIAL);
+ GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb", dram_size_mb,
+ index, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
+ GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
+ indicate_max_pkts, index,
+ XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
+ GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
+ backoff_interval_us, index,
+ XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
+
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
+ timer_ac_en, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
+ timer_val_us, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a", urange_a,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b", urange_b,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
+ GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c", urange_c,
+ index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
+ }
+
+ if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
+ xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags);
+ xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
+ (PAGE_SIZE / 32));
+ xge_os_printf("Using fifo_max_frags = %d", (PAGE_SIZE / 32));
+ dconfig->fifo.max_frags = (PAGE_SIZE / 32);
+ }
+
+ checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
+ if(checkdev != NULL) {
+ /* Check for revision <= 0x12 */
+ revision = pci_read_config(checkdev,
+ xge_offsetof(xge_hal_pci_config_t, revision), 1);
+ if(revision <= 0x12) {
+ /* Set mmrb_count to 1k and use three split transactions */
+ dconfig->mmrb_count = 1;
+ dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
+ }
+ }
+
+#ifdef XGE_FEATURE_LRO
+ /* Update the LRO scatter-gather list size and maximum frame length */
+ dconfig->lro_sg_size = 20;
+ dconfig->lro_frm_len = 65536;
+#endif
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xge_driver_initialize
+ * Parameters: None
+ * Return: XGE_HAL_OK or HAL error status
+ * Description: Defines HAL-ULD callbacks
+ * and initializes the HAL driver
+ ******************************************/
+int
+xge_driver_initialize(void)
+{
+ xge_hal_uld_cbs_t uld_callbacks;
+ xge_hal_driver_config_t driver_config;
+ xge_hal_status_e status = XGE_HAL_OK;
+
+ ENTER_FUNCTION
+
+ /* Initialize HAL driver */
+ if(!hal_driver_init_count) {
+ xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
+
+ /*
+ * Initial and maximum size of the queue used to store the events
+ * like Link up/down (xge_hal_event_e)
+ */
+ driver_config.queue_size_initial = 1;
+ driver_config.queue_size_max = 4;
+
+ uld_callbacks.link_up = xgell_callback_link_up;
+ uld_callbacks.link_down = xgell_callback_link_down;
+ uld_callbacks.crit_err = xgell_callback_crit_err;
+ uld_callbacks.event = xgell_callback_event;
+
+ status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
+ if(status != XGE_HAL_OK) {
+ xge_os_printf("xgeX: Initialization failed (Status: %d)",
+ status);
+ goto xdi_out;
+ }
+ }
+ hal_driver_init_count = hal_driver_init_count + 1;
+
+ xge_hal_driver_debug_module_mask_set(0xffffffff);
+ xge_hal_driver_debug_level_set(XGE_TRACE);
+
+xdi_out:
+ LEAVE_FUNCTION
+ return status;
+}
+
+/******************************************
+ * Function: xge_media_init
+ * Parameters: Device pointer
+ * Return: None
+ * Description: Initializes, adds and sets
+ * media
+ ******************************************/
+void
+xge_media_init(device_t devc)
+{
+ xgelldev_t *lldev = (xgelldev_t *)device_get_softc(devc);
+
+ ENTER_FUNCTION
+
+ /* Initialize Media */
+ ifmedia_init(&lldev->xge_media, IFM_IMASK, xge_ifmedia_change,
+ xge_ifmedia_status);
+
+ /* Add supported media */
+ ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
+ 0, NULL);
+ ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+
+ /* Set media */
+ ifmedia_set(&lldev->xge_media, IFM_ETHER | IFM_AUTO);
+
+ LEAVE_FUNCTION
+}
+
+/*
+ * xge_pci_space_save
+ * Save PCI configuration space
+ * @dev Device structure
+ */
+void
+xge_pci_space_save(device_t dev)
+{
+ ENTER_FUNCTION
+
+ struct pci_devinfo *dinfo = NULL;
+
+ dinfo = device_get_ivars(dev);
+ xge_trace(XGE_TRACE, "Saving PCI configuration space");
+ pci_cfg_save(dev, dinfo, 0);
+
+ LEAVE_FUNCTION
+}
+
+/*
+ * xge_pci_space_restore
+ * Restore saved PCI configuration space
+ * @dev Device structure
+ */
+void
+xge_pci_space_restore(device_t dev)
+{
+ ENTER_FUNCTION
+
+ struct pci_devinfo *dinfo = NULL;
+
+ dinfo = device_get_ivars(dev);
+ xge_trace(XGE_TRACE, "Restoring PCI configuration space");
+ pci_cfg_restore(dev, dinfo);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xge_attach
+ * Parameters: Device structure
+ * Return: 0 on success or an errno
+ * value on failure
+ * Description: Connects the driver to the
+ * system if the probe routine returned success
+ ******************************************/
+int
+xge_attach(device_t dev)
+{
+ xge_hal_device_config_t *device_config;
+ xge_hal_ring_config_t *pRingConfig;
+ xge_hal_device_attr_t attr;
+ xgelldev_t *lldev;
+ xge_hal_device_t *hldev;
+ pci_info_t *pci_info;
+ struct ifnet *ifnetp;
+ char *mesg;
+ char *desc;
+ int rid;
+ int rid0;
+ int rid1;
+ int error;
+ u64 val64 = 0;
+ int retValue = 0;
+ int mode = 0;
+ int buffer_index, buffer_length, index;
+
+ ENTER_FUNCTION
+
+ device_config = xge_malloc(sizeof(xge_hal_device_config_t));
+ if(!device_config) {
+ xge_ctrace(XGE_ERR, "Malloc of device config failed");
+ retValue = ENOMEM;
+ goto attach_out_config;
+ }
+
+ lldev = (xgelldev_t *) device_get_softc(dev);
+ if(!lldev) {
+ xge_ctrace(XGE_ERR, "Adapter softc structure allocation failed");
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ lldev->device = dev;
+
+ /* Initialize mutex */
+ if(mtx_initialized(&lldev->xge_lock) == 0) {
+ mtx_init((&lldev->xge_lock), "xge", MTX_NETWORK_LOCK, MTX_DEF);
+ }
+
+ error = xge_driver_initialize();
+ if(error != XGE_HAL_OK) {
+ xge_ctrace(XGE_ERR, "Initializing driver failed");
+ freeResources(dev, 1);
+ retValue = ENXIO;
+ goto attach_out;
+ }
+
+ /* HAL device */
+ hldev = (xge_hal_device_t *)xge_malloc(sizeof(xge_hal_device_t));
+ if(!hldev) {
+ xge_trace(XGE_ERR, "Allocating memory for xge_hal_device_t failed");
+ freeResources(dev, 2);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ lldev->devh = hldev;
+
+ /* Our private structure */
+ pci_info = (pci_info_t*) xge_malloc(sizeof(pci_info_t));
+ if(!pci_info) {
+ xge_trace(XGE_ERR, "Allocating memory for pci_info_t failed");
+ freeResources(dev, 3);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ lldev->pdev = pci_info;
+ pci_info->device = dev;
+
+ /* Set bus master */
+ pci_enable_busmaster(dev);
+
+ /* Get virtual address for BAR0 */
+ rid0 = PCIR_BAR(0);
+ pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
+ RF_ACTIVE);
+ if(pci_info->regmap0 == NULL) {
+ xge_trace(XGE_ERR, "NULL handler for BAR0");
+ freeResources(dev, 4);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ attr.bar0 = (char *)pci_info->regmap0;
+
+ pci_info->bar0resource =
+ (busresource_t*) xge_malloc(sizeof(busresource_t));
+ if(pci_info->bar0resource == NULL) {
+ xge_trace(XGE_ERR, "Allocating memory for bar0resources failed");
+ freeResources(dev, 5);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ ((struct busresources *)(pci_info->bar0resource))->bus_tag =
+ rman_get_bustag(pci_info->regmap0);
+ ((struct busresources *)(pci_info->bar0resource))->bus_handle =
+ rman_get_bushandle(pci_info->regmap0);
+ ((struct busresources *)(pci_info->bar0resource))->bar_start_addr =
+ pci_info->regmap0;
+
+ /* Get virtual address for BAR1 */
+ rid1 = PCIR_BAR(2);
+ pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
+ RF_ACTIVE);
+ if(pci_info->regmap1 == NULL) {
+ xge_trace(XGE_ERR, "NULL handler for BAR1");
+ freeResources(dev, 6);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ attr.bar1 = (char *)pci_info->regmap1;
+
+ pci_info->bar1resource =
+ (busresource_t*) xge_malloc(sizeof(busresource_t));
+ if(pci_info->bar1resource == NULL) {
+ xge_trace(XGE_ERR, "Allocating memory for bar0resources failed");
+ freeResources(dev, 7);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+ ((struct busresources *)(pci_info->bar1resource))->bus_tag =
+ rman_get_bustag(pci_info->regmap1);
+ ((struct busresources *)(pci_info->bar1resource))->bus_handle =
+ rman_get_bushandle(pci_info->regmap1);
+ ((struct busresources *)(pci_info->bar1resource))->bar_start_addr =
+ pci_info->regmap1;
+
+ /* Save PCI config space */
+ xge_pci_space_save(dev);
+
+ attr.regh0 = (busresource_t *) pci_info->bar0resource;
+ attr.regh1 = (busresource_t *) pci_info->bar1resource;
+ attr.irqh = lldev->irqhandle;
+ attr.cfgh = pci_info;
+ attr.pdev = pci_info;
+
+ /* Initialize device configuration parameters */
+ xge_init_params(device_config, dev);
+
+ /* Initialize HAL device */
+ error = xge_hal_device_initialize(hldev, &attr, device_config);
+ if(error != XGE_HAL_OK) {
+ switch(error) {
+ case XGE_HAL_ERR_DRIVER_NOT_INITIALIZED:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_DRIVER_NOT_INITIALIZED");
+ break;
+
+ case XGE_HAL_ERR_OUT_OF_MEMORY:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_OUT_OF_MEMORY");
+ break;
+
+ case XGE_HAL_ERR_BAD_SUBSYSTEM_ID:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_BAD_SUBSYSTEM_ID");
+ break;
+
+ case XGE_HAL_ERR_INVALID_MAC_ADDRESS:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_INVALID_MAC_ADDRESS");
+ break;
+
+ case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING:
+ xge_trace(XGE_ERR, "XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING");
+ break;
+
+ case XGE_HAL_ERR_SWAPPER_CTRL:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_SWAPPER_CTRL");
+ break;
+
+ case XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT:
+ xge_trace(XGE_ERR, "XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT");
+ break;
+ }
+ xge_trace(XGE_ERR, "Initializing HAL device failed (error: %d)\n",
+ error);
+ freeResources(dev, 8);
+ retValue = ENXIO;
+ goto attach_out;
+ }
+
+ desc = (char *) malloc(100, M_DEVBUF, M_NOWAIT);
+ if(desc == NULL) {
+ retValue = ENOMEM;
+ }
+ else {
+ sprintf(desc, "%s (Rev %d) Driver v%s \n%s: Serial Number: %s ",
+ hldev->vpd_data.product_name, hldev->revision, DRIVER_VERSION,
+ device_get_nameunit(dev), hldev->vpd_data.serial_num);
+ printf("%s: Xframe%s %s\n", device_get_nameunit(dev),
+ ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I": "II"),
+ desc);
+ free(desc, M_DEVBUF);
+
+ }
+
+ if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
+ error = xge_hal_mgmt_reg_read(hldev, 0,
+ xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
+ if(error != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Error for getting bus speed");
+ }
+ mesg = (char *) xge_malloc(64); /* room for name + bus-width text */
+ if(mesg == NULL) {
+ freeResources(dev, 8);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+
+ sprintf(mesg, "%s: Device is on %s bit", device_get_nameunit(dev),
+ (val64 & BIT(8)) ? "32":"64");
+
+ mode = (u8)((val64 & vBIT(0xF, 0, 4)) >> 60);
+ switch(mode) {
+ case 0x00: xge_os_printf("%s PCI 33MHz bus", mesg); break;
+ case 0x01: xge_os_printf("%s PCI 66MHz bus", mesg); break;
+ case 0x02: xge_os_printf("%s PCIX(M1) 66MHz bus", mesg); break;
+ case 0x03: xge_os_printf("%s PCIX(M1) 100MHz bus", mesg); break;
+ case 0x04: xge_os_printf("%s PCIX(M1) 133MHz bus", mesg); break;
+ case 0x05: xge_os_printf("%s PCIX(M2) 133MHz bus", mesg); break;
+ case 0x06: xge_os_printf("%s PCIX(M2) 200MHz bus", mesg); break;
+ case 0x07: xge_os_printf("%s PCIX(M2) 266MHz bus", mesg); break;
+ }
+ free(mesg, M_DEVBUF);
+ }
+
+ xge_hal_device_private_set(hldev, lldev);
+
+ error = xge_interface_setup(dev);
+ if(error != 0) {
+ retValue = error;
+ goto attach_out;
+ }
+
+ ifnetp = lldev->ifnetp;
+ ifnetp->if_mtu = device_config->mtu;
+
+ xge_media_init(dev);
+
+ /* Interrupt */
+ rid = 0;
+ lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if(lldev->irq == NULL) {
+ xge_trace(XGE_ERR, "NULL handler for IRQ");
+ freeResources(dev, 10);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+
+ /* Associate interrupt handler with the device */
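+ /*
+ * (The separate filter argument to bus_setup_intr() first appeared
+ * with the FreeBSD 7.0 interrupt filtering work, hence the
+ * __FreeBSD_version guard around xge_intr_filter below.)
+ */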
+ error = bus_setup_intr(dev, lldev->irq, INTR_TYPE_NET | INTR_MPSAFE,
+#if __FreeBSD_version > 700030
+ xge_intr_filter,
+#endif
+ (void *)xge_intr, lldev, &lldev->irqhandle);
+ if(error != 0) {
+ xge_trace(XGE_ERR,
+ "Associating interrupt handler with device failed");
+ freeResources(dev, 11);
+ retValue = ENXIO;
+ goto attach_out;
+ }
+
+ /* Create DMA tags */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* Parent */
+ PAGE_SIZE, /* Alignment */
+ 0, /* Bounds */
+ BUS_SPACE_MAXADDR, /* Low Address */
+ BUS_SPACE_MAXADDR, /* High Address */
+ NULL, /* Filter Function */
+ NULL, /* Filter Function Arguments */
+ MCLBYTES * MAX_SEGS, /* Maximum Size */
+ MAX_SEGS, /* Number of Segments */
+ MCLBYTES, /* Maximum Segment Size */
+ BUS_DMA_ALLOCNOW, /* Flags */
+ NULL, /* Lock Function */
+ NULL, /* Lock Function Arguments */
+ (&lldev->dma_tag_tx)); /* DMA Tag */
+ if(error != 0) {
+ xge_trace(XGE_ERR, "Tx DMA tag creation failed");
+ freeResources(dev, 12);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* Parent */
+ PAGE_SIZE, /* Alignment */
+ 0, /* Bounds */
+ BUS_SPACE_MAXADDR, /* Low Address */
+ BUS_SPACE_MAXADDR, /* High Address */
+ NULL, /* Filter Function */
+ NULL, /* Filter Function Arguments */
+ MJUMPAGESIZE, /* Maximum Size */
+ 1, /* Number of Segments */
+ MJUMPAGESIZE, /* Maximum Segment Size */
+ BUS_DMA_ALLOCNOW, /* Flags */
+ NULL, /* Lock Function */
+ NULL, /* Lock Function Arguments */
+ (&lldev->dma_tag_rx)); /* DMA Tag */
+
+ if(error != 0) {
+ xge_trace(XGE_ERR, "Rx DMA tag creation failed");
+ freeResources(dev, 13);
+ retValue = ENOMEM;
+ goto attach_out;
+ }
+
+ /* Updating lldev->buffer_mode parameter */
+ pRingConfig = &(hldev->config.ring);
+
+ if((device_config->mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= PAGE_SIZE) {
+#if defined(XGE_FEATURE_BUFFER_MODE_3)
+ xge_os_printf("%s: 3 Buffer Mode Enabled",
+ device_get_nameunit(dev));
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ }
+ pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[2] = device_config->mtu;
+ lldev->rxd_mbuf_cnt = 3;
+#else
+#if defined(XGE_FEATURE_BUFFER_MODE_2)
+ xge_os_printf("%s: 2 Buffer Mode Enabled",
+ device_get_nameunit(dev));
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ }
+ pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = device_config->mtu;
+ lldev->rxd_mbuf_cnt = 2;
+#else
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
+ lldev->rxd_mbuf_len[0] = device_config->mtu;
+ lldev->rxd_mbuf_cnt = 1;
+#endif
+#endif
+ }
+ else {
+ xge_os_printf("%s: 5 Buffer Mode Enabled",
+ device_get_nameunit(dev));
+ xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
+ }
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
+ buffer_length = device_config->mtu;
+ buffer_index = 2;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
+
+ while(buffer_length > PAGE_SIZE) {
+ buffer_length -= PAGE_SIZE;
+ lldev->rxd_mbuf_len[buffer_index] = PAGE_SIZE;
+ buffer_index++;
+ }
+
+ BUFALIGN(buffer_length);
+
+ lldev->rxd_mbuf_len[buffer_index] = buffer_length;
+ /* Buffers 0 through buffer_index are in use */
+ lldev->rxd_mbuf_cnt = buffer_index + 1;
+ }
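+
+ /*
+ * For illustration (numbers assumed, not part of this commit): with a
+ * 9600-byte MTU and 4KB pages, the split above yields rxd_mbuf_len =
+ * { XGE_HAL_MAC_HEADER_MAX_SIZE, XGE_HAL_TCPIP_HEADER_MAX_SIZE, 4096,
+ * 4096, 1408 rounded up by BUFALIGN } spread over five Rx buffers.
+ */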
+
+#ifdef XGE_FEATURE_LRO
+ xge_os_printf("%s: LRO (Large Receive Offload) Enabled",
+ device_get_nameunit(dev));
+#endif
+
+#ifdef XGE_FEATURE_TSO
+ xge_os_printf("%s: TSO (TCP Segmentation Offload) enabled",
+ device_get_nameunit(dev));
+#endif
+
+attach_out:
+ free(device_config, M_DEVBUF);
+attach_out_config:
+ LEAVE_FUNCTION
+ return retValue;
+}
+
+/******************************************
+ * freeResources
+ * Parameters: Device structure, error
+ * code (selects which resources to free)
+ * Return: None
+ * Description: Frees allocated resources
+ ******************************************/
+void
+freeResources(device_t dev, int error)
+{
+ xgelldev_t *lldev;
+ pci_info_t *pci_info;
+ xge_hal_device_t *hldev;
+ int rid, status;
+
+ ENTER_FUNCTION
+
+ /* LL Device */
+ lldev = (xgelldev_t *) device_get_softc(dev);
+ pci_info = lldev->pdev;
+
+ /* HAL Device */
+ hldev = lldev->devh;
+
+ switch(error) {
+ case 0:
+ status = bus_dma_tag_destroy(lldev->dma_tag_rx);
+ if(status) {
+ xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
+ }
+
+ case 13:
+ status = bus_dma_tag_destroy(lldev->dma_tag_tx);
+ if(status) {
+ xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
+ }
+
+ case 12:
+ /* Teardown interrupt handler - device association */
+ bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
+
+ case 11:
+ /* Release IRQ */
+ bus_release_resource(dev, SYS_RES_IRQ, 0, lldev->irq);
+
+ case 10:
+ /* Media */
+ ifmedia_removeall(&lldev->xge_media);
+
+ /* Detach Ether */
+ ether_ifdetach(lldev->ifnetp);
+ if_free(lldev->ifnetp);
+
+ xge_hal_device_private_set(hldev, NULL);
+ xge_hal_device_disable(hldev);
+
+ case 9:
+ /* HAL Device */
+ xge_hal_device_terminate(hldev);
+
+ case 8:
+ /* Restore PCI configuration space */
+ xge_pci_space_restore(dev);
+
+ /* Free bar1resource */
+ free(pci_info->bar1resource, M_DEVBUF);
+
+ case 7:
+ /* Release BAR1 */
+ rid = PCIR_BAR(2);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid,
+ pci_info->regmap1);
+
+ case 6:
+ /* Free bar0resource */
+ free(pci_info->bar0resource, M_DEVBUF);
+
+ case 5:
+ /* Release BAR0 */
+ rid = PCIR_BAR(0);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid,
+ pci_info->regmap0);
+
+ case 4:
+ /* Disable Bus Master */
+ pci_disable_busmaster(dev);
+
+ /* Free pci_info_t */
+ lldev->pdev = NULL;
+ free(pci_info, M_DEVBUF);
+
+ case 3:
+ /* Free device configuration struct and HAL device */
+ free(hldev, M_DEVBUF);
+
+ case 2:
+ /* Terminate HAL driver */
+ hal_driver_init_count = hal_driver_init_count - 1;
+ if(!hal_driver_init_count) {
+ xge_hal_driver_terminate();
+ }
+
+ case 1:
+ if(mtx_initialized(&lldev->xge_lock) != 0) {
+ mtx_destroy(&lldev->xge_lock);
+ }
+ }
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xge_detach
+ * Parameters: Device structure
+ * Return: 0
+ * Description: Detaches the driver from the
+ * kernel subsystem.
+ ******************************************/
+int
+xge_detach(device_t dev)
+{
+ xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
+
+ ENTER_FUNCTION
+
+ mtx_lock(&lldev->xge_lock);
+ lldev->in_detach = 1;
+ xge_stop(lldev);
+ mtx_unlock(&lldev->xge_lock);
+
+ freeResources(dev, 0);
+
+ LEAVE_FUNCTION
+
+ return 0;
+}
+
+/******************************************
+ * xge_shutdown
+ * Parameters: Device structure
+ * Return: 0
+ * Description: Gets called when the system
+ * is about to be shutdown.
+ ******************************************/
+int
+xge_shutdown(device_t dev)
+{
+ xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev);
+
+ ENTER_FUNCTION
+ mtx_lock(&lldev->xge_lock);
+ xge_stop(lldev);
+ mtx_unlock(&lldev->xge_lock);
+ LEAVE_FUNCTION
+ return 0;
+}
+
+/******************************************
+ * Function: xge_interface_setup
+ * Parameters: Device pointer
+ * Return: 0/ENXIO/ENOMEM
+ * Description: Sets up the interface
+ * through ifnet pointer
+ ******************************************/
+int
+xge_interface_setup(device_t dev)
+{
+ u8 mcaddr[ETHER_ADDR_LEN];
+ xge_hal_status_e status_code;
+ xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
+ struct ifnet *ifnetp;
+ xge_hal_device_t *hldev = lldev->devh;
+ int retValue = 0;
+
+ ENTER_FUNCTION
+
+ /* Get the MAC address of the device */
+ status_code = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
+ if(status_code != XGE_HAL_OK) {
+ switch(status_code) {
+ case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING:
+ xge_trace(XGE_ERR,
+ "Failed to retrieve MAC address (timeout)");
+ break;
+
+ case XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES:
+ xge_trace(XGE_ERR, "Invalid MAC address index");
+ break;
+
+ default:
+ xge_trace(XGE_TRACE, "Default Case");
+ break;
+ }
+ freeResources(dev, 9);
+ retValue = ENXIO;
+ goto ifsetup_out;
+ }
+
+ /* Get interface ifnet structure for this Ether device */
+ ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
+ if(ifnetp == NULL) {
+ xge_trace(XGE_ERR, "Allocating/getting ifnet structure failed");
+ freeResources(dev, 9);
+ retValue = ENOMEM;
+ goto ifsetup_out;
+ }
+
+ /* Initialize interface ifnet structure */
+ if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
+ ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
+
+ /*
+ * TODO: if_baudrate overflows for values above 2Gbps, so the true
+ * 10Gbps rate cannot be set; the value has no measurable effect on
+ * performance in any case.
+ */
+ ifnetp->if_baudrate = IF_Gbps(2);
+ ifnetp->if_init = xge_init;
+ ifnetp->if_softc = lldev;
+ ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifnetp->if_ioctl = xge_ioctl;
+ ifnetp->if_start = xge_send;
+
+ /* TODO: Check and assign optimal value */
+ ifnetp->if_snd.ifq_maxlen = IFQ_MAXLEN;
+
+ ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
+ IFCAP_HWCSUM;
+
+ ifnetp->if_capenable = ifnetp->if_capabilities;
+
+#ifdef XGE_FEATURE_TSO
+ ifnetp->if_capabilities |= IFCAP_TSO4;
+ ifnetp->if_capenable |= IFCAP_TSO4;
+#endif
+
+ /* Attach the interface */
+ ether_ifattach(ifnetp, mcaddr);
+
+ifsetup_out:
+ LEAVE_FUNCTION
+
+ return retValue;
+}
+
+/******************************************
+ * xgell_callback_link_up
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer as void *
+ * Return: None
+ * Description: Called by HAL to notify
+ * hardware link up state change
+ ******************************************/
+void
+xgell_callback_link_up(void *userdata)
+{
+ xgelldev_t *lldev = (xgelldev_t *)userdata;
+ struct ifnet *ifnetp = lldev->ifnetp;
+
+ ENTER_FUNCTION
+
+ ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_link_state_change(ifnetp, LINK_STATE_UP);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xgell_callback_link_down
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer as void *
+ * Return: None
+ * Description: Called by HAL to notify
+ * hardware link down state change
+ ******************************************/
+void
+xgell_callback_link_down(void *userdata)
+{
+ xgelldev_t *lldev = (xgelldev_t *)userdata;
+ struct ifnet *ifnetp = lldev->ifnetp;
+
+ ENTER_FUNCTION
+
+ ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
+ if_link_state_change(ifnetp, LINK_STATE_DOWN);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xgell_callback_crit_err
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer as void *, event
+ * type, serious error data
+ * Return: None
+ * Description: Called by HAL on serious
+ * error event
+ ******************************************/
+void
+xgell_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
+{
+ ENTER_FUNCTION
+
+ xge_trace(XGE_ERR, "Critical Error");
+ xgell_reset(userdata);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * xgell_callback_event
+ * Parameters: Queue item
+ * Return: None
+ * Description: Called by HAL for events
+ * unknown to the HAL itself.
+ ******************************************/
+void
+xgell_callback_event(xge_queue_item_t *item)
+{
+ xgelldev_t *lldev = NULL;
+ xge_hal_device_t *hldev = NULL;
+ struct ifnet *ifnetp = NULL;
+
+ ENTER_FUNCTION
+
+ hldev = item->context;
+ lldev = xge_hal_device_private(hldev);
+ ifnetp = lldev->ifnetp;
+
+ if(item->event_type == XGE_LL_EVENT_TRY_XMIT_AGAIN) {
+ if(lldev->initialized) {
+ if(xge_hal_channel_dtr_count(lldev->fifo_channel_0) > 0) {
+ ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ }
+ else {
+ /* try next time */
+ xge_queue_produce_context(
+ xge_hal_device_queue(lldev->devh),
+ XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
+ }
+ }
+ }
+ else if(item->event_type == XGE_LL_EVENT_DEVICE_RESETTING) {
+ xgell_reset(item->context);
+ }
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function: xge_ifmedia_change
+ * Parameters: Pointer to ifnet structure
+ * Return: 0 for success, EINVAL if media
+ * type is not IFM_ETHER.
+ * Description: Media change driver callback
+ ******************************************/
+int
+xge_ifmedia_change(struct ifnet *ifnetp)
+{
+ xgelldev_t *lldev = ifnetp->if_softc;
+ struct ifmedia *ifmediap = &lldev->xge_media;
+
+ ENTER_FUNCTION
+ LEAVE_FUNCTION
+
+ return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
+}
+
+/******************************************
+ * Function: xge_ifmedia_status
+ * Parameters: Pointer to ifnet structure
+ * ifmediareq structure pointer
+ * through which status of media
+ * will be returned.
+ * Return: None
+ * Description: Media status driver callback
+ ******************************************/
+void
+xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
+{
+ xge_hal_status_e status;
+ u64 regvalue;
+ xgelldev_t *lldev = ifnetp->if_softc;
+ xge_hal_device_t *hldev = lldev->devh;
+
+ ENTER_FUNCTION
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ status = xge_hal_mgmt_reg_read(hldev, 0,
+ xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
+ if(status != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Getting adapter status failed");
+ return;
+ }
+
+ if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
+ if_link_state_change(ifnetp, LINK_STATE_UP);
+ }
+ else {
+ if_link_state_change(ifnetp, LINK_STATE_DOWN);
+ }
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function: xge_ioctl
+ * Parameters: Pointer to ifnet structure,
+ * command -> indicates requests,
+ * data -> passed values (if any)
+ * Return: 0 on success or an errno value
+ * Description: IOCTL entry point. Called
+ * when the user wants to
+ * configure the interface
+ ******************************************/
+int
+xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
+{
+ struct ifmedia *ifmediap;
+ xge_hal_stats_hw_info_t *hw_stats;
+ xge_hal_pci_config_t *pci_conf;
+ xge_hal_device_config_t *device_conf;
+ xge_hal_stats_sw_err_t *tcode;
+ xge_hal_stats_device_info_t *intr;
+ bar0reg_t *reg;
+ xge_hal_status_e status_code;
+ xge_hal_device_t *hldev;
+ void *regInfo;
+ u64 value;
+ u64 offset;
+ char *pAccess;
+ char *version;
+ int retValue = 0, index = 0, buffer_mode = 0;
+ struct ifreq *ifreqp = (struct ifreq *) data;
+ xgelldev_t *lldev = ifnetp->if_softc;
+
+ ifmediap = &lldev->xge_media;
+ hldev = lldev->devh;
+
+ if(lldev->in_detach) {
+ return retValue;
+ }
+
+ switch(command) {
+ /* Set/Get ifnet address */
+ case SIOCSIFADDR:
+ case SIOCGIFADDR:
+ ether_ioctl(ifnetp, command, data);
+ break;
+
+ /* Set ifnet MTU */
+ case SIOCSIFMTU:
+ retValue = changeMtu(lldev, ifreqp->ifr_mtu);
+ break;
+
+ /* Set ifnet flags */
+ case SIOCSIFFLAGS:
+ mtx_lock(&lldev->xge_lock);
+ if(ifnetp->if_flags & IFF_UP) {
+ /* Link status is UP */
+ if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
+ xge_init_locked(lldev);
+ }
+ xge_disable_promisc(lldev);
+ xge_enable_promisc(lldev);
+ }
+ else {
+ /* Link status is DOWN */
+ /* If device is in running, make it down */
+ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
+ xge_stop(lldev);
+ }
+ }
+ mtx_unlock(&lldev->xge_lock);
+ break;
+
+ /* Add/delete multicast address */
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
+ xge_setmulti(lldev);
+ }
+ break;
+
+ /* Set/Get net media */
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
+ break;
+
+ /* Set capabilities */
+ case SIOCSIFCAP:
+ mtx_lock(&lldev->xge_lock);
+ int mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
+#if defined(__FreeBSD_version) && (__FreeBSD_version >= 700026)
+ if(mask & IFCAP_TSO4) {
+ if(ifnetp->if_capenable & IFCAP_TSO4) {
+ ifnetp->if_capenable &= ~IFCAP_TSO4;
+ ifnetp->if_hwassist &= ~CSUM_TSO;
+ }
+
+ /* Enable TSO only if TXCSUM is enabled */
+ if(ifnetp->if_capenable & IFCAP_TXCSUM) {
+ ifnetp->if_capenable |= IFCAP_TSO4;
+ ifnetp->if_hwassist |= CSUM_TSO;
+ }
+ }
+#endif
+ mtx_unlock(&lldev->xge_lock);
+ break;
+
+ /* Custom IOCTL 0 :
+ * Used to get Statistics & PCI configuration through application */
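+ /*
+ * A minimal userland sketch of the calling convention (names such as
+ * sock_fd and "nxge0" are illustrative, not part of this commit):
+ *
+ * char buf[sizeof(xge_hal_stats_hw_info_t)];
+ * struct ifreq ifr;
+ *
+ * buf[0] = XGE_QUERY_STATS;
+ * strlcpy(ifr.ifr_name, "nxge0", sizeof(ifr.ifr_name));
+ * ifr.ifr_data = (caddr_t) buf;
+ * ioctl(sock_fd, SIOCGPRIVATE_0, &ifr);
+ *
+ * The first byte behind ifr_data selects the query; results are
+ * copied back out into the same buffer.
+ */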
+ case SIOCGPRIVATE_0:
+ pAccess = (char*) ifreqp->ifr_data;
+ if(*pAccess == XGE_QUERY_STATS) {
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_stats_hw(hldev, &hw_stats);
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Getting statistics failed (Status: %d)",
+ status_code);
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ copyout(hw_stats, ifreqp->ifr_data,
+ sizeof(xge_hal_stats_hw_info_t));
+ mtx_unlock(&lldev->xge_lock);
+ }
+ else if(*pAccess == XGE_QUERY_PCICONF) {
+ pci_conf = xge_malloc(sizeof(xge_hal_pci_config_t));
+ if(pci_conf == NULL) {
+ return(ENOMEM);
+ }
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_pci_config(hldev, pci_conf,
+ sizeof(xge_hal_pci_config_t));
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Getting PCIconfiguration failed (Status: %d)",
+ status_code);
+ mtx_unlock(&lldev->xge_lock);
+ free(pci_conf, M_DEVBUF);
+ retValue = EINVAL;
+ break;
+ }
+ copyout(pci_conf, ifreqp->ifr_data,
+ sizeof(xge_hal_pci_config_t));
+ mtx_unlock(&lldev->xge_lock);
+ free(pci_conf, M_DEVBUF);
+ }
+ else if(*pAccess == XGE_QUERY_INTRSTATS) {
+ intr = xge_malloc(sizeof(xge_hal_stats_device_info_t));
+ if(intr == NULL) {
+ return(ENOMEM);
+ }
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_device_stats(hldev, intr,
+ sizeof(xge_hal_stats_device_info_t));
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Getting intr statistics failed (Status: %d)",
+ status_code);
+ mtx_unlock(&lldev->xge_lock);
+ free(intr, M_DEVBUF);
+ retValue = EINVAL;
+ break;
+ }
+ copyout(intr, ifreqp->ifr_data,
+ sizeof(xge_hal_stats_device_info_t));
+ mtx_unlock(&lldev->xge_lock);
+ free(intr, M_DEVBUF);
+ }
+ else if(*pAccess == XGE_QUERY_TCODE) {
+ tcode = xge_malloc(sizeof(xge_hal_stats_sw_err_t));
+ if(tcode == NULL) {
+ return(ENOMEM);
+ }
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_sw_stats(hldev, tcode,
+ sizeof(xge_hal_stats_sw_err_t));
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Getting tcode statistics failed (Status: %d)",
+ status_code);
+ mtx_unlock(&lldev->xge_lock);
+ free(tcode, M_DEVBUF);
+ retValue = EINVAL;
+ break;
+ }
+ copyout(tcode, ifreqp->ifr_data,
+ sizeof(xge_hal_stats_sw_err_t));
+ mtx_unlock(&lldev->xge_lock);
+ free(tcode, M_DEVBUF);
+ }
+ else if(*pAccess == XGE_READ_VERSION) {
+ version = xge_malloc(BUFFER_SIZE);
+ if(version == NULL) {
+ return(ENOMEM);
+ }
+ mtx_lock(&lldev->xge_lock);
+ strcpy(version, DRIVER_VERSION);
+ copyout(version, ifreqp->ifr_data, BUFFER_SIZE);
+ mtx_unlock(&lldev->xge_lock);
+ free(version, M_DEVBUF);
+ }
+ else if(*pAccess == XGE_QUERY_DEVCONF) {
+ device_conf = xge_malloc(sizeof(xge_hal_device_config_t));
+ if(device_conf == NULL) {
+ return(ENOMEM);
+ }
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_device_config(hldev, device_conf,
+ sizeof(xge_hal_device_config_t));
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Getting devconfig failed (Status: %d)",
+ status_code);
+ mtx_unlock(&lldev->xge_lock);
+ free(device_conf, M_DEVBUF);
+ retValue = EINVAL;
+ break;
+ }
+ if(copyout(device_conf, ifreqp->ifr_data,
+ sizeof(xge_hal_device_config_t)) != 0) {
+ xge_trace(XGE_ERR, "Device configuration copyout erro");
+ }
+ mtx_unlock(&lldev->xge_lock);
+ free(device_conf, M_DEVBUF);
+ }
+ else if(*pAccess == XGE_QUERY_BUFFER_MODE) {
+ buffer_mode = lldev->buffer_mode;
+ if(copyout(&buffer_mode, ifreqp->ifr_data,
+ sizeof(int)) != 0) {
+ xge_trace(XGE_ERR, "Error with copyout of buffermode");
+ retValue = EINVAL;
+ }
+ }
+ else if((*pAccess == XGE_SET_BUFFER_MODE_1) ||
+ (*pAccess == XGE_SET_BUFFER_MODE_2) ||
+ (*pAccess == XGE_SET_BUFFER_MODE_3) ||
+ (*pAccess == XGE_SET_BUFFER_MODE_5)) {
+ switch(*pAccess) {
+ case XGE_SET_BUFFER_MODE_1: *pAccess = 'Y'; break;
+ case XGE_SET_BUFFER_MODE_2:
+ case XGE_SET_BUFFER_MODE_3:
+ case XGE_SET_BUFFER_MODE_5: *pAccess = 'N'; break;
+ }
+ if(copyout(pAccess, ifreqp->ifr_data,
+ sizeof(*pAccess)) != 0) {
+ xge_trace(XGE_ERR,
+ "Copyout of chgbufmode result failed");
+ }
+ }
+ else {
+ xge_trace(XGE_TRACE, "Nothing is matching");
+ }
+ break;
+
+ /*
+ * Custom IOCTL 1 :
+ * Used to get BAR0 register values through application program
+ */
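+ /*
+ * Sketch of the expected bar0reg_t exchange (field usage inferred
+ * from the handler below; the initialization shown is illustrative):
+ *
+ * bar0reg_t reg;
+ *
+ * strcpy(reg.option, "-r");
+ * reg.offset = 0x0000;
+ * ifr.ifr_data = (caddr_t) &reg;
+ * ioctl(sock_fd, SIOCGPRIVATE_1, &ifr);
+ *
+ * On return reg.value holds the register contents. "-w" writes
+ * reg.value and reads it back for verification; any other option
+ * dumps the entire BAR0 space, eight bytes per register.
+ */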
+ case SIOCGPRIVATE_1:
+ reg = (bar0reg_t *) ifreqp->ifr_data;
+ if(strcmp(reg->option,"-r") == 0) {
+ offset = reg->offset;
+ value = 0x0000;
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
+ &value );
+ if(status_code == XGE_HAL_OK) {
+ reg->value = value;
+ }
+ else {
+ xge_trace(XGE_ERR, "Getting register value failed");
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ copyout(reg, ifreqp->ifr_data, sizeof(bar0reg_t));
+ mtx_unlock(&lldev->xge_lock);
+ }
+ else if(strcmp(reg->option,"-w") == 0) {
+ offset = reg->offset;
+ value = reg->value;
+ mtx_lock(&lldev->xge_lock);
+ status_code = xge_hal_mgmt_reg_write(hldev, 0, offset,
+ value );
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Getting register value failed");
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ value = 0x0000;
+ status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
+ &value);
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Getting register value failed");
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ if(reg->value != value) {
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ mtx_unlock(&lldev->xge_lock);
+ }
+ else {
+ offset = 0x0000;
+ value = 0x0000;
+ regInfo = (void *)ifreqp->ifr_data;
+
+ mtx_lock(&lldev->xge_lock);
+ for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
+ index++, offset += 0x0008) {
+ status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
+ &value);
+ if(status_code == XGE_HAL_OK) {
+ *((u64 *)regInfo + index) = value;
+ }
+ else {
+ xge_trace(XGE_ERR, "Getting register value failed");
+ mtx_unlock(&lldev->xge_lock);
+ retValue = EINVAL;
+ break;
+ }
+ }
+
+ copyout(regInfo, ifreqp->ifr_data,
+ sizeof(xge_hal_pci_bar0_t));
+ mtx_unlock(&lldev->xge_lock);
+ }
+ break;
+
+ default:
+ retValue = EINVAL;
+ break;
+ }
+ return retValue;
+}
+
+/******************************************
+ * Function: xge_init
+ * Parameters: Pointer to per-device
+ * xgelldev_t structure as void*.
+ * Return: None
+ * Description: Init entry point.
+ ******************************************/
+void
+xge_init(void *plldev)
+{
+ ENTER_FUNCTION
+
+ xgelldev_t *lldev = (xgelldev_t *)plldev;
+
+ mtx_lock(&lldev->xge_lock);
+ xge_init_locked(lldev);
+ mtx_unlock(&lldev->xge_lock);
+
+ LEAVE_FUNCTION
+}
+
+void
+xge_init_locked(void *pdevin)
+{
+ ENTER_FUNCTION
+
+ xgelldev_t *lldev = (xgelldev_t *)pdevin;
+ struct ifnet *ifnetp = lldev->ifnetp;
+ device_t dev = lldev->device;
+
+ mtx_assert((&lldev->xge_lock), MA_OWNED);
+
+ /* If device is in running state, initializing is not required */
+ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
+ return;
+ }
+
+ /* Initializing timer */
+ callout_init(&lldev->timer, CALLOUT_MPSAFE);
+
+ xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function: xge_timer
+ * Parameters: Pointer to per-device
+ * xgelldev_t structure as void*.
+ * Return: None
+ * Description: Polls the device for changes.
+ ******************************************/
+void
+xge_timer(void *devp)
+{
+ xgelldev_t *lldev = (xgelldev_t *)devp;
+ xge_hal_device_t *hldev = lldev->devh;
+
+ /* Poll for changes */
+ xge_hal_device_poll(hldev);
+
+ /* Reset timer */
+ callout_reset(&lldev->timer, hz, xge_timer, lldev);
+
+ return;
+}
+
+/******************************************
+ * Function: xge_stop
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer
+ * Return: None
+ * Description: Deactivates the interface
+ * (Called on "ifconfig down")
+ ******************************************/
+void
+xge_stop(xgelldev_t *lldev)
+{
+ struct ifnet *ifnetp = lldev->ifnetp;
+ device_t dev = lldev->device;
+
+ ENTER_FUNCTION
+
+ mtx_assert((&lldev->xge_lock), MA_OWNED);
+
+ /* If device is not in "Running" state, return */
+ if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
+ goto xfstop_out;
+ }
+
+ xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL);
+
+xfstop_out:
+ LEAVE_FUNCTION
+ return;
+}
+
+/*
+ * xge_intr_filter
+ *
+ * ISR filter function
+ * @handle softc/lldev per device structure
+ */
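+/*
+ * The filter runs in primary interrupt context: general_int_status
+ * tells us whether this device raised the interrupt. Returning
+ * FILTER_SCHEDULE_THREAD defers the real work to the ithread handler
+ * (xge_intr); FILTER_STRAY reports the interrupt was not ours.
+ */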
+int
+xge_intr_filter(void *handle)
+{
+ xgelldev_t *lldev = NULL;
+ xge_hal_device_t *hldev = NULL;
+ xge_hal_pci_bar0_t *bar0 = NULL;
+ device_t dev = NULL;
+ u16 retValue = FILTER_STRAY;
+ u64 val64 = 0;
+
+ lldev = (xgelldev_t *)handle;
+ hldev = lldev->devh;
+ dev = lldev->device;
+ bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+
+ val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
+ &bar0->general_int_status);
+ retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
+
+ return retValue;
+}
+
+/******************************************
+ * xge_intr
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer
+ * Return: None
+ * Description: Interrupt service routine
+ ******************************************/
+void
+xge_intr(void *plldev)
+{
+ xge_hal_status_e status;
+ xgelldev_t *lldev = (xgelldev_t *)plldev;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
+ struct ifnet *ifnetp = lldev->ifnetp;
+
+ mtx_lock(&lldev->xge_lock);
+ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
+ status = xge_hal_device_handle_irq(hldev);
+
+ if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd))) {
+ xge_send_locked(ifnetp);
+ }
+ }
+ mtx_unlock(&lldev->xge_lock);
+ return;
+}
+
+/********************************************
+ * Function : xgell_rx_open
+ * Parameters: Queue index, per adapter
+ * xgelldev_t structure pointer,
+ * channel open/close/reopen flag
+ * Return: 0 or -ENODEV
+ * Description: Initializes and opens the
+ * Rx ring channel for the given queue.
+ ******************************************/
+int
+xgell_rx_open(int qid, xgelldev_t *lldev, xge_hal_channel_reopen_e rflag)
+{
+ u64 adapter_status = 0x0;
+ int retValue = 0;
+ xge_hal_status_e status_code;
+
+ ENTER_FUNCTION
+
+ xge_hal_channel_attr_t attr = {
+ .post_qid = qid,
+ .compl_qid = 0,
+ .callback = xgell_rx_compl,
+ .per_dtr_space = sizeof(xgell_rx_priv_t),
+ .flags = 0,
+ .type = XGE_HAL_CHANNEL_TYPE_RING,
+ .userdata = lldev,
+ .dtr_init = xgell_rx_initial_replenish,
+ .dtr_term = xgell_rx_term
+ };
+
+ /* If device is not ready, return */
+ if(xge_hal_device_status(lldev->devh, &adapter_status)) {
+ xge_trace(XGE_ERR, "Device is not ready. Adapter status: 0x%llx",
+ (unsigned long long) adapter_status);
+ retValue = -ENODEV;
+ goto rxopen_out;
+ }
+
+ /* Open ring channel */
+ status_code = xge_hal_channel_open(lldev->devh, &attr,
+ &lldev->ring_channel[qid], rflag);
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Can not open Rx RING channel, Status: %d\n",
+ status_code);
+ retValue = -ENODEV;
+ goto rxopen_out;
+ }
+
+rxopen_out:
+ LEAVE_FUNCTION
+
+ return retValue;
+}
+
+/******************************************
+ * Function: xgell_tx_open
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer, channel
+ * open/close/reopen flag
+ * Return: 0 or -ENODEV
+ * Description: Initializes and opens the
+ * Tx FIFO channel.
+ ******************************************/
+int
+xgell_tx_open(xgelldev_t *lldev, xge_hal_channel_reopen_e tflag)
+{
+ xge_hal_status_e status_code;
+ u64 adapter_status = 0x0;
+ int retValue = 0;
+
+ ENTER_FUNCTION
+
+ xge_hal_channel_attr_t attr = {
+ .post_qid = 0,
+ .compl_qid = 0,
+ .callback = xgell_tx_compl,
+ .per_dtr_space = sizeof(xgell_tx_priv_t),
+ .flags = 0,
+ .type = XGE_HAL_CHANNEL_TYPE_FIFO,
+ .userdata = lldev,
+ .dtr_init = xgell_tx_initial_replenish,
+ .dtr_term = xgell_tx_term
+ };
+
+ /* If device is not ready, return */
+ if(xge_hal_device_status(lldev->devh, &adapter_status)) {
+ xge_trace(XGE_ERR, "Device is not ready. Adapter status: 0x%llx\n",
+ (unsigned long long) adapter_status);
+ retValue = -ENODEV;
+ goto txopen_out;
+ }
+
+ /* Open FIFO channel */
+ status_code = xge_hal_channel_open(lldev->devh, &attr,
+ &lldev->fifo_channel_0, tflag);
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Can not open Tx FIFO channel, Status: %d\n",
+ status_code);
+ retValue = -ENODEV;
+ goto txopen_out;
+ }
+
+txopen_out:
+ LEAVE_FUNCTION
+
+ return retValue;
+}
+
+/******************************************
+ * Function: xgell_channel_open
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer, channel
+ * open/close/reopen flag
+ * Return: 0 for success, non-zero
+ * for failure
+ * Description: Opens both Rx and Tx channels.
+ ******************************************/
+int
+xgell_channel_open(xgelldev_t *lldev, xge_hal_channel_reopen_e option)
+{
+ int status = XGE_HAL_OK;
+ int index = 0;
+ int index2 = 0;
+
+ ENTER_FUNCTION
+
+ /* Open ring (Rx) channel */
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ if((status = xgell_rx_open(index, lldev, option))) {
+ xge_trace(XGE_ERR, "Opening Rx channel failed (Status: %d)\n",
+ status);
+ for(index2 = 0; index2 < index; index2++) {
+ xge_hal_channel_close(lldev->ring_channel[index2], option);
+ }
+ return status;
+ }
+ }
+#ifdef XGE_FEATURE_LRO
+ status = xge_hal_lro_init(1, lldev->devh);
+ if (status != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "cannot init Rx LRO got status code %d", status);
+ return -ENODEV;
+ }
+#endif
+
+ /* Open FIFO (Tx) channel */
+ if((status = xgell_tx_open(lldev, option))) {
+ xge_trace(XGE_ERR, "Opening Tx channel failed (Status: %d)\n",
+ status);
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ xge_hal_channel_close(lldev->ring_channel[index], option);
+ }
+ }
+
+ LEAVE_FUNCTION
+ return status;
+}
+
+/******************************************
+ * Function: xgell_channel_close
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer, channel
+ * open/close/reopen flag
+ * Return: 0 for success, non-zero for
+ * failure
+ * Description: Closes both Tx and Rx channels
+ ******************************************/
+int
+xgell_channel_close(xgelldev_t *lldev, xge_hal_channel_reopen_e option)
+{
+ int index;
+
+ ENTER_FUNCTION
+
+ DELAY(1000 * 1000);
+
+ /* Close FIFO (Tx) channel */
+ xge_hal_channel_close(lldev->fifo_channel_0, option);
+
+ /* Close Ring (Rx) channel */
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ xge_hal_channel_close(lldev->ring_channel[index], option);
+ }
+
+ LEAVE_FUNCTION
+
+ return 0;
+}
+
+
+/******************************************
+ * Function: dmamap_cb
+ * Parameters: Parameter passed from dmamap
+ * function, Segment, Number of
+ * segments, error (if any)
+ * Return: None
+ * Description: Callback function used for
+ * DMA mapping
+ ******************************************/
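+/*
+ * Note: only segs[0].ds_addr is recorded, so this callback assumes a
+ * single-segment mapping (such as one created with the one-segment Rx
+ * DMA tag set up in xge_attach).
+ */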
+void
+dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ if(!error) {
+ *(bus_addr_t *) arg = segs->ds_addr;
+ }
+}
+
+/******************************************
+ * Function: xgell_reset
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer
+ * Return: None
+ * Description: Resets the device
+ ******************************************/
+void
+xgell_reset(xgelldev_t *lldev)
+{
+ device_t dev = lldev->device;
+
+ ENTER_FUNCTION
+
+ xge_trace(XGE_TRACE, "Reseting the chip");
+
+ mtx_lock(&lldev->xge_lock);
+
+ /* If the device is not initialized, return */
+ if(!lldev->initialized) {
+ goto xreset_out;
+ }
+
+ xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL);
+
+ xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL);
+
+xreset_out:
+ LEAVE_FUNCTION
+ mtx_unlock(&lldev->xge_lock);
+
+ return;
+}
+
+/******************************************
+ * Function: xge_setmulti
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer
+ * Return: None
+ * Description: Programs the device
+ * multicast address list
+ ******************************************/
+void
+xge_setmulti(xgelldev_t *lldev)
+{
+ ENTER_FUNCTION
+ struct ifmultiaddr *ifma;
+ u8 *lladdr;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
+ struct ifnet *ifnetp = lldev->ifnetp;
+ int index = 0;
+ int offset = 1;
+ int table_size = 47;
+ xge_hal_status_e status = XGE_HAL_OK;
+ u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
+ status = xge_hal_device_mcast_enable(hldev);
+ lldev->all_multicast = 1;
+ }
+ else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
+ status = xge_hal_device_mcast_disable(hldev);
+ lldev->all_multicast = 0;
+ }
+
+ if(status != XGE_HAL_OK) {
+ printf("Failed to %s multicast (status: %d)\n",
+ (ifnetp->if_flags & IFF_ALLMULTI ? "enable" : "disable"),
+ status);
+ }
+
+ /* Updating address list */
+ IF_ADDR_LOCK(ifnetp);
+ index = 0;
+ TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
+ if(ifma->ifma_addr->sa_family != AF_LINK) {
+ continue;
+ }
+ lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ index += 1;
+ }
+ IF_ADDR_UNLOCK(ifnetp);
+
+ if((!lldev->all_multicast) && (index)) {
+ lldev->macaddr_count = (index + 1);
+ if(lldev->macaddr_count > table_size) {
+ return;
+ }
+
+ /* Clear old addresses */
+ for(index = 0; index < 48; index++) {
+ xge_hal_device_macaddr_set(hldev, (offset + index),
+ initial_addr);
+ }
+ }
+
+ /* Add new addresses */
+ IF_ADDR_LOCK(ifnetp);
+ index = 0;
+ TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
+ if(ifma->ifma_addr->sa_family != AF_LINK) {
+ continue;
+ }
+ lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
+ index += 1;
+ }
+ IF_ADDR_UNLOCK(ifnetp);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function: xge_enable_promisc
+ * Parameters: Adapter structure
+ * Return: None
+ * Description: Enables promiscuous mode
+ ******************************************/
+void
+xge_enable_promisc(xgelldev_t *lldev)
+{
+ struct ifnet *ifnetp = lldev->ifnetp;
+ xge_hal_device_t *hldev = lldev->devh;
+ xge_hal_pci_bar0_t *bar0 = NULL;
+ u64 val64 = 0;
+
+ ENTER_FUNCTION
+
+ bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
+
+ if(ifnetp->if_flags & IFF_PROMISC) {
+ xge_hal_device_promisc_enable(lldev->devh);
+
+ /*
+ * When operating in promiscuous mode, don't strip the VLAN tag
+ */
+ val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
+ &bar0->rx_pa_cfg);
+ val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
+ val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
+ xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
+ &bar0->rx_pa_cfg);
+
+ xge_trace(XGE_TRACE, "Promiscuous mode ON");
+ }
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function: xge_disable_promisc
+ * Parameters: Adapter structure
+ * Return: None
+ * Description: Disables promiscuous mode
+ ******************************************/
+void
+xge_disable_promisc(xgelldev_t *lldev)
+{
+ xge_hal_device_t *hldev = lldev->devh;
+ xge_hal_pci_bar0_t *bar0 = NULL;
+ u64 val64 = 0;
+
+ ENTER_FUNCTION
+
+ bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
+
+ xge_hal_device_promisc_disable(lldev->devh);
+
+ /*
+ * Strip VLAN tag when operating in non-promiscuous mode
+ */
+ val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
+ &bar0->rx_pa_cfg);
+ val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
+ val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
+ xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
+ &bar0->rx_pa_cfg);
+
+ xge_trace(XGE_TRACE, "Promiscuous mode OFF");
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function:    changeMtu
+ * Parameters:  Pointer to per-device
+ *              xgelldev_t structure, New
+ *              MTU size.
+ * Return:      0 on success, EINVAL if the
+ *              requested MTU is invalid
+ * Description: Changes MTU size to requested
+ ******************************************/
+int
+changeMtu(xgelldev_t *lldev, int NewMtu)
+{
+ struct ifnet *ifnetp = lldev->ifnetp;
+ xge_hal_device_t *hldev = lldev->devh;
+ int retValue = 0;
+
+ ENTER_FUNCTION
+
+ do {
+ /* Check requested MTU size for boundary */
+ if(xge_hal_device_mtu_check(hldev, NewMtu) != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Invalid MTU");
+ retValue = EINVAL;
+ break;
+ }
+
+ if(lldev->initialized != 0) {
+ mtx_lock(&lldev->xge_lock);
+ if_down(ifnetp);
+ xge_stop(lldev);
+ ifnetp->if_mtu = NewMtu;
+ changeBufmode(lldev, NewMtu);
+ xge_init_locked((void *)lldev);
+ if_up(ifnetp);
+ mtx_unlock(&lldev->xge_lock);
+ }
+ else {
+ ifnetp->if_mtu = NewMtu;
+ changeBufmode(lldev, NewMtu);
+ }
+ } while(FALSE);
+
+ LEAVE_FUNCTION
+ return retValue;
+}
+
+/******************************************
+ * Function:    changeBufmode
+ * Parameters:  Pointer to per-device
+ *              xgelldev_t structure, New
+ *              MTU size.
+ * Return:      XGE_HAL_OK
+ * Description: Updates the ring configuration
+ *              depending on the new MTU size.
+ ******************************************/
+int
+changeBufmode (xgelldev_t *lldev, int NewMtu)
+{
+ xge_hal_ring_config_t * pRingConfig;
+ xge_hal_device_t *hldev = lldev->devh;
+ device_t dev = lldev->device;
+ int buffer_length = 0, buffer_index = 0, index;
+
+ pRingConfig = &(hldev->config.ring);
+ xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
+
+ if((NewMtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= MJUMPAGESIZE) {
+#if defined(XGE_FEATURE_BUFFER_MODE_3)
+ xge_os_printf("%s: 3 Buffer Mode Enabled",
+ device_get_nameunit(dev));
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ }
+ pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[2] = NewMtu;
+ lldev->rxd_mbuf_cnt = 3;
+#else
+#if defined(XGE_FEATURE_BUFFER_MODE_2)
+ xge_os_printf("%s: 2 Buffer Mode Enabled",
+ device_get_nameunit(dev));
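+	    /*
+	     * 2 buffer mode is implemented on top of the HAL's 3-buffer
+	     * descriptors; xgell_get_buf_3b_5b() aliases the unused middle
+	     * pointer. Hence the HAL queue is configured for 3 buffer mode
+	     * below while lldev->buffer_mode records 2 buffer mode.
+	     */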
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
+ }
+ pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = NewMtu;
+ lldev->rxd_mbuf_cnt = 2;
+#else
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
+ }
+ pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
+ lldev->rxd_mbuf_len[0] = NewMtu;
+ lldev->rxd_mbuf_cnt = 1;
+#endif
+#endif
+ }
+ else {
+#if defined(XGE_FEATURE_BUFFER_MODE_3) || defined (XGE_FEATURE_BUFFER_MODE_2)
+ xge_os_printf("2 or 3 Buffer mode is not supported for given MTU");
+ xge_os_printf("So changing buffer mode to 5 buffer mode\n");
+#endif
+ xge_os_printf("%s: 5 Buffer Mode Enabled",
+ device_get_nameunit(dev));
+ for(index = 0; index < XGE_RING_COUNT; index++) {
+ pRingConfig->queue[index].buffer_mode =
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
+ }
+ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
+ buffer_length = NewMtu;
+ buffer_index = 2;
+ lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
+ lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
+
+ while(buffer_length > MJUMPAGESIZE) {
+ buffer_length -= MJUMPAGESIZE;
+ lldev->rxd_mbuf_len[buffer_index] = MJUMPAGESIZE;
+ buffer_index++;
+ }
+
+ BUFALIGN(buffer_length);
+
+ lldev->rxd_mbuf_len[buffer_index] = buffer_length;
+ lldev->rxd_mbuf_cnt = buffer_index+1;
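+
+	    /*
+	     * Worked example (assuming MJUMPAGESIZE == PAGE_SIZE == 4096):
+	     * NewMtu = 9000 yields rxd_mbuf_len[] = { MAC header max,
+	     * TCP/IP header max, 4096, 4096, 896 } and rxd_mbuf_cnt = 5;
+	     * the 808-byte tail (9000 - 2 * 4096) is rounded up to the
+	     * next multiple of 128 by BUFALIGN.
+	     */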
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*************************************************************
+ * xge_initialize
+ *
+ * @dev: Device structure
+ * @option: Normal/Reset option for channels
+ *
+ * Called by both the init and reset paths to enable the device and its
+ * interrupts and to open the channels.
+ *
+ **************************************************************/
+void xge_initialize(device_t dev, xge_hal_channel_reopen_e option)
+{
+	struct ifaddr *ifaddrp;
+	struct sockaddr_dl *sockaddrp;
+	unsigned char *macaddr;
+	xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev);
+	xge_hal_device_t *hldev = lldev->devh;
+	struct ifnet *ifnetp = lldev->ifnetp;
+	int status = XGE_HAL_OK;
+
+	ENTER_FUNCTION
+
+ xge_trace(XGE_TRACE, "Set MTU size");
+ status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
+ if(status != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Setting HAL device MTU failed (Status: %d)",
+ status);
+ goto init_sub_out;
+ }
+
+
+ /* Enable HAL device */
+ xge_hal_device_enable(hldev);
+
+ /* Get MAC address and update in HAL */
+ ifaddrp = ifaddr_byindex(ifnetp->if_index);
+ sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
+ sockaddrp->sdl_type = IFT_ETHER;
+ sockaddrp->sdl_alen = ifnetp->if_addrlen;
+ macaddr = LLADDR(sockaddrp);
+ xge_trace(XGE_TRACE,
+ "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
+ *(macaddr + 4), *(macaddr + 5));
+ status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
+ if(status != XGE_HAL_OK) {
+ xge_trace(XGE_ERR,
+ "Setting MAC address failed (Status: %d)\n", status);
+ }
+
+ /* Opening channels */
+ mtx_unlock(&lldev->xge_lock);
+ status = xgell_channel_open(lldev, option);
+ mtx_lock(&lldev->xge_lock);
+ if(status != 0) {
+ goto init_sub_out;
+ }
+
+ /* Set appropriate flags */
+ ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
+	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ /* Checksum capability */
+ ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
+ (CSUM_TCP | CSUM_UDP) : 0;
+
+#ifdef XGE_FEATURE_TSO
+ if(ifnetp->if_capenable & IFCAP_TSO4)
+ ifnetp->if_hwassist |= CSUM_TSO;
+#endif
+
+ /* Enable interrupts */
+ xge_hal_device_intr_enable(hldev);
+
+ callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
+
+	/* Enable promiscuous mode, if requested */
+ xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
+ xge_enable_promisc(lldev);
+
+ /* Device is initialized */
+ lldev->initialized = 1;
+ xge_os_mdelay(1000);
+
+init_sub_out:
+ LEAVE_FUNCTION
+ return;
+}
+
+/*******************************************************
+ * xge_terminate
+ *
+ * @dev: Device structure
+ * @option: Normal/Reset option for channels
+ *
+ * Called by both the stop and reset paths to disable the device and its
+ * interrupts and to close the channels.
+ ******************************************************/
+void xge_terminate(device_t dev, xge_hal_channel_reopen_e option)
+{
+	xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
+	xge_hal_device_t *hldev = lldev->devh;
+	struct ifnet *ifnetp = lldev->ifnetp;
+
+	ENTER_FUNCTION
+
+ /* Set appropriate flags */
+ ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ /* Stop timer */
+ callout_stop(&lldev->timer);
+
+ /* Disable interrupts */
+ xge_hal_device_intr_disable(hldev);
+
+ mtx_unlock(&lldev->xge_lock);
+ xge_queue_flush(xge_hal_device_queue(lldev->devh));
+ mtx_lock(&lldev->xge_lock);
+
+ /* Disable HAL device */
+ if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "Disabling HAL device failed");
+ }
+
+ /* Close Tx and Rx channels */
+ xgell_channel_close(lldev, option);
+
+ /* Reset HAL device */
+ xge_hal_device_reset(hldev);
+
+ xge_os_mdelay(1000);
+ lldev->initialized = 0;
+
+ if_link_state_change(ifnetp, LINK_STATE_DOWN);
+
+ LEAVE_FUNCTION
+}
+
+/******************************************
+ * Function:    xgell_set_mbuf_cflags
+ * Parameters:  mbuf structure pointer
+ * Return:      None
+ * Description: Marks the mbuf as carrying
+ *              hardware-verified checksums
+ ******************************************/
+void xgell_set_mbuf_cflags(mbuf_t pkt)
+{
+ pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ pkt->m_pkthdr.csum_data = htons(0xffff);
+}
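+
+/*
+ * Note: CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff is
+ * the stack's convention for "the TCP/UDP checksum has already been
+ * verified", so no software checksum check is repeated.
+ */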
+
+#ifdef XGE_FEATURE_LRO
+/******************************************
+ * Function:    xgell_lro_flush_sessions
+ * Parameters:  Per adapter xgelldev_t
+ * Return:      None
+ * Description: Flushes all open LRO sessions and
+ *              sends each accumulated LRO packet
+ *              to the upper layer.
+ ******************************************/
+void xgell_lro_flush_sessions(xgelldev_t *lldev)
+{
+ lro_t *lro;
+ struct ifnet *ifnetp = lldev->ifnetp;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
+
+ while (NULL != (lro = xge_hal_lro_get_next_session(hldev))) {
+ xgell_set_mbuf_cflags(lro->os_buf);
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, lro->os_buf);
+ mtx_lock(&lldev->xge_lock);
+
+ xge_hal_lro_close_session(lro);
+ }
+}
+
+/******************************************
+ * Function:    xgell_accumulate_large_rx
+ * Parameters:  Descriptor info structure, current mbuf structure,
+ *              packet length, Per adapter structure, Rx Desc private structure
+ * Return:      None
+ * Description: Accumulates received frames into LRO
+ *              packets based on the HAL's classification
+ *              of each frame.
+ ******************************************/
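+/*
+ * Summary of the HAL classifications handled below: LRO_BEGIN starts
+ * a new session with this frame; LRO_CONT appends the frame to an open
+ * session; LRO_END_1 appends the frame and closes the session;
+ * LRO_END_2 closes the session and sends the frame separately;
+ * LRO_END_3 closes the session and opens a new one with this frame;
+ * LRO_UNCAPABLE and LRO_SESSIONS_XCDED pass the frame up unaggregated.
+ */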
+void xgell_accumulate_large_rx(xge_hal_dtr_info_t *ext_info,mbuf_t pkt,
+ int pkt_length, xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv)
+{
+ tcplro_t *tcp;
+ lro_t *lro, *lro_end3;
+ xge_hal_status_e status;
+ unsigned char * temp;
+ struct ifnet *ifnetp = lldev->ifnetp;
+
+ status = xge_hal_accumulate_large_rx(pkt->m_data, &tcp, &pkt_length,
+ &lro, ext_info, lldev->devh, &lro_end3);
+ pkt->m_next = NULL;
+ temp = (unsigned char *)tcp;
+
+ if(status == XGE_HAL_INF_LRO_BEGIN) {
+ pkt->m_flags |= M_PKTHDR;
+ pkt->m_pkthdr.rcvif = ifnetp;
+ lro->os_buf = lro->os_buf_end = pkt;
+ }
+ else if(status == XGE_HAL_INF_LRO_CONT) {
+	    /*
+	     * The current mbuf will be merged into an LRO frame, so
+	     * clear its packet-header flag
+	     */
+	    pkt->m_flags &= ~M_PKTHDR;
+ pkt->m_data = (u8 *)tcp;
+ pkt->m_len = pkt_length;
+
+ /*
+ * Combine the current mbuf to the LRO frame and update
+ * the LRO's pkthdr len accordingly
+ */
+ lro->os_buf_end->m_next = pkt;
+ lro->os_buf_end = pkt;
+ lro->os_buf->m_pkthdr.len += pkt_length;
+ }
+ else if(status == XGE_HAL_INF_LRO_END_2) {
+ lro->os_buf->m_flags |= M_EOR;
+
+ /* Update the Checksum flags of the LRO frames */
+ xgell_set_mbuf_cflags(lro->os_buf);
+
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+	    /*
+	     * The current packet cannot be combined with the LRO frame.
+	     * Flush the previous LRO frame and send the current packet
+	     * separately
+	     */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, lro->os_buf);
+ (*ifnetp->if_input)(ifnetp, pkt);
+ mtx_lock(&lldev->xge_lock);
+ xge_hal_lro_close_session(lro);
+ }
+ else if(status == XGE_HAL_INF_LRO_END_1) {
+	    pkt->m_flags &= ~M_PKTHDR;
+ pkt->m_data = (u8 *)tcp;
+ pkt->m_len = pkt_length;
+ lro->os_buf_end->m_next = pkt;
+ lro->os_buf->m_pkthdr.len += pkt_length;
+ xgell_set_mbuf_cflags(lro->os_buf);
+ lro->os_buf->m_flags |= M_EOR;
+
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, lro->os_buf);
+ mtx_lock(&lldev->xge_lock);
+
+ xge_hal_lro_close_session(lro);
+ }
+ else if(status == XGE_HAL_INF_LRO_END_3) {
+ pkt->m_flags |= M_PKTHDR;
+ pkt->m_len = pkt_length;
+ pkt->m_pkthdr.len = pkt_length;
+ lro_end3->os_buf = lro_end3->os_buf_end = pkt;
+ lro->os_buf->m_flags |= M_EOR;
+ xgell_set_mbuf_cflags(lro->os_buf);
+
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, lro->os_buf);
+ mtx_lock(&lldev->xge_lock);
+ xge_hal_lro_close_session(lro);
+ }
+ else if((status == XGE_HAL_INF_LRO_UNCAPABLE) ||
+ (status == XGE_HAL_INF_LRO_SESSIONS_XCDED)) {
+ pkt->m_flags |= M_PKTHDR;
+ pkt->m_len = pkt_length;
+ pkt->m_pkthdr.len = pkt_length;
+
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, pkt);
+ mtx_lock(&lldev->xge_lock);
+ }
+}
+#endif
+
+/******************************************
+ * Function:    xgell_rx_compl
+ * Parameters:  Channel handle, descriptor,
+ *              transfer code, userdata
+ *              (not used)
+ * Return:      HAL status code
+ * Description: Called when an interrupt
+ *              indicates a received frame
+ *              or when the receive ring
+ *              still holds fresh, as-yet
+ *              unprocessed frames.
+ ******************************************/
+xge_hal_status_e
+xgell_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
+ void *userdata)
+{
+ xge_hal_dtr_info_t ext_info;
+ xge_hal_status_e status_code;
+ struct ifnet *ifnetp;
+ device_t dev;
+ int index;
+ mbuf_t mbuf_up = NULL;
+ xgell_rx_priv_t *rxd_priv = NULL, old_rxd_priv;
+ u16 vlan_tag;
+
+// ENTER_FUNCTION
+
+
+	/* Get the user data portion */
+ xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
+ if(!lldev) {
+ xge_ctrace(XGE_TRACE, "xgeX: %s: Failed to get user data",
+ __FUNCTION__);
+ return XGE_HAL_FAIL;
+ }
+ dev = lldev->device;
+
+ mtx_assert((&lldev->xge_lock), MA_OWNED);
+
+ /* get the interface pointer */
+ ifnetp = lldev->ifnetp;
+
+ do {
+ if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
+ return XGE_HAL_FAIL;
+ }
+
+ if(t_code) {
+ xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
+ xge_hal_device_handle_tcode(channelh, dtr, t_code);
+ xge_hal_ring_dtr_post(channelh,dtr);
+ continue;
+ }
+
+ /* Get the private data for this descriptor*/
+ rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
+ dtr);
+ if(!rxd_priv) {
+ xge_trace(XGE_ERR, "Failed to get descriptor private data");
+ return XGE_HAL_FAIL;
+ }
+
+	    /* Take a backup of the current packet's rxd_priv structure */
+ xge_os_memcpy(&old_rxd_priv, rxd_priv, sizeof(xgell_rx_priv_t));
+
+ /* Prepare one buffer to send it to upper layer -- since the upper
+ * layer frees the buffer do not use rxd_priv->buffer
+ * Meanwhile prepare a new buffer, do mapping, use it in the
+ * current descriptor and post descriptor back to ring channel */
+ mbuf_up = rxd_priv->bufferArray[0];
+
+ /* Gets details of mbuf i.e., packet length */
+ xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
+
+ status_code =
+ (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
+ xgell_get_buf(dtr, rxd_priv, lldev, 0) :
+ xgell_get_buf_3b_5b(dtr, rxd_priv, lldev);
+
+ if(status_code != XGE_HAL_OK) {
+ xge_trace(XGE_ERR, "No memory");
+
+ /*
+ * Do not deliver the received buffer to the stack. Instead,
+ * Re-post the descriptor with the same buffer
+ */
+
+ /* Get back previous rxd_priv structure before posting */
+ xge_os_memcpy(rxd_priv, &old_rxd_priv, sizeof(xgell_rx_priv_t));
+
+ xge_hal_ring_dtr_post(channelh, dtr);
+ continue;
+ }
+
+ /* Get the extended information */
+ xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
+
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ /*
+ * As we have allocated a new mbuf for this descriptor, post
+ * this descriptor with new mbuf back to ring channel
+ */
+ vlan_tag = ext_info.vlan;
+ xge_hal_ring_dtr_post(channelh, dtr);
+ if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
+ (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
+ (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
+ (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
+ /* set Checksum Flag */
+ xgell_set_mbuf_cflags(mbuf_up);
+#ifdef XGE_FEATURE_LRO
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ xgell_accumulate_large_rx(&ext_info, mbuf_up,
+ mbuf_up->m_len, lldev, rxd_priv);
+ }
+#else
+ /* Post-Read sync for buffers*/
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
+ mtx_lock(&lldev->xge_lock);
+#endif
+ }
+ else {
+ /*
+	            * Packet with erroneous checksum; let the upper layer
+ * deal with it
+ */
+
+ /* Post-Read sync for buffers*/
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
+
+#ifdef XGE_FEATURE_LRO
+ xgell_lro_flush_sessions(lldev);
+#endif
+
+ if (vlan_tag) {
+ mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
+ mbuf_up->m_flags |= M_VLANTAG;
+ }
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
+ mtx_lock(&lldev->xge_lock);
+ }
+ }
+ else {
+ /*
+ * As we have allocated a new mbuf for this descriptor, post
+ * this descriptor with new mbuf back to ring channel
+ */
+ xge_hal_ring_dtr_post(channelh, dtr);
+ if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
+ (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
+ (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
+ (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
+ /* set Checksum Flag */
+ xgell_set_mbuf_cflags(mbuf_up);
+#ifdef XGE_FEATURE_LRO
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ xgell_accumulate_large_rx(&ext_info, mbuf_up,
+ mbuf_up->m_len, lldev, rxd_priv);
+ }
+#else
+ /* Post-Read sync for buffers*/
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map,
+ BUS_DMASYNC_POSTREAD);
+ }
+
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
+ mtx_lock(&lldev->xge_lock);
+#endif
+ }
+ else {
+ /*
+	            * Packet with erroneous checksum; let the upper layer
+ * deal with it
+ */
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map,
+ BUS_DMASYNC_POSTREAD);
+ }
+
+#ifdef XGE_FEATURE_LRO
+ xgell_lro_flush_sessions(lldev);
+#endif
+ /* Send it up */
+ mtx_unlock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
+ mtx_lock(&lldev->xge_lock);
+ }
+ }
+ } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
+ == XGE_HAL_OK);
+#ifdef XGE_FEATURE_LRO
+ xgell_lro_flush_sessions(lldev);
+#endif
+
+// LEAVE_FUNCTION
+
+ return XGE_HAL_OK;
+}
+
+/******************************************
+ * Function:    xge_ring_dtr_get
+ * Parameters:  mbuf pointer, channel handle,
+ *              descriptor, Per adapter xgelldev_t
+ *              structure pointer,
+ *              Rx private structure
+ * Return:      HAL status code
+ * Description: Updates the mbuf lengths
+ *              depending on packet lengths.
+ ******************************************/
+int
+xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
+ xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv)
+{
+ mbuf_t m;
+ int pkt_length[5]={0,0}, pkt_len=0;
+ dma_addr_t dma_data[5];
+ int index;
+
+ m = mbuf_up;
+ pkt_len = 0;
+
+ if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ xge_os_memzero(pkt_length, sizeof(pkt_length));
+
+ /*
+ * Retrieve data of interest from the completed descriptor -- This
+ * returns the packet length
+ */
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
+ }
+ else {
+ xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
+ }
+
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ m->m_len = pkt_length[index];
+
+ if(index < (lldev->rxd_mbuf_cnt-1)) {
+ m->m_next = rxd_priv->bufferArray[index + 1];
+ m = m->m_next;
+ }
+ else {
+ m->m_next = NULL;
+ }
+ pkt_len+=pkt_length[index];
+ }
+
+	    /*
+	     * 2 buffer mode is an exceptional case: the payload lands in
+	     * the 3rd buffer, not the 2nd
+	     */
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
+ m->m_len = pkt_length[2];
+ pkt_len+=pkt_length[2];
+ }
+
+ /*
+ * Update length of newly created buffer to be sent up with packet
+ * length
+ */
+ mbuf_up->m_pkthdr.len = pkt_len;
+ }
+ else {
+ /*
+ * Retrieve data of interest from the completed descriptor -- This
+ * returns the packet length
+ */
+ xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
+
+ /*
+ * Update length of newly created buffer to be sent up with packet
+ * length
+ */
+ mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
+ }
+
+	return XGE_HAL_OK;
+}
+
+
+/******************************************
+ * Function: xge_send
+ * Parameters: Pointer to ifnet structure
+ * Return: None
+ * Description: Transmit entry point
+ ******************************************/
+void
+xge_send(struct ifnet *ifnetp)
+{
+ xgelldev_t *lldev = ifnetp->if_softc;
+
+ mtx_lock(&lldev->xge_lock);
+ xge_send_locked(ifnetp);
+ mtx_unlock(&lldev->xge_lock);
+}
+
+void
+xge_send_locked(struct ifnet *ifnetp)
+{
+ xge_hal_dtr_h dtr;
+ static bus_dma_segment_t segs[MAX_SEGS];
+ xge_hal_status_e status_code;
+ unsigned int max_fragments;
+ xgelldev_t *lldev = ifnetp->if_softc;
+ xge_hal_channel_h channelh = lldev->fifo_channel_0;
+ mbuf_t m_head = NULL;
+ mbuf_t m_buf = NULL;
+ xgell_tx_priv_t *ll_tx_priv = NULL;
+ register unsigned int count = 0;
+ unsigned int nsegs = 0;
+ u16 vlan_tag;
+
+ max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
+
+ mtx_assert((&lldev->xge_lock), MA_OWNED);
+
+ /* If device is not initialized, return */
+ if((!lldev->initialized) ||
+ (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))) {
+ xge_trace(XGE_ERR, "Device is not initialized");
+ return;
+ }
+
+ /*
+ * Get the number of free descriptors in the FIFO channel and return if
+ * the count is less than the XGELL_TX_LEVEL_LOW -- the low threshold
+ */
+ count = xge_hal_channel_dtr_count(channelh);
+ if(count <= XGELL_TX_LEVEL_LOW) {
+ ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
+ xge_trace(XGE_TRACE, "Free descriptor count %d/%d at low threshold",
+ count, XGELL_TX_LEVEL_LOW);
+
+ /* Serialized -- through queue */
+ xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
+ XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev);
+ return;
+ }
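+	/*
+	 * The XGE_LL_EVENT_TRY_XMIT_AGAIN event queued above lets the
+	 * serialized event context retry the transmit once descriptors
+	 * have been reclaimed by xgell_tx_compl().
+	 */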
+
+ /* This loop will be executed for each packet in the kernel maintained
+ * queue -- each packet can be with fragments as an mbuf chain */
+ while((ifnetp->if_snd.ifq_head) &&
+ (xge_hal_channel_dtr_count(channelh) > XGELL_TX_LEVEL_LOW)) {
+ IF_DEQUEUE(&ifnetp->if_snd, m_head);
+
+ for(count = 0, m_buf = m_head; m_buf != NULL;
+ m_buf = m_buf->m_next) {
+ if(m_buf->m_len) {
+ count += 1;
+ }
+ }
+
+ if(count >= max_fragments) {
+ m_buf = m_defrag(m_head, M_DONTWAIT);
+ if(m_buf != NULL) {
+ m_head = m_buf;
+ }
+ }
+
+ /* Reserve descriptors */
+ status_code = xge_hal_fifo_dtr_reserve(channelh, &dtr);
+ if(status_code) {
+ switch(status_code) {
+ case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
+ xge_trace(XGE_ERR, "Channel is not ready");
+ break;
+
+ case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
+ xge_trace(XGE_ERR, "Out of descriptors");
+ break;
+
+ default:
+ xge_trace(XGE_ERR,
+ "Reserving (Tx) descriptors failed. Status %d",
+ status_code);
+ }
+ goto out2;
+ }
+
+ vlan_tag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
+ xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
+
+ /* Update Tx private structure for this descriptor */
+ ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
+ ll_tx_priv->buffer = m_head;
+
+ /*
+ * Do mapping -- Required DMA tag has been created in xge_init
+ * function and DMA maps have already been created in the
+ * xgell_tx_replenish function.
+ * Returns number of segments through nsegs
+ */
+ if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
+ ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
+ xge_trace(XGE_ERR, "DMA map load with segments failed");
+ goto out2;
+ }
+
+ /* Set descriptor buffer for header and each fragment/segment */
+ count = 0;
+ do {
+ xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
+ (dma_addr_t)htole64(segs[count].ds_addr),
+ segs[count].ds_len);
+ count = count + 1;
+ } while(count < nsegs);
+
+ /* Pre-write Sync of mapping */
+ bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
+ BUS_DMASYNC_PREWRITE);
+
+#ifdef XGE_FEATURE_TSO
+ if((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
+ xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
+ }
+#endif
+ /* Checksum */
+ if(ifnetp->if_hwassist > 0) {
+ xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
+ | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
+ }
+
+ /* Post descriptor to FIFO channel */
+ xge_hal_fifo_dtr_post(channelh, dtr);
+
+	    /* Hand a copy of the packet to any BPF (Berkeley Packet Filter)
+	     * listeners so that tools like tcpdump can see it */
+ ETHER_BPF_MTAP(ifnetp, m_head);
+ }
+ goto out1;
+out2:
+ /* Prepend the packet back to queue */
+ IF_PREPEND(&ifnetp->if_snd, m_head);
+out1:
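+	/*
+	 * Arm the interface watchdog; xgell_tx_compl() disarms it
+	 * (if_timer = 0) as completions are reaped.
+	 */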
+ ifnetp->if_timer = 15;
+}
+
+/******************************************
+ * Function:    xgell_get_buf
+ * Parameters:  Descriptor, Rx private
+ *              structure, per adapter
+ *              xgelldev_t structure pointer,
+ *              buffer index for mapping
+ * Return:      HAL status code
+ * Description: Gets buffer from system mbuf
+ *              buffer pool.
+ ******************************************/
+int
+xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev, int index)
+{
+ register mbuf_t mp = NULL;
+ struct ifnet *ifnetp = lldev->ifnetp;
+ int retValue = XGE_HAL_OK;
+ bus_addr_t paddr;
+ int BUFLEN = 0, CLUSTLEN = 0;
+
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ CLUSTLEN = MJUMPAGESIZE;
+ BUFLEN = MJUMPAGESIZE;
+ }
+ else {
+ BUFLEN = lldev->rxd_mbuf_len[index];
+ if(BUFLEN < MCLBYTES) {
+ CLUSTLEN = MCLBYTES;
+ }
+ else {
+ CLUSTLEN = MJUMPAGESIZE;
+ }
+ }
+
+ /* Get mbuf with attached cluster */
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, CLUSTLEN);
+ if(!mp) {
+ xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
+ retValue = XGE_HAL_FAIL;
+ goto getbuf_out;
+ }
+
+ /* Update mbuf's length, packet length and receive interface */
+ mp->m_len = mp->m_pkthdr.len = BUFLEN;
+ mp->m_pkthdr.rcvif = ifnetp;
+
+ /* Unload DMA map of mbuf in current descriptor */
+ bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map);
+
+ /* Load DMA map */
+ if(bus_dmamap_load(lldev->dma_tag_rx , rxd_priv->dmainfo[index].dma_map,
+ mtod(mp, void*), mp->m_len, dmamap_cb , &paddr , 0)) {
+ xge_trace(XGE_ERR, "Loading DMA map failed");
+ m_freem(mp);
+ retValue = XGE_HAL_FAIL;
+ goto getbuf_out;
+ }
+
+ /* Update descriptor private data */
+ rxd_priv->bufferArray[index] = mp;
+ rxd_priv->dmainfo[index].dma_phyaddr = htole64(paddr);
+
+ /* Pre-Read/Write sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Set descriptor buffer */
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
+ MJUMPAGESIZE);
+ }
+
+getbuf_out:
+ return retValue;
+}
+
+/******************************************
+ * Function: xgell_get_buf_3b_5b
+ * Parameters: Per adapter xgelldev_t
+ * structure pointer, descriptor,
+ * Rx private structure
+ * Return: HAL status code
+ * Description: Gets buffers from system mbuf
+ * buffer pool.
+ ******************************************/
+int
+xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev)
+{
+ bus_addr_t dma_pointers[5];
+ int dma_sizes[5];
+ int retValue = XGE_HAL_OK, index;
+ int newindex = 0;
+
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ retValue = xgell_get_buf(dtrh, rxd_priv, lldev, index);
+ if(retValue != XGE_HAL_OK) {
+ for(newindex = 0; newindex < index; newindex++) {
+ m_freem(rxd_priv->bufferArray[newindex]);
+ }
+ return retValue;
+ }
+ }
+
+ for(index = 0; index < lldev->buffer_mode; index++) {
+ if(lldev->rxd_mbuf_len[index] != 0) {
+ dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
+ dma_sizes[index] = lldev->rxd_mbuf_len[index];
+ }
+ else {
+ dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
+ dma_sizes[index] = 1;
+ }
+ }
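+
+	/*
+	 * Slots with no configured length alias the previous buffer with a
+	 * 1-byte dummy size, so that every pointer posted to the hardware
+	 * carries a valid address and a non-zero length (the same trick is
+	 * applied to the unused middle pointer of 2 buffer mode below).
+	 */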
+
+ /* Assigning second buffer to third pointer in 2 buffer mode */
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
+ dma_pointers[2] = dma_pointers[1];
+ dma_sizes[2] = dma_sizes[1];
+ dma_sizes[1] = 1;
+ }
+
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
+ }
+ else {
+ xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
+ }
+
+ return retValue;
+}
+
+/******************************************
+ * Function:    xgell_tx_compl
+ * Parameters:  Channel handle, descriptor,
+ *              transfer code,
+ *              userdata -> per adapter
+ *              xgelldev_t structure as void *
+ * Return:      HAL status code
+ * Description: Called when an interrupt
+ *              indicates DMA completion of
+ *              Tx packets. Identifies the
+ *              completed TxDs and frees all
+ *              mbufs whose data have already
+ *              been DMA'ed into the NIC's
+ *              internal memory.
+ ******************************************/
+xge_hal_status_e
+xgell_tx_compl(xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtr, u8 t_code, void *userdata)
+{
+ xgell_tx_priv_t *ll_tx_priv;
+ mbuf_t m_buffer;
+ xgelldev_t *lldev = (xgelldev_t *)userdata;
+ struct ifnet *ifnetp = lldev->ifnetp;
+
+ ifnetp->if_timer = 0;
+
+ /* For each completed descriptor: Get private structure, free buffer,
+ * do unmapping, and free descriptor */
+ do {
+ if(t_code) {
+ xge_trace(XGE_TRACE, "t_code %d", t_code);
+ xge_hal_device_handle_tcode(channelh, dtr, t_code);
+ }
+
+ ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
+ m_buffer = ll_tx_priv->buffer;
+ bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
+ m_freem(m_buffer);
+ ll_tx_priv->buffer = NULL;
+ xge_hal_fifo_dtr_free(channelh, dtr);
+ } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
+ == XGE_HAL_OK);
+ ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return XGE_HAL_OK;
+}
+
+/******************************************
+ * Function: xgell_tx_initial_replenish
+ * Parameters: Channel handle, descriptor,
+ * index (not used), userdata
+ * (not used), channel
+ * open/close/reopen option.
+ * Return: HAL status code
+ * Description: Creates DMA maps to be used
+ * for Tx
+ ******************************************/
+xge_hal_status_e
+xgell_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int index, void *userdata, xge_hal_channel_reopen_e reopen)
+{
+ xgell_tx_priv_t *txd_priv = NULL;
+ int retValue = XGE_HAL_OK;
+ device_t dev = NULL;
+
+ /* Get the user data portion from channel handle */
+ xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
+ if(lldev == NULL) {
+ xge_trace(XGE_ERR, "Failed to get user data");
+ retValue = XGE_HAL_FAIL;
+ goto txinit_out;
+ }
+ dev = lldev->device;
+
+ /* Get the private data */
+ txd_priv = (xgell_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
+ if(txd_priv == NULL) {
+ xge_trace(XGE_ERR, "Failed to get descriptor private data");
+ retValue = XGE_HAL_FAIL;
+ goto txinit_out;
+ }
+
+ /* Create DMA map for this descriptor */
+ if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
+ &txd_priv->dma_map)) {
+ xge_trace(XGE_ERR, "DMA map creation for Tx descriptor failed");
+ retValue = XGE_HAL_FAIL;
+ goto txinit_out;
+ }
+
+txinit_out:
+ return retValue;
+}
+
+/******************************************
+ * Function: xgell_rx_initial_replenish
+ * Parameters: Channel handle, descriptor,
+ * ring index, userdata
+ * (not used), channel
+ * open/close/reopen option.
+ * Return: HAL status code
+ * Description: Replenish descriptor with
+ * rx_buffer in Rx buffer pool.
+ ******************************************/
+xge_hal_status_e
+xgell_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int index, void *userdata, xge_hal_channel_reopen_e reopen)
+{
+ xgell_rx_priv_t *rxd_priv = NULL;
+ int retValue = XGE_HAL_OK;
+ struct ifnet *ifnetp;
+ device_t dev;
+ int index1, index2;
+
+ /* Get the user data portion from channel handle */
+ xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
+ if(lldev == NULL) {
+ xge_ctrace(XGE_ERR, "xgeX: %s: Failed to get user data",
+ __FUNCTION__);
+ retValue = XGE_HAL_FAIL;
+ goto rxinit_out;
+ }
+ dev = lldev->device;
+
+ /* Get the private data */
+ rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
+ if(rxd_priv == NULL) {
+ xge_trace(XGE_ERR, "Failed to get descriptor private data");
+ retValue = XGE_HAL_FAIL;
+ goto rxinit_out;
+ }
+
+	rxd_priv->bufferArray =
+	    malloc((sizeof(mbuf_t) * lldev->rxd_mbuf_cnt),
+	    M_DEVBUF, M_NOWAIT);
+
+ if(rxd_priv->bufferArray == NULL) {
+ xge_trace(XGE_ERR,
+ "Failed to allocate buffers for Rxd private structure");
+ retValue = XGE_HAL_FAIL;
+ goto rxinit_out;
+ }
+
+ ifnetp = lldev->ifnetp;
+
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ /* Create DMA map for these descriptors*/
+ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
+ &rxd_priv->dmainfo[0].dma_map)) {
+ xge_trace(XGE_ERR,
+ "DMA map creation for Rx descriptor failed");
+ retValue = XGE_HAL_FAIL;
+ goto rxinit_err_out;
+ }
+ /* Get a buffer, attach it to this descriptor */
+ retValue = xgell_get_buf(dtrh, rxd_priv, lldev, 0);
+ }
+ else {
+ for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
+ /* Create DMA map for this descriptor */
+ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
+ &rxd_priv->dmainfo[index1].dma_map)) {
+ xge_trace(XGE_ERR,
+ "Jumbo DMA map creation for Rx descriptor failed");
+ for(index2 = index1 - 1; index2 >= 0; index2--) {
+ bus_dmamap_destroy(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index2].dma_map);
+ }
+ retValue = XGE_HAL_FAIL;
+ goto rxinit_err_out;
+ }
+ }
+ retValue = xgell_get_buf_3b_5b(dtrh, rxd_priv, lldev);
+ }
+
+ if(retValue != XGE_HAL_OK) {
+ for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
+ bus_dmamap_destroy(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index1].dma_map);
+ }
+ goto rxinit_err_out;
+ }
+ else {
+ goto rxinit_out;
+ }
+
+rxinit_err_out:
+ free(rxd_priv->bufferArray,M_DEVBUF);
+rxinit_out:
+ return retValue;
+}
+
+/******************************************
+ * Function: xgell_rx_term
+ * Parameters: Channel handle, descriptor,
+ * descriptor state, userdata
+ * (not used), channel
+ * open/close/reopen option.
+ * Return: None
+ * Description: Called by HAL to terminate
+ * all DTRs for ring channels.
+ ******************************************/
+void
+xgell_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ xge_hal_dtr_state_e state, void *userdata,
+ xge_hal_channel_reopen_e reopen)
+{
+ xgell_rx_priv_t *rxd_priv;
+ xgelldev_t *lldev;
+ struct ifnet *ifnetp;
+ device_t dev;
+ int index;
+
+// ENTER_FUNCTION
+
+ /* Descriptor state is not "Posted" */
+ if(state != XGE_HAL_DTR_STATE_POSTED) {
+ xge_ctrace(XGE_ERR, "xgeX: %s: Descriptor not posted\n",
+ __FUNCTION__);
+ goto rxterm_out;
+ }
+
+ /* Get the user data portion */
+ lldev = xge_hal_channel_userdata(channelh);
+
+ dev = lldev->device;
+ ifnetp = lldev->ifnetp;
+
+ /* Get the private data */
+ rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
+
+ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+	    /* Do unmapping and destroy DMA map */
+ bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map);
+ m_freem(rxd_priv->bufferArray[0]);
+ bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map);
+ }
+ else {
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ /* Post-Read sync */
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
+
+	        /* Do unmapping and destroy DMA map */
+ bus_dmamap_unload(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map);
+
+ bus_dmamap_destroy(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map);
+
+ /* Free the buffer */
+ m_free(rxd_priv->bufferArray[index]);
+ }
+ }
+ free(rxd_priv->bufferArray,M_DEVBUF);
+
+ /* Free the descriptor */
+ xge_hal_ring_dtr_free(channelh, dtrh);
+
+rxterm_out:
+// LEAVE_FUNCTION
+ return;
+}
+
+
+/******************************************
+ * Function: xgell_tx_term
+ * Parameters: Channel handle, descriptor,
+ * descriptor state, userdata
+ * (not used), channel
+ * open/close/reopen option.
+ * Return: None
+ * Description: Called by HAL to terminate
+ * all DTRs for fifo channels.
+ ******************************************/
+void
+xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
+ xge_hal_dtr_state_e state, void *userdata,
+ xge_hal_channel_reopen_e reopen)
+{
+ xgell_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
+ xgelldev_t *lldev = (xgelldev_t *)userdata;
+
+// ENTER_FUNCTION
+
+ /* Destroy DMA map */
+ bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
+
+// LEAVE_FUNCTION
+}
+
+/******************************************
+ * xge_methods
+ *
+ * FreeBSD device interface entry points
+ ******************************************/
+static device_method_t xge_methods[] = {
+ DEVMETHOD(device_probe, xge_probe),
+ DEVMETHOD(device_attach, xge_attach),
+ DEVMETHOD(device_detach, xge_detach),
+ DEVMETHOD(device_shutdown, xge_shutdown),
+ {0, 0}
+};
+
+static driver_t xge_driver = {
+ "nxge",
+ xge_methods,
+ sizeof(xgelldev_t),
+};
+static devclass_t xge_devclass;
+DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
diff --git a/sys/dev/nxge/if_nxge.h b/sys/dev/nxge/if_nxge.h
new file mode 100644
index 0000000..1e99a62
--- /dev/null
+++ b/sys/dev/nxge/if_nxge.h
@@ -0,0 +1,287 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * if_nxge.h
+ */
+
+#ifndef _IF_XGE_H
+#define _IF_XGE_H
+
+#include <dev/nxge/include/xgehal.h>
+#include <dev/nxge/xge-osdep.h>
+
+#if defined(XGE_FEATURE_TSO) && (__FreeBSD_version < 700026)
+#undef XGE_FEATURE_TSO
+#endif
+
+#if defined(XGE_FEATURE_LRO)
+#if __FreeBSD_version < 700047
+#undef XGE_FEATURE_LRO
+#undef XGE_HAL_CONFIG_LRO
+#else
+#define XGE_HAL_CONFIG_LRO
+#endif
+#endif
+
+#ifdef FUNC_PRINT
+#define ENTER_FUNCTION xge_os_printf("Enter\t==>[%s]\n", __FUNCTION__);
+#define LEAVE_FUNCTION xge_os_printf("Leave\t<==[%s]\n", __FUNCTION__);
+#else
+#define ENTER_FUNCTION
+#define LEAVE_FUNCTION
+#endif
+
+/* Printing description, Copyright */
+#define DRIVER_VERSION XGELL_VERSION_MAJOR"." \
+ XGELL_VERSION_MINOR"." \
+ XGELL_VERSION_FIX"." \
+ XGELL_VERSION_BUILD
+#define COPYRIGHT_STRING "Copyright(c) 2002-2007 Neterion Inc."
+#define PRINT_COPYRIGHT xge_os_printf("%s", COPYRIGHT_STRING)
+
+/* Printing */
+#define xge_trace(trace, fmt, args...) xge_debug_ll(trace, fmt, ## args);
+#define xge_ctrace(trace, fmt...) xge_debug_ll(trace, fmt);
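+
+/*
+ * Example (illustrative): xge_trace(XGE_ERR, "Failed: %d", error)
+ * logs through xge_debug_ll() at the XGE_ERR level; xge_ctrace()
+ * simply passes its whole trailing argument list through.
+ */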
+
+#define BUFALIGN(buffer_length) \
+ if((buffer_length % 128) != 0) { \
+ buffer_length += (128 - (buffer_length % 128)); \
+ }
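+
+/*
+ * Example (illustrative): BUFALIGN rounds a length up to the next
+ * 128-byte boundary, e.g. 808 becomes 896 (7 * 128); already-aligned
+ * values are left untouched.
+ */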
+
+static inline void *
+xge_malloc(unsigned long size) {
+	void *vaddr = malloc(size, M_DEVBUF, M_NOWAIT);
+	if(vaddr != NULL)
+	    bzero(vaddr, size);
+	return vaddr;
+}
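+
+/*
+ * Usage sketch (illustrative; foo_t is a placeholder type): the
+ * M_NOWAIT allocation can fail, so callers must check for NULL:
+ *
+ *     foo_t *p = xge_malloc(sizeof(foo_t));
+ *     if(p == NULL)
+ *         return ENOMEM;
+ */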
+
+#define SINGLE_ALLOC 0
+#define MULTI_ALLOC 1
+#define SAVE 0
+#define RESTORE 1
+#define UP 1
+#define DOWN 0
+#define XGE_DEFAULT_USER_HARDCODED -1
+#define MAX_MBUF_FRAGS 20 /* Maximum number of fragments */
+#define MAX_SEGS 100 /* Maximum number of segments */
+#define XGELL_TX_LEVEL_LOW 16
+#define XGE_RING_COUNT XGE_HAL_MIN_RING_NUM
+#define BUFFER_SIZE 20
+
+/* Default values to configuration parameters */
+#define XGE_DEFAULT_INITIAL_MTU 1500
+#define XGE_DEFAULT_LATENCY_TIMER -1
+#define XGE_DEFAULT_MAX_SPLITS_TRANS -1
+#define XGE_DEFAULT_MMRB_COUNT -1
+#define XGE_DEFAULT_SHARED_SPLITS 0
+#define XGE_DEFAULT_ISR_POLLING_CNT 8
+#define XGE_DEFAULT_STATS_REFRESH_TIME_SEC 4
+#define XGE_DEFAULT_MAC_RMAC_BCAST_EN 1
+#define XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD 5
+#define XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD 5
+#define XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN 1
+#define XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN 1
+#define XGE_DEFAULT_MAC_RMAC_PAUSE_TIME 65535
+#define XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3 187
+#define XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7 187
+#define XGE_DEFAULT_FIFO_MEMBLOCK_SIZE PAGE_SIZE
+#define XGE_DEFAULT_FIFO_RESERVE_THRESHOLD 0
+#define XGE_DEFAULT_FIFO_MAX_FRAGS 64
+#define XGE_DEFAULT_FIFO_QUEUE_INTR 0
+#define XGE_DEFAULT_FIFO_QUEUE_MAX 2048
+#define XGE_DEFAULT_FIFO_QUEUE_INITIAL 2048
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A 5
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B 10
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C 20
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A 15
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B 30
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C 45
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D 60
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN 1
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN 1
+#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US 8000
+#define XGE_DEFAULT_FIFO_ALIGNMENT_SIZE sizeof(u64)
+#define XGE_DEFAULT_RING_MEMBLOCK_SIZE PAGE_SIZE
+#define XGE_DEFAULT_RING_STRIP_VLAN_TAG 1
+#define XGE_DEFAULT_RING_QUEUE_MAX 16
+#define XGE_DEFAULT_RING_QUEUE_INITIAL 16
+#define XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB 32
+#define XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS 16
+#define XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US 1000
+#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A 5
+#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B 10
+#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C 50
+#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_A 1
+#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_B 8
+#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_C 16
+#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_D 32
+#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN 1
+#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US 250
+
+/* Values to identify the requests from getinfo tool in ioctl */
+#define XGE_QUERY_STATS 1
+#define XGE_QUERY_PCICONF 2
+#define XGE_QUERY_INTRSTATS 3
+#define XGE_QUERY_DEVCONF 4
+#define XGE_READ_VERSION 5
+#define XGE_QUERY_TCODE 6
+#define XGE_SET_BUFFER_MODE_1 7
+#define XGE_SET_BUFFER_MODE_2 8
+#define XGE_SET_BUFFER_MODE_3 9
+#define XGE_SET_BUFFER_MODE_5 10
+#define XGE_QUERY_BUFFER_MODE 11
+
+#define XGE_OFFSET_OF_LAST_REG 0x3180
+
+#define VENDOR_ID_AMD 0x1022
+#define DEVICE_ID_8131_PCI_BRIDGE 0x7450
+
+typedef struct mbuf *mbuf_t;
+
+typedef enum xgell_event_e {
+ XGE_LL_EVENT_TRY_XMIT_AGAIN = XGE_LL_EVENT_BASE + 1,
+ XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2,
+} xgell_event_e;
+
+/* Adapter structure */
+typedef struct xgelldev {
+ device_t device; /* Device */
+ struct ifnet *ifnetp; /* Interface ifnet structure */
+ struct resource *irq; /* Resource structure for IRQ */
+ void *irqhandle; /* IRQ handle */
+ pci_info_t *pdev;
+ struct ifmedia xge_media; /* In-kernel representation of a */
+ /* single supported media type */
+ xge_hal_device_t *devh; /* HAL: Device Handle */
+ xge_hal_channel_h ring_channel[XGE_HAL_MAX_FIFO_NUM];
+ /* Ring channel */
+ xge_hal_channel_h fifo_channel_0; /* FIFO channel */
+ struct mtx xge_lock; /* Mutex - Default */
+ struct callout timer; /* Timer for polling */
+ struct xge_hal_stats_hw_info_t *hwstats; /* Hardware Statistics */
+ int saved_regs[16]; /* To save register space */
+ int xge_mtu; /* MTU */
+ int initialized; /* Flag: Initialized or not */
+ bus_dma_tag_t dma_tag_tx; /* Tag for dtr dma mapping (Tx) */
+ bus_dma_tag_t dma_tag_rx; /* Tag for dtr dma mapping (Rx) */
+ int all_multicast; /* All multicast flag */
+ int macaddr_count; /* Multicast address count */
+ int in_detach; /* To avoid ioctl during detach */
+ int buffer_mode; /* Buffer Mode */
+ int rxd_mbuf_cnt; /* Number of buffers used */
+ int rxd_mbuf_len[5];/* Buffer lengths */
+} xgelldev_t;
+
+/* Rx descriptor private structure */
+typedef struct {
+ mbuf_t *bufferArray;
+ struct xge_dma_mbuf dmainfo[5];
+} xgell_rx_priv_t;
+
+/* Tx descriptor private structure */
+typedef struct {
+ mbuf_t buffer;
+ bus_dmamap_t dma_map;
+} xgell_tx_priv_t;
+
+/* BAR0 Register */
+typedef struct barregister {
+ char option[2];
+ u64 offset;
+ u64 value;
+}bar0reg_t;
+
+void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev);
+void xge_init(void *);
+void xge_init_locked(void *);
+void xge_stop(xgelldev_t *);
+void freeResources(device_t, int);
+void xgell_callback_link_up(void *);
+void xgell_callback_link_down(void *);
+void xgell_callback_crit_err(void *, xge_hal_event_e, u64);
+void xgell_callback_event(xge_queue_item_t *);
+int xge_ifmedia_change(struct ifnet *);
+void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
+int xge_ioctl(struct ifnet *, unsigned long, caddr_t);
+void xge_timer(void *);
+int xge_intr_filter(void *);
+void xge_intr(void *);
+int xgell_rx_open(int, xgelldev_t *, xge_hal_channel_reopen_e);
+int xgell_tx_open(xgelldev_t *, xge_hal_channel_reopen_e);
+int xgell_channel_close(xgelldev_t *, xge_hal_channel_reopen_e);
+int xgell_channel_open(xgelldev_t *, xge_hal_channel_reopen_e);
+xge_hal_status_e xgell_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
+xge_hal_status_e xgell_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
+xge_hal_status_e xgell_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
+ int, void *, xge_hal_channel_reopen_e);
+xge_hal_status_e xgell_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
+ int, void *, xge_hal_channel_reopen_e);
+void xgell_rx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
+ void *, xge_hal_channel_reopen_e);
+void xgell_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
+ void *, xge_hal_channel_reopen_e);
+void xgell_set_mbuf_cflags(mbuf_t);
+void xge_send(struct ifnet *);
+void xge_send_locked(struct ifnet *);
+int xgell_get_multimode_normalbuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev);
+int xgell_get_multimode_jumbobuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev, int lock);
+int xgell_get_second_buffer(xgell_rx_priv_t *rxd_priv, xgelldev_t *lldev);
+int xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev, int index);
+int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
+ xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv);
+int xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
+ xgelldev_t *lldev);
+void dmamap_cb(void *, bus_dma_segment_t *, int, int);
+void xgell_reset(xgelldev_t *);
+void xge_setmulti(xgelldev_t *);
+void xge_enable_promisc(xgelldev_t *);
+void xge_disable_promisc(xgelldev_t *);
+int changeMtu(xgelldev_t *, int);
+int changeBufmode(xgelldev_t *, int);
+void xge_initialize(device_t, xge_hal_channel_reopen_e);
+void xge_terminate(device_t, xge_hal_channel_reopen_e);
+void if_up_locked(xgelldev_t *);
+void if_down_locked(xgelldev_t *);
+int xge_probe(device_t);
+int xge_driver_initialize(void);
+void xge_media_init(device_t);
+void xge_pci_space_save(device_t);
+void xge_pci_space_restore(device_t);
+int xge_attach(device_t);
+int xge_interface_setup(device_t);
+int xge_detach(device_t);
+int xge_shutdown(device_t);
+int xge_suspend(device_t);
+int xge_resume(device_t);
+
+#endif // _IF_XGE_H
+
diff --git a/sys/dev/nxge/include/build-version.h b/sys/dev/nxge/include/build-version.h
new file mode 100644
index 0000000..b9b5e00
--- /dev/null
+++ b/sys/dev/nxge/include/build-version.h
@@ -0,0 +1,6 @@
+/* $FreeBSD$ */
+#ifndef BUILD_VERSION_H
+#define BUILD_VERSION_H
+/* Do not edit! Automatically generated when released.*/
+#define GENERATED_BUILD_VERSION "10294"
+#endif /* BUILD_VERSION_H */
diff --git a/sys/dev/nxge/include/version.h b/sys/dev/nxge/include/version.h
new file mode 100644
index 0000000..0a212f4
--- /dev/null
+++ b/sys/dev/nxge/include/version.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : version.h
+ *
+ * Description: versioning file
+ *
+ * Created: 3 September 2004
+ */
+
+#ifndef VERSION_H
+#define VERSION_H
+
+#include <dev/nxge/include/build-version.h>
+
+#define XGE_HAL_VERSION_MAJOR "2"
+#define XGE_HAL_VERSION_MINOR "5"
+#define XGE_HAL_VERSION_FIX "0"
+#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION
+#define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\
+ XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD
+#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION
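+
+/*
+ * With the generated build number above, XGE_HAL_VERSION expands to
+ * "2.5.0.10294".
+ */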
+
+/* Link Layer versioning */
+#include <dev/nxge/xgell-version.h>
+
+#endif /* VERSION_H */
diff --git a/sys/dev/nxge/include/xge-debug.h b/sys/dev/nxge/include/xge-debug.h
new file mode 100644
index 0000000..a4efbcb
--- /dev/null
+++ b/sys/dev/nxge/include/xge-debug.h
@@ -0,0 +1,568 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-debug.h
+ *
+ * Description: debug facilities
+ *
+ * Created: 6 May 2004
+ */
+
+#ifndef XGE_DEBUG_H
+#define XGE_DEBUG_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+
+__EXTERN_BEGIN_DECLS
+
+/*
+ * __FUNCTION__, together with __PRETTY_FUNCTION__ and similar macros,
+ * is a gcc extension. We have to #ifdef around it and provide a
+ * meaningful replacement to keep other compilers (and some gcc
+ * versions) happy.
+ */
+#ifndef __func__
+#ifdef __FUNCTION__
+#define __func__ __FUNCTION__
+#endif
+#endif
+
+
+#ifdef XGE_DEBUG_FP
+#define XGE_DEBUG_FP_DEVICE 0x1
+#define XGE_DEBUG_FP_CHANNEL 0x2
+#define XGE_DEBUG_FP_FIFO 0x4
+#define XGE_DEBUG_FP_RING 0x8
+#endif
+
+/**
+ * enum xge_debug_level_e
+ * @XGE_NONE: debug disabled
+ * @XGE_ERR: all errors will be logged
+ * @XGE_TRACE: all errors plus all kinds of verbose tracing printouts
+ * will be logged. Very noisy.
+ *
+ * This enumeration is used to switch between debug levels at runtime
+ * if the DEBUG macro is defined during compilation. If the DEBUG
+ * macro is not defined, the debug code is compiled out.
+ */
+typedef enum xge_debug_level_e {
+ XGE_NONE = 0,
+ XGE_TRACE = 1,
+ XGE_ERR = 2,
+} xge_debug_level_e;
+
+#define XGE_DEBUG_MODULE_MASK_DEF 0x30000030
+#define XGE_DEBUG_LEVEL_DEF XGE_ERR
+
+#if defined(XGE_DEBUG_TRACE_MASK) || defined(XGE_DEBUG_ERR_MASK)
+
+extern unsigned long *g_module_mask;
+extern int *g_level;
+
+#ifndef XGE_DEBUG_TRACE_MASK
+#define XGE_DEBUG_TRACE_MASK 0
+#endif
+
+#ifndef XGE_DEBUG_ERR_MASK
+#define XGE_DEBUG_ERR_MASK 0
+#endif
+
+/*
+ * @XGE_COMPONENT_HAL_CONFIG: do debug for xge core config module
+ * @XGE_COMPONENT_HAL_FIFO: do debug for xge core fifo module
+ * @XGE_COMPONENT_HAL_RING: do debug for xge core ring module
+ * @XGE_COMPONENT_HAL_CHANNEL: do debug for xge core channel module
+ * @XGE_COMPONENT_HAL_DEVICE: do debug for xge core device module
+ * @XGE_COMPONENT_HAL_DMQ: do debug for xge core DMQ module
+ * @XGE_COMPONENT_HAL_UMQ: do debug for xge core UMQ module
+ * @XGE_COMPONENT_HAL_SQ: do debug for xge core SQ module
+ * @XGE_COMPONENT_HAL_SRQ: do debug for xge core SRQ module
+ * @XGE_COMPONENT_HAL_CQRQ: do debug for xge core CQRQ module
+ * @XGE_COMPONENT_HAL_POOL: do debug for xge core memory pool module
+ * @XGE_COMPONENT_HAL_BITMAP: do debug for xge core BITMAP module
+ * @XGE_COMPONENT_CORE: do debug for xge KMA core module
+ * @XGE_COMPONENT_OSDEP: do debug for xge KMA os dependent parts
+ * @XGE_COMPONENT_LL: do debug for xge link layer module
+ * @XGE_COMPONENT_ALL: activate debug for all modules with no exceptions
+ *
+ * These constants are used to distinguish modules or libraries at
+ * compilation time and at runtime. The Makefile must declare the
+ * XGE_DEBUG_MODULE_MASK macro and set it to the proper value.
+ */
+#define XGE_COMPONENT_HAL_CONFIG 0x00000001
+#define XGE_COMPONENT_HAL_FIFO 0x00000002
+#define XGE_COMPONENT_HAL_RING 0x00000004
+#define XGE_COMPONENT_HAL_CHANNEL 0x00000008
+#define XGE_COMPONENT_HAL_DEVICE 0x00000010
+#define XGE_COMPONENT_HAL_MM 0x00000020
+#define XGE_COMPONENT_HAL_QUEUE 0x00000040
+#define XGE_COMPONENT_HAL_INTERRUPT 0x00000080
+#define XGE_COMPONENT_HAL_STATS 0x00000100
+#ifdef XGEHAL_RNIC
+#define XGE_COMPONENT_HAL_DMQ 0x00000200
+#define XGE_COMPONENT_HAL_UMQ 0x00000400
+#define XGE_COMPONENT_HAL_SQ 0x00000800
+#define XGE_COMPONENT_HAL_SRQ 0x00001000
+#define XGE_COMPONENT_HAL_CQRQ 0x00002000
+#define XGE_COMPONENT_HAL_POOL 0x00004000
+#define XGE_COMPONENT_HAL_BITMAP 0x00008000
+#endif
+
+ /* space for CORE_XXX */
+#define XGE_COMPONENT_OSDEP 0x10000000
+#define XGE_COMPONENT_LL 0x20000000
+#define XGE_COMPONENT_ALL 0xffffffff
+
+#ifndef XGE_DEBUG_MODULE_MASK
+#error "XGE_DEBUG_MODULE_MASK macro must be defined for DEBUG mode..."
+#endif
+
+#ifndef __GNUC__
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ #define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt)
+#else
+ #define xge_trace_aux(fmt) xge_os_vaprintf(fmt)
+#endif
+
+/**
+ * xge_debug
+ * @module: debug module mask, one of the XGE_COMPONENT_* constants.
+ * @level: level of debug verbosity.
+ * @fmt: printf-like format string
+ *
+ * Provides logging facilities. Can be customized on a per-module
+ * basis and/or by debug level. Input parameters, except module and
+ * level, are the same as for POSIX printf. This macro compiles to
+ * nothing if the DEBUG macro was never defined.
+ * See also: xge_debug_level_e{}.
+ */
+#define xge_debug(module, level, fmt) { \
+if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
+ (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
+ level >= *g_level && module & *(unsigned int *)g_module_mask) { \
+ xge_trace_aux(fmt); \
+ } \
+}
+#else /* __GNUC__ */
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ #define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt)
+#else
+ #define xge_trace_aux(fmt...) xge_os_printf(fmt)
+#endif
+
+#define xge_debug(module, level, fmt...) { \
+if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
+ (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
+ level >= *g_level && module & *(unsigned int *)g_module_mask) { \
+ xge_trace_aux(fmt); \
+ } \
+}
+#endif /* __GNUC__ */
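+
+/*
+ * Usage sketch (illustrative; dma_addr is a hypothetical local): with
+ * XGE_COMPONENT_HAL_RING included in XGE_DEBUG_TRACE_MASK and *g_level
+ * at XGE_TRACE, a call such as
+ *
+ *    xge_debug(XGE_COMPONENT_HAL_RING, XGE_TRACE,
+ *        "posted rxd at 0x"XGE_OS_LLXFMT, (unsigned long long)dma_addr);
+ *
+ * reaches xge_trace_aux(); if the module bit is clear in both masks, or
+ * *g_level is above the call's level, the statement is filtered out.
+ */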
+
+#if (XGE_COMPONENT_HAL_STATS & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_STATS;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_stats(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_STATS, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_stats(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+/* Interrupt Related */
+#if (XGE_COMPONENT_HAL_INTERRUPT & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_INTERRUPT;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_interrupt(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_INTERRUPT, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_interrupt(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_QUEUE & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_queue(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_QUEUE;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_queue(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_QUEUE, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_queue(xge_debug_level_e level, char *fmt,
+...) {}
+#else /* __GNUC__ */
+#define xge_debug_queue(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_MM & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...)
+{
+ u32 module = XGE_COMPONENT_HAL_MM;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_mm(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_MM, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...)
+{}
+#else /* __GNUC__ */
+#define xge_debug_mm(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_CONFIG & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_config(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_CONFIG;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_config(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_CONFIG, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_config(xge_debug_level_e level, char *fmt,
+...) {}
+#else /* __GNUC__ */
+#define xge_debug_config(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_FIFO & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_FIFO;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_fifo(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_FIFO, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_fifo(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_RING & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_RING;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_ring(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_RING, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_ring(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_CHANNEL & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_CHANNEL;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_channel(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_CHANNEL, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_channel(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_DEVICE & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_DEVICE;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_device(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_DEVICE, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_device(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#ifdef XGEHAL_RNIC
+
+#if (XGE_COMPONENT_HAL_DMQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_DMQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_dmq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_DMQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_dmq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_UMQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_UMQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_umq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_UMQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_umq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_SQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_SQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_sq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_SQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_sq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_SRQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_SRQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_srq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_SRQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_srq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_CQRQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_CQRQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_cqrq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_CQRQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_cqrq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_POOL & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_POOL;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_pool(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_POOL, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_pool(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_BITMAP & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_BITMAP;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_bitmap(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_BITMAP, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_bitmap(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#endif
+
+#if (XGE_COMPONENT_OSDEP & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_OSDEP;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_osdep(level, fmt...) \
+ xge_debug(XGE_COMPONENT_OSDEP, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_osdep(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_LL & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...)
+{
+ u32 module = XGE_COMPONENT_LL;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_ll(level, fmt...) \
+ xge_debug(XGE_COMPONENT_LL, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_ll(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#else
+
+static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_queue(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_config(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_hal(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
+
+#endif /* end of XGE_DEBUG_*_MASK */
+
+#ifdef XGE_DEBUG_ASSERT
+
+/**
+ * xge_assert
+ * @test: C-condition to check
+ *
+ * This macro implements a traditional assert. Assertions are compiled
+ * in only when the XGE_DEBUG_ASSERT macro is defined at compilation
+ * time; otherwise xge_assert() expands to nothing.
+ */
+#define xge_assert(test) { \
+ if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \
+ __FILE__, __LINE__); }
+#else
+#define xge_assert(test)
+#endif /* end of XGE_DEBUG_ASSERT */
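+
+/*
+ * Example (illustrative; the checked variables are hypothetical locals):
+ *
+ *    xge_assert(queueh != NULL);
+ *    xge_assert(pages_max >= pages_initial);
+ *
+ * With XGE_DEBUG_ASSERT defined, a failed condition calls xge_os_bug()
+ * with the stringified condition, file, and line; without it, both
+ * statements compile to nothing.
+ */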
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_DEBUG_H */
diff --git a/sys/dev/nxge/include/xge-defs.h b/sys/dev/nxge/include/xge-defs.h
new file mode 100644
index 0000000..744a6b9
--- /dev/null
+++ b/sys/dev/nxge/include/xge-defs.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-defs.h
+ *
+ * Description: global definitions
+ *
+ * Created: 13 May 2004
+ */
+
+#ifndef XGE_DEFS_H
+#define XGE_DEFS_H
+
+#define XGE_PCI_VENDOR_ID 0x17D5
+#define XGE_PCI_DEVICE_ID_XENA_1 0x5731
+#define XGE_PCI_DEVICE_ID_XENA_2 0x5831
+#define XGE_PCI_DEVICE_ID_HERC_1 0x5732
+#define XGE_PCI_DEVICE_ID_HERC_2 0x5832
+#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733
+#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833
+
+#define XGE_DRIVER_NAME "Xge driver"
+#define XGE_DRIVER_VENDOR "Neterion, Inc"
+#define XGE_CHIP_FAMILY "Xframe"
+#define XGE_SUPPORTED_MEDIA_0 "Fiber"
+
+#include <dev/nxge/include/version.h>
+
+#if defined(__cplusplus)
+#define __EXTERN_BEGIN_DECLS extern "C" {
+#define __EXTERN_END_DECLS }
+#else
+#define __EXTERN_BEGIN_DECLS
+#define __EXTERN_END_DECLS
+#endif
+
+__EXTERN_BEGIN_DECLS
+
+/*---------------------------- DMA attributes ------------------------------*/
+/* Used in xge_os_dma_malloc() and xge_os_dma_map() */
+/*---------------------------- DMA attributes ------------------------------*/
+
+/* XGE_OS_DMA_REQUIRES_SYNC - should be defined or
+ NOT defined in the Makefile */
+#define XGE_OS_DMA_CACHELINE_ALIGNED 0x1
+/* Either STREAMING or CONSISTENT should be used.
+ The combination of both or none is invalid */
+#define XGE_OS_DMA_STREAMING 0x2
+#define XGE_OS_DMA_CONSISTENT 0x4
+#define XGE_OS_SPRINTF_STRLEN 64
+
+/*---------------------------- common stuffs -------------------------------*/
+
+#define XGE_OS_LLXFMT "%llx"
+#define XGE_OS_NEWLINE "\n"
+#ifdef XGE_OS_MEMORY_CHECK
+typedef struct {
+ void *ptr;
+ int size;
+ char *file;
+ int line;
+} xge_os_malloc_t;
+
+#define XGE_OS_MALLOC_CNT_MAX 64*1024
+extern xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX];
+extern int g_malloc_cnt;
+
+#define XGE_OS_MEMORY_CHECK_MALLOC(_vaddr, _size, _file, _line) { \
+ if (_vaddr) { \
+ int i; \
+ for (i=0; i<g_malloc_cnt; i++) { \
+ if (g_malloc_arr[i].ptr == NULL) { \
+ break; \
+ } \
+ } \
+ if (i == g_malloc_cnt) { \
+ g_malloc_cnt++; \
+ if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \
+ xge_os_bug("g_malloc_cnt exceed %d", \
+ XGE_OS_MALLOC_CNT_MAX); \
+ } \
+ } \
+ g_malloc_arr[i].ptr = _vaddr; \
+ g_malloc_arr[i].size = _size; \
+ g_malloc_arr[i].file = _file; \
+ g_malloc_arr[i].line = _line; \
+ for (i=0; i<_size; i++) { \
+ *((char *)_vaddr+i) = 0x5a; \
+ } \
+ } \
+}
+
+#define XGE_OS_MEMORY_CHECK_FREE(_vaddr, _check_size) { \
+ int i; \
+ for (i=0; i<XGE_OS_MALLOC_CNT_MAX; i++) { \
+ if (g_malloc_arr[i].ptr == _vaddr) { \
+ g_malloc_arr[i].ptr = NULL; \
+ if(_check_size && g_malloc_arr[i].size!=_check_size) { \
+ xge_os_printf("OSPAL: freeing with wrong " \
+ "size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \
+ (int)_check_size, \
+ g_malloc_arr[i].file, \
+ g_malloc_arr[i].line, \
+ (unsigned long long)(ulong_t) \
+ g_malloc_arr[i].ptr, \
+ g_malloc_arr[i].size); \
+ } \
+ break; \
+ } \
+ } \
+ if (i == XGE_OS_MALLOC_CNT_MAX) { \
+ xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \
+ (unsigned long long)(ulong_t)_vaddr); \
+ } \
+}
+#else
+#define XGE_OS_MEMORY_CHECK_MALLOC(ptr, size, file, line)
+#define XGE_OS_MEMORY_CHECK_FREE(vaddr, check_size)
+#endif
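+
+/*
+ * Intended pairing, as a sketch (the real hookup lives in the OS PAL
+ * allocator, not here): record each allocation, then verify it on free.
+ * The 0x5a fill matches the XGE_OS_MEMORY_DEADCODE_PAT byte pattern from
+ * xge-os-pal.h.
+ *
+ *    void *vaddr = xge_os_malloc(pdev, size);
+ *    XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, __FILE__, __LINE__);
+ *    ...
+ *    XGE_OS_MEMORY_CHECK_FREE(vaddr, size);
+ *    xge_os_free(pdev, vaddr, size);
+ */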
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_DEFS_H */
diff --git a/sys/dev/nxge/include/xge-list.h b/sys/dev/nxge/include/xge-list.h
new file mode 100644
index 0000000..c49424d
--- /dev/null
+++ b/sys/dev/nxge/include/xge-list.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-list.h
+ *
+ * Description: Generic bi-directional linked list implementation
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_LIST_H
+#define XGE_LIST_H
+
+#include <dev/nxge/include/xge-debug.h>
+
+__EXTERN_BEGIN_DECLS
+
+/**
+ * struct xge_list_t - List item.
+ * @prev: Previous list item.
+ * @next: Next list item.
+ *
+ * Item of a bi-directional linked list.
+ */
+typedef struct xge_list_t {
+ struct xge_list_t* prev;
+ struct xge_list_t* next;
+} xge_list_t;
+
+/**
+ * xge_list_init - Initialize linked list.
+ * header: first element of the list (head)
+ *
+ * Initialize linked list.
+ * See also: xge_list_t{}.
+ */
+static inline void xge_list_init (xge_list_t *header)
+{
+ header->next = header;
+ header->prev = header;
+}
+
+/**
+ * xge_list_is_empty - Is the list empty?
+ * header: first element of the list (head)
+ *
+ * Determine whether the bi-directional list is empty. Returns 1 if the
+ * list is empty, 0 otherwise.
+ * See also: xge_list_t{}.
+ */
+static inline int xge_list_is_empty(xge_list_t *header)
+{
+ xge_assert(header != NULL);
+
+ return header->next == header;
+}
+
+/**
+ * xge_list_first_get - Return the first item from the linked list.
+ * header: first element of the list (head)
+ *
+ * Returns the next item from the header, or NULL if that item is the
+ * header itself (i.e. the list is empty).
+ * See also: xge_list_remove(), xge_list_insert(), xge_list_t{}.
+ */
+static inline xge_list_t *xge_list_first_get(xge_list_t *header)
+{
+ xge_assert(header != NULL);
+ xge_assert(header->next != NULL);
+ xge_assert(header->prev != NULL);
+
+ if(header->next == header)
+ return NULL;
+ else
+ return header->next;
+}
+
+/**
+ * xge_list_remove - Remove the specified item from the linked list.
+ * item: element of the list
+ *
+ * Remove item from a list.
+ * See also: xge_list_insert(), xge_list_t{}.
+ */
+static inline void xge_list_remove(xge_list_t *item)
+{
+ xge_assert(item != NULL);
+ xge_assert(item->next != NULL);
+ xge_assert(item->prev != NULL);
+
+ item->next->prev = item->prev;
+ item->prev->next = item->next;
+#ifdef XGE_DEBUG_ASSERT
+ item->next = item->prev = NULL;
+#endif
+}
+
+/**
+ * xge_list_insert - Insert a new item after the specified item.
+ * new_item: new element of the list
+ * prev_item: element of the list after which the new element is
+ * inserted
+ *
+ * Insert new item (new_item) after given item (prev_item).
+ * See also: xge_list_remove(), xge_list_insert_before(), xge_list_t{}.
+ */
+static inline void xge_list_insert (xge_list_t *new_item,
+ xge_list_t *prev_item)
+{
+ xge_assert(new_item != NULL);
+ xge_assert(prev_item != NULL);
+ xge_assert(prev_item->next != NULL);
+
+ new_item->next = prev_item->next;
+ new_item->prev = prev_item;
+ prev_item->next->prev = new_item;
+ prev_item->next = new_item;
+}
+
+/**
+ * xge_list_insert_before - Insert a new item before the specified item.
+ * new_item: new element of the list
+ * next_item: element of the list before which the new element is inserted
+ *
+ * Insert new item (new_item) before given item (next_item).
+ */
+static inline void xge_list_insert_before (xge_list_t *new_item,
+ xge_list_t *next_item)
+{
+ xge_assert(new_item != NULL);
+ xge_assert(next_item != NULL);
+ xge_assert(next_item->next != NULL);
+
+ new_item->next = next_item;
+ new_item->prev = next_item->prev;
+ next_item->prev->next = new_item;
+ next_item->prev = new_item;
+}
+
+#define xge_list_for_each(_p, _h) \
+ for (_p = (_h)->next, xge_os_prefetch(_p->next); _p != (_h); \
+ _p = _p->next, xge_os_prefetch(_p->next))
+
+#define xge_list_for_each_safe(_p, _n, _h) \
+ for (_p = (_h)->next, _n = _p->next; _p != (_h); \
+ _p = _n, _n = _p->next)
+
+#ifdef __GNUC__
+/**
+ * xge_container_of - Given a member, return the containing structure.
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * Cast a member of a structure out to the containing structure.
+ */
+#define xge_container_of(ptr, type, member) ({ \
+ __typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );})
+#else
+/* type unsafe version */
+#define xge_container_of(ptr, type, member) \
+ ((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member)))
+#endif
+
+/**
+ * xge_offsetof - Offset of the member in the containing structure.
+ * @t: struct name.
+ * @m: the name of the member within the struct.
+ *
+ * Return the offset of the member @m in the structure @t.
+ */
+#define xge_offsetof(t, m) ((size_t) (&((t *)0)->m))
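+
+/*
+ * Usage sketch (illustrative only; my_item_t and its fields are
+ * hypothetical): embed an xge_list_t in a structure, link it in, iterate,
+ * and recover the container with xge_container_of().
+ *
+ *    typedef struct my_item_t {
+ *        int        value;
+ *        xge_list_t link;
+ *    } my_item_t;
+ *
+ *    xge_list_t head, *p;
+ *    my_item_t *item = ...;          (allocated elsewhere)
+ *    xge_list_init(&head);
+ *    xge_list_insert(&item->link, &head);
+ *    xge_list_for_each(p, &head) {
+ *        my_item_t *it = xge_container_of(p, my_item_t, link);
+ *        ...
+ *    }
+ *
+ * Use xge_list_for_each_safe() instead when items may be removed while
+ * iterating.
+ */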
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_LIST_H */
diff --git a/sys/dev/nxge/include/xge-os-pal.h b/sys/dev/nxge/include/xge-os-pal.h
new file mode 100644
index 0000000..5c92fe6
--- /dev/null
+++ b/sys/dev/nxge/include/xge-os-pal.h
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-os-pal.h
+ *
+ * Description: top-level header file; switches between the
+ *              OS-dependent parts
+ *
+ * Created: 6 May 2004
+ */
+
+#ifndef XGE_OS_PAL_H
+#define XGE_OS_PAL_H
+
+#include <dev/nxge/include/xge-defs.h>
+
+__EXTERN_BEGIN_DECLS
+
+/*--------------------------- platform switch ------------------------------*/
+
+/* platform specific header */
+#include <dev/nxge/xge-osdep.h>
+#ifdef XGEHAL_RNIC
+#define IN
+#define OUT
+#endif
+
+#if !defined(XGE_OS_PLATFORM_64BIT) && !defined(XGE_OS_PLATFORM_32BIT)
+#error "either 32bit or 64bit switch must be defined!"
+#endif
+
+#if !defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_HOST_LITTLE_ENDIAN)
+#error "either little endian or big endian switch must be defined!"
+#endif
+
+#if defined(XGE_OS_PLATFORM_64BIT)
+#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a
+#else
+#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a
+#endif
+
+#define XGE_OS_TRACE_MSGBUF_MAX 512
+typedef struct xge_os_tracebuf_t {
+ int wrapped_once; /* circular buffer been wrapped */
+ int timestamp; /* whether timestamps are enabled */
+ volatile int offset; /* offset within the tracebuf */
+ int size; /* total size of trace buffer */
+ char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */
+ int msgbuf_max; /* actual size of msg buffer */
+ char *data; /* pointer to data buffer */
+} xge_os_tracebuf_t;
+extern xge_os_tracebuf_t *g_xge_os_tracebuf;
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+extern xge_os_tracebuf_t *g_xge_os_tracebuf;
+extern char *dmesg_start;
+
+/* Calculate the size of the msg and copy it into the global buffer */
+#define __xge_trace(tb) { \
+ int msgsize = xge_os_strlen(tb->msg) + 2; \
+ int offset = tb->offset; \
+ if (msgsize != 2 && msgsize < tb->msgbuf_max) { \
+ int leftsize = tb->size - offset; \
+ if ((msgsize + tb->msgbuf_max) > leftsize) { \
+ xge_os_memzero(tb->data + offset, leftsize); \
+ offset = 0; \
+ tb->wrapped_once = 1; \
+ } \
+ xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \
+ *(tb->data + offset + msgsize-1) = '\n'; \
+ *(tb->data + offset + msgsize) = 0; \
+ offset += msgsize; \
+ tb->offset = offset; \
+ dmesg_start = tb->data + offset; \
+ *tb->msg = 0; \
+ } \
+}
+
+#define xge_os_vatrace(tb, fmt) { \
+ if (tb != NULL) { \
+ char *_p = tb->msg; \
+ if (tb->timestamp) { \
+ xge_os_timestamp(tb->msg); \
+ _p = tb->msg + xge_os_strlen(tb->msg); \
+ } \
+ xge_os_vasprintf(_p, fmt); \
+ __xge_trace(tb); \
+ } \
+}
+
+#ifdef __GNUC__
+#define xge_os_trace(tb, fmt...) { \
+ if (tb != NULL) { \
+ if (tb->timestamp) { \
+ xge_os_timestamp(tb->msg); \
+ } \
+ xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \
+ __xge_trace(tb); \
+ } \
+}
+#endif /* __GNUC__ */
+
+#else
+#define xge_os_vatrace(tb, fmt)
+#ifdef __GNUC__
+#define xge_os_trace(tb, fmt...)
+#endif /* __GNUC__ */
+#endif
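+
+/*
+ * Illustrative use (devh is a hypothetical device handle): with
+ * XGE_TRACE_INTO_CIRCULAR_ARR defined and g_xge_os_tracebuf initialized
+ * by the platform code, formatted messages go into the circular buffer
+ * (optionally timestamped) instead of the console:
+ *
+ *    xge_os_trace(g_xge_os_tracebuf, "device 0x%p reset", (void *)devh);
+ *
+ * When the end of the buffer is reached, __xge_trace() zeroes the tail
+ * and wraps back to offset 0, setting wrapped_once.
+ */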
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_OS_PAL_H */
diff --git a/sys/dev/nxge/include/xge-os-template.h b/sys/dev/nxge/include/xge-os-template.h
new file mode 100644
index 0000000..4d50e6e
--- /dev/null
+++ b/sys/dev/nxge/include/xge-os-template.h
@@ -0,0 +1,614 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-os-template.h
+ *
+ * Description: Template for creating platform-dependent "glue" code.
+ *
+ * Created: 6 May 2004
+ */
+
+#ifndef XGE_OS_TEMPLATE_H
+#define XGE_OS_TEMPLATE_H
+
+#ifndef TEMPLATE
+# error "should not be compiled for platforms other than TEMPLATE..."
+#endif
+
+/* ------------------------- includes and defines ------------------------- */
+
+/*
+ * Note:
+ *
+ * - on some operating systems, such as Linux and FreeBSD, there is a
+ *   built-in macro that makes it possible to determine endianness
+ *   automatically
+ */
+#define XGE_OS_HOST_BIG_ENDIAN TEMPLATE
+
+#define XGE_OS_HOST_PAGE_SIZE TEMPLATE
+
+/* ---------------------- fixed size primitive types -----------------------*/
+
+/*
+ * Note:
+ *
+ * - u## means an ##-bit unsigned integer type
+ * - all names must be preserved, since the HAL uses them.
+ * - ulong_t is platform specific, i.e. 64-bit size on 64-bit platforms,
+ *   32-bit size on 32-bit platforms
+ */
+#define TEMPLATE u8
+#define TEMPLATE u16
+#define TEMPLATE u32
+#define TEMPLATE u64
+#define TEMPLATE ulong_t
+#define TEMPLATE ptrdiff_t
+#define TEMPLATE dma_addr_t
+#define TEMPLATE spinlock_t
+typedef TEMPLATE pci_dev_h;
+typedef TEMPLATE pci_reg_h;
+typedef TEMPLATE pci_dma_h;
+typedef TEMPLATE pci_irq_h;
+typedef TEMPLATE pci_cfg_h;
+typedef TEMPLATE pci_dma_acc_h;
+
+/* -------------------------- "libc" functionality -------------------------*/
+
+/*
+ * Note:
+ *
+ * - "libc" functionality maps one-to-one to be posix-like
+ */
+/* Note: expected prototype: xge_os_memzero(void *mem, int size); */
+#define xge_os_memzero TEMPLATE
+
+/* Note: the 1st argument MUST be destination, like in:
+ * void *memcpy(void *dest, const void *src, size_t n);
+ */
+#define xge_os_memcpy TEMPLATE
+
+/* Note: should accept format (the 1st argument) and a variable
+ * number of arguments thereafter. */
+#define xge_os_printf(fmt...) TEMPLATE
+
+#define xge_os_vasprintf(buf, fmt...) TEMPLATE
+
+#define xge_os_sprintf(buf, fmt, ...) TEMPLATE
+
+#define xge_os_timestamp(buf) TEMPLATE
+
+#define xge_os_println TEMPLATE
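+
+/*
+ * As one illustration (hedged; the authoritative FreeBSD definitions
+ * live in xge-osdep.h), a port might map these straight onto kernel
+ * primitives, keeping the destination-first convention noted above:
+ *
+ *    #define xge_os_memzero(mem, size)   bzero(mem, size)
+ *    #define xge_os_memcpy(dst, src, n)  bcopy(src, dst, n)
+ *    #define xge_os_printf(fmt...)       printf(fmt)
+ */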
+
+/* -------------------- synchronization primitives -------------------------*/
+
+/*
+ * Note:
+ *
+ * - use spin_lock in interrupts, or in threads when there are no races
+ *   with interrupts
+ * - use spin_lock_irqsave in threads if there is a race with an interrupt
+ * - use spin_lock_irqsave for nested locks
+ */
+
+/*
+ * Initialize the spin lock.
+ */
+#define xge_os_spin_lock_init(lockp, ctxh) TEMPLATE
+/*
+ * Initialize the spin lock (IRQ version).
+ */
+#define xge_os_spin_lock_init_irq(lockp, ctxh) TEMPLATE
+/*
+ * Destroy the lock.
+ */
+#define xge_os_spin_lock_destroy(lockp, ctxh) TEMPLATE
+
+/*
+ * Destroy the lock (IRQ version).
+ */
+#define xge_os_spin_lock_destroy_irq(lockp, ctxh) TEMPLATE
+/*
+ * Acquire the lock.
+ */
+#define xge_os_spin_lock(lockp) TEMPLATE
+/*
+ * Release the lock.
+ */
+#define xge_os_spin_unlock(lockp) TEMPLATE
+/*
+ * Acquire the lock (IRQ version).
+ */
+#define xge_os_spin_lock_irq(lockp, flags) TEMPLATE
+/*
+ * Release the lock (IRQ version).
+ */
+#define xge_os_spin_unlock_irq(lockp, flags) TEMPLATE
+/*
+ * Write memory barrier.
+ */
+#define xge_os_wmb() TEMPLATE
+/*
+ * Delay (in micro seconds).
+ */
+#define xge_os_udelay(us) TEMPLATE
+/*
+ * Delay (in milli seconds).
+ */
+#define xge_os_mdelay(ms) TEMPLATE
+/*
+ * Compare and exchange.
+ */
+#define xge_os_cmpxchg(targetp, cmp, newval) TEMPLATE
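+
+/*
+ * Caller pattern sketch (illustrative; 'lock' and 'flags' belong to the
+ * caller, and passing pdev as the init context is an assumption): use
+ * the IRQ variants for state shared with the interrupt handler, per the
+ * note above.
+ *
+ *    spinlock_t lock;
+ *    unsigned long flags;
+ *
+ *    xge_os_spin_lock_init_irq(&lock, pdev);
+ *    xge_os_spin_lock_irq(&lock, flags);
+ *    ... touch state shared with the ISR ...
+ *    xge_os_spin_unlock_irq(&lock, flags);
+ */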
+
+
+
+/* ------------------------- misc primitives -------------------------------*/
+
+#define xge_os_prefetch TEMPLATE
+#define xge_os_prefetchw TEMPLATE
+#define xge_os_bug(fmt...) TEMPLATE
+
+/* -------------------------- compiler stuffs ------------------------------*/
+
+#define __xge_os_attr_cacheline_aligned TEMPLATE
+
+/* ---------------------- memory primitives --------------------------------*/
+
+/**
+ * xge_os_malloc - Allocate non DMA-able memory.
+ * @pdev: Device context. Some OSs require device context to perform
+ * operations on memory.
+ * @size: Size to allocate.
+ *
+ * Allocate @size bytes of memory. This allocation can sleep and
+ * therefore requires process context. In other words,
+ * xge_os_malloc() cannot be called from the interrupt context.
+ * Use xge_os_free() to free the allocated block.
+ *
+ * Returns: Pointer to allocated memory, NULL - on failure.
+ *
+ * See also: xge_os_free().
+ */
+static inline void *xge_os_malloc(IN pci_dev_h pdev,
+ IN unsigned long size)
+{ TEMPLATE; }
+
+/**
+ * xge_os_free - Free non DMA-able memory.
+ * @pdev: Device context. Some OSs require device context to perform
+ * operations on memory.
+ * @vaddr: Address of the allocated memory block.
+ * @size: Size of the allocation; some OSs require it on free.
+ *
+ * Free the memory area obtained via xge_os_malloc().
+ * This call may also sleep, and therefore it cannot be used inside
+ * interrupt.
+ *
+ * See also: xge_os_malloc().
+ */
+static inline void xge_os_free(IN pci_dev_h pdev,
+ IN const void *vaddr,
+ IN unsigned long size)
+{ TEMPLATE; }
+
+/**
+ * xge_os_vaddr - Get Virtual address for the given physical address.
+ * @pdev: Device context. Some OSs require device context to perform
+ * operations on memory.
+ * @vaddr: Physical address of the memory block.
+ * @size: Size of the block; some OSs require it.
+ *
+ * Get the virtual address for the given physical address.
+ * This call may also sleep, and therefore it cannot be used inside
+ * interrupt.
+ *
+ * See also: xge_os_malloc().
+ */
+static inline void *xge_os_vaddr(IN pci_dev_h pdev,
+ IN const void *vaddr,
+ IN unsigned long size)
+{ TEMPLATE; }
+
+/**
+ * xge_os_dma_malloc - Allocate DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @size: Size (in bytes) to allocate.
+ * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
+ * XGE_OS_DMA_STREAMING,
+ * XGE_OS_DMA_CONSISTENT
+ * Note that the last two flags are mutually exclusive.
+ * @p_dmah: Handle used to map the memory onto the corresponding device memory
+ * space. See xge_os_dma_map(). The handle is an out-parameter
+ * returned by the function.
+ * @p_dma_acch: One more DMA handle used subsequently to free the
+ * DMA object (via xge_os_dma_free()).
+ * Note that this and the previous handle have
+ * physical meaning for Solaris; on Windows and Linux the
+ * corresponding value will be simply a pointer to PCI device.
+ * The value is returned by this function.
+ *
+ * Allocate DMA-able contiguous memory block of the specified @size.
+ * This memory can be subsequently freed using xge_os_dma_free().
+ * Note: can be used inside interrupt context.
+ *
+ * Returns: Pointer to allocated memory(DMA-able), NULL on failure.
+ *
+ */
+static inline void *xge_os_dma_malloc(IN pci_dev_h pdev,
+ IN unsigned long size,
+ IN int dma_flags,
+ OUT pci_dma_h *p_dmah,
+ OUT pci_dma_acc_h *p_dma_acch)
+{ TEMPLATE; }
+
+/**
+ * xge_os_dma_free - Free previously allocated DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @vaddr: Virtual address of the DMA-able memory.
+ * @p_dma_acch: DMA handle used to free the resource.
+ * @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc().
+ *
+ * Free DMA-able memory originally allocated by xge_os_dma_malloc().
+ * Note: can be used inside interrupt.
+ * See also: xge_os_dma_malloc().
+ */
+static inline void xge_os_dma_free (IN pci_dev_h pdev,
+ IN const void *vaddr,
+ IN pci_dma_acc_h *p_dma_acch,
+ IN pci_dma_h *p_dmah)
+{ TEMPLATE; }
+
+/* ----------------------- io/pci/dma primitives ---------------------------*/
+
+#define XGE_OS_DMA_DIR_TODEVICE TEMPLATE
+#define XGE_OS_DMA_DIR_FROMDEVICE TEMPLATE
+#define XGE_OS_DMA_DIR_BIDIRECTIONAL TEMPLATE
+
+/**
+ * xge_os_pci_read8 - Read one byte from device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of the result.
+ *
+ * Read a byte value from the specified @cfgh PCI configuration space
+ * at the specified offset @where.
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_read8(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u8 *val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pci_write8 - Write one byte into device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write byte value into the specified PCI configuration space
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_write8(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u8 val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pci_read16 - Read 16bit word from device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of the 16bit result.
+ *
+ * Read 16bit value from the specified PCI configuration space at the
+ * specified offset.
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_read16(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u16 *val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pci_write16 - Write 16bit word into device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write 16bit value at the specified offset @where in the PCI
+ * configuration space.
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_write16(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u16 val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pci_read32 - Read 32bit word from device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of 32bit result.
+ *
+ * Read 32bit value from the specified PCI configuration space at the
+ * specified offset.
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_read32(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u32 *val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pci_write32 - Write 32bit word into device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write 32bit value at the specified offset @where in the PCI
+ * configuration space.
+ * Returns: 0 - success, non-zero - failure.
+ */
+static inline int xge_os_pci_write32(IN pci_dev_h pdev,
+ IN pci_cfg_h cfgh,
+ IN int where,
+ IN u32 val)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space.
+ * @pdev: Device context. Some OSs require device context to perform
+ *        PIO and/or config space IO.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 1 byte value read from the specified (mapped) memory space address.
+ */
+static inline u8 xge_os_pio_mem_read8(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_write8 - Write 1 byte into device memory mapped
+ * space.
+ * @pdev: Device context. Some OSs require device context to perform
+ *        PIO and/or config space IO.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write byte value into the specified (mapped) device memory space.
+ */
+static inline void xge_os_pio_mem_write8(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN u8 val,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_read16 - Read 16bit from device memory mapped space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 16bit value read from the specified (mapped) memory space address.
+ */
+static inline u16 xge_os_pio_mem_read16(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_write16 - Write 16bit into device memory mapped space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 16bit value into the specified (mapped) device memory space.
+ */
+static inline void xge_os_pio_mem_write16(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN u16 val,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_read32 - Read 32bit from device memory mapped space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 32bit value read from the specified (mapped) memory space address.
+ */
+static inline u32 xge_os_pio_mem_read32(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_write32 - Write 32bit into device memory space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 32bit value into the specified (mapped) device memory space.
+ */
+static inline void xge_os_pio_mem_write32(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN u32 val,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_read64 - Read 64bit from device memory mapped space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 64bit value read from the specified (mapped) memory space address.
+ */
+static inline u64 xge_os_pio_mem_read64(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_pio_mem_write64 - Write 64bit into device memory space.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 64bit value into the specified (mapped) device memory space.
+ */
+static inline void xge_os_pio_mem_write64(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN u64 val,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_flush_bridge - Flush the bridge.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Flush the bridge.
+ */
+static inline void xge_os_flush_bridge(IN pci_dev_h pdev,
+ IN pci_reg_h regh,
+ IN void *addr)
+{ TEMPLATE; }
+
+/**
+ * xge_os_dma_map - Map DMA-able memory block to, or from, or
+ * to-and-from device.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @vaddr: Virtual address of the DMA-able memory.
+ * @size: Size (in bytes) to be mapped.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
+ * XGE_OS_DMA_STREAMING,
+ * XGE_OS_DMA_CONSISTENT
+ * Note that the last two flags are mutually exclusive.
+ *
+ * Map a single memory block.
+ *
+ * Returns: DMA address of the memory block,
+ * XGE_OS_INVALID_DMA_ADDR on failure.
+ *
+ * See also: xge_os_dma_malloc(), xge_os_dma_unmap(),
+ * xge_os_dma_sync().
+ */
+static inline dma_addr_t xge_os_dma_map(IN pci_dev_h pdev,
+ IN pci_dma_h dmah,
+ IN void *vaddr,
+ IN size_t size,
+ IN int dir,
+ IN int dma_flags)
+{ TEMPLATE; }
+
+/**
+ * xge_os_dma_unmap - Unmap DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
+ * @size: Size (in bytes) to be unmapped.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ *
+ * Unmap a single DMA-able memory block that was previously mapped
+ * using xge_os_dma_map().
+ * See also: xge_os_dma_malloc(), xge_os_dma_map().
+ */
+static inline void xge_os_dma_unmap(IN pci_dev_h pdev,
+ IN pci_dma_h dmah,
+ IN dma_addr_t dma_addr,
+ IN size_t size,
+ IN int dir)
+{ TEMPLATE; }
+
+/**
+ * xge_os_dma_sync - Synchronize mapped memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
+ * @dma_offset: Offset from start of the block. Used by Solaris only.
+ * @length: Size of the block.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ *
+ * Make physical and CPU memory consistent for a single
+ * streaming mode DMA translation.
+ * This API compiles to NOP on cache-coherent platforms.
+ * On non cache-coherent platforms, depending on the direction
+ * of the "sync" operation, this API will effectively
+ * either invalidate CPU cache (that might contain old data),
+ * or flush CPU cache to update physical memory.
+ * See also: xge_os_dma_malloc(), xge_os_dma_map(),
+ * xge_os_dma_unmap().
+ */
+static inline void xge_os_dma_sync(IN pci_dev_h pdev,
+ IN pci_dma_h dmah,
+ IN dma_addr_t dma_addr,
+ IN u64 dma_offset,
+ IN size_t length,
+ IN int dir)
+{ TEMPLATE; }
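+
+/*
+ * Lifecycle sketch (illustrative; error handling omitted): allocate a
+ * streaming buffer, map it toward the device, sync before the device
+ * reads it, then tear everything down in reverse order.
+ *
+ *    pci_dma_h dmah;
+ *    pci_dma_acc_h acch;
+ *    void *va = xge_os_dma_malloc(pdev, 4096, XGE_OS_DMA_STREAMING,
+ *                   &dmah, &acch);
+ *    dma_addr_t da = xge_os_dma_map(pdev, dmah, va, 4096,
+ *                   XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
+ *    xge_os_dma_sync(pdev, dmah, da, 0, 4096, XGE_OS_DMA_DIR_TODEVICE);
+ *    ...
+ *    xge_os_dma_unmap(pdev, dmah, da, 4096, XGE_OS_DMA_DIR_TODEVICE);
+ *    xge_os_dma_free(pdev, va, &acch, &dmah);
+ */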
+
+#endif /* XGE_OS_TEMPLATE_H */
diff --git a/sys/dev/nxge/include/xge-queue.h b/sys/dev/nxge/include/xge-queue.h
new file mode 100644
index 0000000..6745888
--- /dev/null
+++ b/sys/dev/nxge/include/xge-queue.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-queue.h
+ *
+ * Description: serialized event queue
+ *
+ * Created: 7 June 2004
+ */
+
+#ifndef XGE_QUEUE_H
+#define XGE_QUEUE_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-defs.h>
+#include <dev/nxge/include/xge-list.h>
+#include <dev/nxge/include/xgehal-event.h>
+
+__EXTERN_BEGIN_DECLS
+
+#define XGE_QUEUE_BUF_SIZE 0x1000
+#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16
+
+/**
+ * enum xge_queue_status_e - Enumerates return codes of the xge_queue
+ * manipulation APIs.
+ * @XGE_QUEUE_IS_FULL: Queue is full, need to grow.
+ * @XGE_QUEUE_IS_EMPTY: Queue is empty.
+ * @XGE_QUEUE_OUT_OF_MEMORY: Out of memory.
+ * @XGE_QUEUE_NOT_ENOUGH_SPACE: Exceeded specified event size,
+ * see xge_queue_consume().
+ * @XGE_QUEUE_OK: Success; none of the conditions listed above.
+ *
+ * Enumerates return codes of xge_queue_consume()
+ * and xge_queue_produce() APIs.
+ */
+typedef enum xge_queue_status_e {
+ XGE_QUEUE_OK = 0,
+ XGE_QUEUE_IS_FULL = 1,
+ XGE_QUEUE_IS_EMPTY = 2,
+ XGE_QUEUE_OUT_OF_MEMORY = 3,
+ XGE_QUEUE_NOT_ENOUGH_SPACE = 4
+} xge_queue_status_e;
+
+typedef void* xge_queue_h;
+
+/**
+ * struct xge_queue_item_t - Queue item.
+ * @item: List item. Note that the queue is "built" on top of
+ * the bi-directional linked list.
+ * @event_type: Event type. Includes (but is not restricted to)
+ * one of the xge_hal_event_e{} enumerated types.
+ * @data_size: Size of the enqueued user data. Note that xge_queue_t
+ * items are allowed to have variable sizes.
+ * @is_critical: For critical events, e.g. ECC.
+ * @context: Opaque (void*) "context", for instance event producer object.
+ *
+ * Item of the xge_queue_t{}. The queue is protected
+ * in terms of multi-threaded concurrent access.
+ * See also: xge_queue_t{}.
+ */
+typedef struct xge_queue_item_t {
+ xge_list_t item;
+ xge_hal_event_e event_type;
+ int data_size;
+ int is_critical;
+ void *context;
+} xge_queue_item_t;
+
+/**
+ * function xge_queued_f - Item-enqueued callback.
+ * @data: Per-queue context independent of the event. E.g., device handle.
+ * @event_type: HAL or ULD-defined event type. Note that the HAL's own
+ * events are enumerated by xge_hal_event_e{}.
+ *
+ * Per-queue optional callback. If not NULL, called by HAL each
+ * time an event gets added to the queue.
+ */
+typedef void (*xge_queued_f) (void *data, int event_type);
+
+/**
+ * struct xge_queue_t - Protected dynamic queue of variable-size items.
+ * @start_ptr: Points to the start of the queue.
+ * @end_ptr: Points to the end of the queue.
+ * @head_ptr: Points to the head of the queue. It gets changed during queue
+ * produce/consume operations.
+ * @tail_ptr: Points to the tail of the queue. It gets changed during queue
+ * produce/consume operations.
+ * @lock: Lock protecting queue operations (synchronization).
+ * @pages_initial: Number of pages to be initially allocated at the time
+ * of queue creation.
+ * @pages_max: Max number of pages that can be allocated in the queue.
+ * @pages_current: Number of pages currently allocated
+ * @list_head: Points to the list of queue elements that are produced, but yet
+ * to be consumed.
+ * @pdev: PCI device handle
+ * @irqh: PCI device IRQ handle.
+ * @queued_func: Optional callback function to be called each time a new
+ * item is added to the queue.
+ * @queued_data: Arguments to the callback function.
+ * @has_critical_event: Non-zero, if the queue contains a critical event,
+ * see xge_hal_event_e{}.
+ *
+ * Protected dynamically growing queue. The queue is used to support multiple
+ * producer/consumer type scenarios. The queue is a strict FIFO: first come
+ * first served.
+ * Queue users may "produce" (see xge_queue_produce()) and "consume"
+ * (see xge_queue_consume()) items (a.k.a. events) variable sizes.
+ * See also: xge_queue_item_t{}.
+ */
+typedef struct xge_queue_t {
+ void *start_ptr;
+ void *end_ptr;
+ void *head_ptr;
+ void *tail_ptr;
+ spinlock_t lock;
+ unsigned int pages_initial;
+ unsigned int pages_max;
+ unsigned int pages_current;
+ xge_list_t list_head;
+ pci_dev_h pdev;
+ pci_irq_h irqh;
+ xge_queued_f queued_func;
+ void *queued_data;
+ int has_critical_event;
+} xge_queue_t;
+
+/* ========================== PUBLIC API ================================= */
+
+xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
+ int pages_max, xge_queued_f queued_func, void *queued_data);
+
+void xge_queue_destroy(xge_queue_h queueh);
+
+void* xge_queue_item_data(xge_queue_item_t *item);
+
+xge_queue_status_e
+xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
+ int is_critical, const int data_size, void *data);
+
+static inline xge_queue_status_e
+xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) {
+ return xge_queue_produce(queueh, event_type, context, 0, 0, 0);
+}
+
+xge_queue_status_e xge_queue_consume(xge_queue_h queueh, int data_max_size,
+ xge_queue_item_t *item);
+
+void xge_queue_flush(xge_queue_h queueh);
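+
+/*
+ * A minimal produce/consume sketch (illustrative only: MY_EVENT and the
+ * my_* names are hypothetical, error handling is abbreviated, and the
+ * payload is assumed to fit XGE_DEFAULT_EVENT_MAX_DATA_SIZE):
+ *
+ *      char buf[sizeof(xge_queue_item_t) + XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
+ *      xge_queue_item_t *item = (xge_queue_item_t *)(void *)buf;
+ *      u64 payload = 0;
+ *      xge_queue_h qh;
+ *
+ *      qh = xge_queue_create(pdev, irqh, 1, 4, my_queued_cb, my_dev);
+ *      (void) xge_queue_produce(qh, MY_EVENT, my_dev, 0,
+ *                      sizeof(payload), &payload);
+ *      while (xge_queue_consume(qh, XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
+ *                      item) == XGE_QUEUE_OK) {
+ *              u64 *p = (u64 *)xge_queue_item_data(item);
+ *              ... handle *p according to item->event_type ...
+ *      }
+ *      xge_queue_destroy(qh);
+ */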
+
+/* ========================== PRIVATE API ================================= */
+
+xge_queue_status_e __io_queue_grow(xge_queue_h qh);
+
+int __queue_get_reset_critical (xge_queue_h qh);
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_QUEUE_H */
diff --git a/sys/dev/nxge/include/xgehal-channel.h b/sys/dev/nxge/include/xgehal-channel.h
new file mode 100644
index 0000000..8d82530
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-channel.h
@@ -0,0 +1,507 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-channel.h
+ *
+ * Description: HAL channel object functionality
+ *
+ * Created: 19 May 2004
+ */
+
+#ifndef XGE_HAL_CHANNEL_H
+#define XGE_HAL_CHANNEL_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-list.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-stats.h>
+
+__EXTERN_BEGIN_DECLS
+
+/**
+ * enum xge_hal_channel_type_e - Enumerated channel types.
+ * @XGE_HAL_CHANNEL_TYPE_FIFO: fifo.
+ * @XGE_HAL_CHANNEL_TYPE_RING: ring.
+ * @XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: Send Queue
+ * @XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: Receive Queue
+ * @XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: Receive queue completion queue
+ * @XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: Up message queue
+ * @XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: Down message queue
+ * @XGE_HAL_CHANNEL_TYPE_MAX: Helper member that counts the number of
+ * HAL-supported (and recognized) channel types; not a channel type itself.
+ *
+ * Enumerated channel types. Currently there are only two link-layer
+ * channels - Xframe fifo and Xframe ring. In the future the list will grow.
+ */
+typedef enum xge_hal_channel_type_e {
+ XGE_HAL_CHANNEL_TYPE_FIFO,
+ XGE_HAL_CHANNEL_TYPE_RING,
+ XGE_HAL_CHANNEL_TYPE_SEND_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_MAX
+} xge_hal_channel_type_e;
+
+/**
+ * enum xge_hal_channel_flag_e - Channel flags.
+ * @XGE_HAL_CHANNEL_FLAG_NONE: zero (nil) flag.
+ * @XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK: use lock when posting transmit
+ * descriptor.
+ * @XGE_HAL_CHANNEL_FLAG_FREE_RXD: to-be-defined.
+ *
+ * Channel opening flags. Reserved for future usage.
+ */
+typedef enum xge_hal_channel_flag_e {
+ XGE_HAL_CHANNEL_FLAG_NONE = 0x0,
+ XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1,
+ XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2
+} xge_hal_channel_flag_e;
+
+/**
+ * enum xge_hal_dtr_state_e - Descriptor (DTR) state.
+ * @XGE_HAL_DTR_STATE_NONE: Invalid state.
+ * @XGE_HAL_DTR_STATE_AVAIL: Descriptor is available for reservation
+ * (via xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_reserve(), etc.).
+ * @XGE_HAL_DTR_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @XGE_HAL_DTR_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Xframe/HAL descriptor states. For more on descriptor states and transitions
+ * please refer to ch_intern{}.
+ *
+ * See also: xge_hal_channel_dtr_term_f{}.
+ */
+typedef enum xge_hal_dtr_state_e {
+ XGE_HAL_DTR_STATE_NONE = 0,
+ XGE_HAL_DTR_STATE_AVAIL = 1,
+ XGE_HAL_DTR_STATE_POSTED = 2,
+ XGE_HAL_DTR_STATE_FREED = 3
+} xge_hal_dtr_state_e;
+
+/**
+ * enum xge_hal_channel_reopen_e - Channel open, close, or reopen option.
+ * @XGE_HAL_CHANNEL_RESET_ONLY: Do not (de)allocate channel; used with
+ * xge_hal_channel_open(), xge_hal_channel_close().
+ * @XGE_HAL_CHANNEL_OC_NORMAL: Do (de)allocate channel; used with
+ * xge_hal_channel_open(), xge_hal_channel_close().
+ *
+ * Enumerates options used with channel open and close operations.
+ * The @XGE_HAL_CHANNEL_RESET_ONLY can be used when resetting the device;
+ * in this case there is actually no need to free and then again malloc
+ * the memory (including DMA-able memory) used for channel operation.
+ */
+typedef enum xge_hal_channel_reopen_e {
+ XGE_HAL_CHANNEL_RESET_ONLY = 1,
+ XGE_HAL_CHANNEL_OC_NORMAL = 2
+} xge_hal_channel_reopen_e;
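+
+/*
+ * For example, a device-reset path can recycle a channel without freeing
+ * and re-allocating its (DMA-able) memory. A hedged sketch, assuming a
+ * previously opened channel and a filled-in attr:
+ *
+ *      xge_hal_channel_close(channelh, XGE_HAL_CHANNEL_RESET_ONLY);
+ *      ... reset the device ...
+ *      (void) xge_hal_channel_open(hldev, &attr, &channelh,
+ *                      XGE_HAL_CHANNEL_RESET_ONLY);
+ */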
+
+/**
+ * function xge_hal_channel_callback_f - Channel callback.
+ * @channelh: Channel "containing" 1 or more completed descriptors.
+ * @dtrh: First completed descriptor.
+ * @t_code: Transfer code, as per Xframe User Guide.
+ * Returned by HAL.
+ * @host_control: Opaque 64bit data stored by ULD inside the Xframe
+ * descriptor prior to posting the latter on the channel
+ * via xge_hal_fifo_dtr_post() or xge_hal_ring_dtr_post().
+ * The @host_control is returned as is to the ULD with each
+ * completed descriptor.
+ * @userdata: Opaque per-channel data specified at channel open
+ * time, via xge_hal_channel_open().
+ *
+ * Channel completion callback (type declaration). A single per-channel
+ * callback is specified at channel open time, via
+ * xge_hal_channel_open().
+ * Typically gets called as part of the processing of the Interrupt
+ * Service Routine.
+ *
+ * Channel callback gets called by HAL if, and only if, there is at least
+ * one new completion on a given ring or fifo channel. Upon processing the
+ * first @dtrh ULD is _supposed_ to continue consuming completions
+ * using one of the following HAL APIs:
+ * - xge_hal_fifo_dtr_next_completed()
+ * or
+ * - xge_hal_ring_dtr_next_completed().
+ *
+ * Note that failure to process new completions in a timely fashion
+ * leads to XGE_HAL_INF_OUT_OF_DESCRIPTORS condition.
+ *
+ * Non-zero @t_code means failure to process (transmit or receive, depending
+ * on the channel type) the descriptor.
+ *
+ * In the "transmit" case the failure could happen, for instance, when the
+ * link is down, in which case Xframe completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Xframe User Guide.
+ *
+ * See also: xge_hal_fifo_dtr_next_completed(),
+ * xge_hal_ring_dtr_next_completed(), xge_hal_channel_dtr_term_f{}.
+ */
+typedef xge_hal_status_e (*xge_hal_channel_callback_f)
+ (xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ u8 t_code, void *userdata);
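+
+/*
+ * A minimal ring-completion callback might look as follows. This is a
+ * sketch only: the xge_hal_ring_dtr_next_completed() and
+ * xge_hal_ring_dtr_free() signatures are assumed from their use elsewhere
+ * in the HAL, and my_deliver_frame() is hypothetical:
+ *
+ *      static xge_hal_status_e
+ *      my_ring_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ *                      u8 t_code, void *userdata)
+ *      {
+ *              do {
+ *                      if (t_code == 0)
+ *                              my_deliver_frame(userdata, dtrh);
+ *                      xge_hal_ring_dtr_free(channelh, dtrh);
+ *              } while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
+ *                              &t_code) == XGE_HAL_OK);
+ *              return XGE_HAL_OK;
+ *      }
+ */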
+
+/**
+ * function xge_hal_channel_dtr_init_f - Initialize descriptor callback.
+ * @channelh: Channel "containing" the @dtrh descriptor.
+ * @dtrh: Descriptor.
+ * @index: Index of the descriptor in the channel's set of descriptors.
+ * @userdata: Per-channel user data (a.k.a. context) specified at
+ * channel open time, via xge_hal_channel_open().
+ * @reopen: See xge_hal_channel_reopen_e{}.
+ *
+ * Initialize descriptor callback. Unless NULL is specified in the
+ * xge_hal_channel_attr_t{} structure passed to xge_hal_channel_open(),
+ * HAL invokes the callback as part of the xge_hal_channel_open()
+ * implementation.
+ * For the ring type of channel the ULD is expected to fill in this descriptor
+ * with buffer(s) and control information.
+ * For the fifo type of channel the ULD could use the callback to
+ * pre-set DMA mappings and/or alignment buffers.
+ *
+ * See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_term_f{}.
+ */
+typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f)
+ (xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh,
+ int index,
+ void *userdata,
+ xge_hal_channel_reopen_e reopen);
+
+/**
+ * function xge_hal_channel_dtr_term_f - Terminate descriptor callback.
+ * @channelh: Channel "containing" the @dtrh descriptor.
+ * @dtrh: First completed descriptor.
+ * @state: One of the xge_hal_dtr_state_e{} enumerated states.
+ * @userdata: Per-channel user data (a.k.a. context) specified at
+ * channel open time, via xge_hal_channel_open().
+ * @reopen: See xge_hal_channel_reopen_e{}.
+ *
+ * Terminate descriptor callback. Unless NULL is specified in the
+ * xge_hal_channel_attr_t{} structure passed to xge_hal_channel_open(),
+ * HAL invokes the callback as part of closing the corresponding
+ * channel, prior to de-allocating the channel and associated data
+ * structures (including descriptors).
+ * ULD should utilize the callback to (for instance) unmap
+ * and free DMA data buffers associated with the posted (state =
+ * XGE_HAL_DTR_STATE_POSTED) descriptors,
+ * as well as other relevant cleanup functions.
+ *
+ * See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_init_f{}.
+ */
+typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh,
+ xge_hal_dtr_state_e state,
+ void *userdata,
+ xge_hal_channel_reopen_e reopen);
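+
+/*
+ * A typical @dtr_term implementation releases whatever the ULD attached to
+ * descriptors that were still posted at close time. A sketch (the my_*
+ * helper is hypothetical):
+ *
+ *      static void
+ *      my_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ *                      xge_hal_dtr_state_e state, void *userdata,
+ *                      xge_hal_channel_reopen_e reopen)
+ *      {
+ *              if (state != XGE_HAL_DTR_STATE_POSTED)
+ *                      return;
+ *              my_unmap_and_free_buffer(userdata, dtrh);
+ *      }
+ */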
+
+
+/**
+ * struct xge_hal_channel_attr_t - Channel open "template".
+ * @type: xge_hal_channel_type_e channel type.
+ * @vp_id: Virtual path id
+ * @post_qid: Queue ID to post descriptors. For the link layer this
+ * number should be in the 0..7 range.
+ * @compl_qid: Completion queue ID. Must be set to zero for the link layer.
+ * @callback: Channel completion callback. HAL invokes the callback when there
+ * are new completions on that channel. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @dtr_init: Channel's descriptor-initialize callback.
+ * See xge_hal_channel_dtr_init_f{}.
+ * If not NULL, HAL invokes the callback when opening
+ * the channel via xge_hal_channel_open().
+ * @dtr_term: Channel's descriptor-terminate callback. If not NULL,
+ * HAL invokes the callback when closing the corresponding channel.
+ * See also xge_hal_channel_dtr_term_f{}.
+ * @userdata: User-defined "context" of _that_ channel. Passed back to the
+ * user as one of the @callback, @dtr_init, and @dtr_term arguments.
+ * @per_dtr_space: If specified (i.e., greater than zero): extra space
+ * reserved by HAL per each transmit or receive (depending on the
+ * channel type) descriptor. Can be used to store,
+ * and retrieve on completion, information specific
+ * to the upper-layer.
+ * @flags: xge_hal_channel_flag_e enumerated flags.
+ *
+ * Channel open "template". User fills the structure with channel
+ * attributes and passes it to xge_hal_channel_open().
+ * Usage: See ex_open{}.
+ */
+typedef struct xge_hal_channel_attr_t {
+ xge_hal_channel_type_e type;
+#ifdef XGEHAL_RNIC
+ u32 vp_id;
+#endif
+ int post_qid;
+ int compl_qid;
+ xge_hal_channel_callback_f callback;
+ xge_hal_channel_dtr_init_f dtr_init;
+ xge_hal_channel_dtr_term_f dtr_term;
+ void *userdata;
+ int per_dtr_space;
+ xge_hal_channel_flag_e flags;
+} xge_hal_channel_attr_t;
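+
+/*
+ * A condensed version of the ex_open{} sequence (a sketch; the my_*
+ * callbacks are hypothetical and xge_os_memzero() is the OS-pal helper):
+ *
+ *      xge_hal_channel_attr_t attr;
+ *      xge_hal_channel_h channelh;
+ *
+ *      xge_os_memzero(&attr, sizeof(attr));
+ *      attr.type     = XGE_HAL_CHANNEL_TYPE_RING;
+ *      attr.post_qid = 0;
+ *      attr.callback = my_ring_callback;
+ *      attr.dtr_init = my_dtr_init;
+ *      attr.dtr_term = my_dtr_term;
+ *      attr.userdata = my_dev;
+ *      attr.flags    = XGE_HAL_CHANNEL_FLAG_NONE;
+ *      if (xge_hal_channel_open(hldev, &attr, &channelh,
+ *                      XGE_HAL_CHANNEL_OC_NORMAL) != XGE_HAL_OK)
+ *              ... fail ...
+ */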
+
+/*
+ * xge_hal_channel_t
+ * ---------- complete/free section ---------------
+ * @item: List item; used to maintain a list of open channels.
+ * @callback: Channel completion callback. See
+ * xge_hal_channel_callback_f.
+ * @compl_index: Completion index. At any point in time points to the
+ * position in the channel that will contain the next
+ * to-be-completed descriptor.
+ * @length: Channel length. Currently allocated number of descriptors.
+ * The channel length "grows" when more descriptors get allocated.
+ * See __hal_mempool_grow().
+ * @free_arr: Free array. Contains completed descriptors that were freed
+ * (i.e., handed over back to HAL) by ULD.
+ * See xge_hal_fifo_dtr_free(), xge_hal_ring_dtr_free().
+ * @free_lock: Lock to protect @free_arr.
+ * ----------- reserve/post section ---------------
+ * @post_index: Post index. At any point in time points to the
+ * position in the channel that will contain the next to-be-posted
+ * descriptor.
+ * @post_lock: Lock to serialize multiple concurrent "posters" of descriptors
+ * on the given channel.
+ * @reserve_arr: Reserve array. Contains descriptors that can be reserved
+ * by ULD for the subsequent send or receive operation.
+ * See xge_hal_fifo_dtr_reserve(),
+ * xge_hal_ring_dtr_reserve().
+ * @reserve_length: Length of the @reserve_arr. The length dynamically
+ * changes: it decrements each time descriptor is reserved.
+ * @reserve_lock: Lock to serialize multiple concurrent threads accessing
+ * @reserve_arr.
+ * @reserve_threshold: Reserve threshold. Minimal number of free descriptors
+ * that ought to be preserved in the channel at all times.
+ * Note that @reserve_threshold >= 0 &&
+ * @reserve_threshold < @reserve_max.
+ * ------------ common section --------------------
+ * @devh: Device handle. HAL device object that contains _this_ channel.
+ * @pdev: PCI device handle; used, among other things, to synchronize
+ * (to/from device) descriptors.
+ * @regh0: First (BAR0) device memory space handle. Copied from the HAL device
+ * at channel open time.
+ * @regh1: Second (BAR1) device memory space handle. Copied from the HAL device
+ * at channel open time.
+ * @userdata: Per-channel opaque (void*) user-defined context, which may be
+ * upper-layer driver object, ULP connection, etc.
+ * Once channel is open, @userdata is passed back to user via
+ * xge_hal_channel_callback_f.
+ * @work_arr: Work array. Contains descriptors posted to the channel.
+ * Note that at any point in time @work_arr contains 3 types of
+ * descriptors:
+ * 1) posted but not yet consumed by Xframe device;
+ * 2) consumed but not yet completed;
+ * 3) completed but not yet freed
+ * (via xge_hal_fifo_dtr_free() or xge_hal_ring_dtr_free())
+ * @saved_arr: Array used internally to optimize channel full-duplex
+ * operation.
+ * @stats: Channel statistics. Includes HAL internal counters, for instance
+ * the number of times the out-of-descriptors
+ * (see XGE_HAL_INF_OUT_OF_DESCRIPTORS) condition happened.
+ * ------------- "slow" section ------------------
+ * @type: Channel type. See xge_hal_channel_type_e{}.
+ * @vp_id: Virtual path id
+ * @post_qid: Identifies Xframe queue used for posting descriptors.
+ * @compl_qid: Identifies Xframe completion queue.
+ * @flags: Channel flags. See xge_hal_channel_flag_e{}.
+ * @reserve_initial: Initial number of descriptors allocated at channel open
+ * time (see xge_hal_channel_open()). The number of
+ * channel descriptors can grow at runtime
+ * up to @reserve_max value.
+ * @reserve_max: Maximum number of channel descriptors. See @reserve_initial.
+ * @is_open: True if the channel is open; false otherwise.
+ * @per_dtr_space: Per-descriptor space (in bytes) that the channel user can
+ * utilize to store per-operation control information.
+ *
+ * HAL channel object. A HAL device (see xge_hal_device_t{}) contains
+ * zero or more channels. A HAL channel contains zero or more descriptors. The
+ * latter are used by ULD(s) to manage the device and/or send and receive data
+ * to remote peer(s) via the channel.
+ *
+ * See also: xge_hal_channel_type_e{}, xge_hal_channel_flag_e,
+ * xge_hal_channel_callback_f{}
+ */
+typedef struct {
+ /* complete/free section */
+ xge_list_t item;
+ xge_hal_channel_callback_f callback;
+ void **free_arr;
+ int length;
+ int free_length;
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) || \
+ defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ spinlock_t free_lock;
+#endif
+ int compl_index;
+ unsigned int usage_cnt;
+ unsigned int poll_bytes;
+ int unused0;
+
+ /* reserve/post data path section */
+#ifdef __XGE_WIN__
+ int __xge_os_attr_cacheline_aligned
+ post_index;
+#else
+ int post_index
+ __xge_os_attr_cacheline_aligned;
+#endif
+ spinlock_t reserve_lock;
+ spinlock_t post_lock;
+
+ void **reserve_arr;
+ int reserve_length;
+ int reserve_threshold;
+ int reserve_top;
+ int unused1;
+
+ /* common section */
+ xge_hal_device_h devh;
+ pci_dev_h pdev;
+ pci_reg_h regh0;
+ pci_reg_h regh1;
+ void *userdata;
+ void **work_arr;
+ void **saved_arr;
+ void **orig_arr;
+ xge_hal_stats_channel_info_t stats;
+
+ /* slow section */
+ xge_hal_channel_type_e type;
+#ifdef XGEHAL_RNIC
+ u32 vp_id;
+#endif
+ int post_qid;
+ int compl_qid;
+ xge_hal_channel_flag_e flags;
+ int reserve_initial;
+ int reserve_max;
+ int is_open;
+ int per_dtr_space;
+ xge_hal_channel_dtr_term_f dtr_term;
+ xge_hal_channel_dtr_init_f dtr_init;
+ /* MSI stuff */
+ u32 msi_msg;
+ u8 rti;
+ u8 tti;
+ u16 unused2;
+ /* MSI-X stuff */
+ u64 msix_address;
+ u32 msix_data;
+ int msix_idx;
+ volatile int in_interrupt;
+ unsigned int magic;
+#ifdef __XGE_WIN__
+} __xge_os_attr_cacheline_aligned xge_hal_channel_t;
+#else
+} xge_hal_channel_t __xge_os_attr_cacheline_aligned;
+#endif
+
+/* ========================== CHANNEL PRIVATE API ========================= */
+
+xge_hal_status_e
+__hal_channel_initialize(xge_hal_channel_h channelh,
+ xge_hal_channel_attr_t *attr, void **reserve_arr,
+ int reserve_initial, int reserve_max, int reserve_threshold);
+
+void __hal_channel_terminate(xge_hal_channel_h channelh);
+
+xge_hal_channel_t*
+__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
+#ifdef XGEHAL_RNIC
+ u32 vp_id,
+#endif
+ xge_hal_channel_type_e type);
+
+void __hal_channel_free(xge_hal_channel_t *channel);
+
+#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_CHANNEL)
+#define __HAL_STATIC_CHANNEL
+#define __HAL_INLINE_CHANNEL
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_complete(xge_hal_channel_h channelh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_dealloc(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int offset);
+
+/* ========================== CHANNEL PUBLIC API ========================= */
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_dtr_count(xge_hal_channel_h channelh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
+xge_hal_channel_userdata(xge_hal_channel_h channelh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_id(xge_hal_channel_h channelh);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
+ int copy_size);
+
+#else /* XGE_DEBUG_FP & XGE_DEBUG_FP_CHANNEL */
+#define __HAL_STATIC_CHANNEL static
+#define __HAL_INLINE_CHANNEL inline
+#include <dev/nxge/xgehal/xgehal-channel-fp.c>
+#endif /* XGE_DEBUG_FP & XGE_DEBUG_FP_CHANNEL */
+
+xge_hal_status_e
+xge_hal_channel_open(xge_hal_device_h hldev, xge_hal_channel_attr_t *attr,
+ xge_hal_channel_h *channel,
+ xge_hal_channel_reopen_e reopen);
+
+void xge_hal_channel_close(xge_hal_channel_h channelh,
+ xge_hal_channel_reopen_e reopen);
+
+void xge_hal_channel_abort(xge_hal_channel_h channelh,
+ xge_hal_channel_reopen_e reopen);
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_CHANNEL_H */
diff --git a/sys/dev/nxge/include/xgehal-config.h b/sys/dev/nxge/include/xgehal-config.h
new file mode 100644
index 0000000..c7bde29
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-config.h
@@ -0,0 +1,1012 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-config.h
+ *
+ * Description: Xframe configuration.
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_HAL_CONFIG_H
+#define XGE_HAL_CONFIG_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xge-queue.h>
+
+__EXTERN_BEGIN_DECLS
+
+#define XGE_HAL_DEFAULT_USE_HARDCODE -1
+
+#ifdef XGEHAL_RNIC
+#define XGE_HAL_MAX_VIRTUAL_PATHS 17
+#else
+#define XGE_HAL_MAX_VIRTUAL_PATHS 8
+#endif
+#define XGE_HAL_MAX_INTR_PER_VP 4
+
+
+/**
+ * struct xge_hal_tti_config_t - Xframe Tx interrupt configuration.
+ * @enabled: Set to 1, if TTI feature is enabled.
+ * @urange_a: Link utilization range A. The value from 0 to 100%.
+ * @ufc_a: Frame count for the utilization range A. An interrupt is generated
+ * each time (and only when) the line is utilized at no more
+ * than @urange_a percent in the transmit direction and the
+ * number of transmitted frames is greater than or equal to @ufc_a.
+ * @urange_b: Link utilization range B.
+ * @ufc_b: Frame count for the utilization range B.
+ * @urange_c: Link utilization range C.
+ * @ufc_c: Frame count for the utilization range C.
+ * @urange_d: Link utilization range D.
+ * @ufc_d: Frame count for the utilization range D.
+ * @timer_val_us: Interval of time, in microseconds, at which transmit timer
+ * interrupt is to be generated. Note that unless @timer_ci_en
+ * is set, the timer interrupt is generated only in presence
+ * of the transmit traffic. Note also that timer interrupt
+ * and utilization interrupt are two separate interrupt
+ * sources.
+ * @timer_ac_en: Enable auto-cancel. That is, reset the timer if utilization
+ * interrupt was generated during the interval.
+ * @timer_ci_en: Enable/disable continuous interrupt. Set this value
+ * to 1 in order to generate continuous interrupt
+ * at fixed @timer_val intervals of time, independently
+ * of whether there is transmit traffic or not.
+ *
+ * Xframe transmit interrupt configuration.
+ * See Xframe User Guide, Section 3.5 "Device Interrupts"
+ * for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_tti_config_t{} structure below.
+ */
+typedef struct xge_hal_tti_config_t {
+
+ int enabled;
+#define XGE_HAL_TTI_ENABLE 1
+#define XGE_HAL_TTI_DISABLE 0
+
+ /* Line utilization interrupts */
+
+ int urange_a;
+#define XGE_HAL_MIN_TX_URANGE_A 0
+#define XGE_HAL_MAX_TX_URANGE_A 100
+
+ int ufc_a;
+#define XGE_HAL_MIN_TX_UFC_A 0
+#define XGE_HAL_MAX_TX_UFC_A 65535
+
+ int urange_b;
+#define XGE_HAL_MIN_TX_URANGE_B 0
+#define XGE_HAL_MAX_TX_URANGE_B 100
+
+ int ufc_b;
+#define XGE_HAL_MIN_TX_UFC_B 0
+#define XGE_HAL_MAX_TX_UFC_B 65535
+
+ int urange_c;
+#define XGE_HAL_MIN_TX_URANGE_C 0
+#define XGE_HAL_MAX_TX_URANGE_C 100
+
+ int ufc_c;
+#define XGE_HAL_MIN_TX_UFC_C 0
+#define XGE_HAL_MAX_TX_UFC_C 65535
+
+ int ufc_d;
+#define XGE_HAL_MIN_TX_UFC_D 0
+#define XGE_HAL_MAX_TX_UFC_D 65535
+
+ int timer_val_us;
+#define XGE_HAL_MIN_TX_TIMER_VAL 0
+#define XGE_HAL_MAX_TX_TIMER_VAL 65535
+
+ int timer_ac_en;
+#define XGE_HAL_MIN_TX_TIMER_AC_EN 0
+#define XGE_HAL_MAX_TX_TIMER_AC_EN 1
+
+ int timer_ci_en;
+#define XGE_HAL_MIN_TX_TIMER_CI_EN 0
+#define XGE_HAL_MAX_TX_TIMER_CI_EN 1
+
+
+} xge_hal_tti_config_t;
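+
+/*
+ * Example: a moderate Tx interrupt scheme that interrupts after a few
+ * frames at low link utilization and falls back to a timer otherwise.
+ * All values are illustrative only; device_config is assumed to be an
+ * xge_hal_device_config_t instance:
+ *
+ *      xge_hal_tti_config_t *tti = &device_config.fifo.queue[0].tti[0];
+ *
+ *      tti->enabled  = XGE_HAL_TTI_ENABLE;
+ *      tti->urange_a = 10; tti->ufc_a = 16;
+ *      tti->urange_b = 20; tti->ufc_b = 32;
+ *      tti->urange_c = 50; tti->ufc_c = 64;
+ *      tti->ufc_d = 128;
+ *      tti->timer_val_us = 250;
+ *      tti->timer_ac_en  = 1;
+ *      tti->timer_ci_en  = 0;
+ */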
+
+/**
+ * struct xge_hal_rti_config_t - Xframe Rx interrupt configuration.
+ * @urange_a: Link utilization range A. The value from 0 to 100%.
+ * @ufc_a: Frame count for the utilization range A. An interrupt is generated
+ * each time (and only when) the line is utilized at no more
+ * than @urange_a percent inbound and the
+ * number of received frames is greater than or equal to @ufc_a.
+ * @urange_b: Link utilization range B.
+ * @ufc_b: Frame count for the utilization range B.
+ * @urange_c: Link utilization range C.
+ * @ufc_c: Frame count for the utilization range C.
+ * @urange_d: Link utilization range D.
+ * @ufc_d: Frame count for the utilization range D.
+ * @timer_ac_en: Enable auto-cancel. That is, reset the timer if utilization
+ * interrupt was generated during the interval.
+ * @timer_val_us: Interval of time, in microseconds, at which receive timer
+ * interrupt is to be generated. The timer interrupt is generated
+ * only in presence of the inbound traffic. Note also that timer
+ * interrupt and utilization interrupt are two separate interrupt
+ * sources.
+ *
+ * Xframe receive interrupt configuration.
+ * See Xframe User Guide, Section 3.5 "Device Interrupts"
+ * for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_rti_config_t{} structure below.
+ */
+typedef struct xge_hal_rti_config_t {
+
+ int urange_a;
+#define XGE_HAL_MIN_RX_URANGE_A 0
+#define XGE_HAL_MAX_RX_URANGE_A 127
+
+ int ufc_a;
+#define XGE_HAL_MIN_RX_UFC_A 0
+#define XGE_HAL_MAX_RX_UFC_A 65535
+
+ int urange_b;
+#define XGE_HAL_MIN_RX_URANGE_B 0
+#define XGE_HAL_MAX_RX_URANGE_B 127
+
+ int ufc_b;
+#define XGE_HAL_MIN_RX_UFC_B 0
+#define XGE_HAL_MAX_RX_UFC_B 65535
+
+ int urange_c;
+#define XGE_HAL_MIN_RX_URANGE_C 0
+#define XGE_HAL_MAX_RX_URANGE_C 127
+
+ int ufc_c;
+#define XGE_HAL_MIN_RX_UFC_C 0
+#define XGE_HAL_MAX_RX_UFC_C 65535
+
+ int ufc_d;
+#define XGE_HAL_MIN_RX_UFC_D 0
+#define XGE_HAL_MAX_RX_UFC_D 65535
+
+ int timer_ac_en;
+#define XGE_HAL_MIN_RX_TIMER_AC_EN 0
+#define XGE_HAL_MAX_RX_TIMER_AC_EN 1
+
+ int timer_val_us;
+#define XGE_HAL_MIN_RX_TIMER_VAL 0
+#define XGE_HAL_MAX_RX_TIMER_VAL 65535
+
+} xge_hal_rti_config_t;
+
+/**
+ * struct xge_hal_fifo_queue_t - Single fifo configuration.
+ * @max: Max number of TxDLs (that is, lists of Tx descriptors) per queue.
+ * @initial: Initial number of TxDLs per queue (can grow up to @max).
+ * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
+ * Use 0 otherwise.
+ * @intr_vector: TBD
+ * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
+ * which generally improves latency of the host bridge operation
+ * (see PCI specification). For valid values please refer
+ * to xge_hal_fifo_queue_t{} in the driver sources.
+ * @priority: TBD
+ * @configured: Boolean. Use 1 to specify that the fifo is configured.
+ * Only "configured" fifos can be activated and used to post
+ * Tx descriptors. Any subset of 8 available fifos can be
+ * "configured".
+ * @tti: TBD
+ *
+ * Single fifo configuration.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_fifo_queue_t{} structure. Please refer to the
+ * corresponding header file.
+ * See also: xge_hal_fifo_config_t{}
+ */
+typedef struct xge_hal_fifo_queue_t {
+ int max;
+ int initial;
+#define XGE_HAL_MIN_FIFO_QUEUE_LENGTH 2
+#define XGE_HAL_MAX_FIFO_QUEUE_LENGTH 8192
+
+ int intr;
+#define XGE_HAL_MIN_FIFO_QUEUE_INTR 0
+#define XGE_HAL_MAX_FIFO_QUEUE_INTR 1
+
+ int intr_vector;
+#define XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR 0
+#define XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR 64
+
+ int no_snoop_bits;
+#define XGE_HAL_MIN_FIFO_QUEUE_NO_SNOOP_DISABLED 0
+#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_TXD 1
+#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_BUFFER 2
+#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_ALL 3
+
+ int priority;
+#define XGE_HAL_MIN_FIFO_PRIORITY 0
+#define XGE_HAL_MAX_FIFO_PRIORITY 63
+
+ int configured;
+#define XGE_HAL_MIN_FIFO_CONFIGURED 0
+#define XGE_HAL_MAX_FIFO_CONFIGURED 1
+
+#define XGE_HAL_MAX_FIFO_TTI_NUM 7
+#define XGE_HAL_MAX_FIFO_TTI_RING_0 56
+ xge_hal_tti_config_t tti[XGE_HAL_MAX_FIFO_TTI_NUM];
+
+} xge_hal_fifo_queue_t;
+
+/**
+ * struct xge_hal_fifo_config_t - Configuration of all 8 fifos.
+ * @max_frags: Max number of Tx buffers per TxDL (that is, per single
+ * transmit operation).
+ * No more than 256 transmit buffers can be specified.
+ * @max_aligned_frags: Number of fragments to be aligned out of
+ * maximum fragments (see @max_frags).
+ * @reserve_threshold: Descriptor reservation threshold.
+ * At least @reserve_threshold descriptors will remain
+ * unallocated at all times.
+ * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
+ * bytes. Setting @memblock_size to page size ensures
+ * by-page allocation of descriptors. 128K bytes is the
+ * maximum supported block size.
+ * @queue: Array of per-fifo configurations.
+ * @alignment_size: Per-Tx-fragment DMA-able memory used to align transmit data
+ * (e.g., to align on a cache line).
+ *
+ * Configuration of all Xframe fifos. Includes array of xge_hal_fifo_queue_t
+ * structures.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_fifo_config_t{} structure. Please refer to the
+ * corresponding header file.
+ * See also: xge_hal_ring_queue_t{}.
+ */
+typedef struct xge_hal_fifo_config_t {
+ int max_frags;
+#define XGE_HAL_MIN_FIFO_FRAGS 1
+#define XGE_HAL_MAX_FIFO_FRAGS 256
+
+ int reserve_threshold;
+#define XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD 0
+#define XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD 8192
+
+ int memblock_size;
+#define XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE 4096
+#define XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE 131072
+
+ int alignment_size;
+#define XGE_HAL_MIN_ALIGNMENT_SIZE 0
+#define XGE_HAL_MAX_ALIGNMENT_SIZE 65536
+
+ int max_aligned_frags;
+ /* range: (1, @max_frags) */
+
+#define XGE_HAL_MIN_FIFO_NUM 1
+#define XGE_HAL_MAX_FIFO_NUM_HERC 8
+#define XGE_HAL_MAX_FIFO_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1)
+#define XGE_HAL_MAX_FIFO_NUM (XGE_HAL_MAX_VIRTUAL_PATHS)
+ xge_hal_fifo_queue_t queue[XGE_HAL_MAX_FIFO_NUM];
+} xge_hal_fifo_config_t;
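+
+/*
+ * Example: a single configured fifo with page-sized descriptor blocks
+ * (illustrative values only; device_config is assumed to be an
+ * xge_hal_device_config_t instance):
+ *
+ *      xge_hal_fifo_config_t *fc = &device_config.fifo;
+ *
+ *      fc->max_frags         = 32;
+ *      fc->max_aligned_frags = 2;
+ *      fc->reserve_threshold = 4;
+ *      fc->memblock_size     = XGE_OS_HOST_PAGE_SIZE;
+ *      fc->alignment_size    = 64;
+ *      fc->queue[0].configured = 1;
+ *      fc->queue[0].initial    = 512;
+ *      fc->queue[0].max        = 2048;
+ *      fc->queue[0].intr       = 0;
+ */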
+
+/**
+ * struct xge_hal_rts_port_t - RTS port entry
+ * @num: Port number
+ * @udp: Port is UDP (default TCP)
+ * @src: Port is Source (default Destination)
+ */
+typedef struct xge_hal_rts_port_t {
+ int num;
+ int udp;
+ int src;
+} xge_hal_rts_port_t;
+
+/**
+ * struct xge_hal_ring_queue_t - Single ring configuration.
+ * @max: Max number of RxD blocks per queue.
+ * @initial: Initial number of RxD blocks per queue
+ * (can grow up to @max).
+ * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
+ * to Xframe User Guide.
+ * @dram_size_mb: Size (in MB) of Xframe DRAM used for _that_ ring.
+ * Note that 64MB of available
+ * on-board DRAM is shared between receive rings.
+ * If a single ring is used, @dram_size_mb can be set to 64.
+ * Sum of all rings' @dram_size_mb cannot exceed 64.
+ * @intr_vector: TBD
+ * @backoff_interval_us: Time (in microseconds) after which Xframe
+ * tries to download RxDs posted by the host.
+ * Note that the "backoff" does not happen if the host posts receive
+ * descriptors in a timely fashion.
+ * @max_frm_len: Maximum frame length that can be received on _that_ ring.
+ * Setting this field to -1 ensures that the ring will
+ * "accept" MTU-size frames (note that MTU can be changed at
+ * runtime).
+ * Any value other than (-1) specifies a certain "hard"
+ * limit on the receive frame sizes.
+ * The field can be used to activate receive frame-length based
+ * steering.
+ * @priority: Ring priority. 0 - highest, 7 - lowest. The value is used
+ * to give prioritized access to PCI-X. See Xframe documentation
+ * for details.
+ * @rth_en: Enable Receive Traffic Hashing (RTH).
+ * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
+ * which generally improves latency of the host bridge operation
+ * (see PCI specification). For valid values please refer
+ * to xge_hal_ring_queue_t{} in the driver sources.
+ * @indicate_max_pkts: Sets maximum number of received frames to be processed
+ * within single interrupt.
+ * @configured: Boolean. Use 1 to specify that the ring is configured.
+ * Only "configured" rings can be activated and used to post
+ * Rx descriptors. Any subset of 8 available rings can be
+ * "configured".
+ * @rts_mac_en: 1 - To enable Receive MAC address steering.
+ * 0 - To disable Receive MAC address steering.
+ * @rts_port_en: TBD
+ * @rts_ports: TBD
+ * @rti: Xframe receive interrupt configuration.
+ *
+ * Single ring configuration.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_ring_queue_t{} structure. Please refer to the
+ * corresponding header file.
+ * See also: xge_hal_fifo_config_t{}.
+ */
+typedef struct xge_hal_ring_queue_t {
+ int max;
+ int initial;
+#define XGE_HAL_MIN_RING_QUEUE_BLOCKS 1
+#define XGE_HAL_MAX_RING_QUEUE_BLOCKS 64
+
+ int buffer_mode;
+#define XGE_HAL_RING_QUEUE_BUFFER_MODE_1 1
+#define XGE_HAL_RING_QUEUE_BUFFER_MODE_2 2
+#define XGE_HAL_RING_QUEUE_BUFFER_MODE_3 3
+#define XGE_HAL_RING_QUEUE_BUFFER_MODE_5 5
+
+ int dram_size_mb;
+#define XGE_HAL_MIN_RING_QUEUE_SIZE 0
+#define XGE_HAL_MAX_RING_QUEUE_SIZE_XENA 64
+#define XGE_HAL_MAX_RING_QUEUE_SIZE_HERC 32
+
+ int intr_vector;
+#define XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR 0
+#define XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR 64
+
+ int backoff_interval_us;
+#define XGE_HAL_MIN_BACKOFF_INTERVAL_US 1
+#define XGE_HAL_MAX_BACKOFF_INTERVAL_US 125000
+
+ int max_frm_len;
+#define XGE_HAL_MIN_MAX_FRM_LEN -1
+#define XGE_HAL_MAX_MAX_FRM_LEN 9622
+
+ int priority;
+#define XGE_HAL_MIN_RING_PRIORITY 0
+#define XGE_HAL_MAX_RING_PRIORITY 7
+
+ int no_snoop_bits;
+#define XGE_HAL_MIN_RING_QUEUE_NO_SNOOP_DISABLED 0
+#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_RXD 1
+#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_BUFFER 2
+#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_ALL 3
+
+ int indicate_max_pkts;
+#define XGE_HAL_MIN_RING_INDICATE_MAX_PKTS 1
+#define XGE_HAL_MAX_RING_INDICATE_MAX_PKTS 65536
+
+ int configured;
+#define XGE_HAL_MIN_RING_CONFIGURED 0
+#define XGE_HAL_MAX_RING_CONFIGURED 1
+
+ int rts_mac_en;
+#define XGE_HAL_MIN_RING_RTS_MAC_EN 0
+#define XGE_HAL_MAX_RING_RTS_MAC_EN 1
+
+ int rth_en;
+#define XGE_HAL_MIN_RING_RTH_EN 0
+#define XGE_HAL_MAX_RING_RTH_EN 1
+
+ int rts_port_en;
+#define XGE_HAL_MIN_RING_RTS_PORT_EN 0
+#define XGE_HAL_MAX_RING_RTS_PORT_EN 1
+
+#define XGE_HAL_MAX_STEERABLE_PORTS 32
+ xge_hal_rts_port_t rts_ports[XGE_HAL_MAX_STEERABLE_PORTS];
+
+ xge_hal_rti_config_t rti;
+
+} xge_hal_ring_queue_t;
+
+/**
+ * struct xge_hal_ring_config_t - Array of ring configurations.
+ * @memblock_size: Ring descriptors are allocated in blocks of @memblock_size
+ * bytes. Setting @memblock_size to page size ensures
+ * by-page allocation of descriptors. 128K bytes is the
+ * upper limit.
+ * @scatter_mode: Xframe supports two receive scatter modes: A and B.
+ * For details please refer to Xframe User Guide.
+ * @strip_vlan_tag: TBD
+ * @queue: Array of all Xframe ring configurations.
+ *
+ * Array of ring configurations.
+ * See also: xge_hal_ring_queue_t{}.
+ */
+typedef struct xge_hal_ring_config_t {
+
+ int memblock_size;
+#define XGE_HAL_MIN_RING_MEMBLOCK_SIZE 4096
+#define XGE_HAL_MAX_RING_MEMBLOCK_SIZE 131072
+
+ int scatter_mode;
+#define XGE_HAL_RING_QUEUE_SCATTER_MODE_A 0
+#define XGE_HAL_RING_QUEUE_SCATTER_MODE_B 1
+
+ int strip_vlan_tag;
+#define XGE_HAL_RING_DONOT_STRIP_VLAN_TAG 0
+#define XGE_HAL_RING_STRIP_VLAN_TAG 1
+
+#define XGE_HAL_MIN_RING_NUM 1
+#define XGE_HAL_MAX_RING_NUM_HERC 8
+#define XGE_HAL_MAX_RING_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1)
+#define XGE_HAL_MAX_RING_NUM (XGE_HAL_MAX_VIRTUAL_PATHS)
+ xge_hal_ring_queue_t queue[XGE_HAL_MAX_RING_NUM];
+
+} xge_hal_ring_config_t;
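+
+/*
+ * Example: two configured rings splitting the 64MB of on-board DRAM
+ * evenly (illustrative values only; device_config is assumed to be an
+ * xge_hal_device_config_t instance):
+ *
+ *      xge_hal_ring_config_t *rc = &device_config.ring;
+ *      int i;
+ *
+ *      rc->memblock_size = XGE_OS_HOST_PAGE_SIZE;
+ *      rc->scatter_mode  = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
+ *      for (i = 0; i < 2; i++) {
+ *              rc->queue[i].configured   = 1;
+ *              rc->queue[i].buffer_mode  = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
+ *              rc->queue[i].initial      = 16;
+ *              rc->queue[i].max          = 32;
+ *              rc->queue[i].dram_size_mb = 32;
+ *              rc->queue[i].max_frm_len  = -1;
+ *              rc->queue[i].indicate_max_pkts = 32;
+ *              rc->queue[i].rti.ufc_a        = 4;
+ *              rc->queue[i].rti.timer_val_us = 100;
+ *      }
+ */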
+
+/**
+ * struct xge_hal_mac_config_t - MAC configuration.
+ * @media: Transponder type.
+ * @tmac_util_period: The sampling period over which the transmit utilization
+ * is calculated.
+ * @rmac_util_period: The sampling period over which the receive utilization
+ * is calculated.
+ * @rmac_strip_pad: Determines whether padding of received frames is removed by
+ * the MAC or sent to the host.
+ * @rmac_bcast_en: Enable frames containing broadcast address to be
+ * passed to the host.
+ * @rmac_pause_gen_en: Enable generation of (outgoing) pause frames.
+ * @rmac_pause_rcv_en: Enable processing of received pause frames.
+ * @rmac_pause_time: The value to be inserted in outgoing pause frames.
+ * Has units of pause quanta (one pause quantum = 512 bit times).
+ * @mc_pause_threshold_q0q3: Contains thresholds for pause frame generation
+ * for queues 0 through 3. The threshold value indicates portion of the
+ * individual receive buffer queue size. Thresholds have a range of 0 to
+ * 255, allowing 256 possible watermarks in a queue.
+ * @mc_pause_threshold_q4q7: Contains thresholds for pause frame generation
+ * for queues 4 through 7. The threshold value indicates portion of the
+ * individual receive buffer queue size. Thresholds have a range of 0 to
+ * 255, allowing 256 possible watermarks in a queue.
+ *
+ * MAC configuration. This includes various aspects of configuration, including:
+ * - Pause frame threshold;
+ * - sampling rate to calculate link utilization;
+ * - enabling/disabling broadcasts.
+ *
+ * See Xframe User Guide for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_mac_config_t{} structure. Please refer to the
+ * corresponding include file.
+ */
+typedef struct xge_hal_mac_config_t {
+ int media;
+#define XGE_HAL_MIN_MEDIA 0
+#define XGE_HAL_MEDIA_SR 0
+#define XGE_HAL_MEDIA_SW 1
+#define XGE_HAL_MEDIA_LR 2
+#define XGE_HAL_MEDIA_LW 3
+#define XGE_HAL_MEDIA_ER 4
+#define XGE_HAL_MEDIA_EW 5
+#define XGE_HAL_MAX_MEDIA 5
+
+ int tmac_util_period;
+#define XGE_HAL_MIN_TMAC_UTIL_PERIOD 0
+#define XGE_HAL_MAX_TMAC_UTIL_PERIOD 15
+
+ int rmac_util_period;
+#define XGE_HAL_MIN_RMAC_UTIL_PERIOD 0
+#define XGE_HAL_MAX_RMAC_UTIL_PERIOD 15
+
+ int rmac_bcast_en;
+#define XGE_HAL_MIN_RMAC_BCAST_EN 0
+#define XGE_HAL_MAX_RMAC_BCAST_EN 1
+
+ int rmac_pause_gen_en;
+#define XGE_HAL_MIN_RMAC_PAUSE_GEN_EN 0
+#define XGE_HAL_MAX_RMAC_PAUSE_GEN_EN 1
+
+ int rmac_pause_rcv_en;
+#define XGE_HAL_MIN_RMAC_PAUSE_RCV_EN 0
+#define XGE_HAL_MAX_RMAC_PAUSE_RCV_EN 1
+
+ int rmac_pause_time;
+#define XGE_HAL_MIN_RMAC_HIGH_PTIME 16
+#define XGE_HAL_MAX_RMAC_HIGH_PTIME 65535
+
+ int mc_pause_threshold_q0q3;
+#define XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3 0
+#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3 254
+
+ int mc_pause_threshold_q4q7;
+#define XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7 0
+#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7 254
+
+} xge_hal_mac_config_t;
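+
+/*
+ * Example: enable broadcasts and symmetric flow control (illustrative
+ * values only):
+ *
+ *      xge_hal_mac_config_t *mac = &device_config.mac;
+ *
+ *      mac->rmac_bcast_en     = 1;
+ *      mac->rmac_pause_gen_en = 1;
+ *      mac->rmac_pause_rcv_en = 1;
+ *      mac->rmac_pause_time   = 512;
+ *      mac->mc_pause_threshold_q0q3 = 187;
+ *      mac->mc_pause_threshold_q4q7 = 187;
+ *      mac->tmac_util_period = 5;
+ *      mac->rmac_util_period = 5;
+ */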
+
+#ifdef XGEHAL_RNIC
+
+/*
+ * struct xge_hal_vp_config_t - Configuration of virtual path
+ * @vp_id: Virtual Path Id
+ * @vp_valid: Flag specifies if the configuration is valid
+ * @bitmap_intr_num: Interrupt Number associated with the bitmap
+ * @nce_oid_start: The start of the NCE ID range
+ * @nce_oid_end: The end of the NCE ID range
+ * @session_oid_start: The start of the Session ID range
+ * @session_oid_end: The end of the Session ID range
+ * @srq_oid_start: The start of the SRQ ID range
+ * @srq_oid_end: The end of the SRQ ID range
+ * @cqrq_oid_start: The start of the CQRQ range
+ * @cqrq_oid_end: The end of the CQRQ range
+ * @umq_length: Length of up message queue
+ * @umq_int_ctrl: Interrupt control for up Message queue
+ * @umq_gen_compl: Generate completion for up message queue
+ * @dmq_length: Length of down message queue
+ * @dmq_int_ctrl: Interrupt control for down Message queue
+ * @dmq_gen_compl: Generate completion for down message queue
+ *
+ * This structure is used by the driver to pass the configuration parameters to
+ * configure Virtual Path.
+ */
+typedef struct xge_hal_vp_config_t {
+ u32 vp_id;
+ u32 vp_valid;
+#define XGE_HAL_VP_CONFIG_INVALID 0
+#define XGE_HAL_VP_CONFIG_VALID 1
+
+ int bitmap_intr_num;
+#define XGE_HAL_BITMAP_INTR_NUM_MIN 0
+#define XGE_HAL_BITMAP_INTR_NUM_MAX 3
+
+ u32 nce_oid_start;
+ u32 nce_oid_end;
+ u32 session_oid_start;
+ u32 session_oid_end;
+ u32 srq_oid_start;
+ u32 srq_oid_end;
+ u32 cqrq_oid_start;
+ u32 cqrq_oid_end;
+ u32 umq_length;
+ u32 umq_int_ctrl;
+ u32 umq_gen_compl;
+ u32 dmq_length;
+ u32 dmq_int_ctrl;
+ u32 dmq_gen_compl;
+} xge_hal_vp_config_t;
+
+#endif
+
+/**
+ * struct xge_hal_device_config_t - Device configuration.
+ * @mtu: Current mtu size.
+ * @isr_polling_cnt: Maximum number of times to "poll" for Tx and Rx
+ * completions. Used in xge_hal_device_handle_irq().
+ * @latency_timer: Specifies, in units of PCI bus clocks, and in conformance
+ * with the PCI Specification, the value of the Latency Timer
+ * for this PCI bus master.
+ * Specify either zero or -1 to use BIOS default.
+ * @napi_weight: (TODO)
+ * @max_splits_trans: Maximum number of PCI-X split transactions.
+ * Specify (-1) to use BIOS default.
+ * @mmrb_count: Maximum Memory Read Byte Count. Use (-1) to use default
+ * BIOS value. Otherwise: mmrb_count = 0 corresponds to 512B;
+ * 1 - 1KB, 2 - 2KB, and 3 - 4KB.
+ * @shared_splits: The number of Outstanding Split Transactions that is
+ * shared by Tx and Rx requests. The device stops issuing Tx
+ * requests once the number of Outstanding Split Transactions is
+ * equal to the value of Shared_Splits.
+ * A value of zero indicates that the Tx and Rx share all allocated
+ * Split Requests, i.e. the device can issue both types (Tx and Rx)
+ * of read requests until the number of Maximum Outstanding Split
+ * Transactions is reached.
+ * @stats_refresh_time_sec: Sets the default interval for automatic stats transfer
+ * to the host. This includes MAC stats as well as PCI stats.
+ * See xge_hal_stats_hw_info_t{}.
+ * @pci_freq_mherz: PCI clock frequency, e.g.: 133 for 133MHz.
+ * @intr_mode: Line, MSI, or MSI-X interrupt.
+ * @sched_timer_us: If greater than zero, specifies time interval
+ * (in microseconds) for the device to generate
+ * interrupt. Note that unlike tti and rti interrupts,
+ * the scheduled interrupt is generated independently of
+ * whether there is transmit or receive traffic, respectively.
+ * @sched_timer_one_shot: 1 - generate scheduled interrupt only once.
+ * 0 - generate scheduled interrupt periodically at the specified
+ * @sched_timer_us interval.
+ *
+ * @ring: See xge_hal_ring_config_t{}.
+ * @mac: See xge_hal_mac_config_t{}.
+ * @fifo: See xge_hal_fifo_config_t{}. Per-fifo transmit interrupt (TTI)
+ * settings live in xge_hal_fifo_queue_t{}.
+ *
+ * @dump_on_serr: Dump adapter state ("about", statistics, registers) on SERR#.
+ * @dump_on_eccerr: Dump adapter state ("about", statistics, registers) on
+ * ECC error.
+ * @dump_on_parityerr: Dump adapter state ("about", statistics, registers) on
+ * parity error.
+ * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
+ * @rth_bucket_size: RTH bucket width (in bits). For valid range please see
+ * xge_hal_device_config_t{} in the driver sources.
+ * @rth_spdm_en: Enable Receive Traffic Hashing(RTH) using SPDM(Socket Pair
+ * Direct Match).
+ * @rth_spdm_use_l4: Set to 1, if the L4 ports are used in the calculation of
+ * hash value in the RTH SPDM based steering.
+ * @rxufca_intr_thres: (TODO)
+ * @rxufca_lo_lim: (TODO)
+ * @rxufca_hi_lim: (TODO)
+ * @rxufca_lbolt_period: (TODO)
+ * @link_valid_cnt: Link-valid counting is done only at device-open time,
+ * to determine with the specified certainty that the link is up. See also
+ * @link_retry_cnt.
+ * @link_retry_cnt: Max number of polls for link-up. Done only at device
+ * open time. Reducing this value, as well as @link_valid_cnt,
+ * speeds up device startup, which may be important if the driver
+ * is compiled into the OS.
+ * @link_stability_period: Specify the period for which the link must be
+ * stable in order for the adapter to declare "LINK UP".
+ * The enumerated settings (see Xframe-II UG) are:
+ * 0 ........... instantaneous
+ * 1 ........... 500 µs
+ * 2 ........... 1 ms
+ * 3 ........... 64 ms
+ * 4 ........... 256 ms
+ * 5 ........... 512 ms
+ * 6 ........... 1 s
+ * 7 ........... 2 s
+ * @device_poll_millis: Specify the interval (in milliseconds) between
+ * successive xge_hal_device_poll() runs.
+ * @no_isr_events: TBD
+ * @lro_sg_size: TBD
+ * @lro_frm_len: TBD
+ * @bimodal_interrupts: Enable bimodal interrupts in device
+ * @bimodal_timer_lo_us: TBD
+ * @bimodal_timer_hi_us: TBD
+ * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
+ * @rts_qos_en: TBD
+ * @rts_port_en: TBD
+ * @vp_config: Configuration for virtual paths
+ * @max_cqe_groups: The maximum number of adapter CQE group blocks a CQRQ
+ * can own at any one time.
+ * @max_num_wqe_od_groups: The maximum number of WQE Headers/OD Groups that
+ * this S-RQ can own at any one time.
+ * @no_wqe_threshold: Maximum number of times adapter polls WQE Hdr blocks for
+ * WQEs before generating a message or interrupt.
+ * @refill_threshold_high: This field provides a hysteresis upper bound for
+ * automatic adapter refill operations.
+ * @refill_threshold_low: This field provides a hysteresis lower bound for
+ * automatic adapter refill operations.
+ * @eol_policy: This field sets the policy for handling the end-of-list condition.
+ * 2'b00 - When EOL is reached, poll until the last block wrapper size is no longer 0.
+ * 2'b01 - Send a UMQ message when EOL is reached.
+ * 2'b1x - Poll until @eol_poll_count_max is reached and, if still EOL, send a UMQ message.
+ * @eol_poll_count_max: Sets the maximum number of times the queue manager will poll for
+ * a non-zero block wrapper before giving up and sending a UMQ message.
+ * @ack_blk_limit: Limit on the maximum number of ACK list blocks that can be held
+ * by a session at any one time.
+ * @poll_or_doorbell: TBD
+ *
+ * Xframe configuration.
+ * Contains per-device configuration parameters, including:
+ * - latency timer (settable via PCI configuration space);
+ * - maximum number of split transactions;
+ * - maximum number of shared splits;
+ * - stats sampling interval, etc.
+ *
+ * In addition, xge_hal_device_config_t{} includes "subordinate"
+ * configurations, including:
+ * - fifos and rings;
+ * - MAC (see xge_hal_mac_config_t{}).
+ *
+ * See Xframe User Guide for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the xge_hal_device_config_t{} structure. Please refer to the
+ * corresponding include file.
+ * See also: xge_hal_tti_config_t{}, xge_hal_stats_hw_info_t{},
+ * xge_hal_mac_config_t{}.
+ */
+typedef struct xge_hal_device_config_t {
+ int mtu;
+#define XGE_HAL_MIN_INITIAL_MTU XGE_HAL_MIN_MTU
+#define XGE_HAL_MAX_INITIAL_MTU XGE_HAL_MAX_MTU
+
+ int isr_polling_cnt;
+#define XGE_HAL_MIN_ISR_POLLING_CNT 0
+#define XGE_HAL_MAX_ISR_POLLING_CNT 65536
+
+ int latency_timer;
+#define XGE_HAL_USE_BIOS_DEFAULT_LATENCY -1
+#define XGE_HAL_MIN_LATENCY_TIMER 8
+#define XGE_HAL_MAX_LATENCY_TIMER 255
+
+ int napi_weight;
+#define XGE_HAL_DEF_NAPI_WEIGHT 64
+
+ int max_splits_trans;
+#define XGE_HAL_USE_BIOS_DEFAULT_SPLITS -1
+#define XGE_HAL_ONE_SPLIT_TRANSACTION 0
+#define XGE_HAL_TWO_SPLIT_TRANSACTION 1
+#define XGE_HAL_THREE_SPLIT_TRANSACTION 2
+#define XGE_HAL_FOUR_SPLIT_TRANSACTION 3
+#define XGE_HAL_EIGHT_SPLIT_TRANSACTION 4
+#define XGE_HAL_TWELVE_SPLIT_TRANSACTION 5
+#define XGE_HAL_SIXTEEN_SPLIT_TRANSACTION 6
+#define XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION 7
+
+ int mmrb_count;
+#define XGE_HAL_DEFAULT_BIOS_MMRB_COUNT -1
+#define XGE_HAL_MIN_MMRB_COUNT 0 /* 512b */
+#define XGE_HAL_MAX_MMRB_COUNT 3 /* 4k */
+
+ int shared_splits;
+#define XGE_HAL_MIN_SHARED_SPLITS 0
+#define XGE_HAL_MAX_SHARED_SPLITS 31
+
+ int stats_refresh_time_sec;
+#define XGE_HAL_STATS_REFRESH_DISABLE 0
+#define XGE_HAL_MIN_STATS_REFRESH_TIME 1
+#define XGE_HAL_MAX_STATS_REFRESH_TIME 300
+
+ int pci_freq_mherz;
+#define XGE_HAL_PCI_FREQ_MHERZ_33 33
+#define XGE_HAL_PCI_FREQ_MHERZ_66 66
+#define XGE_HAL_PCI_FREQ_MHERZ_100 100
+#define XGE_HAL_PCI_FREQ_MHERZ_133 133
+#define XGE_HAL_PCI_FREQ_MHERZ_266 266
+
+ int intr_mode;
+#define XGE_HAL_INTR_MODE_IRQLINE 0
+#define XGE_HAL_INTR_MODE_MSI 1
+#define XGE_HAL_INTR_MODE_MSIX 2
+
+ int sched_timer_us;
+#define XGE_HAL_SCHED_TIMER_DISABLED 0
+#define XGE_HAL_SCHED_TIMER_MIN 0
+#define XGE_HAL_SCHED_TIMER_MAX 0xFFFFF
+
+ int sched_timer_one_shot;
+#define XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE 0
+#define XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE 1
+
+ xge_hal_ring_config_t ring;
+ xge_hal_mac_config_t mac;
+ xge_hal_fifo_config_t fifo;
+
+ int dump_on_serr;
+#define XGE_HAL_DUMP_ON_SERR_DISABLE 0
+#define XGE_HAL_DUMP_ON_SERR_ENABLE 1
+
+ int dump_on_eccerr;
+#define XGE_HAL_DUMP_ON_ECCERR_DISABLE 0
+#define XGE_HAL_DUMP_ON_ECCERR_ENABLE 1
+
+ int dump_on_parityerr;
+#define XGE_HAL_DUMP_ON_PARITYERR_DISABLE 0
+#define XGE_HAL_DUMP_ON_PARITYERR_ENABLE 1
+
+ int rth_en;
+#define XGE_HAL_RTH_DISABLE 0
+#define XGE_HAL_RTH_ENABLE 1
+
+ int rth_bucket_size;
+#define XGE_HAL_MIN_RTH_BUCKET_SIZE 1
+#define XGE_HAL_MAX_RTH_BUCKET_SIZE 8
+
+ int rth_spdm_en;
+#define XGE_HAL_RTH_SPDM_DISABLE 0
+#define XGE_HAL_RTH_SPDM_ENABLE 1
+
+ int rth_spdm_use_l4;
+#define XGE_HAL_RTH_SPDM_USE_L4 1
+
+ int rxufca_intr_thres;
+#define XGE_HAL_RXUFCA_INTR_THRES_MIN 1
+#define XGE_HAL_RXUFCA_INTR_THRES_MAX 4096
+
+ int rxufca_lo_lim;
+#define XGE_HAL_RXUFCA_LO_LIM_MIN 1
+#define XGE_HAL_RXUFCA_LO_LIM_MAX 16
+
+ int rxufca_hi_lim;
+#define XGE_HAL_RXUFCA_HI_LIM_MIN 1
+#define XGE_HAL_RXUFCA_HI_LIM_MAX 256
+
+ int rxufca_lbolt_period;
+#define XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN 1
+#define XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX 1024
+
+ int link_valid_cnt;
+#define XGE_HAL_LINK_VALID_CNT_MIN 0
+#define XGE_HAL_LINK_VALID_CNT_MAX 127
+
+ int link_retry_cnt;
+#define XGE_HAL_LINK_RETRY_CNT_MIN 0
+#define XGE_HAL_LINK_RETRY_CNT_MAX 127
+
+ int link_stability_period;
+#define XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD 2 /* 1ms */
+#define XGE_HAL_MIN_LINK_STABILITY_PERIOD 0 /* instantaneous */
+#define XGE_HAL_MAX_LINK_STABILITY_PERIOD 7 /* 2s */
+
+ int device_poll_millis;
+#define XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS 1000
+#define XGE_HAL_MIN_DEVICE_POLL_MILLIS 1
+#define XGE_HAL_MAX_DEVICE_POLL_MILLIS 100000
+
+ int no_isr_events;
+#define XGE_HAL_NO_ISR_EVENTS_MIN 0
+#define XGE_HAL_NO_ISR_EVENTS_MAX 1
+
+ int lro_sg_size;
+#define XGE_HAL_LRO_DEFAULT_SG_SIZE 10
+#define XGE_HAL_LRO_MIN_SG_SIZE 1
+#define XGE_HAL_LRO_MAX_SG_SIZE 64
+
+ int lro_frm_len;
+#define XGE_HAL_LRO_DEFAULT_FRM_LEN 65536
+#define XGE_HAL_LRO_MIN_FRM_LEN 4096
+#define XGE_HAL_LRO_MAX_FRM_LEN 65536
+
+ int bimodal_interrupts;
+#define XGE_HAL_BIMODAL_INTR_MIN -1
+#define XGE_HAL_BIMODAL_INTR_MAX 1
+
+ int bimodal_timer_lo_us;
+#define XGE_HAL_BIMODAL_TIMER_LO_US_MIN 1
+#define XGE_HAL_BIMODAL_TIMER_LO_US_MAX 127
+
+ int bimodal_timer_hi_us;
+#define XGE_HAL_BIMODAL_TIMER_HI_US_MIN 128
+#define XGE_HAL_BIMODAL_TIMER_HI_US_MAX 65535
+
+ int rts_mac_en;
+#define XGE_HAL_RTS_MAC_DISABLE 0
+#define XGE_HAL_RTS_MAC_ENABLE 1
+
+ int rts_qos_en;
+#define XGE_HAL_RTS_QOS_DISABLE 0
+#define XGE_HAL_RTS_QOS_ENABLE 1
+
+ int rts_port_en;
+#define XGE_HAL_RTS_PORT_DISABLE 0
+#define XGE_HAL_RTS_PORT_ENABLE 1
+
+#ifdef XGEHAL_RNIC
+
+ xge_hal_vp_config_t vp_config[XGE_HAL_MAX_VIRTUAL_PATHS];
+
+ int max_cqe_groups;
+#define XGE_HAL_MAX_CQE_GROUPS_MIN 1
+#define XGE_HAL_MAX_CQE_GROUPS_MAX 16
+
+ int max_num_wqe_od_groups;
+#define XGE_HAL_MAX_NUM_OD_GROUPS_MIN 1
+#define XGE_HAL_MAX_NUM_OD_GROUPS_MAX 16
+
+ int no_wqe_threshold;
+#define XGE_HAL_NO_WQE_THRESHOLD_MIN 1
+#define XGE_HAL_NO_WQE_THRESHOLD_MAX 16
+
+ int refill_threshold_high;
+#define XGE_HAL_REFILL_THRESHOLD_HIGH_MIN 1
+#define XGE_HAL_REFILL_THRESHOLD_HIGH_MAX 16
+
+ int refill_threshold_low;
+#define XGE_HAL_REFILL_THRESHOLD_LOW_MIN 1
+#define XGE_HAL_REFILL_THRESHOLD_LOW_MAX 16
+
+ int ack_blk_limit;
+#define XGE_HAL_ACK_BLOCK_LIMIT_MIN 1
+#define XGE_HAL_ACK_BLOCK_LIMIT_MAX 16
+
+ int poll_or_doorbell;
+#define XGE_HAL_POLL_OR_DOORBELL_POLL 1
+#define XGE_HAL_POLL_OR_DOORBELL_DOORBELL 0
+
+
+#endif
+
+} xge_hal_device_config_t;
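+
+/*
+ * Example: top-level device configuration (a hedged sketch; only a few of
+ * the fields are shown, the values are illustrative, and xge_os_memzero()
+ * is the OS-pal helper):
+ *
+ *      xge_hal_device_config_t dconf;
+ *
+ *      xge_os_memzero(&dconf, sizeof(dconf));
+ *      dconf.mtu              = 1500;
+ *      dconf.intr_mode        = XGE_HAL_INTR_MODE_MSIX;
+ *      dconf.latency_timer    = XGE_HAL_USE_BIOS_DEFAULT_LATENCY;
+ *      dconf.max_splits_trans = XGE_HAL_USE_BIOS_DEFAULT_SPLITS;
+ *      dconf.mmrb_count       = XGE_HAL_DEFAULT_BIOS_MMRB_COUNT;
+ *      dconf.stats_refresh_time_sec = 4;
+ *      dconf.device_poll_millis     = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
+ *      ... fill dconf.fifo, dconf.ring and dconf.mac as shown above ...
+ *
+ * The completed structure is then validated by the configuration-check
+ * routines declared below as part of device initialization.
+ */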
+
+/**
+ * struct xge_hal_driver_config_t - HAL (layer) configuration.
+ * @periodic_poll_interval_millis: Interval, in milliseconds, which is used to
+ * periodically poll HAL, i.e, invoke
+ * xge_hal_device_poll().
+ * Note that HAL does not maintain its own
+ * polling context. HAL relies on ULD to
+ * provide one.
+ * @queue_size_initial: Initial size of the HAL protected event queue.
+ * The queue is shared by HAL and upper-layer drivers.
+ * The queue is used to exchange and process slow-path
+ * events. See xge_hal_event_e.
+ * @queue_size_max: Maximum size of the HAL queue. Depending on the load,
+ * the queue may grow at run-time up to @queue_size_max.
+ * @tracebuf_size: Size of the trace buffer. Set it to '0' to disable.
+ * @tracebuf_timestamp_en: Set to 1 to timestamp trace-buffer entries.
+ *
+ * HAL configuration. (Note: do not confuse the HAL layer with (possibly multiple)
+ * HAL devices.)
+ * Currently this structure contains just a few basic values.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the structure. Please refer to the corresponding header file.
+ * See also: xge_hal_device_poll()
+ */
+typedef struct xge_hal_driver_config_t {
+ int queue_size_initial;
+#define XGE_HAL_MIN_QUEUE_SIZE_INITIAL 1
+#define XGE_HAL_MAX_QUEUE_SIZE_INITIAL 16
+
+ int queue_size_max;
+#define XGE_HAL_MIN_QUEUE_SIZE_MAX 1
+#define XGE_HAL_MAX_QUEUE_SIZE_MAX 16
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ int tracebuf_size;
+#define XGE_HAL_MIN_CIRCULAR_ARR 4096
+#define XGE_HAL_MAX_CIRCULAR_ARR 1048576
+#define XGE_HAL_DEF_CIRCULAR_ARR XGE_OS_HOST_PAGE_SIZE
+
+ int tracebuf_timestamp_en;
+#define XGE_HAL_MIN_TIMESTAMP_EN 0
+#define XGE_HAL_MAX_TIMESTAMP_EN 1
+#endif
+
+} xge_hal_driver_config_t;
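+
+/*
+ * Example (illustrative values; xge_os_memzero() is the OS-pal helper):
+ *
+ *      xge_hal_driver_config_t drvconf;
+ *
+ *      xge_os_memzero(&drvconf, sizeof(drvconf));
+ *      drvconf.queue_size_initial = 1;
+ *      drvconf.queue_size_max     = 4;
+ */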
+
+
+/* ========================== PRIVATE API ================================= */
+
+xge_hal_status_e
+__hal_device_config_check_common (xge_hal_device_config_t *new_config);
+
+xge_hal_status_e
+__hal_device_config_check_xena (xge_hal_device_config_t *new_config);
+
+xge_hal_status_e
+__hal_device_config_check_herc (xge_hal_device_config_t *new_config);
+
+xge_hal_status_e
+__hal_driver_config_check (xge_hal_driver_config_t *new_config);
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_CONFIG_H */
diff --git a/sys/dev/nxge/include/xgehal-device.h b/sys/dev/nxge/include/xgehal-device.h
new file mode 100644
index 0000000..22bc792
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-device.h
@@ -0,0 +1,1036 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-device.h
+ *
+ * Description: HAL device object functionality
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_HAL_DEVICE_H
+#define XGE_HAL_DEVICE_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-queue.h>
+#include <dev/nxge/include/xgehal-event.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-regs.h>
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-stats.h>
+#include <dev/nxge/include/xgehal-ring.h>
+#ifdef XGEHAL_RNIC
+#include "xgehal-common-regs.h"
+#include "xgehal-pcicfg-mgmt-regs.h"
+#include "xgehal-mrpcim-regs.h"
+#include "xgehal-srpcim-regs.h"
+#include "xgehal-vpath-regs.h"
+#include "xgehal-bitmap.h"
+#include "xgehal-virtualpath.h"
+#include "xgehal-lbwrapper.h"
+#include "xgehal-blockpool.h"
+#include "xgehal-regpool.h"
+#endif
+
+__EXTERN_BEGIN_DECLS
+
+#define XGE_HAL_VPD_LENGTH 80
+#define XGE_HAL_CARD_XENA_VPD_ADDR 0x50
+#define XGE_HAL_CARD_HERC_VPD_ADDR 0x80
+#define XGE_HAL_VPD_READ_COMPLETE 0x80
+#define XGE_HAL_VPD_BUFFER_SIZE 128
+#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500
+#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500
+#define XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS 500
+#define XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS 50
+#define XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS 250
+#define XGE_HAL_DEVICE_SPDM_READY_WAIT_MAX_MILLIS 250 /* TODO */
+
+#define XGE_HAL_MAGIC 0x12345678
+#define XGE_HAL_DEAD 0xDEADDEAD
+#define XGE_HAL_DUMP_BUF_SIZE 0x4000
+
+#define XGE_HAL_LRO_MAX_BUCKETS 32
+
+/**
+ * enum xge_hal_card_e - Xframe adapter type.
+ * @XGE_HAL_CARD_UNKNOWN: Unknown device.
+ * @XGE_HAL_CARD_XENA: Xframe I device.
+ * @XGE_HAL_CARD_HERC: Xframe II (PCI-266MHz) device.
+ * @XGE_HAL_CARD_TITAN: Xframe ER (PCI-266MHz) device.
+ *
+ * Enumerates Xframe adapter types. The corresponding PCI device
+ * IDs are listed in the file xge-defs.h.
+ * (See XGE_PCI_DEVICE_ID_XENA_1, etc.)
+ *
+ * See also: xge_hal_device_check_id().
+ */
+typedef enum xge_hal_card_e {
+ XGE_HAL_CARD_UNKNOWN = 0,
+ XGE_HAL_CARD_XENA = 1,
+ XGE_HAL_CARD_HERC = 2,
+ XGE_HAL_CARD_TITAN = 3,
+} xge_hal_card_e;
+
+/**
+ * struct xge_hal_device_attr_t - Device memory spaces.
+ * @regh0: BAR0 mapped memory handle (Solaris), or simply PCI device @pdev
+ * (Linux and the rest.)
+ * @regh1: BAR1 mapped memory handle. Same comment as above.
+ * @regh2: BAR2 mapped memory handle. Same comment as above.
+ * @bar0: BAR0 virtual address.
+ * @bar1: BAR1 virtual address.
+ * @bar2: BAR2 virtual address.
+ * @irqh: IRQ handle (Solaris).
+ * @cfgh: Configuration space handle (Solaris), or PCI device @pdev (Linux).
+ * @pdev: PCI device object.
+ *
+ * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device
+ * mapped memories. Also, includes a pointer to OS-specific PCI device object.
+ */
+typedef struct xge_hal_device_attr_t {
+ pci_reg_h regh0;
+ pci_reg_h regh1;
+ pci_reg_h regh2;
+ char *bar0;
+ char *bar1;
+ char *bar2;
+ pci_irq_h irqh;
+ pci_cfg_h cfgh;
+ pci_dev_h pdev;
+} xge_hal_device_attr_t;
+
+/**
+ * enum xge_hal_device_link_state_e - Link state enumeration.
+ * @XGE_HAL_LINK_NONE: Invalid link state.
+ * @XGE_HAL_LINK_DOWN: Link is down.
+ * @XGE_HAL_LINK_UP: Link is up.
+ *
+ */
+typedef enum xge_hal_device_link_state_e {
+ XGE_HAL_LINK_NONE,
+ XGE_HAL_LINK_DOWN,
+ XGE_HAL_LINK_UP
+} xge_hal_device_link_state_e;
+
+
+/**
+ * enum xge_hal_pci_mode_e - PCI bus speed and mode specific enumeration.
+ * @XGE_HAL_PCI_33MHZ_MODE: 33 MHZ pci mode.
+ * @XGE_HAL_PCI_66MHZ_MODE: 66 MHZ pci mode.
+ * @XGE_HAL_PCIX_M1_66MHZ_MODE: PCIX M1 66MHZ mode.
+ * @XGE_HAL_PCIX_M1_100MHZ_MODE: PCIX M1 100MHZ mode.
+ * @XGE_HAL_PCIX_M1_133MHZ_MODE: PCIX M1 133MHZ mode.
+ * @XGE_HAL_PCIX_M2_66MHZ_MODE: PCIX M2 66MHZ mode.
+ * @XGE_HAL_PCIX_M2_100MHZ_MODE: PCIX M2 100MHZ mode.
+ * @XGE_HAL_PCIX_M2_133MHZ_MODE: PCIX M2 133MHZ mode.
+ * @XGE_HAL_PCIX_M1_RESERVED: PCIX M1 reserved mode.
+ * @XGE_HAL_PCIX_M1_66MHZ_NS: PCIX M1 66MHZ mode not supported.
+ * @XGE_HAL_PCIX_M1_100MHZ_NS: PCIX M1 100MHZ mode not supported.
+ * @XGE_HAL_PCIX_M1_133MHZ_NS: PCIX M1 133MHZ not supported.
+ * @XGE_HAL_PCIX_M2_RESERVED: PCIX M2 reserved.
+ * @XGE_HAL_PCIX_533_RESERVED: PCIX 533 reserved.
+ * @XGE_HAL_PCI_BASIC_MODE: PCI basic mode, XENA specific value.
+ * @XGE_HAL_PCIX_BASIC_MODE: PCIX basic mode, XENA specific value.
+ * @XGE_HAL_PCI_INVALID_MODE: Invalid PCI or PCIX mode.
+ *
+ */
+typedef enum xge_hal_pci_mode_e {
+ XGE_HAL_PCI_33MHZ_MODE = 0x0,
+ XGE_HAL_PCI_66MHZ_MODE = 0x1,
+ XGE_HAL_PCIX_M1_66MHZ_MODE = 0x2,
+ XGE_HAL_PCIX_M1_100MHZ_MODE = 0x3,
+ XGE_HAL_PCIX_M1_133MHZ_MODE = 0x4,
+ XGE_HAL_PCIX_M2_66MHZ_MODE = 0x5,
+ XGE_HAL_PCIX_M2_100MHZ_MODE = 0x6,
+ XGE_HAL_PCIX_M2_133MHZ_MODE = 0x7,
+ XGE_HAL_PCIX_M1_RESERVED = 0x8,
+ XGE_HAL_PCIX_M1_66MHZ_NS = 0xA,
+ XGE_HAL_PCIX_M1_100MHZ_NS = 0xB,
+ XGE_HAL_PCIX_M1_133MHZ_NS = 0xC,
+ XGE_HAL_PCIX_M2_RESERVED = 0xD,
+ XGE_HAL_PCIX_533_RESERVED = 0xE,
+ XGE_HAL_PCI_BASIC_MODE = 0x10,
+ XGE_HAL_PCIX_BASIC_MODE = 0x11,
+ XGE_HAL_PCI_INVALID_MODE = 0x12,
+} xge_hal_pci_mode_e;
+
+/**
+ * enum xge_hal_pci_bus_frequency_e - PCI bus frequency enumeration.
+ * @XGE_HAL_PCI_BUS_FREQUENCY_33MHZ: PCI bus frequency 33MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_250MHZ: PCI bus frequency 250MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value.
+ *
+ */
+typedef enum xge_hal_pci_bus_frequency_e {
+ XGE_HAL_PCI_BUS_FREQUENCY_33MHZ = 33,
+ XGE_HAL_PCI_BUS_FREQUENCY_66MHZ = 66,
+ XGE_HAL_PCI_BUS_FREQUENCY_100MHZ = 100,
+ XGE_HAL_PCI_BUS_FREQUENCY_133MHZ = 133,
+ XGE_HAL_PCI_BUS_FREQUENCY_200MHZ = 200,
+ XGE_HAL_PCI_BUS_FREQUENCY_250MHZ = 250,
+ XGE_HAL_PCI_BUS_FREQUENCY_266MHZ = 266,
+ XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN = 0
+} xge_hal_pci_bus_frequency_e;
+
+/**
+ * enum xge_hal_pci_bus_width_e - PCI bus width enumeration.
+ * @XGE_HAL_PCI_BUS_WIDTH_64BIT: 64 bit bus width.
+ * @XGE_HAL_PCI_BUS_WIDTH_32BIT: 32 bit bus width.
+ * @XGE_HAL_PCI_BUS_WIDTH_UNKNOWN: unknown bus width.
+ *
+ */
+typedef enum xge_hal_pci_bus_width_e {
+ XGE_HAL_PCI_BUS_WIDTH_64BIT = 0,
+ XGE_HAL_PCI_BUS_WIDTH_32BIT = 1,
+ XGE_HAL_PCI_BUS_WIDTH_UNKNOWN = 2,
+} xge_hal_pci_bus_width_e;
+
+#if defined (XGE_HAL_CONFIG_LRO)
+
+#define IP_TOTAL_LENGTH_OFFSET 2
+#define IP_FAST_PATH_HDR_MASK 0x45
+#define TCP_FAST_PATH_HDR_MASK1 0x50
+#define TCP_FAST_PATH_HDR_MASK2 0x10
+#define TCP_FAST_PATH_HDR_MASK3 0x18
+#define IP_SOURCE_ADDRESS_OFFSET 12
+#define IP_DESTINATION_ADDRESS_OFFSET 16
+#define TCP_DESTINATION_PORT_OFFSET 2
+#define TCP_SOURCE_PORT_OFFSET 0
+#define TCP_DATA_OFFSET_OFFSET 12
+#define TCP_WINDOW_OFFSET 14
+#define TCP_SEQUENCE_NUMBER_OFFSET 4
+#define TCP_ACKNOWLEDGEMENT_NUMBER_OFFSET 8
+
+typedef struct tcplro {
+ u16 source;
+ u16 dest;
+ u32 seq;
+ u32 ack_seq;
+ u8 doff_res;
+ u8 ctrl;
+ u16 window;
+ u16 check;
+ u16 urg_ptr;
+} tcplro_t;
+
+typedef struct iplro {
+ u8 version_ihl;
+ u8 tos;
+ u16 tot_len;
+ u16 id;
+ u16 frag_off;
+ u8 ttl;
+ u8 protocol;
+ u16 check;
+ u32 saddr;
+ u32 daddr;
+ /* The options start here. */
+} iplro_t;
+
+/*
+ * LRO object, one per LRO session.
+ */
+typedef struct lro {
+ /* non-linear: contains scatter-gather list of
+ xframe-mapped received buffers */
+ OS_NETSTACK_BUF os_buf;
+ OS_NETSTACK_BUF os_buf_end;
+
+ /* link layer header of the first frame;
+ remains intact throughout the processing */
+ u8 *ll_hdr;
+
+ /* IP header - gets _collapsed_ */
+ iplro_t *ip_hdr;
+
+ /* transport header - gets _collapsed_ */
+ tcplro_t *tcp_hdr;
+
+ /* Next tcp sequence number */
+ u32 tcp_next_seq_num;
+ /* Current tcp seq & ack */
+ u32 tcp_seq_num;
+ u32 tcp_ack_num;
+
+ /* total number of accumulated (so far) frames */
+ int sg_num;
+
+ /* total data length */
+ int total_length;
+
+ /* receive side hash value, available from Hercules */
+ u32 rth_value;
+
+ /* In use */
+ u8 in_use;
+
+ /* Total length of the fragments clubbed with the initial frame */
+ u32 frags_len;
+
+ /* LRO frame contains time stamp, if (ts_off != -1) */
+ int ts_off;
+
+} lro_t;
+#endif
+
+/*
+ * xge_hal_spdm_entry_t
+ *
+ * Represents a single spdm entry in the SPDM table.
+ */
+typedef struct xge_hal_spdm_entry_t {
+ xge_hal_ipaddr_t src_ip;
+ xge_hal_ipaddr_t dst_ip;
+ u32 jhash_value;
+ u16 l4_sp;
+ u16 l4_dp;
+ u16 spdm_entry;
+ u8 in_use;
+ u8 is_tcp;
+ u8 is_ipv4;
+ u8 tgt_queue;
+} xge_hal_spdm_entry_t;
+
+#if defined(XGE_HAL_CONFIG_LRO)
+typedef struct {
+ lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS];
+ int lro_next_idx;
+ lro_t *lro_recent;
+} xge_hal_lro_desc_t;
+#endif
+/*
+ * xge_hal_vpd_data_t
+ *
+ * Represents the VPD capability structure
+ */
+typedef struct xge_hal_vpd_data_t {
+ u8 product_name[XGE_HAL_VPD_LENGTH];
+ u8 serial_num[XGE_HAL_VPD_LENGTH];
+} xge_hal_vpd_data_t;
+
+/*
+ * xge_hal_device_t
+ *
+ * HAL device object. Represents Xframe.
+ */
+typedef struct {
+ unsigned int magic;
+ pci_reg_h regh0;
+ pci_reg_h regh1;
+ pci_reg_h regh2;
+ char *bar0;
+ char *isrbar0;
+ char *bar1;
+ char *bar2;
+ pci_irq_h irqh;
+ pci_cfg_h cfgh;
+ pci_dev_h pdev;
+ xge_hal_pci_config_t pci_config_space;
+ xge_hal_pci_config_t pci_config_space_bios;
+ xge_hal_device_config_t config;
+ xge_list_t free_channels;
+ xge_list_t fifo_channels;
+ xge_list_t ring_channels;
+#ifdef XGEHAL_RNIC
+ __hal_bitmap_entry_t bitmap_table[XGE_HAL_MAX_BITMAP_BITS];
+ __hal_virtualpath_t virtual_paths[XGE_HAL_MAX_VIRTUAL_PATHS];
+ __hal_blockpool_t block_pool;
+ __hal_regpool_t reg_pool;
+#endif
+ volatile int is_initialized;
+ volatile int terminating;
+ xge_hal_stats_t stats;
+ macaddr_t macaddr[1];
+ xge_queue_h queueh;
+ volatile int mcast_refcnt;
+ int is_promisc;
+ volatile xge_hal_device_link_state_e link_state;
+ void *upper_layer_info;
+ xge_hal_device_attr_t orig_attr;
+ u16 device_id;
+ u8 revision;
+ int msi_enabled;
+ int hw_is_initialized;
+ u64 inject_serr;
+ u64 inject_ecc;
+ u8 inject_bad_tcode;
+ int inject_bad_tcode_for_chan_type;
+ int reset_needed_after_close;
+ int tti_enabled;
+ xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM];
+ int bimodal_timer_val_us;
+ int bimodal_urange_a_en;
+ int bimodal_intr_cnt;
+ char *spdm_mem_base;
+ u16 spdm_max_entries;
+ xge_hal_spdm_entry_t **spdm_table;
+ spinlock_t spdm_lock;
+ u32 msi_mask;
+#if defined(XGE_HAL_CONFIG_LRO)
+ xge_hal_lro_desc_t lro_desc[XGE_HAL_MAX_RING_NUM];
+#endif
+ spinlock_t xena_post_lock;
+
+ /* bimodal workload stats */
+ int irq_workload_rxd[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM];
+ int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM];
+ int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM];
+
+ int mtu_first_time_set;
+ u64 rxufca_lbolt;
+ u64 rxufca_lbolt_time;
+ u64 rxufca_intr_thres;
+ char* dump_buf;
+ xge_hal_pci_mode_e pci_mode;
+ xge_hal_pci_bus_frequency_e bus_frequency;
+ xge_hal_pci_bus_width_e bus_width;
+ xge_hal_vpd_data_t vpd_data;
+ volatile int in_poll;
+ u64 msix_vector_table[XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR];
+} xge_hal_device_t;
+
+
+/* ========================== PRIVATE API ================================= */
+
+void
+__hal_device_event_queued(void *data, int event_type);
+
+xge_hal_status_e
+__hal_device_set_swapper(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rth_it_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rth_spdm_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status);
+
+xge_hal_status_e
+__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
+ u16 spdm_entry, u64 *spdm_line_val);
+
+void __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val,
+ void *addr);
+
+void __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
+ void *addr);
+void __hal_device_get_vpd_data(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason);
+
+xge_hal_status_e
+__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, int op, u64 mask,
+ int max_millis);
+xge_hal_status_e
+__hal_device_rts_mac_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rts_qos_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rts_port_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime);
+
+void
+__hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag);
+
+void
+__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
+ xge_hal_channel_t *channel, int flag);
+
+/* =========================== PUBLIC API ================================= */
+
+unsigned int
+__hal_fix_time_ival_herc(xge_hal_device_t *hldev,
+ unsigned int time_ival);
+xge_hal_status_e
+xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable,
+ u32 itable_size);
+
+void
+xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
+ u16 bucket_size);
+
+void
+xge_hal_rts_rth_init(xge_hal_device_t *hldev);
+
+void
+xge_hal_rts_rth_clr(xge_hal_device_t *hldev);
+
+void
+xge_hal_rts_rth_start(xge_hal_device_t *hldev);
+
+void
+xge_hal_rts_rth_stop(xge_hal_device_t *hldev);
+
+void
+xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key);
+
+xge_hal_status_e
+xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr);
+
+xge_hal_status_e
+xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index);
+
+int xge_hal_reinitialize_hw(xge_hal_device_t *hldev);
+
+/**
+ * xge_hal_device_rti_reconfigure
+ * @hldev: HAL device handle.
+ */
+static inline xge_hal_status_e
+xge_hal_device_rti_reconfigure(xge_hal_device_t *hldev)
+{
+ return __hal_device_rti_configure(hldev, 1);
+}
+
+/**
+ * xge_hal_device_rts_port_reconfigure
+ * @hldev: HAL device handle.
+ */
+static inline xge_hal_status_e
+xge_hal_device_rts_port_reconfigure(xge_hal_device_t *hldev)
+{
+ return __hal_device_rts_port_configure(hldev);
+}
+
+/**
+ * xge_hal_device_is_initialized - Returns 0 if device is not
+ * initialized, non-zero otherwise.
+ * @devh: HAL device handle.
+ *
+ * Returns 0 if device is not initialized, non-zero otherwise.
+ */
+static inline int
+xge_hal_device_is_initialized(xge_hal_device_h devh)
+{
+ return ((xge_hal_device_t*)devh)->is_initialized;
+}
+
+
+/**
+ * xge_hal_device_in_poll - non-zero if xge_hal_device_poll() is executing.
+ * @devh: HAL device handle.
+ *
+ * Returns non-zero if xge_hal_device_poll() is executing, and 0 otherwise.
+ */
+static inline int
+xge_hal_device_in_poll(xge_hal_device_h devh)
+{
+ return ((xge_hal_device_t*)devh)->in_poll;
+}
+
+
+/**
+ * xge_hal_device_inject_ecc - Inject ECC error.
+ * @devh: HAL device, pointer to xge_hal_device_t structure.
+ * @err_reg: Contains the error register.
+ *
+ * This function is used to inject an ECC error into the driver flow.
+ * This facility can be used to test the driver flow in the
+ * case when an ECC error is reported by the firmware.
+ *
+ * Returns: void
+ * See also: xge_hal_device_inject_serr(),
+ * xge_hal_device_inject_bad_tcode()
+ */
+static inline void
+xge_hal_device_inject_ecc(xge_hal_device_h devh, u64 err_reg)
+{
+ ((xge_hal_device_t*)devh)->inject_ecc = err_reg;
+}
+
+
+/**
+ * xge_hal_device_inject_serr - Inject SERR error.
+ * @devh: HAL device, pointer to xge_hal_device_t structure.
+ * @err_reg: Contains the error register.
+ *
+ * This function is used to inject an SERR error into the driver flow.
+ * This facility can be used to test the driver flow in the
+ * case when an SERR error is reported by the firmware.
+ *
+ * Returns: void
+ * See also: xge_hal_device_inject_ecc(),
+ * xge_hal_device_inject_bad_tcode()
+ */
+static inline void
+xge_hal_device_inject_serr(xge_hal_device_h devh, u64 err_reg)
+{
+ ((xge_hal_device_t*)devh)->inject_serr = err_reg;
+}
+
+
+/**
+ * xge_hal_device_inject_bad_tcode - Inject Bad transfer code.
+ * @devh: HAL device, pointer to xge_hal_device_t structure.
+ * @chan_type: Channel type (fifo/ring).
+ * @t_code: Transfer code.
+ *
+ * This function is used to inject a bad (Tx/Rx data) transfer code
+ * into the driver flow.
+ *
+ * This facility can be used to test the driver flow in the
+ * case of bad transfer code reported by firmware for a Tx/Rx data
+ * transfer.
+ *
+ * Returns: void
+ * See also: xge_hal_device_inject_ecc(), xge_hal_device_inject_serr()
+ */
+static inline void
+xge_hal_device_inject_bad_tcode(xge_hal_device_h devh, int chan_type, u8 t_code)
+{
+ ((xge_hal_device_t*)devh)->inject_bad_tcode_for_chan_type = chan_type;
+ ((xge_hal_device_t*)devh)->inject_bad_tcode = t_code;
+}
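All three injectors merely latch values that the fault-handling paths consume later; in practice they would be driven from a debug-only hook. A hypothetical sketch (uld_debug_inject and the t_code value are illustrative; XGE_HAL_CHANNEL_TYPE_RING comes from xgehal-channel.h):

```c
static void
uld_debug_inject(xge_hal_device_h devh)
{
	/* Fake a bad transfer code on the next ring (Rx) completion. */
	xge_hal_device_inject_bad_tcode(devh, XGE_HAL_CHANNEL_TYPE_RING,
	    0x1 /* illustrative t_code */);
}
```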
+
+void xge_hal_device_msi_enable(xge_hal_device_h devh);
+
+/*
+ * xge_hal_device_msi_mode - Is MSI enabled?
+ * @devh: HAL device handle.
+ *
+ * Returns 0 if MSI is enabled for the specified device,
+ * non-zero otherwise.
+ */
+static inline int
+xge_hal_device_msi_mode(xge_hal_device_h devh)
+{
+ return ((xge_hal_device_t*)devh)->msi_enabled;
+}
+
+/**
+ * xge_hal_device_queue - Get per-device event queue.
+ * @devh: HAL device handle.
+ *
+ * Returns: event queue associated with the specified HAL device.
+ */
+static inline xge_queue_h
+xge_hal_device_queue (xge_hal_device_h devh)
+{
+ return ((xge_hal_device_t*)devh)->queueh;
+}
+
+/**
+ * xge_hal_device_attr - Get original (user-specified) device
+ * attributes.
+ * @devh: HAL device handle.
+ *
+ * Returns: original (user-specified) device attributes.
+ */
+static inline xge_hal_device_attr_t*
+xge_hal_device_attr(xge_hal_device_h devh)
+{
+ return &((xge_hal_device_t*)devh)->orig_attr;
+}
+
+/**
+ * xge_hal_device_private_set - Set ULD context.
+ * @devh: HAL device handle.
+ * @data: pointer to ULD context
+ *
+ * Use HAL device to set upper-layer driver (ULD) context.
+ *
+ * See also: xge_hal_device_from_private(), xge_hal_device_private()
+ */
+static inline void
+xge_hal_device_private_set(xge_hal_device_h devh, void *data)
+{
+ ((xge_hal_device_t*)devh)->upper_layer_info = data;
+}
+
+/**
+ * xge_hal_device_private - Get ULD context.
+ * @devh: HAL device handle.
+ *
+ * Use HAL device to get upper-layer driver (ULD) context.
+ *
+ * Returns: ULD context.
+ *
+ * See also: xge_hal_device_from_private(), xge_hal_device_private_set()
+ */
+static inline void*
+xge_hal_device_private(xge_hal_device_h devh)
+{
+ return ((xge_hal_device_t*)devh)->upper_layer_info;
+}
+
+/**
+ * xge_hal_device_from_private - Get HAL device object from private.
+ * @info_ptr: ULD context.
+ *
+ * Use ULD context to get HAL device.
+ *
+ * Returns: Device handle.
+ *
+ * See also: xge_hal_device_private(), xge_hal_device_private_set()
+ */
+static inline xge_hal_device_h
+xge_hal_device_from_private(void *info_ptr)
+{
+ return xge_container_of((void ** ) info_ptr, xge_hal_device_t,
+ upper_layer_info);
+}
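Taken together, the accessors above form a simple round trip between the HAL device and the ULD context. A hedged sketch; struct uld_softc and uld_attach_done are hypothetical names:

```c
struct uld_softc {
	xge_hal_device_h devh;
	/* ... per-interface ULD state ... */
};

static void
uld_attach_done(xge_hal_device_h devh, struct uld_softc *sc)
{
	/* Remember the ULD context inside the HAL device... */
	xge_hal_device_private_set(devh, sc);

	/* ...and read it back wherever only the handle is available. */
	sc = (struct uld_softc *)xge_hal_device_private(devh);
}
```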
+
+/**
+ * xge_hal_device_mtu_check - check MTU value for ranges
+ * @hldev: the device
+ * @new_mtu: new MTU value to check
+ *
+ * Performs a sanity check on the new MTU value.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_MTU_SIZE - MTU is invalid.
+ *
+ * See also: xge_hal_device_mtu_set()
+ */
+static inline xge_hal_status_e
+xge_hal_device_mtu_check(xge_hal_device_t *hldev, int new_mtu)
+{
+ if ((new_mtu < XGE_HAL_MIN_MTU) || (new_mtu > XGE_HAL_MAX_MTU)) {
+ return XGE_HAL_ERR_INVALID_MTU_SIZE;
+ }
+
+ return XGE_HAL_OK;
+}
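A typical caller validates before committing. A short hedged sketch (uld_change_mtu is illustrative; xge_hal_device_mtu_set() is declared further below in this header):

```c
static xge_hal_status_e
uld_change_mtu(xge_hal_device_t *hldev, int new_mtu)
{
	xge_hal_status_e status;

	/* Range-check against XGE_HAL_MIN_MTU/XGE_HAL_MAX_MTU first... */
	status = xge_hal_device_mtu_check(hldev, new_mtu);
	if (status != XGE_HAL_OK)
		return status;

	/* ...then program the adapter. */
	return xge_hal_device_mtu_set(hldev, new_mtu);
}
```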
+
+void xge_hal_device_bcast_enable(xge_hal_device_h devh);
+
+void xge_hal_device_bcast_disable(xge_hal_device_h devh);
+
+void xge_hal_device_terminating(xge_hal_device_h devh);
+
+xge_hal_status_e xge_hal_device_initialize(xge_hal_device_t *hldev,
+ xge_hal_device_attr_t *attr, xge_hal_device_config_t *config);
+
+void xge_hal_device_terminate(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_reset(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_macaddr_get(xge_hal_device_t *hldev,
+ int index, macaddr_t *macaddr);
+
+xge_hal_status_e xge_hal_device_macaddr_set(xge_hal_device_t *hldev,
+ int index, macaddr_t macaddr);
+
+xge_hal_status_e xge_hal_device_macaddr_clear(xge_hal_device_t *hldev,
+ int index);
+
+int xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted);
+
+xge_hal_status_e xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu);
+
+xge_hal_status_e xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status);
+
+void xge_hal_device_intr_enable(xge_hal_device_t *hldev);
+
+void xge_hal_device_intr_disable(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_mcast_enable(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_mcast_disable(xge_hal_device_t *hldev);
+
+void xge_hal_device_promisc_enable(xge_hal_device_t *hldev);
+
+void xge_hal_device_promisc_disable(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_disable(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_enable(xge_hal_device_t *hldev);
+
+xge_hal_status_e xge_hal_device_handle_tcode(xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh,
+ u8 t_code);
+
+xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
+ xge_hal_device_link_state_e *ls);
+
+void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
+ int one_shot);
+
+void xge_hal_device_poll(xge_hal_device_h devh);
+
+xge_hal_card_e xge_hal_device_check_id(xge_hal_device_h devh);
+
+int xge_hal_device_is_slot_freeze(xge_hal_device_h devh);
+
+xge_hal_status_e
+xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
+ xge_hal_pci_bus_frequency_e *bus_frequency,
+ xge_hal_pci_bus_width_e *bus_width);
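The bus-mode query above pairs naturally with the PCI mode, frequency, and width enumerations defined earlier in this header. A hedged attach-time sketch (uld_report_bus is illustrative):

```c
static void
uld_report_bus(xge_hal_device_h devh)
{
	xge_hal_pci_mode_e pci_mode;
	xge_hal_pci_bus_frequency_e bus_frequency;
	xge_hal_pci_bus_width_e bus_width;

	if (xge_hal_device_pci_info_get(devh, &pci_mode, &bus_frequency,
	    &bus_width) == XGE_HAL_OK) {
		/* e.g. log "PCI-X M2 133MHz, 64-bit" at attach time. */
	}
}
```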
+
+xge_hal_status_e
+xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
+ xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
+ u8 is_tcp, u8 is_ipv4, u8 tgt_queue);
+
+xge_hal_status_e
+xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
+ xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
+ u8 is_tcp, u8 is_ipv4);
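The SPDM pair above steers an exact 5-tuple to a target receive queue, and removal keys on the same 5-tuple. A hedged sketch using only the signatures declared just above; the helper name and the literal port/queue numbers are placeholders, and src_ip/dst_ip are assumed to be filled per xgehal-types.h:

```c
static void
uld_spdm_example(xge_hal_device_h devh,
    xge_hal_ipaddr_t *src_ip, xge_hal_ipaddr_t *dst_ip)
{
	/* Steer TCP <src_ip>:1024 -> <dst_ip>:80 to receive queue 3. */
	(void) xge_hal_spdm_entry_add(devh, src_ip, dst_ip,
	    1024 /* l4_sp */, 80 /* l4_dp */,
	    1 /* is_tcp */, 1 /* is_ipv4 */, 3 /* tgt_queue */);

	/* Tear the same flow down again. */
	(void) xge_hal_spdm_entry_remove(devh, src_ip, dst_ip,
	    1024, 80, 1, 1);
}
```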
+
+xge_hal_status_e
+xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index);
+
+int
+xge_hal_device_is_closed (xge_hal_device_h devh);
+
+/* private functions, don't use them in ULD */
+
+void __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg);
+
+u64 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg);
+
+
+/* Some function prototypes for MSI implementation. */
+xge_hal_status_e
+xge_hal_channel_msi_set (xge_hal_channel_h channelh, int msi,
+ u32 msg_val);
+void
+xge_hal_mask_msi(xge_hal_device_t *hldev);
+
+void
+xge_hal_unmask_msi(xge_hal_channel_h channelh);
+
+xge_hal_status_e
+xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx);
+
+xge_hal_status_e
+xge_hal_mask_msix(xge_hal_device_h devh, int msi_id);
+
+xge_hal_status_e
+xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id);
+
+#if defined(XGE_HAL_CONFIG_LRO)
+xge_hal_status_e
+xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev);
+#endif
+
+#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_DEVICE)
+#define __HAL_STATIC_DEVICE
+#define __HAL_INLINE_DEVICE
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
+xge_hal_device_rev(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_clear_rx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_clear_tx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_continue_irq(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_handle_irq(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_bar0(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_isrbar0(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_bar1(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
+ char *bar1);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_tx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_rx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_all(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_tx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_rx(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_all(xge_hal_device_t *hldev);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx);
+
+#if defined (XGE_HAL_CONFIG_LRO)
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u8
+__hal_header_parse_token_u8(u8 *string,u16 offset);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
+__hal_header_parse_token_u16(u8 *string,u16 offset);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u32
+__hal_header_parse_token_u32(u8 *string,u16 offset);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_header_update_u8(u8 *string, u16 offset, u8 val);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_header_update_u16(u8 *string, u16 offset, u16 val);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_header_update_u32(u8 *string, u16 offset, u32 val);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
+__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_ip_lro_capable(iplro_t *ip, xge_hal_dtr_info_t *ext_info);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_lro_capable(u8 *buffer, iplro_t **ip, tcplro_t **tcp,
+ xge_hal_dtr_info_t *ext_info);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_get_lro_session(u8 *eth_hdr, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+ xge_hal_lro_desc_t *ring_lro, lro_t **lro_end3);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_lro_under_optimal_thresh(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
+ xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_collapse_ip_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
+ xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_collapse_tcp_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
+ xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_append_lro(iplro_t *ip, tcplro_t **tcp, u32 *seg_len, lro_t *lro,
+ xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
+ u32 *seglen, lro_t **p_lro,
+ xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+ lro_t **lro_end3);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
+ lro_t **lro, xge_hal_dtr_info_t *ext_info,
+ xge_hal_device_t *hldev, lro_t **lro_end3);
+
+void
+xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_get_next_session(xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro,
+ int slot, u32 tcp_seg_len, int ts_off);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro);
+#endif
+
+#else /* XGE_FASTPATH_EXTERN */
+#define __HAL_STATIC_DEVICE static
+#define __HAL_INLINE_DEVICE inline
+#include <dev/nxge/xgehal/xgehal-device-fp.c>
+#endif /* XGE_FASTPATH_INLINE */
+
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_DEVICE_H */
diff --git a/sys/dev/nxge/include/xgehal-driver.h b/sys/dev/nxge/include/xgehal-driver.h
new file mode 100644
index 0000000..e669368
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-driver.h
@@ -0,0 +1,322 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-driver.h
+ *
+ * Description: HAL driver object functionality
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_HAL_DRIVER_H
+#define XGE_HAL_DRIVER_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-list.h>
+#include <dev/nxge/include/xge-queue.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-event.h>
+
+__EXTERN_BEGIN_DECLS
+
+/* maximum number of events consumed in a single poll() cycle */
+#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5
+
+
+/**
+ * function xge_uld_sched_timer_cb_f - Per-device periodic timer
+ * callback.
+ * @devh: HAL device handle.
+ * @userdata: Per-device user data (a.k.a. context) specified via
+ * xge_hal_device_initialize().
+ *
+ * Periodic or one-shot timer callback. If specified (that is, not NULL)
+ * HAL invokes this callback periodically. The call is performed in the
+ * interrupt context, or more exactly, in the context of HAL's ISR
+ * xge_hal_device_continue_irq().
+ *
+ * See also: xge_hal_device_initialize{}
+ */
+typedef void (*xge_uld_sched_timer_cb_f)(xge_hal_device_h devh, void *userdata);
+
+/**
+ * function xge_uld_link_up_f - Link-Up callback provided by upper-layer
+ * driver.
+ * @userdata: Opaque context set by the ULD via
+ * xge_hal_device_private_set()
+ * (typically at HAL device initialization time).
+ *
+ * Link-up notification callback provided by the ULD.
+ * This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_uld_link_down_f{},
+ * xge_hal_driver_initialize(), xge_hal_device_private_set().
+ */
+typedef void (*xge_uld_link_up_f) (void *userdata);
+
+/**
+ * function xge_uld_link_down_f - Link-Down callback provided by
+ * upper-layer driver.
+ * @userdata: Opaque context set by the ULD via
+ * xge_hal_device_private_set()
+ * (typically at HAL device initialization time).
+ *
+ * Link-Down notification callback provided by the upper-layer driver.
+ * This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_uld_link_up_f{},
+ * xge_hal_driver_initialize(), xge_hal_device_private_set().
+ */
+typedef void (*xge_uld_link_down_f) (void *userdata);
+
+/**
+ * function xge_uld_crit_err_f - Critical Error notification callback.
+ * @userdata: Opaque context set by the ULD via
+ * xge_hal_device_private_set()
+ * (typically at HAL device initialization time).
+ * @type: Enumerated hw error, e.g.: double ECC.
+ * @ext_data: Extended data. The contents depend on the @type.
+ *
+ * Critical-error notification callback provided by the upper-layer driver.
+ * This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_hal_event_e{},
+ * xge_hal_device_private_set(), xge_hal_driver_initialize().
+ */
+typedef void (*xge_uld_crit_err_f) (void *userdata, xge_hal_event_e type,
+ u64 ext_data);
+
+/**
+ * function xge_uld_event_queued_f - Event-enqueued notification
+ * callback.
+ * @devh: HAL device handle.
+ * @event_type: HAL- or ULD-defined event type. Note that HAL
+ * events are enumerated by xge_hal_event_e{}.
+ *
+ * "Event-was-enqueued" notification callback provided by the upper-layer
+ * driver. The callback is invoked (if defined, i.e., not NULL in the
+ * xge_hal_uld_cbs_t{} structure) each time immediately after an event
+ * is enqueued.
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_hal_device_private_set(),
+ * xge_hal_driver_initialize().
+ */
+typedef void (*xge_uld_event_queued_f) (xge_hal_device_h devh, int event_type);
+
+/**
+ * function xge_uld_event_f - ULD event callback.
+ * @item: ULD-defined event, item of the xge_queue_t.
+ *
+ * ULD event callback.
+ * Upper-layer driver can use HAL queue to serialize certain slow-path
+ * events. HAL periodically polls the queue as part of the
+ * xge_hal_device_poll() processing. When/if HAL discovers in the queue
+ * an unknown event type it simply invokes the event callback
+ * (which must be non-NULL and supplied by the ULD in this case).
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_hal_device_poll(), xge_queue_t{},
+ * xge_hal_driver_initialize(), xge_queue_item_t{}.
+ */
+typedef void (*xge_uld_event_f) (xge_queue_item_t *item);
+
+/**
+ * function xge_uld_before_device_poll_f - ULD "before-poll" callback.
+ * @devh: HAL device handle.
+ *
+ * HAL invokes the callback from inside its xge_hal_device_poll()
+ * implementation %prior to accessing the @devh device. This allows ULD to
+ * perform per-device locking and/or context mapping, if required.
+ * The interface is currently used by AIX driver only.
+ * To avoid using/implementing the callback set the corresponding field
+ * in the xge_hal_uld_cbs_t{} structure to NULL.
+ *
+ * Returns: 0 on success, non-zero on failure.
+ *
+ * See also: xge_hal_driver_initialize(), xge_hal_uld_cbs_t{},
+ * xge_hal_device_poll().
+ */
+typedef int (*xge_uld_before_device_poll_f) (xge_hal_device_h devh);
+
+/**
+ * function xge_uld_after_device_poll_f - ULD "after-poll" callback.
+ * @devh: HAL device handle.
+ *
+ * Unless NULL is specified,
+ * HAL invokes the callback from inside its xge_hal_device_poll()
+ * implementation immediately %after it has completed polling the @devh
+ * device. This allows ULD to undo the effects of
+ * xge_uld_before_device_poll_f{}.
+ * The interface is currently used by AIX driver only.
+ *
+ * See also: xge_hal_driver_initialize(), xge_hal_uld_cbs_t{},
+ * xge_hal_device_poll().
+ */
+typedef void (*xge_uld_after_device_poll_f) (xge_hal_device_h devh);
+
+/**
+ * function xge_uld_xpak_alarm_log_f - ULD "XPAK alarm log" callback.
+ * @devh: HAL device handle.
+ * @type: TODO
+ *
+ * Unless NULL is specified,
+ * HAL invokes the callback from inside __hal_chk_xpak_counter()
+ */
+typedef void (*xge_uld_xpak_alarm_log_f) (xge_hal_device_h devh, xge_hal_xpak_alarm_type_e type);
+
+/**
+ * struct xge_hal_uld_cbs_t - Upper-layer driver "slow-path" callbacks.
+ * @link_up: See xge_uld_link_up_f{}.
+ * @link_down: See xge_uld_link_down_f{}.
+ * @crit_err: See xge_uld_crit_err_f{}.
+ * @event: See xge_uld_event_f{}.
+ * @event_queued: See xge_uld_event_queued_f{}.
+ * @before_device_poll: See xge_uld_before_device_poll_f{}.
+ * @after_device_poll: See xge_uld_after_device_poll_f{}.
+ * @sched_timer: See xge_uld_sched_timer_cb_f{}.
+ * @xpak_alarm_log: TODO
+ *
+ * Upper layer driver slow-path (per-driver) callbacks.
+ * Implemented by ULD and provided to HAL via
+ * xge_hal_driver_initialize().
+ * Note that these callbacks are not mandatory: HAL will not invoke
+ * a callback if NULL is specified.
+ *
+ * Note that in addition to those, there are currently two per-channel callbacks
+ * (completion and abort) specified at channel open time
+ * via xge_hal_channel_open().
+ *
+ * See also: xge_hal_driver_initialize().
+ */
+typedef struct xge_hal_uld_cbs_t {
+ xge_uld_link_up_f link_up;
+ xge_uld_link_down_f link_down;
+ xge_uld_crit_err_f crit_err;
+ xge_uld_event_f event;
+ xge_uld_event_queued_f event_queued;
+ xge_uld_before_device_poll_f before_device_poll;
+ xge_uld_after_device_poll_f after_device_poll;
+ xge_uld_sched_timer_cb_f sched_timer;
+ xge_uld_xpak_alarm_log_f xpak_alarm_log;
+} xge_hal_uld_cbs_t;
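Since every hook is optional, a ULD only fills in what it implements before calling xge_hal_driver_initialize() (declared below). A hedged sketch; the uld_* callback implementations and the wrapper are hypothetical:

```c
static void uld_link_up(void *userdata);	/* hypothetical */
static void uld_link_down(void *userdata);	/* hypothetical */

static xge_hal_uld_cbs_t uld_cbs = {
	.link_up   = uld_link_up,
	.link_down = uld_link_down,
	/* remaining hooks stay NULL; HAL skips NULL callbacks */
};

static xge_hal_status_e
uld_hal_init(xge_hal_driver_config_t *drv_config)
{
	return xge_hal_driver_initialize(drv_config, &uld_cbs);
}
```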
+
+/**
+ * struct xge_hal_driver_t - Represents HAL object.
+ * @config: HAL configuration.
+ * @is_initialized: True if HAL is initialized; false otherwise.
+ * @uld_callbacks: Upper-layer driver callbacks. See xge_hal_uld_cbs_t{}.
+ * @debug_module_mask: 32bit mask that defines which components of the
+ * driver are to be traced. The trace-able components are:
+ * XGE_COMPONENT_HAL_CONFIG 0x1
+ * XGE_COMPONENT_HAL_FIFO 0x2
+ * XGE_COMPONENT_HAL_RING 0x4
+ * XGE_COMPONENT_HAL_CHANNEL 0x8
+ * XGE_COMPONENT_HAL_DEVICE 0x10
+ * XGE_COMPONENT_HAL_MM 0x20
+ * XGE_COMPONENT_HAL_QUEUE 0x40
+ * XGE_COMPONENT_HAL_STATS 0x100
+ * XGE_COMPONENT_OSDEP 0x1000
+ * XGE_COMPONENT_LL 0x2000
+ * XGE_COMPONENT_TOE 0x4000
+ * XGE_COMPONENT_RDMA 0x8000
+ * XGE_COMPONENT_ALL 0xffffffff
+ * The @debug_module_mask makes it possible to switch tracing on and off at runtime.
+ * In addition, the traces for the same trace-able components can be
+ * compiled out, based on the same mask provided via Makefile.
+ * @debug_level: See xge_debug_level_e{}.
+ *
+ * HAL (driver) object. There is a single instance of this structure per HAL.
+ */
+typedef struct xge_hal_driver_t {
+ xge_hal_driver_config_t config;
+ int is_initialized;
+ xge_hal_uld_cbs_t uld_callbacks;
+ u32 debug_module_mask;
+ int debug_level;
+} xge_hal_driver_t;
+
+extern xge_hal_driver_t *g_xge_hal_driver;
+
+static inline int
+xge_hal_driver_is_initialized(void)
+{
+ return g_xge_hal_driver->is_initialized;
+}
+
+static inline int
+xge_hal_driver_debug_module_mask(void)
+{
+ return g_xge_hal_driver->debug_module_mask;
+}
+
+static inline void
+xge_hal_driver_debug_module_mask_set(u32 new_mask)
+{
+#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
+ (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
+ g_xge_hal_driver->debug_module_mask = new_mask;
+ g_module_mask = (unsigned long *)&g_xge_hal_driver->debug_module_mask;
+#endif
+}
+
+static inline int
+xge_hal_driver_debug_level(void) { return g_xge_hal_driver->debug_level; }
+
+static inline void
+xge_hal_driver_debug_level_set(int new_level)
+{
+#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
+ (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
+ g_xge_hal_driver->debug_level = new_level;
+ g_level = &g_xge_hal_driver->debug_level;
+#endif
+}
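Both setters above take effect immediately, provided the traces were compiled in via XGE_DEBUG_TRACE_MASK/XGE_DEBUG_ERR_MASK. A hedged runtime-toggle sketch; the XGE_COMPONENT_* masks are the values documented in xge_hal_driver_t above, and XGE_TRACE is assumed to be the trace level from xge-debug.h:

```c
static void
uld_trace_device_and_fifo(void)
{
	/* Trace only device- and fifo-level components from now on. */
	xge_hal_driver_debug_module_mask_set(XGE_COMPONENT_HAL_DEVICE |
	    XGE_COMPONENT_HAL_FIFO);
	xge_hal_driver_debug_level_set(XGE_TRACE);
}
```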
+
+xge_hal_status_e xge_hal_driver_initialize(xge_hal_driver_config_t *config,
+ xge_hal_uld_cbs_t *uld_callbacks);
+
+void xge_hal_driver_terminate(void);
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+void xge_hal_driver_tracebuf_dump(void);
+
+xge_hal_status_e
+xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize);
+#else
+#define xge_hal_driver_tracebuf_dump()
+#define xge_hal_driver_tracebuf_read(a, b, c) (0)
+#endif
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_DRIVER_H */
diff --git a/sys/dev/nxge/include/xgehal-event.h b/sys/dev/nxge/include/xgehal-event.h
new file mode 100644
index 0000000..7d560d2
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-event.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-event.h
+ *
+ * Description: event types
+ *
+ * Created: 7 June 2004
+ */
+
+#ifndef XGE_HAL_EVENT_H
+#define XGE_HAL_EVENT_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+
+__EXTERN_BEGIN_DECLS
+
+#define XGE_HAL_EVENT_BASE 0
+#define XGE_LL_EVENT_BASE 100
+
+/**
+ * enum xge_hal_event_e - Enumerates slow-path HAL events.
+ * @XGE_HAL_EVENT_UNKNOWN: Unknown (and invalid) event.
+ * @XGE_HAL_EVENT_SERR: Serious hardware error event.
+ * @XGE_HAL_EVENT_LINK_IS_UP: The link state has changed from 'down' to
+ * 'up'; upper-layer driver (typically, link layer) is
+ * supposed to wake the queue, etc.
+ * @XGE_HAL_EVENT_LINK_IS_DOWN: Link-down event.
+ * The link state has changed from 'up' to 'down';
+ * upper-layer driver is supposed to stop traffic, etc.
+ * @XGE_HAL_EVENT_ECCERR: ECC error event.
+ * @XGE_HAL_EVENT_PARITYERR: Parity error event.
+ * @XGE_HAL_EVENT_TARGETABORT: Target abort event. Used when device
+ * aborts transmit operation with the corresponding transfer code
+ * (for T_CODE enum see xgehal-fifo.h and xgehal-ring.h)
+ * @XGE_HAL_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
+ * slot-freeze from the rest of the critical events (e.g. ECC) when it is
+ * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
+ *
+ * xge_hal_event_e enumerates slow-path HAL events.
+ *
+ * See also: xge_hal_uld_cbs_t{}, xge_uld_link_up_f{},
+ * xge_uld_link_down_f{}.
+ */
+typedef enum xge_hal_event_e {
+ XGE_HAL_EVENT_UNKNOWN = 0,
+ /* HAL events */
+ XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1,
+ XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2,
+ XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3,
+ XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4,
+ XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5,
+ XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6,
+ XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7,
+} xge_hal_event_e;
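A ULD critical-error callback (see xge_uld_crit_err_f in xgehal-driver.h) typically dispatches on these values. A hedged sketch with an illustrative, ULD-specific recovery policy:

```c
static void
uld_crit_err(void *userdata, xge_hal_event_e type, u64 ext_data)
{
	switch (type) {
	case XGE_HAL_EVENT_SERR:
	case XGE_HAL_EVENT_SLOT_FREEZE:
		/* Unrecoverable by masking: schedule a full device reset. */
		break;
	case XGE_HAL_EVENT_ECCERR:
	case XGE_HAL_EVENT_PARITYERR:
	case XGE_HAL_EVENT_TARGETABORT:
		/* Count and log; recovery policy is ULD-specific. */
		break;
	default:
		break;
	}
}
```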
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_EVENT_H */
diff --git a/sys/dev/nxge/include/xgehal-fifo.h b/sys/dev/nxge/include/xgehal-fifo.h
new file mode 100644
index 0000000..6de6048
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-fifo.h
@@ -0,0 +1,363 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-fifo.h
+ *
+ * Description: Tx fifo object functionality
+ *
+ * Created: 19 May 2004
+ */
+
+#ifndef XGE_HAL_FIFO_H
+#define XGE_HAL_FIFO_H
+
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-mm.h>
+
+__EXTERN_BEGIN_DECLS
+
+/* HW fifo configuration */
+#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65
+#define XGE_HAL_FIFO_MAX_WRR 5
+#define XGE_HAL_FIFO_MAX_PARTITION 4
+#define XGE_HAL_FIFO_MAX_WRR_STATE 36
+#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000
+
+/* HW FIFO Weight Calendar */
+#define XGE_HAL_FIFO_WRR_0 0x0706050407030602ULL
+#define XGE_HAL_FIFO_WRR_1 0x0507040601070503ULL
+#define XGE_HAL_FIFO_WRR_2 0x0604070205060700ULL
+#define XGE_HAL_FIFO_WRR_3 0x0403060705010207ULL
+#define XGE_HAL_FIFO_WRR_4 0x0604050300000000ULL
+/*
+ * xge_hal_fifo_hw_pair_t
+ *
+ * Represents a single fifo in the BAR1 memory space.
+ */
+typedef struct {
+ u64 txdl_pointer; /* offset 0x0 */
+
+ u64 reserved[2];
+
+ u64 list_control; /* offset 0x18 */
+#define XGE_HAL_TX_FIFO_LAST_TXD_NUM(val) vBIT(val,0,8)
+#define XGE_HAL_TX_FIFO_FIRST_LIST BIT(14)
+#define XGE_HAL_TX_FIFO_LAST_LIST BIT(15)
+#define XGE_HAL_TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
+#define XGE_HAL_TX_FIFO_SPECIAL_FUNC BIT(23)
+#define XGE_HAL_TX_FIFO_NO_SNOOP(n) vBIT(n,30,2)
+} xge_hal_fifo_hw_pair_t;
+
+
+/* Bad TxDL transfer codes */
+#define XGE_HAL_TXD_T_CODE_OK 0x0
+#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1
+#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2
+#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3
+#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5
+#define XGE_HAL_TXD_T_CODE_PARITY 0x7
+#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA
+#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF
+
+
+/**
+ * struct xge_hal_fifo_txd_t - TxD.
+ * @control_1: Control_1.
+ * @control_2: Control_2.
+ * @buffer_pointer: Buffer_Address.
+ * @host_control: Host_Control. Opaque 64bit data stored by ULD inside the Xframe
+ * descriptor prior to posting the latter on the channel
+ * via xge_hal_fifo_dtr_post() or xge_hal_ring_dtr_post().
+ * The %host_control is returned as is to the ULD with each
+ * completed descriptor.
+ *
+ * Transmit descriptor (TxD). A fifo descriptor contains a configured number
+ * (list) of TxDs. For more details please refer to the Xframe User Guide,
+ * Section 5.4.2 "Transmit Descriptor (TxD) Format".
+ */
+typedef struct xge_hal_fifo_txd_t {
+ u64 control_1;
+#define XGE_HAL_TXD_LIST_OWN_XENA BIT(7)
+#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define XGE_HAL_GET_TXD_T_CODE(val) ((val & XGE_HAL_TXD_T_CODE)>>48)
+#define XGE_HAL_SET_TXD_T_CODE(x, val) (x |= (((u64)val & 0xF) << 48))
+#define XGE_HAL_TXD_GATHER_CODE (BIT(22) | BIT(23))
+#define XGE_HAL_TXD_GATHER_CODE_FIRST BIT(22)
+#define XGE_HAL_TXD_GATHER_CODE_LAST BIT(23)
+#define XGE_HAL_TXD_NO_LSO 0
+#define XGE_HAL_TXD_UDF_COF 1
+#define XGE_HAL_TXD_TCP_LSO 2
+#define XGE_HAL_TXD_UDP_LSO 3
+#define XGE_HAL_TXD_LSO_COF_CTRL(val) vBIT(val,30,2)
+#define XGE_HAL_TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
+#define XGE_HAL_TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
+#define XGE_HAL_TXD_GET_LSO_BYTES_SENT(val) ((val & vBIT(0xFFFF,16,16))>>32)
+ u64 control_2;
+#define XGE_HAL_TXD_TX_CKO_CONTROL (BIT(5)|BIT(6)|BIT(7))
+#define XGE_HAL_TXD_TX_CKO_IPV4_EN BIT(5)
+#define XGE_HAL_TXD_TX_CKO_TCP_EN BIT(6)
+#define XGE_HAL_TXD_TX_CKO_UDP_EN BIT(7)
+#define XGE_HAL_TXD_VLAN_ENABLE BIT(15)
+#define XGE_HAL_TXD_VLAN_TAG(val) vBIT(val,16,16)
+#define XGE_HAL_TXD_INT_NUMBER(val) vBIT(val,34,6)
+#define XGE_HAL_TXD_INT_TYPE_PER_LIST BIT(47)
+#define XGE_HAL_TXD_INT_TYPE_UTILZ BIT(46)
+#define XGE_HAL_TXD_SET_MARKER vBIT(0x6,0,4)
+
+ u64 buffer_pointer;
+
+ u64 host_control;
+
+} xge_hal_fifo_txd_t;
+
+typedef xge_hal_fifo_txd_t* xge_hal_fifo_txdl_t;
+
+/**
+ * struct xge_hal_fifo_t - Fifo channel.
+ * @channel: Channel "base" of this fifo, the common part of all HAL
+ * channels.
+ * @post_lock_ptr: Points to a lock that serializes (pointer, control) PIOs.
+ * Note that for Xena the serialization is done across all device
+ * fifos.
+ * @hw_pair: Per-fifo (Pointer, Control) pair used to send descriptors to the
+ * Xframe hardware (for details see Xframe user guide).
+ * @config: Fifo configuration, part of device configuration
+ * (see xge_hal_device_config_t{}).
+ * @no_snoop_bits: See xge_hal_fifo_config_t{}.
+ * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For details
+ * on TxDL please refer to the Xframe UG.
+ * @interrupt_type: FIXME: to-be-defined.
+ * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
+ * per-TxDL HAL private space (xge_hal_fifo_txdl_priv_t).
+ * @priv_size: Per-Tx descriptor space reserved for upper-layer driver
+ * usage.
+ * @mempool: Memory pool, from which descriptors get allocated.
+ * @align_size: TBD
+ *
+ * Fifo channel.
+ * Note: The structure is cache line aligned.
+ */
+typedef struct xge_hal_fifo_t {
+ xge_hal_channel_t channel;
+ spinlock_t *post_lock_ptr;
+ xge_hal_fifo_hw_pair_t *hw_pair;
+ xge_hal_fifo_config_t *config;
+ int no_snoop_bits;
+ int txdl_per_memblock;
+ u64 interrupt_type;
+ int txdl_size;
+ int priv_size;
+ xge_hal_mempool_t *mempool;
+ int align_size;
+} __xge_os_attr_cacheline_aligned xge_hal_fifo_t;
+
+/**
+ * struct xge_hal_fifo_txdl_priv_t - Transmit descriptor HAL-private
+ * data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HAL allocates
+ * descriptors in memory blocks (see
+ * xge_hal_fifo_config_t{})
+ * Each memblock is a contiguous block of DMA-able memory.
+ * @frags: Total number of fragments (that is, contiguous data buffers)
+ * carried by this TxDL.
+ * @align_vaddr_start: (TODO).
+ * @align_vaddr: Virtual address of the per-TxDL area in memory used for
+ * alignment. Used to place one or more mis-aligned fragments
+ * (the maximum defined by the configuration variable
+ * @max_aligned_frags).
+ * @align_dma_addr: DMA address translated from the @align_vaddr.
+ * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
+ * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
+ * @align_dma_offset: The current offset into the @align_vaddr area.
+ * Grows while filling the descriptor, gets reset.
+ * @align_used_frags: (TODO).
+ * @alloc_frags: Total number of fragments allocated.
+ * @dang_frags: Number of fragments kept from release until this TxDL is freed.
+ * @bytes_sent: TODO
+ * @unused: TODO
+ * @dang_txdl: (TODO).
+ * @next_txdl_priv: (TODO).
+ * @first_txdp: (TODO).
+ * @dang_dtrh: Pointer to TxDL (list) kept from release until this TxDL
+ * is freed.
+ * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
+ * TxDL list.
+ * @dtrh: Corresponding dtrh to this TxDL.
+ * @memblock: Pointer to the TxDL memory block or memory page.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HAL (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-transmit descriptor HAL-private data. HAL uses the space to keep DMA
+ * information associated with the descriptor. Note that ULD can ask HAL
+ * to allocate additional per-descriptor space for its own (ULD-specific)
+ * purposes.
+ *
+ * See also: xge_hal_ring_rxd_priv_t{}.
+ */
+typedef struct xge_hal_fifo_txdl_priv_t {
+ dma_addr_t dma_addr;
+ pci_dma_h dma_handle;
+ ptrdiff_t dma_offset;
+ int frags;
+ char *align_vaddr_start;
+ char *align_vaddr;
+ dma_addr_t align_dma_addr;
+ pci_dma_h align_dma_handle;
+ pci_dma_acc_h align_dma_acch;
+ ptrdiff_t align_dma_offset;
+ int align_used_frags;
+ int alloc_frags;
+ int dang_frags;
+ unsigned int bytes_sent;
+ int unused;
+ xge_hal_fifo_txd_t *dang_txdl;
+ struct xge_hal_fifo_txdl_priv_t *next_txdl_priv;
+ xge_hal_fifo_txd_t *first_txdp;
+ void *memblock;
+#ifdef XGE_DEBUG_ASSERT
+ xge_hal_mempool_dma_t *dma_object;
+#endif
+#ifdef XGE_OS_MEMORY_CHECK
+ int allocated;
+#endif
+} xge_hal_fifo_txdl_priv_t;
+
+/**
+ * xge_hal_fifo_get_max_frags_cnt - Return the maximum number of fragments
+ * configured for the fifo.
+ * @channelh: Channel handle.
+ */
+static inline int
+xge_hal_fifo_get_max_frags_cnt(xge_hal_channel_h channelh)
+{
+ return ((xge_hal_fifo_t *)channelh)->config->max_frags;
+}
+/* ========================= FIFO PRIVATE API ============================= */
+
+xge_hal_status_e __hal_fifo_open(xge_hal_channel_h channelh,
+ xge_hal_channel_attr_t *attr);
+
+void __hal_fifo_close(xge_hal_channel_h channelh);
+
+void __hal_fifo_hw_initialize(xge_hal_device_h hldev);
+
+xge_hal_status_e
+__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+void
+__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_FIFO)
+#define __HAL_STATIC_FIFO
+#define __HAL_INLINE_FIFO
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
+__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ u64 ctrl_1);
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
+ xge_hal_fifo_txd_t *txdp, int txdl_count);
+
+/* ========================= FIFO PUBLIC API ============================== */
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
+xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
+xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channel, int dtr_sp_size,
+ xge_hal_dtr_h dtr_sp);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
+ xge_hal_dtr_h dtrs[]);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
+ u8 *t_code);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int frag_idx, dma_addr_t dma_pointer, int size);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
+ dma_addr_t dma_pointer, int size, int misaligned_size);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ void *vaddr, int size);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int frag_idx);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag);
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh);
+
+#else /* XGE_DEBUG_FP & XGE_DEBUG_FP_FIFO */
+#define __HAL_STATIC_FIFO static
+#define __HAL_INLINE_FIFO inline
+#include <dev/nxge/xgehal/xgehal-fifo-fp.c>
+#endif /* XGE_DEBUG_FP & XGE_DEBUG_FP_FIFO */
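+
+/*
+ * Typical transmit flow built from the public API above (a sketch;
+ * assumes a valid channel handle and a DMA-mapped buffer, with error
+ * handling and locking omitted):
+ *
+ *	xge_hal_dtr_h dtrh;
+ *	u8 t_code;
+ *
+ *	if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
+ *		xge_hal_fifo_dtr_buffer_set(channelh, dtrh, 0,
+ *		    dma_addr, size);
+ *		xge_hal_fifo_dtr_post(channelh, dtrh);
+ *	}
+ *
+ *	... and, on transmit completion:
+ *
+ *	while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh,
+ *	    &t_code) == XGE_HAL_OK)
+ *		xge_hal_fifo_dtr_free(channelh, dtrh);
+ */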
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_FIFO_H */
diff --git a/sys/dev/nxge/include/xgehal-mgmt.h b/sys/dev/nxge/include/xgehal-mgmt.h
new file mode 100644
index 0000000..061320e
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-mgmt.h
@@ -0,0 +1,228 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mgmt.h
+ *
+ * Description: management API
+ *
+ * Created: 1 September 2004
+ */
+
+#ifndef XGE_HAL_MGMT_H
+#define XGE_HAL_MGMT_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-debug.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-stats.h>
+#include <dev/nxge/include/xgehal-regs.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+__EXTERN_BEGIN_DECLS
+
+/**
+ * struct xge_hal_mgmt_about_info_t - About info.
+ * @vendor: PCI Vendor ID.
+ * @device: PCI Device ID.
+ * @subsys_vendor: PCI Subsystem Vendor ID.
+ * @subsys_device: PCI Subsystem Device ID.
+ * @board_rev: PCI board revision, e.g. 3 for Xena revision 3.
+ * @vendor_name: Vendor name, e.g. "Neterion, Inc.".
+ * @chip_name: Chip name, e.g. "Xframe".
+ * @media: Media type, e.g. fiber or copper.
+ * @hal_major: HAL major version number.
+ * @hal_minor: HAL minor version number.
+ * @hal_fix: HAL fix number.
+ * @hal_build: HAL build number.
+ * @ll_major: Link-layer ULD major version number.
+ * @ll_minor: Link-layer ULD minor version number.
+ * @ll_fix: Link-layer ULD fix version number.
+ * @ll_build: Link-layer ULD build number.
+ * @transponder_temperature: TODO
+ */
+typedef struct xge_hal_mgmt_about_info_t {
+ u16 vendor;
+ u16 device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 board_rev;
+ char vendor_name[16];
+ char chip_name[16];
+ char media[16];
+ char hal_major[4];
+ char hal_minor[4];
+ char hal_fix[4];
+ char hal_build[16];
+ char ll_major[4];
+ char ll_minor[4];
+ char ll_fix[4];
+ char ll_build[16];
+ u32 transponder_temperature;
+} xge_hal_mgmt_about_info_t;
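+
+/*
+ * Example query (a sketch; assumes an initialized device handle devh):
+ *
+ *	xge_hal_mgmt_about_info_t info;
+ *
+ *	if (xge_hal_mgmt_about(devh, &info,
+ *	    sizeof(xge_hal_mgmt_about_info_t)) == XGE_HAL_OK)
+ *		xge_os_printf("%s %s, HAL %s.%s", info.vendor_name,
+ *		    info.chip_name, info.hal_major, info.hal_minor);
+ */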
+
+typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t;
+typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t;
+typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t;
+typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t;
+typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t;
+typedef xge_hal_device_config_t xge_hal_mgmt_device_config_t;
+typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t;
+typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t;
+
+xge_hal_status_e
+xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info,
+ int size);
+
+xge_hal_status_e
+xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats,
+ int size);
+
+xge_hal_status_e
+xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out);
+
+xge_hal_status_e
+xge_hal_mgmt_pcim_stats(xge_hal_device_h devh,
+ xge_hal_mgmt_pcim_stats_t *pcim_stats, int size);
+
+xge_hal_status_e
+xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size,
+ char *out);
+
+xge_hal_status_e
+xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *sw_stats,
+ int size);
+
+xge_hal_status_e
+xge_hal_mgmt_device_stats(xge_hal_device_h devh,
+ xge_hal_mgmt_device_stats_t *device_stats, int size);
+
+xge_hal_status_e
+xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
+ xge_hal_mgmt_channel_stats_t *channel_stats, int size);
+
+xge_hal_status_e
+xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset,
+ u64 *value);
+
+xge_hal_status_e
+xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset,
+ u64 value);
+
+xge_hal_status_e
+xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset,
+ int bits, u32 *value);
+
+xge_hal_status_e
+xge_hal_mgmt_device_config(xge_hal_device_h devh,
+ xge_hal_mgmt_device_config_t *dev_config, int size);
+
+xge_hal_status_e
+xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config,
+ int size);
+
+xge_hal_status_e
+xge_hal_mgmt_pci_config(xge_hal_device_h devh,
+ xge_hal_mgmt_pci_config_t *pci_config, int size);
+
+xge_hal_status_e
+xge_hal_pma_loopback(xge_hal_device_h devh, int enable);
+
+xge_hal_status_e
+xge_hal_rldram_test(xge_hal_device_h devh, u64 *data);
+
+u16
+xge_hal_mdio_read(xge_hal_device_h devh, u32 mmd_type, u64 addr);
+
+xge_hal_status_e
+xge_hal_mdio_write(xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value);
+
+u32
+xge_hal_read_xfp_current_temp(xge_hal_device_h devh);
+
+xge_hal_status_e
+xge_hal_read_eeprom(xge_hal_device_h devh, int off, u32 *data);
+
+xge_hal_status_e
+xge_hal_write_eeprom(xge_hal_device_h devh, int off, u32 data, int cnt);
+
+xge_hal_status_e
+xge_hal_register_test(xge_hal_device_h devh, u64 *data);
+
+xge_hal_status_e
+xge_hal_eeprom_test(xge_hal_device_h devh, u64 *data);
+
+xge_hal_status_e
+xge_hal_bist_test(xge_hal_device_h devh, u64 *data);
+
+xge_hal_status_e
+xge_hal_link_test(xge_hal_device_h devh, u64 *data);
+
+int
+xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx);
+
+void
+xge_hal_getpause_data(xge_hal_device_h devh, int *tx, int *rx);
+
+void
+__hal_updt_stats_xpak(xge_hal_device_t *hldev);
+
+void
+__hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value);
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+xge_hal_status_e
+xge_hal_mgmt_trace_read(char *buffer, unsigned buf_size, unsigned *offset,
+ unsigned *read_length);
+#endif
+
+void
+xge_hal_restore_link_led(xge_hal_device_h devh);
+
+
+void
+xge_hal_flick_link_led(xge_hal_device_h devh);
+
+/*
+ * Some Xena rev. 3 cards are known to have faulty link LED
+ * indicators. This macro identifies, given the card's PCI subsystem
+ * ID, whether the card is among them.
+ */
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
+ ((((subid >= 0x600B) && (subid <= 0x600D)) || \
+ ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0)
+#define CHECKBIT(value, nbit) (value & (1 << nbit))
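+
+/*
+ * Illustrative use (a sketch): skip LED-based link indication on the
+ * affected boards.
+ *
+ *	if (!CARDS_WITH_FAULTY_LINK_INDICATORS(subid))
+ *		xge_hal_flick_link_led(devh);
+ */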
+
+#ifdef XGE_HAL_USE_MGMT_AUX
+#include <dev/nxge/include/xgehal-mgmtaux.h>
+#endif
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_MGMT_H */
diff --git a/sys/dev/nxge/include/xgehal-mgmtaux.h b/sys/dev/nxge/include/xgehal-mgmtaux.h
new file mode 100644
index 0000000..6d4922e
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-mgmtaux.h
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mgmtaux.h
+ *
+ * Description: management auxiliary API
+ *
+ * Created: 1 September 2004
+ */
+
+#ifndef XGE_HAL_MGMTAUX_H
+#define XGE_HAL_MGMTAUX_H
+
+#include <dev/nxge/include/xgehal-mgmt.h>
+
+__EXTERN_BEGIN_DECLS
+
+#define XGE_HAL_AUX_SEPA ' '
+
+xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh,
+ unsigned int offset, int bufsize, char *retbuf,
+ int *retsize);
+
+xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh,
+ unsigned int offset, u64 value);
+
+xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh,
+ unsigned int offset, int bufsize, char *retbuf,
+ int *retsize);
+
+xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize);
+
+xge_hal_status_e xge_hal_aux_device_dump(xge_hal_device_h devh);
+
+
+xge_hal_status_e xge_hal_aux_driver_config_read(int bufsize, char *retbuf,
+ int *retsize);
+
+xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize);
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_MGMTAUX_H */
diff --git a/sys/dev/nxge/include/xgehal-mm.h b/sys/dev/nxge/include/xgehal-mm.h
new file mode 100644
index 0000000..5a8f836
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-mm.h
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mm.h
+ *
+ * Description: memory pool object
+ *
+ * Created: 28 May 2004
+ */
+
+#ifndef XGE_HAL_MM_H
+#define XGE_HAL_MM_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-debug.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-driver.h>
+
+__EXTERN_BEGIN_DECLS
+
+typedef void* xge_hal_mempool_h;
+
+/*
+ * struct xge_hal_mempool_dma_t - Represents DMA objects passed to the
+ * caller.
+ */
+typedef struct xge_hal_mempool_dma_t {
+ dma_addr_t addr;
+ pci_dma_h handle;
+ pci_dma_acc_h acc_handle;
+} xge_hal_mempool_dma_t;
+
+/*
+ * xge_hal_mempool_item_f - Mempool item alloc/free callback.
+ * @mempoolh: Memory pool handle.
+ * @memblock: Memory block the item resides in.
+ * @memblock_index: Index of the memory block within the pool.
+ * @dma_object: DMA object describing the memory block.
+ * @item: Item that gets allocated or freed.
+ * @index: Item's index in the memory pool.
+ * @is_last: True if this item is the last one in the pool; false otherwise.
+ * @userdata: Per-pool user context.
+ *
+ * Memory pool allocation/deallocation callback.
+ */
+typedef xge_hal_status_e (*xge_hal_mempool_item_f) (xge_hal_mempool_h mempoolh,
+ void *memblock, int memblock_index,
+ xge_hal_mempool_dma_t *dma_object, void *item,
+ int index, int is_last, void *userdata);
+
+/*
+ * struct xge_hal_mempool_t - Memory pool.
+ */
+typedef struct xge_hal_mempool_t {
+ xge_hal_mempool_item_f item_func_alloc;
+ xge_hal_mempool_item_f item_func_free;
+ void *userdata;
+ void **memblocks_arr;
+ void **memblocks_priv_arr;
+ xge_hal_mempool_dma_t *memblocks_dma_arr;
+ pci_dev_h pdev;
+ int memblock_size;
+ int memblocks_max;
+ int memblocks_allocated;
+ int item_size;
+ int items_max;
+ int items_initial;
+ int items_current;
+ int items_per_memblock;
+ void **items_arr;
+ void **shadow_items_arr;
+ int items_priv_size;
+} xge_hal_mempool_t;
+
+/*
+ * __hal_mempool_item - Returns pointer to the item in the mempool
+ * items array.
+ */
+static inline void*
+__hal_mempool_item(xge_hal_mempool_t *mempool, int index)
+{
+ return mempool->items_arr[index];
+}
+
+/*
+ * __hal_mempool_item_priv - Return a pointer to the item's private space.
+ */
+static inline void*
+__hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx,
+ void *item, int *memblock_item_idx)
+{
+ ptrdiff_t offset;
+ void *memblock = mempool->memblocks_arr[memblock_idx];
+
+ xge_assert(memblock);
+
+ offset = (char *)item - (char *)memblock;
+ xge_assert(offset >= 0 && offset < mempool->memblock_size);
+
+ (*memblock_item_idx) = (int) offset / mempool->item_size;
+ xge_assert((*memblock_item_idx) < mempool->items_per_memblock);
+
+ return (char*)mempool->memblocks_priv_arr[memblock_idx] +
+ (*memblock_item_idx) * mempool->items_priv_size;
+}
+
+/*
+ * __hal_mempool_items_arr - Return a pointer to the mempool's items array.
+ */
+static inline void*
+__hal_mempool_items_arr(xge_hal_mempool_t *mempool)
+{
+ return mempool->items_arr;
+}
+
+/*
+ * __hal_mempool_memblock - Return a pointer to the memblock in the
+ * mempool's memblocks array.
+ */
+static inline void*
+__hal_mempool_memblock(xge_hal_mempool_t *mempool, int memblock_idx)
+{
+ xge_assert(mempool->memblocks_arr[memblock_idx]);
+ return mempool->memblocks_arr[memblock_idx];
+}
+
+/*
+ * __hal_mempool_memblock_dma - Return a pointer to the DMA block that
+ * corresponds to the memblock (identified by memblock_idx) in the mempool.
+ */
+static inline xge_hal_mempool_dma_t*
+__hal_mempool_memblock_dma(xge_hal_mempool_t *mempool, int memblock_idx)
+{
+ return mempool->memblocks_dma_arr + memblock_idx;
+}
+
+xge_hal_status_e __hal_mempool_grow(xge_hal_mempool_t *mempool,
+ int num_allocate, int *num_allocated);
+
+xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size,
+ int item_size, int private_size, int items_initial,
+ int items_max, xge_hal_mempool_item_f item_func_alloc,
+ xge_hal_mempool_item_f item_func_free, void *userdata);
+
+void __hal_mempool_destroy(xge_hal_mempool_t *mempool);
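+
+/*
+ * Creation/teardown sketch (illustrative; "item_alloc" and "item_free"
+ * stand for caller-supplied xge_hal_mempool_item_f callbacks):
+ *
+ *	xge_hal_mempool_t *mp;
+ *
+ *	mp = __hal_mempool_create(pdev, memblock_size, item_size,
+ *	    private_size, items_initial, items_max, item_alloc,
+ *	    item_free, userdata);
+ *	if (mp != NULL) {
+ *		...
+ *		__hal_mempool_destroy(mp);
+ *	}
+ */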
+
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_MM_H */
diff --git a/sys/dev/nxge/include/xgehal-regs.h b/sys/dev/nxge/include/xgehal-regs.h
new file mode 100644
index 0000000..89a2c4a
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-regs.h
@@ -0,0 +1,1377 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-regs.h
+ *
+ * Description: Xframe mem-mapped register space
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_HAL_REGS_H
+#define XGE_HAL_REGS_H
+
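+/*
+ * Note on bit numbering: the BIT()/vBIT() helpers used throughout this
+ * file come from the HAL OS-pal layer and follow the Xframe convention
+ * of counting bits from the most-significant end of the 64-bit
+ * register. A sketch of the expected definitions:
+ *
+ *	BIT(loc)         == 0x8000000000000000ULL >> (loc)
+ *	vBIT(val,loc,sz) == ((u64)(val)) << (64 - (loc) - (sz))
+ *
+ * so, e.g., BIT(0) is the top bit of a register, not the bottom one.
+ */
+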
+typedef struct {
+
+/* General Control-Status Registers */
+ u64 general_int_status;
+#define XGE_HAL_GEN_INTR_TXPIC BIT(0)
+#define XGE_HAL_GEN_INTR_TXDMA BIT(1)
+#define XGE_HAL_GEN_INTR_TXMAC BIT(2)
+#define XGE_HAL_GEN_INTR_TXXGXS BIT(3)
+#define XGE_HAL_GEN_INTR_TXTRAFFIC BIT(8)
+#define XGE_HAL_GEN_INTR_RXPIC BIT(32)
+#define XGE_HAL_GEN_INTR_RXDMA BIT(33)
+#define XGE_HAL_GEN_INTR_RXMAC BIT(34)
+#define XGE_HAL_GEN_INTR_MC BIT(35)
+#define XGE_HAL_GEN_INTR_RXXGXS BIT(36)
+#define XGE_HAL_GEN_INTR_RXTRAFFIC BIT(40)
+#define XGE_HAL_GEN_ERROR_INTR (XGE_HAL_GEN_INTR_TXPIC | \
+ XGE_HAL_GEN_INTR_RXPIC | \
+ XGE_HAL_GEN_INTR_TXDMA | \
+ XGE_HAL_GEN_INTR_RXDMA | \
+ XGE_HAL_GEN_INTR_TXMAC | \
+ XGE_HAL_GEN_INTR_RXMAC | \
+ XGE_HAL_GEN_INTR_TXXGXS | \
+ XGE_HAL_GEN_INTR_RXXGXS | \
+ XGE_HAL_GEN_INTR_MC)
+
+ u64 general_int_mask;
+
+ u8 unused0[0x100 - 0x10];
+
+ u64 sw_reset;
+
+/* XGXS must be removed from reset only once. */
+#define XGE_HAL_SW_RESET_XENA vBIT(0xA5,0,8)
+#define XGE_HAL_SW_RESET_FLASH vBIT(0xA5,8,8)
+#define XGE_HAL_SW_RESET_EOI vBIT(0xA5,16,8)
+#define XGE_HAL_SW_RESET_XGXS vBIT(0xA5,24,8)
+#define XGE_HAL_SW_RESET_ALL (XGE_HAL_SW_RESET_XENA | \
+ XGE_HAL_SW_RESET_FLASH | \
+ XGE_HAL_SW_RESET_EOI | \
+ XGE_HAL_SW_RESET_XGXS)
+
+/* The SW_RESET register must read this value after a successful reset. */
+#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
+#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA500000000ULL
+#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A500000000ULL
+#else
+#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA5000000ULL
+#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A50000ULL
+#endif
+
+
+ u64 adapter_status;
+#define XGE_HAL_ADAPTER_STATUS_TDMA_READY BIT(0)
+#define XGE_HAL_ADAPTER_STATUS_RDMA_READY BIT(1)
+#define XGE_HAL_ADAPTER_STATUS_PFC_READY BIT(2)
+#define XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY BIT(3)
+#define XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT BIT(5)
+#define XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6)
+#define XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7)
+#define XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
+#define XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE vBIT(0x0F,8,8)
+#define XGE_HAL_ADAPTER_PCC_ENABLE_FOUR vBIT(0x0F,0,8)
+
+#define XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
+#define XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY BIT(24)
+#define XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY BIT(25)
+#define XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK BIT(30)
+#define XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK BIT(31)
+
+ u64 adapter_control;
+#define XGE_HAL_ADAPTER_CNTL_EN BIT(7)
+#define XGE_HAL_ADAPTER_EOI_TX_ON BIT(15)
+#define XGE_HAL_ADAPTER_LED_ON BIT(23)
+#define XGE_HAL_ADAPTER_UDPI(val) vBIT(val,36,4)
+#define XGE_HAL_ADAPTER_WAIT_INT BIT(48)
+#define XGE_HAL_ADAPTER_ECC_EN BIT(55)
+
+ u64 serr_source;
+#define XGE_HAL_SERR_SOURCE_PIC BIT(0)
+#define XGE_HAL_SERR_SOURCE_TXDMA BIT(1)
+#define XGE_HAL_SERR_SOURCE_RXDMA BIT(2)
+#define XGE_HAL_SERR_SOURCE_MAC BIT(3)
+#define XGE_HAL_SERR_SOURCE_MC BIT(4)
+#define XGE_HAL_SERR_SOURCE_XGXS BIT(5)
+#define XGE_HAL_SERR_SOURCE_ANY (XGE_HAL_SERR_SOURCE_PIC | \
+ XGE_HAL_SERR_SOURCE_TXDMA | \
+ XGE_HAL_SERR_SOURCE_RXDMA | \
+ XGE_HAL_SERR_SOURCE_MAC | \
+ XGE_HAL_SERR_SOURCE_MC | \
+ XGE_HAL_SERR_SOURCE_XGXS)
+
+ u64 pci_info;
+#define XGE_HAL_PCI_INFO vBIT(0xF,0,4)
+#define XGE_HAL_PCI_32_BIT BIT(8)
+
+ u8 unused0_1[0x160 - 0x128];
+
+ u64 ric_status;
+
+ u8 unused0_2[0x558 - 0x168];
+
+ u64 mbist_status;
+
+ u8 unused0_3[0x800 - 0x560];
+
+/* PCI-X Controller registers */
+ u64 pic_int_status;
+ u64 pic_int_mask;
+#define XGE_HAL_PIC_INT_TX BIT(0)
+#define XGE_HAL_PIC_INT_FLSH BIT(1)
+#define XGE_HAL_PIC_INT_MDIO BIT(2)
+#define XGE_HAL_PIC_INT_IIC BIT(3)
+#define XGE_HAL_PIC_INT_MISC BIT(4)
+#define XGE_HAL_PIC_INT_RX BIT(32)
+
+ u64 txpic_int_reg;
+#define XGE_HAL_TXPIC_INT_SCHED_INTR BIT(42)
+ u64 txpic_int_mask;
+#define XGE_HAL_PCIX_INT_REG_ECC_SG_ERR BIT(0)
+#define XGE_HAL_PCIX_INT_REG_ECC_DB_ERR BIT(1)
+#define XGE_HAL_PCIX_INT_REG_FLASHR_R_FSM_ERR BIT(8)
+#define XGE_HAL_PCIX_INT_REG_FLASHR_W_FSM_ERR BIT(9)
+#define XGE_HAL_PCIX_INT_REG_INI_TX_FSM_SERR BIT(10)
+#define XGE_HAL_PCIX_INT_REG_INI_TXO_FSM_ERR BIT(11)
+#define XGE_HAL_PCIX_INT_REG_TRT_FSM_SERR BIT(13)
+#define XGE_HAL_PCIX_INT_REG_SRT_FSM_SERR BIT(14)
+#define XGE_HAL_PCIX_INT_REG_PIFR_FSM_SERR BIT(15)
+#define XGE_HAL_PCIX_INT_REG_WRC_TX_SEND_FSM_SERR BIT(21)
+#define XGE_HAL_PCIX_INT_REG_RRC_TX_REQ_FSM_SERR BIT(23)
+#define XGE_HAL_PCIX_INT_REG_INI_RX_FSM_SERR BIT(48)
+#define XGE_HAL_PCIX_INT_REG_RA_RX_FSM_SERR BIT(50)
+/*
+#define XGE_HAL_PCIX_INT_REG_WRC_RX_SEND_FSM_SERR BIT(52)
+#define XGE_HAL_PCIX_INT_REG_RRC_RX_REQ_FSM_SERR BIT(54)
+#define XGE_HAL_PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR BIT(58)
+*/
+ u64 txpic_alarms;
+ u64 rxpic_int_reg;
+#define XGE_HAL_RX_PIC_INT_REG_SPDM_READY BIT(0)
+#define XGE_HAL_RX_PIC_INT_REG_SPDM_OVERWRITE_ERR BIT(44)
+#define XGE_HAL_RX_PIC_INT_REG_SPDM_PERR BIT(55)
+ u64 rxpic_int_mask;
+ u64 rxpic_alarms;
+
+ u64 flsh_int_reg;
+ u64 flsh_int_mask;
+#define XGE_HAL_PIC_FLSH_INT_REG_CYCLE_FSM_ERR BIT(63)
+#define XGE_HAL_PIC_FLSH_INT_REG_ERR BIT(62)
+ u64 flash_alarms;
+
+ u64 mdio_int_reg;
+ u64 mdio_int_mask;
+#define XGE_HAL_MDIO_INT_REG_MDIO_BUS_ERR BIT(0)
+#define XGE_HAL_MDIO_INT_REG_DTX_BUS_ERR BIT(8)
+#define XGE_HAL_MDIO_INT_REG_LASI BIT(39)
+ u64 mdio_alarms;
+
+ u64 iic_int_reg;
+ u64 iic_int_mask;
+#define XGE_HAL_IIC_INT_REG_BUS_FSM_ERR BIT(4)
+#define XGE_HAL_IIC_INT_REG_BIT_FSM_ERR BIT(5)
+#define XGE_HAL_IIC_INT_REG_CYCLE_FSM_ERR BIT(6)
+#define XGE_HAL_IIC_INT_REG_REQ_FSM_ERR BIT(7)
+#define XGE_HAL_IIC_INT_REG_ACK_ERR BIT(8)
+ u64 iic_alarms;
+
+ u64 msi_pending_reg;
+
+ u64 misc_int_reg;
+#define XGE_HAL_MISC_INT_REG_DP_ERR_INT BIT(0)
+#define XGE_HAL_MISC_INT_REG_LINK_DOWN_INT BIT(1)
+#define XGE_HAL_MISC_INT_REG_LINK_UP_INT BIT(2)
+ u64 misc_int_mask;
+ u64 misc_alarms;
+
+ u64 msi_triggered_reg;
+
+ u64 xfp_gpio_int_reg;
+ u64 xfp_gpio_int_mask;
+ u64 xfp_alarms;
+
+ u8 unused5[0x8E0 - 0x8C8];
+
+ u64 tx_traffic_int;
+#define XGE_HAL_TX_TRAFFIC_INT_n(n) BIT(n)
+ u64 tx_traffic_mask;
+
+ u64 rx_traffic_int;
+#define XGE_HAL_RX_TRAFFIC_INT_n(n) BIT(n)
+ u64 rx_traffic_mask;
+
+/* PIC Control registers */
+ u64 pic_control;
+#define XGE_HAL_PIC_CNTL_RX_ALARM_MAP_1 BIT(0)
+#define XGE_HAL_PIC_CNTL_ONE_SHOT_TINT BIT(1)
+#define XGE_HAL_PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4)
+
+ u64 swapper_ctrl;
+#define XGE_HAL_SWAPPER_CTRL_PIF_R_FE BIT(0)
+#define XGE_HAL_SWAPPER_CTRL_PIF_R_SE BIT(1)
+#define XGE_HAL_SWAPPER_CTRL_PIF_W_FE BIT(8)
+#define XGE_HAL_SWAPPER_CTRL_PIF_W_SE BIT(9)
+#define XGE_HAL_SWAPPER_CTRL_RTH_FE BIT(10)
+#define XGE_HAL_SWAPPER_CTRL_RTH_SE BIT(11)
+#define XGE_HAL_SWAPPER_CTRL_TXP_FE BIT(16)
+#define XGE_HAL_SWAPPER_CTRL_TXP_SE BIT(17)
+#define XGE_HAL_SWAPPER_CTRL_TXD_R_FE BIT(18)
+#define XGE_HAL_SWAPPER_CTRL_TXD_R_SE BIT(19)
+#define XGE_HAL_SWAPPER_CTRL_TXD_W_FE BIT(20)
+#define XGE_HAL_SWAPPER_CTRL_TXD_W_SE BIT(21)
+#define XGE_HAL_SWAPPER_CTRL_TXF_R_FE BIT(22)
+#define XGE_HAL_SWAPPER_CTRL_TXF_R_SE BIT(23)
+#define XGE_HAL_SWAPPER_CTRL_RXD_R_FE BIT(32)
+#define XGE_HAL_SWAPPER_CTRL_RXD_R_SE BIT(33)
+#define XGE_HAL_SWAPPER_CTRL_RXD_W_FE BIT(34)
+#define XGE_HAL_SWAPPER_CTRL_RXD_W_SE BIT(35)
+#define XGE_HAL_SWAPPER_CTRL_RXF_W_FE BIT(36)
+#define XGE_HAL_SWAPPER_CTRL_RXF_W_SE BIT(37)
+#define XGE_HAL_SWAPPER_CTRL_XMSI_FE BIT(40)
+#define XGE_HAL_SWAPPER_CTRL_XMSI_SE BIT(41)
+#define XGE_HAL_SWAPPER_CTRL_STATS_FE BIT(48)
+#define XGE_HAL_SWAPPER_CTRL_STATS_SE BIT(49)
+
+ u64 pif_rd_swapper_fb;
+#define XGE_HAL_IF_RD_SWAPPER_FB 0x0123456789ABCDEFULL
+
+ u64 scheduled_int_ctrl;
+#define XGE_HAL_SCHED_INT_CTRL_TIMER_EN BIT(0)
+#define XGE_HAL_SCHED_INT_CTRL_ONE_SHOT BIT(1)
+#define XGE_HAL_SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6)
+#define XGE_HAL_SCHED_INT_PERIOD(val) vBIT(val,32,32)
+#define XGE_HAL_SCHED_INT_PERIOD_MASK 0xFFFFFFFF00000000ULL
+
+
+ u64 txreqtimeout;
+#define XGE_HAL_TXREQTO_VAL(val) vBIT(val,0,32)
+#define XGE_HAL_TXREQTO_EN BIT(63)
+
+ u64 statsreqtimeout;
+#define XGE_HAL_STATREQTO_VAL(n) TBD
+#define XGE_HAL_STATREQTO_EN BIT(63)
+
+ u64 read_retry_delay;
+ u64 read_retry_acceleration;
+ u64 write_retry_delay;
+ u64 write_retry_acceleration;
+
+ u64 xmsi_control;
+#define XGE_HAL_XMSI_EN BIT(0)
+#define XGE_HAL_XMSI_DIS_TINT_SERR BIT(1)
+#define XGE_HAL_XMSI_BYTE_COUNT(val) vBIT(val,13,3)
+
+ u64 xmsi_access;
+#define XGE_HAL_XMSI_WR_RDN BIT(7)
+#define XGE_HAL_XMSI_STROBE BIT(15)
+#define XGE_HAL_XMSI_NO(val) vBIT(val,26,6)
+
+ u64 xmsi_address;
+ u64 xmsi_data;
+
+ u64 rx_mat;
+#define XGE_HAL_SET_RX_MAT(ring, msi) vBIT(msi, (8 * ring), 8)
+
+ u8 unused6[0x8];
+
+ u64 tx_mat[8];
+#define XGE_HAL_SET_TX_MAT(fifo, msi) vBIT(msi, (8 * fifo), 8)
+
+ u64 xmsi_mask_reg;
+
+ /* Automated statistics collection */
+ u64 stat_byte_cnt;
+ u64 stat_cfg;
+#define XGE_HAL_STAT_CFG_STAT_EN BIT(0)
+#define XGE_HAL_STAT_CFG_ONE_SHOT_EN BIT(1)
+#define XGE_HAL_STAT_CFG_STAT_NS_EN BIT(8)
+#define XGE_HAL_STAT_CFG_STAT_RO BIT(9)
+#define XGE_HAL_XENA_PER_SEC 0x208d5
+#define XGE_HAL_SET_UPDT_PERIOD(n) vBIT(n,32,32)
+
+ u64 stat_addr;
+
+ /* General Configuration */
+ u64 mdio_control;
+#define XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(n) vBIT(n,0,16)
+#define XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(n) vBIT(n,19,5)
+#define XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(n) vBIT(n,27,5)
+#define XGE_HAL_MDIO_CONTROL_MMD_DATA(n) vBIT(n,32,16)
+#define XGE_HAL_MDIO_CONTROL_MMD_CTRL(n) vBIT(n,56,4)
+#define XGE_HAL_MDIO_CONTROL_MMD_OP(n) vBIT(n,60,2)
+#define XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(n) ((n>>16)&0xFFFF)
+#define XGE_HAL_MDIO_MMD_PMA_DEV_ADDR 0x01
+#define XGE_HAL_MDIO_DOM_REG_ADDR 0xA100
+#define XGE_HAL_MDIO_ALARM_FLAGS_ADDR 0xA070
+#define XGE_HAL_MDIO_WARN_FLAGS_ADDR 0xA074
+#define XGE_HAL_MDIO_CTRL_START 0xE
+#define XGE_HAL_MDIO_OP_ADDRESS 0x0
+#define XGE_HAL_MDIO_OP_WRITE 0x1
+#define XGE_HAL_MDIO_OP_READ 0x3
+#define XGE_HAL_MDIO_OP_READ_POST_INCREMENT 0x2
+#define XGE_HAL_MDIO_ALARM_TEMPHIGH 0x0080
+#define XGE_HAL_MDIO_ALARM_TEMPLOW 0x0040
+#define XGE_HAL_MDIO_ALARM_BIASHIGH 0x0008
+#define XGE_HAL_MDIO_ALARM_BIASLOW 0x0004
+#define XGE_HAL_MDIO_ALARM_POUTPUTHIGH 0x0002
+#define XGE_HAL_MDIO_ALARM_POUTPUTLOW 0x0001
+#define XGE_HAL_MDIO_WARN_TEMPHIGH 0x0080
+#define XGE_HAL_MDIO_WARN_TEMPLOW 0x0040
+#define XGE_HAL_MDIO_WARN_BIASHIGH 0x0008
+#define XGE_HAL_MDIO_WARN_BIASLOW 0x0004
+#define XGE_HAL_MDIO_WARN_POUTPUTHIGH 0x0002
+#define XGE_HAL_MDIO_WARN_POUTPUTLOW 0x0001
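+
+/*
+ * Composition sketch for one MDIO read cycle (illustrative; the
+ * authoritative sequence lives in the HAL management code). The
+ * register address is latched first, then the read is issued and the
+ * result extracted with XGE_HAL_MDIO_CONTROL_MMD_DATA_GET():
+ *
+ *	val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ *	    XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ *	    XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS) |
+ *	    XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ *	(write val64 to mdio_control; repeat with XGE_HAL_MDIO_OP_READ,
+ *	 read mdio_control back, and apply
+ *	 XGE_HAL_MDIO_CONTROL_MMD_DATA_GET() to the result)
+ */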
+
+ u64 dtx_control;
+
+ u64 i2c_control;
+#define XGE_HAL_I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
+#define XGE_HAL_I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
+#define XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
+#define XGE_HAL_I2C_CONTROL_READ BIT(24)
+#define XGE_HAL_I2C_CONTROL_NACK BIT(25)
+#define XGE_HAL_I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
+#define XGE_HAL_I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
+#define XGE_HAL_I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
+#define XGE_HAL_I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
+
+ u64 beacon_control;
+ u64 misc_control;
+#define XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(val) vBIT(val,29,3)
+#define XGE_HAL_MISC_CONTROL_EXT_REQ_EN BIT(1)
+#define XGE_HAL_MISC_CONTROL_LINK_FAULT BIT(0)
+
+ u64 xfb_control;
+ u64 gpio_control;
+#define XGE_HAL_GPIO_CTRL_GPIO_0 BIT(8)
+
+ u64 txfifo_dw_mask;
+ u64 split_table_line_no;
+ u64 sc_timeout;
+ u64 pic_control_2;
+#define XGE_HAL_TXD_WRITE_BC(n) vBIT(n, 13, 3)
+ u64 ini_dperr_ctrl;
+ u64 wreq_split_mask;
+ u64 qw_per_rxd;
+ u8 unused7[0x300 - 0x250];
+
+ u64 pic_status;
+ u64 txp_status;
+ u64 txp_err_context;
+ u64 spdm_bir_offset;
+#define XGE_HAL_SPDM_PCI_BAR_NUM(spdm_bir_offset) \
+ (u8)(spdm_bir_offset >> 61)
+#define XGE_HAL_SPDM_PCI_BAR_OFFSET(spdm_bir_offset) \
+ (u32)((spdm_bir_offset >> 32) & 0x1FFFFFFF)
+ u64 spdm_overwrite;
+#define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_ENTRY(spdm_overwrite) \
+ (u8)((spdm_overwrite >> 48) & 0xff)
+#define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_DW(spdm_overwrite) \
+ (u8)((spdm_overwrite >> 40) & 0x3)
+#define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_LINE(spdm_overwrite) \
+ (u8)((spdm_overwrite >> 32) & 0x7)
+ u64 cfg_addr_on_dperr;
+ u64 pif_addr_on_dperr;
+ u64 tags_in_use;
+ u64 rd_req_types;
+ u64 split_table_line;
+ u64 unxp_split_add_ph;
+ u64 unexp_split_attr_ph;
+ u64 split_message;
+ u64 spdm_structure;
+#define XGE_HAL_SPDM_MAX_ENTRIES(spdm_structure) (u16)(spdm_structure >> 48)
+#define XGE_HAL_SPDM_INT_QW_PER_ENTRY(spdm_structure) \
+ (u8)((spdm_structure >> 40) & 0xff)
+#define XGE_HAL_SPDM_PCI_QW_PER_ENTRY(spdm_structure) \
+ (u8)((spdm_structure >> 32) & 0xff)
+
+ u64 txdw_ptr_cnt_0;
+ u64 txdw_ptr_cnt_1;
+ u64 txdw_ptr_cnt_2;
+ u64 txdw_ptr_cnt_3;
+ u64 txdw_ptr_cnt_4;
+ u64 txdw_ptr_cnt_5;
+ u64 txdw_ptr_cnt_6;
+ u64 txdw_ptr_cnt_7;
+ u64 rxdw_cnt_ring_0;
+ u64 rxdw_cnt_ring_1;
+ u64 rxdw_cnt_ring_2;
+ u64 rxdw_cnt_ring_3;
+ u64 rxdw_cnt_ring_4;
+ u64 rxdw_cnt_ring_5;
+ u64 rxdw_cnt_ring_6;
+ u64 rxdw_cnt_ring_7;
+
+ u8 unused8[0x410];
+
+/* TxDMA registers */
+ u64 txdma_int_status;
+ u64 txdma_int_mask;
+#define XGE_HAL_TXDMA_PFC_INT BIT(0)
+#define XGE_HAL_TXDMA_TDA_INT BIT(1)
+#define XGE_HAL_TXDMA_PCC_INT BIT(2)
+#define XGE_HAL_TXDMA_TTI_INT BIT(3)
+#define XGE_HAL_TXDMA_LSO_INT BIT(4)
+#define XGE_HAL_TXDMA_TPA_INT BIT(5)
+#define XGE_HAL_TXDMA_SM_INT BIT(6)
+ u64 pfc_err_reg;
+#define XGE_HAL_PFC_ECC_SG_ERR BIT(7)
+#define XGE_HAL_PFC_ECC_DB_ERR BIT(15)
+#define XGE_HAL_PFC_SM_ERR_ALARM BIT(23)
+#define XGE_HAL_PFC_MISC_0_ERR BIT(31)
+#define XGE_HAL_PFC_MISC_1_ERR BIT(32)
+#define XGE_HAL_PFC_PCIX_ERR BIT(39)
+ u64 pfc_err_mask;
+ u64 pfc_err_alarm;
+
+ u64 tda_err_reg;
+#define XGE_HAL_TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8)
+#define XGE_HAL_TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8)
+#define XGE_HAL_TDA_SM0_ERR_ALARM BIT(22)
+#define XGE_HAL_TDA_SM1_ERR_ALARM BIT(23)
+#define XGE_HAL_TDA_PCIX_ERR BIT(39)
+ u64 tda_err_mask;
+ u64 tda_err_alarm;
+
+ u64 pcc_err_reg;
+#define XGE_HAL_PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8)
+#define XGE_HAL_PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8)
+#define XGE_HAL_PCC_FB_ECC_DB_ERR vBIT(0xFF,16,8)
+#define XGE_HAL_PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8)
+#define XGE_HAL_PCC_SM_ERR_ALARM vBIT(0xff,32,8)
+#define XGE_HAL_PCC_WR_ERR_ALARM vBIT(0xff,40,8)
+#define XGE_HAL_PCC_N_SERR vBIT(0xff,48,8)
+#define XGE_HAL_PCC_ENABLE_FOUR vBIT(0x0F,0,8)
+#define XGE_HAL_PCC_6_COF_OV_ERR BIT(56)
+#define XGE_HAL_PCC_7_COF_OV_ERR BIT(57)
+#define XGE_HAL_PCC_6_LSO_OV_ERR BIT(58)
+#define XGE_HAL_PCC_7_LSO_OV_ERR BIT(59)
+ u64 pcc_err_mask;
+ u64 pcc_err_alarm;
+
+ u64 tti_err_reg;
+#define XGE_HAL_TTI_ECC_SG_ERR BIT(7)
+#define XGE_HAL_TTI_ECC_DB_ERR BIT(15)
+#define XGE_HAL_TTI_SM_ERR_ALARM BIT(23)
+ u64 tti_err_mask;
+ u64 tti_err_alarm;
+
+ u64 lso_err_reg;
+#define XGE_HAL_LSO6_SEND_OFLOW BIT(12)
+#define XGE_HAL_LSO7_SEND_OFLOW BIT(13)
+#define XGE_HAL_LSO6_ABORT BIT(14)
+#define XGE_HAL_LSO7_ABORT BIT(15)
+#define XGE_HAL_LSO6_SM_ERR_ALARM BIT(22)
+#define XGE_HAL_LSO7_SM_ERR_ALARM BIT(23)
+ u64 lso_err_mask;
+ u64 lso_err_alarm;
+
+ u64 tpa_err_reg;
+#define XGE_HAL_TPA_TX_FRM_DROP BIT(7)
+#define XGE_HAL_TPA_SM_ERR_ALARM BIT(23)
+ u64 tpa_err_mask;
+ u64 tpa_err_alarm;
+
+ u64 sm_err_reg;
+#define XGE_HAL_SM_SM_ERR_ALARM BIT(15)
+ u64 sm_err_mask;
+ u64 sm_err_alarm;
+
+ u8 unused9[0x100 - 0xB8];
+
+/* TxDMA arbiter */
+ u64 tx_dma_wrap_stat;
+
+/* Tx FIFO controller */
+#define XGE_HAL_X_MAX_FIFOS 8
+#define XGE_HAL_X_FIFO_MAX_LEN 0x1FFF /* 8191 */
+ u64 tx_fifo_partition_0;
+#define XGE_HAL_TX_FIFO_PARTITION_EN BIT(0)
+#define XGE_HAL_TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
+#define XGE_HAL_TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
+#define XGE_HAL_TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
+#define XGE_HAL_TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_1;
+#define XGE_HAL_TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
+#define XGE_HAL_TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
+#define XGE_HAL_TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
+#define XGE_HAL_TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_2;
+#define XGE_HAL_TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
+#define XGE_HAL_TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
+#define XGE_HAL_TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
+#define XGE_HAL_TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_3;
+#define XGE_HAL_TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
+#define XGE_HAL_TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
+#define XGE_HAL_TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
+#define XGE_HAL_TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
+
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_0 0 /* highest */
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_1 1
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_2 2
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_3 3
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_4 4
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_5 5
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_6 6
+#define XGE_HAL_TX_FIFO_PARTITION_PRI_7 7 /* lowest */
+
+ u64 tx_w_round_robin_0;
+ u64 tx_w_round_robin_1;
+ u64 tx_w_round_robin_2;
+ u64 tx_w_round_robin_3;
+ u64 tx_w_round_robin_4;
+
+ u64 tti_command_mem;
+#define XGE_HAL_TTI_CMD_MEM_WE BIT(7)
+#define XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define XGE_HAL_TTI_CMD_MEM_STROBE_BEING_EXECUTED BIT(15)
+#define XGE_HAL_TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 tti_data1_mem;
+#define XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
+#define XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
+#define XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN BIT(38)
+#define XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN BIT(39)
+#define XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
+#define XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
+#define XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
+
+ u64 tti_data2_mem;
+#define XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
+#define XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
+#define XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
+#define XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
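+
+/*
+ * Programming sketch for one TTI entry (illustrative): the timer and
+ * utilization-range values are written to the data registers first,
+ * then committed through the command memory at the fifo's offset:
+ *
+ *	(write timer/URNG values to tti_data1_mem and UFC_A..UFC_D
+ *	 values to tti_data2_mem)
+ *	val64 = XGE_HAL_TTI_CMD_MEM_WE |
+ *	    XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
+ *	    XGE_HAL_TTI_CMD_MEM_OFFSET(fifo_no);
+ *	(write val64 to tti_command_mem, then poll until the
+ *	 STROBE_BEING_EXECUTED bit clears)
+ */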
+
+/* Tx Protocol assist */
+ u64 tx_pa_cfg;
+#define XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR BIT(1)
+#define XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
+#define XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
+#define XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR BIT(6)
+
+/* Recent addition, used for debug purposes only. */
+ u64 pcc_enable;
+
+ u64 pfc_monitor_0;
+ u64 pfc_monitor_1;
+ u64 pfc_monitor_2;
+ u64 pfc_monitor_3;
+ u64 txd_ownership_ctrl;
+ u64 pfc_read_cntrl;
+ u64 pfc_read_data;
+
+ u8 unused10[0x1700 - 0x11B0];
+
+ u64 txdma_debug_ctrl;
+
+ u8 unused11[0x1800 - 0x1708];
+
+/* RxDMA Registers */
+ u64 rxdma_int_status;
+#define XGE_HAL_RXDMA_RC_INT BIT(0)
+#define XGE_HAL_RXDMA_RPA_INT BIT(1)
+#define XGE_HAL_RXDMA_RDA_INT BIT(2)
+#define XGE_HAL_RXDMA_RTI_INT BIT(3)
+
+ u64 rxdma_int_mask;
+#define XGE_HAL_RXDMA_INT_RC_INT_M BIT(0)
+#define XGE_HAL_RXDMA_INT_RPA_INT_M BIT(1)
+#define XGE_HAL_RXDMA_INT_RDA_INT_M BIT(2)
+#define XGE_HAL_RXDMA_INT_RTI_INT_M BIT(3)
+
+ u64 rda_err_reg;
+#define XGE_HAL_RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8)
+#define XGE_HAL_RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8)
+#define XGE_HAL_RDA_FRM_ECC_SG_ERR BIT(23)
+#define XGE_HAL_RDA_FRM_ECC_DB_N_AERR BIT(31)
+#define XGE_HAL_RDA_SM1_ERR_ALARM BIT(38)
+#define XGE_HAL_RDA_SM0_ERR_ALARM BIT(39)
+#define XGE_HAL_RDA_MISC_ERR BIT(47)
+#define XGE_HAL_RDA_PCIX_ERR BIT(55)
+#define XGE_HAL_RDA_RXD_ECC_DB_SERR BIT(63)
+ u64 rda_err_mask;
+ u64 rda_err_alarm;
+
+ u64 rc_err_reg;
+#define XGE_HAL_RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8)
+#define XGE_HAL_RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8)
+#define XGE_HAL_RC_FTC_ECC_SG_ERR BIT(23)
+#define XGE_HAL_RC_FTC_ECC_DB_ERR BIT(31)
+#define XGE_HAL_RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8)
+#define XGE_HAL_RC_FTC_SM_ERR_ALARM BIT(47)
+#define XGE_HAL_RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8)
+ u64 rc_err_mask;
+ u64 rc_err_alarm;
+
+ u64 prc_pcix_err_reg;
+#define XGE_HAL_PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8)
+#define XGE_HAL_PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8)
+#define XGE_HAL_PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8)
+#define XGE_HAL_PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8)
+#define XGE_HAL_PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8)
+#define XGE_HAL_PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8)
+ u64 prc_pcix_err_mask;
+ u64 prc_pcix_err_alarm;
+
+ u64 rpa_err_reg;
+#define XGE_HAL_RPA_ECC_SG_ERR BIT(7)
+#define XGE_HAL_RPA_ECC_DB_ERR BIT(15)
+#define XGE_HAL_RPA_FLUSH_REQUEST BIT(22)
+#define XGE_HAL_RPA_SM_ERR_ALARM BIT(23)
+#define XGE_HAL_RPA_CREDIT_ERR BIT(31)
+ u64 rpa_err_mask;
+ u64 rpa_err_alarm;
+
+ u64 rti_err_reg;
+#define XGE_HAL_RTI_ECC_SG_ERR BIT(7)
+#define XGE_HAL_RTI_ECC_DB_ERR BIT(15)
+#define XGE_HAL_RTI_SM_ERR_ALARM BIT(23)
+ u64 rti_err_mask;
+ u64 rti_err_alarm;
+
+ u8 unused12[0x100 - 0x88];
+
+/* DMA arbiter */
+ u64 rx_queue_priority;
+#define XGE_HAL_RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
+#define XGE_HAL_RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
+#define XGE_HAL_RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
+#define XGE_HAL_RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
+#define XGE_HAL_RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
+#define XGE_HAL_RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
+#define XGE_HAL_RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
+#define XGE_HAL_RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
+
+#define XGE_HAL_RX_QUEUE_PRI_0 0 /* highest */
+#define XGE_HAL_RX_QUEUE_PRI_1 1
+#define XGE_HAL_RX_QUEUE_PRI_2 2
+#define XGE_HAL_RX_QUEUE_PRI_3 3
+#define XGE_HAL_RX_QUEUE_PRI_4 4
+#define XGE_HAL_RX_QUEUE_PRI_5 5
+#define XGE_HAL_RX_QUEUE_PRI_6 6
+#define XGE_HAL_RX_QUEUE_PRI_7 7 /* lowest */
+
+ u64 rx_w_round_robin_0;
+ u64 rx_w_round_robin_1;
+ u64 rx_w_round_robin_2;
+ u64 rx_w_round_robin_3;
+ u64 rx_w_round_robin_4;
+
+ /* Per-ring controller regs */
+#define XGE_HAL_RX_MAX_RINGS 8
+ u64 prc_rxd0_n[XGE_HAL_RX_MAX_RINGS];
+ u64 prc_ctrl_n[XGE_HAL_RX_MAX_RINGS];
+#define XGE_HAL_PRC_CTRL_RC_ENABLED BIT(7)
+#define XGE_HAL_PRC_CTRL_RING_MODE (BIT(14)|BIT(15))
+#define XGE_HAL_PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
+#define XGE_HAL_PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
+#define XGE_HAL_PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
+#define XGE_HAL_PRC_CTRL_RING_MODE_x vBIT(3,14,2)
+#define XGE_HAL_PRC_CTRL_NO_SNOOP(n) vBIT(n,22,2)
+#define XGE_HAL_PRC_CTRL_RTH_DISABLE BIT(31)
+#define XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
+#define XGE_HAL_PRC_CTRL_GROUP_READS BIT(38)
+#define XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
+
+ u64 prc_alarm_action;
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R0_STOP BIT(3)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R0_STOP BIT(7)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R1_STOP BIT(11)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R1_STOP BIT(15)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R2_STOP BIT(19)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R2_STOP BIT(23)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R3_STOP BIT(27)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R3_STOP BIT(31)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R4_STOP BIT(35)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R4_STOP BIT(39)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R5_STOP BIT(43)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R5_STOP BIT(47)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R6_STOP BIT(51)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R6_STOP BIT(55)
+#define XGE_HAL_PRC_ALARM_ACTION_RR_R7_STOP BIT(59)
+#define XGE_HAL_PRC_ALARM_ACTION_RW_R7_STOP BIT(63)
+
+/* Receive traffic interrupts */
+ u64 rti_command_mem;
+#define XGE_HAL_RTI_CMD_MEM_WE BIT(7)
+#define XGE_HAL_RTI_CMD_MEM_STROBE BIT(15)
+#define XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define XGE_HAL_RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED BIT(15)
+#define XGE_HAL_RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
+
+ u64 rti_data1_mem;
+#define XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
+#define XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN BIT(38)
+#define XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN BIT(39)
+#define XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
+#define XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
+#define XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
+
+ u64 rti_data2_mem;
+#define XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
+#define XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
+#define XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
+#define XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
+
+ u64 rx_pa_cfg;
+#define XGE_HAL_RX_PA_CFG_IGNORE_FRM_ERR BIT(1)
+#define XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
+#define XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
+#define XGE_HAL_RX_PA_CFG_SCATTER_MODE(n) vBIT(n,6,1)
+#define XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(n) vBIT(n,15,1)
+
+ u8 unused13_0[0x8];
+
+ u64 ring_bump_counter1;
+ u64 ring_bump_counter2;
+#define XGE_HAL_RING_BUMP_CNT(i, val) (u16)(val >> (48 - (16 * (i % 4))))
+
+ u8 unused13[0x700 - 0x1f0];
+
+ u64 rxdma_debug_ctrl;
+
+ u8 unused14[0x2000 - 0x1f08];
+
+/* Media Access Controller Register */
+ u64 mac_int_status;
+ u64 mac_int_mask;
+#define XGE_HAL_MAC_INT_STATUS_TMAC_INT BIT(0)
+#define XGE_HAL_MAC_INT_STATUS_RMAC_INT BIT(1)
+
+ u64 mac_tmac_err_reg;
+#define XGE_HAL_TMAC_ECC_DB_ERR BIT(15)
+#define XGE_HAL_TMAC_TX_BUF_OVRN BIT(23)
+#define XGE_HAL_TMAC_TX_CRI_ERR BIT(31)
+#define XGE_HAL_TMAC_TX_SM_ERR BIT(39)
+ u64 mac_tmac_err_mask;
+ u64 mac_tmac_err_alarm;
+
+ u64 mac_rmac_err_reg;
+#define XGE_HAL_RMAC_RX_BUFF_OVRN BIT(0)
+#define XGE_HAL_RMAC_RTH_SPDM_ECC_SG_ERR BIT(0)
+#define XGE_HAL_RMAC_RTS_ECC_DB_ERR BIT(0)
+#define XGE_HAL_RMAC_ECC_DB_ERR BIT(0)
+#define XGE_HAL_RMAC_RTH_SPDM_ECC_DB_ERR BIT(0)
+#define XGE_HAL_RMAC_LINK_STATE_CHANGE_INT BIT(0)
+#define XGE_HAL_RMAC_RX_SM_ERR BIT(39)
+ u64 mac_rmac_err_mask;
+ u64 mac_rmac_err_alarm;
+
+ u8 unused15[0x100 - 0x40];
+
+ u64 mac_cfg;
+#define XGE_HAL_MAC_CFG_TMAC_ENABLE BIT(0)
+#define XGE_HAL_MAC_CFG_RMAC_ENABLE BIT(1)
+#define XGE_HAL_MAC_CFG_LAN_NOT_WAN BIT(2)
+#define XGE_HAL_MAC_CFG_TMAC_LOOPBACK BIT(3)
+#define XGE_HAL_MAC_CFG_TMAC_APPEND_PAD BIT(4)
+#define XGE_HAL_MAC_CFG_RMAC_STRIP_FCS BIT(5)
+#define XGE_HAL_MAC_CFG_RMAC_STRIP_PAD BIT(6)
+#define XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE BIT(7)
+#define XGE_HAL_MAC_RMAC_DISCARD_PFRM BIT(8)
+#define XGE_HAL_MAC_RMAC_BCAST_ENABLE BIT(9)
+#define XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE BIT(10)
+#define XGE_HAL_MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
+
+ u64 tmac_avg_ipg;
+#define XGE_HAL_TMAC_AVG_IPG(val) vBIT(val,0,8)
+
+ u64 rmac_max_pyld_len;
+#define XGE_HAL_RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
+
+ u64 rmac_err_cfg;
+#define XGE_HAL_RMAC_ERR_FCS BIT(0)
+#define XGE_HAL_RMAC_ERR_FCS_ACCEPT BIT(1)
+#define XGE_HAL_RMAC_ERR_TOO_LONG BIT(1)
+#define XGE_HAL_RMAC_ERR_TOO_LONG_ACCEPT BIT(1)
+#define XGE_HAL_RMAC_ERR_RUNT BIT(2)
+#define XGE_HAL_RMAC_ERR_RUNT_ACCEPT BIT(2)
+#define XGE_HAL_RMAC_ERR_LEN_MISMATCH BIT(3)
+#define XGE_HAL_RMAC_ERR_LEN_MISMATCH_ACCEPT BIT(3)
+
+ u64 rmac_cfg_key;
+#define XGE_HAL_RMAC_CFG_KEY(val) vBIT(val,0,16)
+
+#define XGE_HAL_MAX_MAC_ADDRESSES 64
+#define XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET 63
+#define XGE_HAL_MAX_MAC_ADDRESSES_HERC 256
+#define XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC 255
+
+ u64 rmac_addr_cmd_mem;
+#define XGE_HAL_RMAC_ADDR_CMD_MEM_WE BIT(7)
+#define XGE_HAL_RMAC_ADDR_CMD_MEM_RD 0
+#define XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING BIT(15)
+#define XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 rmac_addr_data0_mem;
+#define XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
+#define XGE_HAL_RMAC_ADDR_DATA0_MEM_USER BIT(48)
+
+ u64 rmac_addr_data1_mem;
+#define XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
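+
+/*
+ * Address-CAM programming sketch (illustrative; the HAL device code
+ * holds the authoritative sequence): load the 48-bit address into the
+ * data register, then strobe the command memory at the chosen offset:
+ *
+ *	(write XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(mac) to
+ *	 rmac_addr_data0_mem)
+ *	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
+ *	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ *	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(n);
+ *	(write val64 to rmac_addr_cmd_mem, then poll until the
+ *	 STROBE_CMD_EXECUTING bit clears)
+ */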
+
+ u8 unused16[0x8];
+
+/*
+ u64 rmac_addr_cfg;
+#define XGE_HAL_RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
+#define XGE_HAL_RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
+#define XGE_HAL_RMAC_ADDR_BCAST_EN vBIT(0)_48
+#define XGE_HAL_RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
+*/
+ u64 tmac_ipg_cfg;
+
+ u64 rmac_pause_cfg;
+#define XGE_HAL_RMAC_PAUSE_GEN_EN BIT(0)
+#define XGE_HAL_RMAC_PAUSE_RCV_EN BIT(1)
+#define XGE_HAL_RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
+#define XGE_HAL_RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
+
+ u64 rmac_red_cfg;
+
+ u64 rmac_red_rate_q0q3;
+ u64 rmac_red_rate_q4q7;
+
+ u64 mac_link_util;
+#define XGE_HAL_MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
+#define XGE_HAL_MAC_TX_LINK_UTIL_DISABLE vBIT(0xF,8,4)
+#define XGE_HAL_MAC_TX_LINK_UTIL_VAL(n) vBIT(n,8,4)
+#define XGE_HAL_MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
+#define XGE_HAL_MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
+#define XGE_HAL_MAC_RX_LINK_UTIL_VAL(n) vBIT(n,40,4)
+
+#define XGE_HAL_MAC_LINK_UTIL_DISABLE (XGE_HAL_MAC_TX_LINK_UTIL_DISABLE | \
+ XGE_HAL_MAC_RX_LINK_UTIL_DISABLE)
+
+ u64 rmac_invalid_ipg;
+
+/* rx traffic steering */
+#define XGE_HAL_MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
+ u64 rts_frm_len_n[8];
+
+ u64 rts_qos_steering;
+
+#define XGE_HAL_MAX_DIX_MAP 4
+ u64 rts_dix_map_n[XGE_HAL_MAX_DIX_MAP];
+#define XGE_HAL_RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
+#define XGE_HAL_RTS_DIX_MAP_SCW(val) BIT(val,21)
+
+ u64 rts_q_alternates;
+ u64 rts_default_q;
+#define XGE_HAL_RTS_DEFAULT_Q(n) vBIT(n,5,3)
+
+ u64 rts_ctrl;
+#define XGE_HAL_RTS_CTRL_IGNORE_SNAP_OUI BIT(2)
+#define XGE_HAL_RTS_CTRL_IGNORE_LLC_CTRL BIT(3)
+#define XGE_HAL_RTS_CTRL_ENHANCED_MODE BIT(7)
+
+ u64 rts_pn_cam_ctrl;
+#define XGE_HAL_RTS_PN_CAM_CTRL_WE BIT(7)
+#define XGE_HAL_RTS_PN_CAM_CTRL_STROBE_NEW_CMD BIT(15)
+#define XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED BIT(15)
+#define XGE_HAL_RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
+ u64 rts_pn_cam_data;
+#define XGE_HAL_RTS_PN_CAM_DATA_TCP_SELECT BIT(7)
+#define XGE_HAL_RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
+#define XGE_HAL_RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
+
+ u64 rts_ds_mem_ctrl;
+#define XGE_HAL_RTS_DS_MEM_CTRL_WE BIT(7)
+#define XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD BIT(15)
+#define XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED BIT(15)
+#define XGE_HAL_RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
+ u64 rts_ds_mem_data;
+#define XGE_HAL_RTS_DS_MEM_DATA(n) vBIT(n,0,8)
+
+ u8 unused16_1[0x308 - 0x220];
+
+ u64 rts_vid_mem_ctrl;
+ u64 rts_vid_mem_data;
+ u64 rts_p0_p3_map;
+ u64 rts_p4_p7_map;
+ u64 rts_p8_p11_map;
+ u64 rts_p12_p15_map;
+
+ u64 rts_mac_cfg;
+#define XGE_HAL_RTS_MAC_SECT0_EN BIT(0)
+#define XGE_HAL_RTS_MAC_SECT1_EN BIT(1)
+#define XGE_HAL_RTS_MAC_SECT2_EN BIT(2)
+#define XGE_HAL_RTS_MAC_SECT3_EN BIT(3)
+#define XGE_HAL_RTS_MAC_SECT4_EN BIT(4)
+#define XGE_HAL_RTS_MAC_SECT5_EN BIT(5)
+#define XGE_HAL_RTS_MAC_SECT6_EN BIT(6)
+#define XGE_HAL_RTS_MAC_SECT7_EN BIT(7)
+
+ u8 unused16_2[0x380 - 0x340];
+
+ u64 rts_rth_cfg;
+#define XGE_HAL_RTS_RTH_EN BIT(3)
+#define XGE_HAL_RTS_RTH_BUCKET_SIZE(n) vBIT(n,4,4)
+#define XGE_HAL_RTS_RTH_ALG_SEL_MS BIT(11)
+#define XGE_HAL_RTS_RTH_TCP_IPV4_EN BIT(15)
+#define XGE_HAL_RTS_RTH_UDP_IPV4_EN BIT(19)
+#define XGE_HAL_RTS_RTH_IPV4_EN BIT(23)
+#define XGE_HAL_RTS_RTH_TCP_IPV6_EN BIT(27)
+#define XGE_HAL_RTS_RTH_UDP_IPV6_EN BIT(31)
+#define XGE_HAL_RTS_RTH_IPV6_EN BIT(35)
+#define XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN BIT(39)
+#define XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN BIT(43)
+#define XGE_HAL_RTS_RTH_IPV6_EX_EN BIT(47)
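+
+/*
+ * Enabling sketch (illustrative): receive-traffic hashing over
+ * TCP/IPv4 with a 2^8-entry indirection table might be configured as:
+ *
+ *	val64 = XGE_HAL_RTS_RTH_EN |
+ *	    XGE_HAL_RTS_RTH_BUCKET_SIZE(8) |
+ *	    XGE_HAL_RTS_RTH_TCP_IPV4_EN;
+ *	(write val64 to rts_rth_cfg)
+ */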
+
+ u64 rts_rth_map_mem_ctrl;
+#define XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE BIT(7)
+#define XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE BIT(15)
+#define XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(n) vBIT(n,24,8)
+
+ u64 rts_rth_map_mem_data;
+#define XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN BIT(3)
+#define XGE_HAL_RTS_RTH_MAP_MEM_DATA(n) vBIT(n,5,3)
+
+ u64 rts_rth_spdm_mem_ctrl;
+#define XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE BIT(15)
+#define XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(n) vBIT(n,21,3)
+#define XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(n) vBIT(n,24,8)
+
+ u64 rts_rth_spdm_mem_data;
+
+ u64 rts_rth_jhash_cfg;
+#define XGE_HAL_RTS_RTH_JHASH_GOLDEN(n) vBIT(n,0,32)
+#define XGE_HAL_RTS_RTH_JHASH_INIT_VAL(n) vBIT(n,32,32)
+
+ u64 rts_rth_hash_mask[5]; /* rth mask's 0...4 */
+ u64 rts_rth_hash_mask_5;
+#define XGE_HAL_RTH_HASH_MASK_5(n) vBIT(n,0,32)
+
+ u64 rts_rth_status;
+#define XGE_HAL_RTH_STATUS_SPDM_USE_L4 BIT(3)
+
+ u8 unused17[0x400 - 0x3E8];
+
+ u64 rmac_red_fine_q0q3;
+ u64 rmac_red_fine_q4q7;
+ u64 rmac_pthresh_cross;
+ u64 rmac_rthresh_cross;
+ u64 rmac_pnum_range[32];
+
+ u64 rmac_mp_crc_0;
+ u64 rmac_mp_mask_a_0;
+ u64 rmac_mp_mask_b_0;
+
+ u64 rmac_mp_crc_1;
+ u64 rmac_mp_mask_a_1;
+ u64 rmac_mp_mask_b_1;
+
+ u64 rmac_mp_crc_2;
+ u64 rmac_mp_mask_a_2;
+ u64 rmac_mp_mask_b_2;
+
+ u64 rmac_mp_crc_3;
+ u64 rmac_mp_mask_a_3;
+ u64 rmac_mp_mask_b_3;
+
+ u64 rmac_mp_crc_4;
+ u64 rmac_mp_mask_a_4;
+ u64 rmac_mp_mask_b_4;
+
+ u64 rmac_mp_crc_5;
+ u64 rmac_mp_mask_a_5;
+ u64 rmac_mp_mask_b_5;
+
+ u64 rmac_mp_crc_6;
+ u64 rmac_mp_mask_a_6;
+ u64 rmac_mp_mask_b_6;
+
+ u64 rmac_mp_crc_7;
+ u64 rmac_mp_mask_a_7;
+ u64 rmac_mp_mask_b_7;
+
+ u64 mac_ctrl;
+ u64 activity_control;
+
+ u8 unused17_2[0x700 - 0x5F0];
+
+ u64 mac_debug_ctrl;
+#define XGE_HAL_MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
+
+ u8 unused18[0x2800 - 0x2708];
+
+/* memory controller registers */
+ u64 mc_int_status;
+#define XGE_HAL_MC_INT_STATUS_MC_INT BIT(0)
+ u64 mc_int_mask;
+#define XGE_HAL_MC_INT_MASK_MC_INT BIT(0)
+
+ u64 mc_err_reg;
+#define XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L BIT(2) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U BIT(3) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L BIT(4) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U BIT(5) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L BIT(6)
+#define XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U BIT(7)
+#define XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L BIT(10) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U BIT(11) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L BIT(12) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U BIT(13) /* non-Xena */
+#define XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L BIT(14)
+#define XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U BIT(15)
+#define XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 BIT(17)
+#define XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 BIT(18) /* Xena: reset */
+#define XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 BIT(19)
+#define XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 BIT(20) /* Xena: reset */
+#define XGE_HAL_MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
+#define XGE_HAL_MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
+#define XGE_HAL_MC_ERR_REG_SM_ERR BIT(31)
+#define XGE_HAL_MC_ERR_REG_PL_LOCK_N BIT(39)
+
+ u64 mc_err_mask;
+ u64 mc_err_alarm;
+
+ u8 unused19[0x100 - 0x28];
+
+/* MC configuration */
+ u64 rx_queue_cfg;
+#define XGE_HAL_RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
+#define XGE_HAL_RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
+
+ u64 mc_rldram_mrs;
+#define XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE BIT(39)
+#define XGE_HAL_MC_RLDRAM_MRS_ENABLE BIT(47)
+
+ u64 mc_rldram_interleave;
+
+ u64 mc_pause_thresh_q0q3;
+ u64 mc_pause_thresh_q4q7;
+
+ u64 mc_red_thresh_q[8];
+
+ u8 unused20[0x200 - 0x168];
+ u64 mc_rldram_ref_per;
+ u8 unused21[0x220 - 0x208];
+ u64 mc_rldram_test_ctrl;
+#define XGE_HAL_MC_RLDRAM_TEST_MODE BIT(47)
+#define XGE_HAL_MC_RLDRAM_TEST_WRITE BIT(7)
+#define XGE_HAL_MC_RLDRAM_TEST_GO BIT(15)
+#define XGE_HAL_MC_RLDRAM_TEST_DONE BIT(23)
+#define XGE_HAL_MC_RLDRAM_TEST_PASS BIT(31)
+
+ u8 unused22[0x240 - 0x228];
+ u64 mc_rldram_test_add;
+ u8 unused23[0x260 - 0x248];
+ u64 mc_rldram_test_d0;
+ u8 unused24[0x280 - 0x268];
+ u64 mc_rldram_test_d1;
+ u8 unused25[0x300 - 0x288];
+ u64 mc_rldram_test_d2;
+ u8 unused26_1[0x2C00 - 0x2B08];
+ u64 mc_rldram_test_read_d0;
+ u8 unused26_2[0x20 - 0x8];
+ u64 mc_rldram_test_read_d1;
+ u8 unused26_3[0x40 - 0x28];
+ u64 mc_rldram_test_read_d2;
+ u8 unused26_4[0x60 - 0x48];
+ u64 mc_rldram_test_add_bkg;
+ u8 unused26_5[0x80 - 0x68];
+ u64 mc_rldram_test_d0_bkg;
+ u8 unused26_6[0xD00 - 0xC88];
+ u64 mc_rldram_test_d1_bkg;
+ u8 unused26_7[0x20 - 0x8];
+ u64 mc_rldram_test_d2_bkg;
+ u8 unused26_8[0x40 - 0x28];
+ u64 mc_rldram_test_read_d0_bkg;
+ u8 unused26_9[0x60 - 0x48];
+ u64 mc_rldram_test_read_d1_bkg;
+ u8 unused26_10[0x80 - 0x68];
+ u64 mc_rldram_test_read_d2_bkg;
+ u8 unused26_11[0xE00 - 0xD88];
+ u64 mc_rldram_generation;
+ u8 unused26_12[0x20 - 0x8];
+ u64 mc_driver;
+ u8 unused26_13[0x40 - 0x28];
+ u64 mc_rldram_ref_per_herc;
+#define XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(n) vBIT(n, 0, 16)
+ u8 unused26_14[0x660 - 0x648];
+ u64 mc_rldram_mrs_herc;
+#define XGE_HAL_MC_RLDRAM_MRS(n) vBIT(n, 14, 17)
+ u8 unused26_15[0x700 - 0x668];
+ u64 mc_debug_ctrl;
+
+ u8 unused27[0x3000 - 0x2f08];
+
+/* XGXS */
+ /* XGXS control registers */
+
+ u64 xgxs_int_status;
+#define XGE_HAL_XGXS_INT_STATUS_TXGXS BIT(0)
+#define XGE_HAL_XGXS_INT_STATUS_RXGXS BIT(1)
+ u64 xgxs_int_mask;
+#define XGE_HAL_XGXS_INT_MASK_TXGXS BIT(0)
+#define XGE_HAL_XGXS_INT_MASK_RXGXS BIT(1)
+
+ u64 xgxs_txgxs_err_reg;
+#define XGE_HAL_TXGXS_ECC_SG_ERR BIT(7)
+#define XGE_HAL_TXGXS_ECC_DB_ERR BIT(15)
+#define XGE_HAL_TXGXS_ESTORE_UFLOW BIT(31)
+#define XGE_HAL_TXGXS_TX_SM_ERR BIT(39)
+ u64 xgxs_txgxs_err_mask;
+ u64 xgxs_txgxs_err_alarm;
+
+ u64 xgxs_rxgxs_err_reg;
+#define XGE_HAL_RXGXS_ESTORE_OFLOW BIT(7)
+#define XGE_HAL_RXGXS_RX_SM_ERR BIT(39)
+ u64 xgxs_rxgxs_err_mask;
+ u64 xgxs_rxgxs_err_alarm;
+
+ u64 spi_err_reg;
+ u64 spi_err_mask;
+ u64 spi_err_alarm;
+
+ u8 unused28[0x100 - 0x58];
+
+ u64 xgxs_cfg;
+ u64 xgxs_status;
+
+ u64 xgxs_cfg_key;
+ u64 xgxs_efifo_cfg; /* CHANGED */
+ u64 rxgxs_ber_0; /* CHANGED */
+ u64 rxgxs_ber_1; /* CHANGED */
+
+ u64 spi_control;
+ u64 spi_data;
+ u64 spi_write_protect;
+
+ u8 unused29[0x80 - 0x48];
+
+ u64 xgxs_cfg_1;
+} xge_hal_pci_bar0_t;
+
+/* This structure is used to calculate offsets */
+typedef struct xge_hal_pci_config_le_t {
+ u16 vendor_id; // 0x00
+ u16 device_id; // 0x02
+
+ u16 command; // 0x04
+ u16 status; // 0x06
+
+ u8 revision; // 0x08
+ u8 pciClass[3]; // 0x09
+
+ u8 cache_line_size; // 0x0c
+ u8 latency_timer; // 0x0d
+ u8 header_type; // 0x0e
+ u8 bist; // 0x0f
+
+ u32 base_addr0_lo; // 0x10
+ u32 base_addr0_hi; // 0x14
+
+ u32 base_addr1_lo; // 0x18
+ u32 base_addr1_hi; // 0x1C
+
+ u32 not_Implemented1; // 0x20
+ u32 not_Implemented2; // 0x24
+
+ u32 cardbus_cis_pointer; // 0x28
+
+ u16 subsystem_vendor_id; // 0x2c
+ u16 subsystem_id; // 0x2e
+
+ u32 rom_base; // 0x30
+ u8 capabilities_pointer; // 0x34
+ u8 rsvd_35[3]; // 0x35
+ u32 rsvd_38; // 0x38
+
+ u8 interrupt_line; // 0x3c
+ u8 interrupt_pin; // 0x3d
+ u8 min_grant; // 0x3e
+ u8 max_latency; // 0x3f
+
+ u8 msi_cap_id; // 0x40
+ u8 msi_next_ptr; // 0x41
+ u16 msi_control; // 0x42
+ u32 msi_lower_address; // 0x44
+ u32 msi_higher_address; // 0x48
+ u16 msi_data; // 0x4c
+ u16 msi_unused; // 0x4e
+
+ u8 vpd_cap_id; // 0x50
+ u8 vpd_next_cap; // 0x51
+ u16 vpd_addr; // 0x52
+ u32 vpd_data; // 0x54
+
+ u8 rsvd_b0[8]; // 0x58
+
+ u8 pcix_cap; // 0x60
+ u8 pcix_next_cap; // 0x61
+ u16 pcix_command; // 0x62
+
+ u32 pcix_status; // 0x64
+
+ u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68];
+} xge_hal_pci_config_le_t; // 0x100
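+
+/*
+ * Illustrative sketch (not part of the HAL API): since the structure
+ * above mirrors the adapter's PCI configuration space byte-for-byte,
+ * offsetof() can be used to derive config-space register offsets:
+ *
+ *	offsetof(xge_hal_pci_config_le_t, msi_control)  == 0x42
+ *	offsetof(xge_hal_pci_config_le_t, pcix_command) == 0x62
+ */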
+
+typedef struct xge_hal_pci_config_t {
+#ifdef XGE_OS_HOST_BIG_ENDIAN
+ u16 device_id; // 0x02
+ u16 vendor_id; // 0x00
+
+ u16 status; // 0x06
+ u16 command; // 0x04
+
+ u8 pciClass[3]; // 0x09
+ u8 revision; // 0x08
+
+ u8 bist; // 0x0f
+ u8 header_type; // 0x0e
+ u8 latency_timer; // 0x0d
+ u8 cache_line_size; // 0x0c
+
+ u32 base_addr0_lo; // 0x10
+ u32 base_addr0_hi; // 0x14
+
+ u32 base_addr1_lo; // 0x18
+ u32 base_addr1_hi; // 0x1C
+
+ u32 not_Implemented1; // 0x20
+ u32 not_Implemented2; // 0x24
+
+ u32 cardbus_cis_pointer; // 0x28
+
+ u16 subsystem_id; // 0x2e
+ u16 subsystem_vendor_id; // 0x2c
+
+ u32 rom_base; // 0x30
+ u8 rsvd_35[3]; // 0x35
+ u8 capabilities_pointer; // 0x34
+ u32 rsvd_38; // 0x38
+
+ u8 max_latency; // 0x3f
+ u8 min_grant; // 0x3e
+ u8 interrupt_pin; // 0x3d
+ u8 interrupt_line; // 0x3c
+
+ u16 msi_control; // 0x42
+ u8 msi_next_ptr; // 0x41
+ u8 msi_cap_id; // 0x40
+ u32 msi_lower_address; // 0x44
+ u32 msi_higher_address; // 0x48
+ u16 msi_unused; // 0x4e
+ u16 msi_data; // 0x4c
+
+ u16 vpd_addr; // 0x52
+ u8 vpd_next_cap; // 0x51
+ u8 vpd_cap_id; // 0x50
+ u32 vpd_data; // 0x54
+
+ u8 rsvd_b0[8]; // 0x58
+
+ u16 pcix_command; // 0x62
+ u8 pcix_next_cap; // 0x61
+ u8 pcix_cap; // 0x60
+
+ u32 pcix_status; // 0x64
+#else
+ u16 vendor_id; // 0x00
+ u16 device_id; // 0x02
+
+ u16 command; // 0x04
+ u16 status; // 0x06
+
+ u8 revision; // 0x08
+ u8 pciClass[3]; // 0x09
+
+ u8 cache_line_size; // 0x0c
+ u8 latency_timer; // 0x0d
+ u8 header_type; // 0x0e
+ u8 bist; // 0x0f
+
+ u32 base_addr0_lo; // 0x10
+ u32 base_addr0_hi; // 0x14
+
+ u32 base_addr1_lo; // 0x18
+ u32 base_addr1_hi; // 0x1C
+
+ u32 not_Implemented1; // 0x20
+ u32 not_Implemented2; // 0x24
+
+ u32 cardbus_cis_pointer; // 0x28
+
+ u16 subsystem_vendor_id; // 0x2c
+ u16 subsystem_id; // 0x2e
+
+ u32 rom_base; // 0x30
+ u8 capabilities_pointer; // 0x34
+ u8 rsvd_35[3]; // 0x35
+ u32 rsvd_38; // 0x38
+
+ u8 interrupt_line; // 0x3c
+ u8 interrupt_pin; // 0x3d
+ u8 min_grant; // 0x3e
+ u8 max_latency; // 0x3f
+
+ u8 msi_cap_id; // 0x40
+ u8 msi_next_ptr; // 0x41
+ u16 msi_control; // 0x42
+ u32 msi_lower_address; // 0x44
+ u32 msi_higher_address; // 0x48
+ u16 msi_data; // 0x4c
+ u16 msi_unused; // 0x4e
+
+ u8 vpd_cap_id; // 0x50
+ u8 vpd_next_cap; // 0x51
+ u16 vpd_addr; // 0x52
+ u32 vpd_data; // 0x54
+
+ u8 rsvd_b0[8]; // 0x58
+
+ u8 pcix_cap; // 0x60
+ u8 pcix_next_cap; // 0x61
+ u16 pcix_command; // 0x62
+
+ u32 pcix_status; // 0x64
+
+#endif
+ u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68];
+} xge_hal_pci_config_t; // 0x100
+
+#define XGE_HAL_REG_SPACE sizeof(xge_hal_pci_bar0_t)
+#define XGE_HAL_EEPROM_SIZE (0x01 << 11)
+
+#endif /* XGE_HAL_REGS_H */
diff --git a/sys/dev/nxge/include/xgehal-ring.h b/sys/dev/nxge/include/xgehal-ring.h
new file mode 100644
index 0000000..c3efdf0
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-ring.h
@@ -0,0 +1,473 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-ring.h
+ *
+ * Description: HAL Rx ring object functionality
+ *
+ * Created: 19 May 2004
+ */
+
+#ifndef XGE_HAL_RING_H
+#define XGE_HAL_RING_H
+
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-mm.h>
+
+__EXTERN_BEGIN_DECLS
+
+/* HW ring configuration */
+#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000
+
+#define XGE_HAL_RXD_T_CODE_OK 0x0
+#define XGE_HAL_RXD_T_CODE_PARITY 0x1
+#define XGE_HAL_RXD_T_CODE_ABORT 0x2
+#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3
+#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4
+#define XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO 0x5
+#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6
+#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7
+#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8
+#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC
+#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF
+
+#define XGE_HAL_RING_USE_MTU -1
+
+/* control_1 and control_2 formatting - same for all buffer modes */
+#define XGE_HAL_RXD_GET_L3_CKSUM(control_1) ((u16)(control_1>>16) & 0xFFFF)
+#define XGE_HAL_RXD_GET_L4_CKSUM(control_1) ((u16)(control_1 & 0xFFFF))
+
+#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16)
+#define XGE_HAL_RXD_SET_VLAN_TAG(control_2, val) control_2 |= (u16)val
+#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF))
+
+#define XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */
+#define XGE_HAL_RXD_NOT_COMPLETED BIT(0) /* control_2 */
+#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define XGE_HAL_RXD_GET_T_CODE(control_1) \
+ ((control_1 & XGE_HAL_RXD_T_CODE)>>48)
+#define XGE_HAL_RXD_SET_T_CODE(control_1, val) \
+ (control_1 |= (((u64)val & 0xF) << 48))
+
+#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2)
+#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8)
+#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \
+ (u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37))
+#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \
+ (u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32)
+#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24)
+#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27)
+#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28)
+#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29)
+#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30)
+#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31)
+#define XGE_HAL_RXD_FRAME_TCP_OR_UDP (XGE_HAL_RXD_FRAME_PROTO_TCP | \
+ XGE_HAL_RXD_FRAME_PROTO_UDP)
+
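+/*
+ * Illustrative sketch (rxdp is assumed to point at a completed
+ * descriptor, e.g. an xge_hal_ring_rxd_1_t defined below):
+ *
+ *	if (XGE_HAL_RXD_GET_T_CODE(rxdp->control_1) !=
+ *	    XGE_HAL_RXD_T_CODE_OK)
+ *		(handle the receive transfer error)
+ *	else if (rxdp->control_1 & XGE_HAL_RXD_FRAME_TCP_OR_UDP)
+ *		(frame carries a TCP or UDP payload)
+ */
+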
+/**
+ * enum xge_hal_frame_type_e - Ethernet frame format.
+ * @XGE_HAL_FRAME_TYPE_DIX: DIX (Ethernet II) format.
+ * @XGE_HAL_FRAME_TYPE_LLC: LLC format.
+ * @XGE_HAL_FRAME_TYPE_SNAP: SNAP format.
+ * @XGE_HAL_FRAME_TYPE_IPX: IPX format.
+ *
+ * Ethernet frame format.
+ */
+typedef enum xge_hal_frame_type_e {
+ XGE_HAL_FRAME_TYPE_DIX = 0x0,
+ XGE_HAL_FRAME_TYPE_LLC = 0x1,
+ XGE_HAL_FRAME_TYPE_SNAP = 0x2,
+ XGE_HAL_FRAME_TYPE_IPX = 0x3,
+} xge_hal_frame_type_e;
+
+/**
+ * enum xge_hal_frame_proto_e - Higher-layer ethernet protocols.
+ * @XGE_HAL_FRAME_PROTO_VLAN_TAGGED: VLAN.
+ * @XGE_HAL_FRAME_PROTO_IPV4: IPv4.
+ * @XGE_HAL_FRAME_PROTO_IPV6: IPv6.
+ * @XGE_HAL_FRAME_PROTO_IP_FRAGMENTED: IP fragmented.
+ * @XGE_HAL_FRAME_PROTO_TCP: TCP.
+ * @XGE_HAL_FRAME_PROTO_UDP: UDP.
+ * @XGE_HAL_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
+ *
+ * Higher layer ethernet protocols and options.
+ */
+typedef enum xge_hal_frame_proto_e {
+ XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80,
+ XGE_HAL_FRAME_PROTO_IPV4 = 0x10,
+ XGE_HAL_FRAME_PROTO_IPV6 = 0x08,
+ XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04,
+ XGE_HAL_FRAME_PROTO_TCP = 0x02,
+ XGE_HAL_FRAME_PROTO_UDP = 0x01,
+ XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \
+ XGE_HAL_FRAME_PROTO_UDP)
+} xge_hal_frame_proto_e;
+
+/*
+ * xge_hal_ring_rxd_1_t
+ */
+typedef struct {
+ u64 host_control;
+ u64 control_1;
+ u64 control_2;
+#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
+#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
+#define XGE_HAL_RXD_1_GET_BUFFER0_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
+#define XGE_HAL_RXD_1_GET_RTH_VALUE(Control_2) \
+ (u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16)
+ u64 buffer0_ptr;
+} xge_hal_ring_rxd_1_t;
+
+/*
+ * xge_hal_ring_rxd_3_t
+ */
+typedef struct {
+ u64 host_control;
+ u64 control_1;
+
+ u64 control_2;
+#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8)
+#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8)
+#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
+#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
+#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
+#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
+
+
+#define XGE_HAL_RXD_3_GET_BUFFER0_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFF,8,8))>>48)
+#define XGE_HAL_RXD_3_GET_BUFFER1_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
+#define XGE_HAL_RXD_3_GET_BUFFER2_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
+
+ u64 buffer0_ptr;
+ u64 buffer1_ptr;
+ u64 buffer2_ptr;
+} xge_hal_ring_rxd_3_t;
+
+/*
+ * xge_hal_ring_rxd_5_t
+ */
+typedef struct {
+#ifdef XGE_OS_HOST_BIG_ENDIAN
+ u32 host_control;
+ u32 control_3;
+#else
+ u32 control_3;
+ u32 host_control;
+#endif
+
+
+#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16)
+#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16)
+#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16)
+#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16)
+
+#define XGE_HAL_RXD_5_GET_BUFFER3_SIZE(Control_3) \
+ (int)((Control_3 & vBIT(0xFFFF,32,16))>>16)
+#define XGE_HAL_RXD_5_GET_BUFFER4_SIZE(Control_3) \
+ (int)((Control_3 & vBIT(0xFFFF,48,16)))
+
+ u64 control_1;
+ u64 control_2;
+
+#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
+#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
+#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
+#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
+#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
+#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
+
+
+#define XGE_HAL_RXD_5_GET_BUFFER0_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
+#define XGE_HAL_RXD_5_GET_BUFFER1_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
+#define XGE_HAL_RXD_5_GET_BUFFER2_SIZE(Control_2) \
+ (int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
+ u64 buffer0_ptr;
+ u64 buffer1_ptr;
+ u64 buffer2_ptr;
+ u64 buffer3_ptr;
+ u64 buffer4_ptr;
+} xge_hal_ring_rxd_5_t;
+
+#define XGE_HAL_RXD_GET_RTH_SPDM_HIT(Control_1) \
+ (u8)((Control_1 & BIT(18))>>45)
+#define XGE_HAL_RXD_GET_RTH_IT_HIT(Control_1) \
+ (u8)((Control_1 & BIT(19))>>44)
+#define XGE_HAL_RXD_GET_RTH_HASH_TYPE(Control_1) \
+ (u8)((Control_1 & vBIT(0xF,20,4))>>40)
+
+#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0
+#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1
+#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2
+#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3
+#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4
+#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5
+#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6
+#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7
+#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8
+#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9
+
+typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE];
+
+#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8
+#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0
+
+#define XGE_HAL_RING_RXD_SIZEOF(n) \
+ (n==1 ? sizeof(xge_hal_ring_rxd_1_t) : \
+ (n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \
+ sizeof(xge_hal_ring_rxd_5_t)))
+
+#define XGE_HAL_RING_RXDS_PER_BLOCK(n) \
+ (n==1 ? 127 : (n==3 ? 85 : 63))
+
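+/*
+ * Each 0x1000-byte RxD block thus holds 32-byte (1-buffer), 48-byte
+ * (3-buffer) or 64-byte (5-buffer) descriptors, with the tail of the
+ * block reserved for the next-block pointer and memblock index
+ * (offsets 0xFF8 and 0xFF0 above), which leaves 127, 85 and 63 usable
+ * RxDs per block, respectively.
+ */
+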
+/**
+ * struct xge_hal_ring_rxd_priv_t - Receive descriptor HAL-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HAL allocates
+ * descriptors in memory blocks of
+ * %XGE_HAL_RING_RXDBLOCK_SIZE
+ * bytes. Each memblock is contiguous DMA-able memory. Each
+ * memblock contains 1 or more 4KB RxD blocks visible to the
+ * Xframe hardware.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HAL (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-receive-descriptor HAL-private data. HAL uses the space to keep DMA
+ * information associated with the descriptor. Note that ULD can ask HAL
+ * to allocate additional per-descriptor space for its own (ULD-specific)
+ * purposes.
+ */
+typedef struct xge_hal_ring_rxd_priv_t {
+ dma_addr_t dma_addr;
+ pci_dma_h dma_handle;
+ ptrdiff_t dma_offset;
+#ifdef XGE_DEBUG_ASSERT
+ xge_hal_mempool_dma_t *dma_object;
+#endif
+#ifdef XGE_OS_MEMORY_CHECK
+ int allocated;
+#endif
+} xge_hal_ring_rxd_priv_t;
+
+/**
+ * struct xge_hal_ring_t - Ring channel.
+ * @channel: Channel "base" of this ring, the common part of all HAL
+ * channels.
+ * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
+ * as per Xframe User Guide.
+ * @indicate_max_pkts: Maximum number of packets processed within a single
+ * interrupt. Can be used to limit the time spent inside the hw
+ * interrupt handler.
+ * @config: Ring configuration, part of device configuration
+ * (see xge_hal_device_config_t{}).
+ * @rxd_size: RxD size for the 1-, 3- or 5-buffer mode. As per Xframe spec,
+ * a 1-buffer mode descriptor is 32 bytes long, etc.
+ * @rxd_priv_size: Per RxD size reserved (by HAL) for ULD to keep per-descriptor
+ * data (e.g., DMA handle for Solaris)
+ * @rxds_per_block: Number of descriptors per hardware-defined RxD
+ * block. Depends on the (1-,3-,5-) buffer mode.
+ * @mempool: Memory pool, the pool from which descriptors get allocated.
+ * (See xge_hal_mm.h).
+ * @rxdblock_priv_size: Reserved at the end of each RxD block. HAL internal
+ * usage. Not to be confused with @rxd_priv_size.
+ * @reserved_rxds_arr: Array of RxD pointers. At any point in time each
+ * entry in this array is available for allocation
+ * (via xge_hal_ring_dtr_reserve()) and posting.
+ * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
+ * Used in conjunction with @indicate_max_pkts.
+ * Ring channel.
+ *
+ * Note: The structure is cache line aligned to better utilize
+ * CPU cache performance.
+ */
+typedef struct xge_hal_ring_t {
+ xge_hal_channel_t channel;
+ int buffer_mode;
+ int indicate_max_pkts;
+ xge_hal_ring_config_t *config;
+ int rxd_size;
+ int rxd_priv_size;
+ int rxds_per_block;
+ xge_hal_mempool_t *mempool;
+ int rxdblock_priv_size;
+ void **reserved_rxds_arr;
+ int cmpl_cnt;
+} __xge_os_attr_cacheline_aligned xge_hal_ring_t;
+
+/**
+ * struct xge_hal_dtr_info_t - Extended information associated with a
+ * completed ring descriptor.
+ * @l3_cksum: Result of IP checksum check (by Xframe hardware).
+ * This field containing XGE_HAL_L3_CKSUM_OK would mean that
+ * the checksum is correct, otherwise - the datagram is
+ * corrupted.
+ * @l4_cksum: Result of TCP/UDP checksum check (by Xframe hardware).
+ * This field containing XGE_HAL_L4_CKSUM_OK would mean that
+ * the checksum is correct. Otherwise - the packet is
+ * corrupted.
+ * @frame: See xge_hal_frame_type_e{}.
+ * @proto: Reporting bits for various higher-layer protocols, including (but
+ * not restricted to) TCP and UDP. See xge_hal_frame_proto_e{}.
+ * @vlan: VLAN tag extracted from the received frame.
+ * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Xframe II
+ * hardware if RTH is enabled.
+ * @rth_it_hit: Set if the RTH hash value calculated by the Xframe II hardware
+ * has a matching entry in the Indirection table.
+ * @rth_spdm_hit: Set if the RTH hash value calculated by the Xframe II hardware
+ * has a matching entry in the Socket Pair Direct Match table.
+ * @rth_hash_type: RTH hash code of the function used to calculate the hash.
+ * @reserved_pad: Unused byte.
+ */
+typedef struct xge_hal_dtr_info_t {
+ int l3_cksum;
+ int l4_cksum;
+ int frame; /* zero or more of xge_hal_frame_type_e flags */
+ int proto; /* zero or more of xge_hal_frame_proto_e flags */
+ int vlan;
+ u32 rth_value;
+ u8 rth_it_hit;
+ u8 rth_spdm_hit;
+ u8 rth_hash_type;
+ u8 reserved_pad;
+} xge_hal_dtr_info_t;
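+
+/*
+ * Illustrative sketch (dtrh is assumed to be a completed descriptor;
+ * XGE_HAL_L3_CKSUM_OK and XGE_HAL_L4_CKSUM_OK are the "OK" values
+ * referenced above):
+ *
+ *	xge_hal_dtr_info_t info;
+ *	xge_hal_ring_dtr_info_get(channelh, dtrh, &info);
+ *	if ((info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
+ *	    info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
+ *	    info.l4_cksum == XGE_HAL_L4_CKSUM_OK)
+ *		(L3/L4 checksums were verified by the hardware)
+ */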
+
+/* ========================== RING PRIVATE API ============================ */
+
+xge_hal_status_e __hal_ring_open(xge_hal_channel_h channelh,
+ xge_hal_channel_attr_t *attr);
+
+void __hal_ring_close(xge_hal_channel_h channelh);
+
+void __hal_ring_hw_initialize(xge_hal_device_h devh);
+
+void __hal_ring_mtu_set(xge_hal_device_h devh, int new_mtu);
+
+void __hal_ring_prc_enable(xge_hal_channel_h channelh);
+
+void __hal_ring_prc_disable(xge_hal_channel_h channelh);
+
+xge_hal_status_e __hal_ring_initial_replenish(xge_hal_channel_t *channel,
+ xge_hal_channel_reopen_e reopen);
+
+#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_RING)
+#define __HAL_STATIC_RING
+#define __HAL_INLINE_RING
+
+__HAL_STATIC_RING __HAL_INLINE_RING int
+__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx);
+
+__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
+__hal_ring_block_next_pointer(xge_hal_ring_block_t *block);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
+ dma_addr_t dma_next);
+
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
+__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh);
+
+/* =========================== RING PUBLIC API ============================ */
+
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void*
+xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ xge_hal_dtr_info_t *ext_info);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t *dma_pointer, int *pkt_length);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[]);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t dma_pointers[], int sizes[]);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[]);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t dma_pointer[], int sizes[]);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
+ u8 *t_code);
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh);
+
+#else /* XGE_FASTPATH_EXTERN */
+#define __HAL_STATIC_RING static
+#define __HAL_INLINE_RING inline
+#include <dev/nxge/xgehal/xgehal-ring-fp.c>
+#endif /* XGE_FASTPATH_INLINE */
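+
+/*
+ * Illustrative usage sketch (assumes an opened ring channel channelh,
+ * a hypothetical ULD receive handler, 1-buffer mode, and the
+ * XGE_HAL_OK status from xgehal-types.h):
+ *
+ *	xge_hal_dtr_h dtrh;
+ *	u8 t_code;
+ *
+ *	(posting a receive buffer)
+ *	if (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
+ *		xge_hal_ring_dtr_1b_set(dtrh, dma_pointer, buf_size);
+ *		xge_hal_ring_dtr_post(channelh, dtrh);
+ *	}
+ *
+ *	(reaping completions, e.g. from the ISR)
+ *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
+ *	    &t_code) == XGE_HAL_OK) {
+ *		dma_addr_t dma;
+ *		int pkt_length;
+ *		xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma, &pkt_length);
+ *		xge_hal_ring_dtr_free(channelh, dtrh);
+ *	}
+ */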
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_RING_H */
diff --git a/sys/dev/nxge/include/xgehal-stats.h b/sys/dev/nxge/include/xgehal-stats.h
new file mode 100644
index 0000000..ffe0e6e
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-stats.h
@@ -0,0 +1,1601 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-stats.h
+ *
+ * Description: HW statistics object
+ *
+ * Created: 2 June 2004
+ */
+
+#ifndef XGE_HAL_STATS_H
+#define XGE_HAL_STATS_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-debug.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-config.h>
+
+__EXTERN_BEGIN_DECLS
+
+/**
+ * struct xge_hal_stats_hw_info_t - Xframe hardware statistics.
+ * Transmit MAC Statistics:
+ *
+ * @tmac_frms: Count of successfully transmitted MAC
+ * frames. Note that this statistic may be inaccurate. The correct statistic
+ * may be derived by calculating (tmac_ttl_octets - tmac_ttl_less_fb_octets)/8.
+ *
+ * @tmac_data_octets: Count of data and padding octets of successfully
+ * transmitted frames.
+ *
+ * @tmac_drop_frms: Count of frames that could not be sent for no other reason
+ * than internal MAC processing. Increments once whenever the
+ * transmit buffer is flushed (due to an ECC error on a memory descriptor).
+ *
+ * @tmac_mcst_frms: Count of successfully transmitted frames to a multicast
+ * address. Does not include frames sent to the broadcast address.
+ *
+ * @tmac_bcst_frms: Count of successfully transmitted frames to the broadcast
+ * address.
+ *
+ * @tmac_pause_ctrl_frms: Count of MAC PAUSE control frames that are
+ * transmitted. Since, the only control frames supported by this device
+ * are PAUSE frames, this register is a count of all transmitted MAC control
+ * frames.
+ *
+ * @tmac_ttl_octets: Count of total octets of transmitted frames, including
+ * framing characters.
+ *
+ * @tmac_ucst_frms: Count of transmitted frames containing a unicast address.
+ * @tmac_nucst_frms: Count of transmitted frames containing a non-unicast
+ * (broadcast, multicast) address.
+ *
+ * @tmac_any_err_frms: Count of transmitted frames containing any error that
+ * prevents them from being passed to the network. Increments if there is an ECC
+ * while reading the frame out of the transmit buffer.
+ *
+ * @tmac_ttl_less_fb_octets: Count of total octets of transmitted
+ * frames, not including framing characters (i.e. less framing bits)
+ *
+ * @tmac_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ * were passed to the network. Frames that are padded by the host have
+ * their padding counted as part of the IP datagram.
+ *
+ * @tmac_vld_ip: Count of transmitted IP datagrams that were passed to the
+ * network.
+ *
+ * @tmac_drop_ip: Count of transmitted IP datagrams that could not be passed to
+ * the network. Increments because of 1) an internal processing error (such as
+ * an uncorrectable ECC error); 2) a frame parsing error during IP checksum
+ * calculation.
+ *
+ * @tmac_icmp: Count of transmitted ICMP messages. Includes messages not sent
+ * due to problems within ICMP.
+ *
+ * @tmac_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ *
+ * @tmac_tcp: Count of transmitted TCP segments. Note that Xena has
+ * no knowledge of retransmission.
+ *
+ * @tmac_udp: Count of transmitted UDP datagrams.
+ * @reserved_0: Reserved.
+ *
+ * Receive MAC Statistics:
+ * @rmac_vld_frms: Count of successfully received MAC frames. Does not include
+ * frames received with frame-too-long, FCS, or length errors.
+ *
+ * @rmac_data_octets: Count of data and padding octets of successfully received
+ * frames. Does not include frames received with frame-too-long, FCS, or length
+ * errors.
+ *
+ * @rmac_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
+ * not include frames received with frame-too-long or frame-too-short error.
+ *
+ * @rmac_drop_frms: Count of received frames that could not be passed to the
+ * host because of 1) Random Early Discard (RED); 2) Frame steering algorithm
+ * found no available queue; 3) Receive ingress buffer overflow.
+ *
+ * @rmac_vld_mcst_frms: Count of successfully received MAC frames containing a
+ * multicast address. Does not include frames received with frame-too-long, FCS,
+ * or length errors.
+ *
+ * @rmac_vld_bcst_frms: Count of successfully received MAC frames containing a
+ * broadcast address. Does not include frames received with frame-too-long, FCS,
+ * or length errors.
+ *
+ * @rmac_in_rng_len_err_frms: Count of received frames with a length/type field
+ * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500 for
+ * VLAN-tagged frames), inclusive, that does not match the number of data octets
+ * (including pad) received. Also contains a count of received frames with a
+ * length/type field less than 46 (42 for VLAN-tagged frames) and the number of
+ * data octets (including pad) received is greater than 46 (42 for VLAN-tagged
+ * frames).
+ *
+ * @rmac_out_rng_len_err_frms: Count of received frames with length/type field
+ * between 1501 and 1535 decimal, inclusive.
+ *
+ * @rmac_long_frms: Count of received frames that are longer than
+ * rmac_max_pyld_len + 18 bytes (+22 bytes if VLAN-tagged).
+ *
+ * @rmac_pause_ctrl_frms: Count of received MAC PAUSE control frames.
+ *
+ * @rmac_unsup_ctrl_frms: Count of received MAC control frames
+ * that do not contain the PAUSE opcode. The sum of MAC_PAUSE_CTRL_FRMS and this
+ * register is a count of all received MAC control frames.
+ *
+ * @rmac_ttl_octets: Count of total octets of received frames, including framing
+ * characters.
+ *
+ * @rmac_accepted_ucst_frms: Count of successfully received frames
+ * containing a unicast address. Only includes frames that are passed to the
+ * system.
+ *
+ * @rmac_accepted_nucst_frms: Count of successfully received frames
+ * containing a non-unicast (broadcast or multicast) address. Only includes
+ * frames that are passed to the system. Could include, for instance,
+ * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG register is
+ * set to pass FCS-errored frames to the host.
+ *
+ * @rmac_discarded_frms: Count of received frames containing any error that
+ * prevents them from being passed to the system. Includes, for example,
+ * received pause frames that are discarded by the MAC and frames discarded
+ * because of their destination address.
+ *
+ * @rmac_drop_events: Because the RMAC drops one frame at a time, this stat
+ * matches rmac_drop_frms.
+ *
+ * @reserved_1: Reserved.
+ * @rmac_ttl_less_fb_octets: Count of total octets of received frames,
+ * not including framing characters (i.e. less framing bits).
+ *
+ * @rmac_ttl_frms: Count of all received MAC frames, including frames received
+ * with frame-too-long, FCS, or length errors.
+ *
+ * @reserved_2: Reserved.
+ * @reserved_3: Reserved.
+ * @rmac_usized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets, that are otherwise well-formed.
+ *
+ * @rmac_osized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than 1518 octets, that are otherwise well-formed.
+ *
+ * @rmac_frag_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets that had bad FCS. In other words, counts
+ * fragments (i.e. runts).
+ *
+ * @rmac_jabber_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than MTU octets that had bad FCS. In other words, counts
+ * jabbers.
+ *
+ * @reserved_4: Reserved.
+ * @rmac_ttl_64_frms: Count of all received MAC frames with length (including
+ * FCS, but not framing bits) of exactly 64 octets. Includes frames received
+ * with frame-too-long, FCS, or length errors.
+ *
+ * @rmac_ttl_65_127_frms: Count of all received MAC frames with length
+ * (including FCS, but not framing bits) of between 65 and 127 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or length
+ * errors.
+ * @reserved_5: Reserved.
+ * @rmac_ttl_128_255_frms: Count of all received MAC frames with length
+ * (including FCS, but not framing bits) of between 128 and 255 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or length
+ * errors.
+ *
+ * @rmac_ttl_256_511_frms: Count of all received MAC frames with length
+ * (including FCS, but not framing bits) of between 256 and 511 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or length
+ * errors.
+ *
+ * @reserved_6: Reserved.
+ * @rmac_ttl_512_1023_frms: Count of all received MAC frames with length
+ * (including FCS, but not framing bits) of between 512 and 1023 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or length
+ * errors.
+ *
+ * @rmac_ttl_1024_1518_frms: Count of all received MAC frames with length
+ * (including FCS, but not framing bits) of between 1024 and 1518 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or length
+ * errors.
+ * @reserved_7: Reserved.
+ * @rmac_ip: Count of received IP datagrams. Includes errored IP datagrams.
+ *
+ * @rmac_ip_octets: Count of number of octets in received IP datagrams. Includes
+ * errored IP datagrams.
+ *
+ * @rmac_hdr_err_ip: Count of received IP datagrams that are discarded due to IP
+ * header errors.
+ *
+ * @rmac_drop_ip: Count of received IP datagrams that could not be passed to the
+ * host because of 1) Random Early Discard (RED); 2) Frame steering algorithm
+ * found no available queue; 3) Receive ingress buffer overflow.
+ * @rmac_icmp: Count of received ICMP messages. Includes errored ICMP messages
+ * (due to ICMP checksum fail).
+ *
+ * @reserved_8: Reserved.
+ * @rmac_tcp: Count of received TCP segments. Since Xena is unaware of
+ * connection context, counts all received TCP segments, regardless of whether
+ * or not they pertain to an established connection.
+ *
+ * @rmac_udp: Count of received UDP datagrams.
+ * @rmac_err_drp_udp: Count of received UDP datagrams that were not delivered to
+ * the system because of 1) Random Early Discard (RED); 2) Frame steering
+ * algorithm found no available queue; 3) Receive ingress buffer overflow.
+ *
+ * @rmac_xgmii_err_sym: Count of the number of symbol errors in the received
+ * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII). Only includes
+ * symbol errors that are observed between the XGMII Start Frame Delimiter
+ * and End Frame Delimiter, inclusive. The count is incremented at most
+ * once per frame.
+ *
+ * @rmac_frms_q0: Count of number of frames that pass through queue 0 of receive
+ * buffer.
+ * @rmac_frms_q1: Count of number of frames that pass through queue 1 of receive
+ * buffer.
+ * @rmac_frms_q2: Count of number of frames that pass through queue 2 of receive
+ * buffer.
+ * @rmac_frms_q3: Count of number of frames that pass through queue 3 of receive
+ * buffer.
+ * @rmac_frms_q4: Count of number of frames that pass through queue 4 of receive
+ * buffer.
+ * @rmac_frms_q5: Count of number of frames that pass through queue 5 of receive
+ * buffer.
+ * @rmac_frms_q6: Count of number of frames that pass through queue 6 of receive
+ * buffer.
+ * @rmac_frms_q7: Count of number of frames that pass through queue 7 of receive
+ * buffer.
+ * @rmac_full_q0: Count of number of times that receive buffer queue 0 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q1: Count of number of times that receive buffer queue 1 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q2: Count of number of times that receive buffer queue 2 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q3: Count of number of times that receive buffer queue 3 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q4: Count of number of times that receive buffer queue 4 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q5: Count of number of times that receive buffer queue 5 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q6: Count of number of times that receive buffer queue 6 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_full_q7: Count of number of times that receive buffer queue 7 has
+ * filled up. If a queue is size 0, then this stat is incremented to a value of
+ * 1 when MAC receives its first frame.
+ *
+ * @rmac_pause_cnt: Count of number of pause quanta that the MAC has been in the
+ * paused state. Recall, one pause quantum equates to 512 bit times.
+ * @reserved_9: Reserved.
+ * @rmac_xgmii_data_err_cnt: This counter is incremented when either 1) The
+ * Reconciliation Sublayer (RS) is expecting one control character and gets
+ * another (i.e. expecting Start control character and gets another control
+ * character); 2) Start control character is not in lane 0 or lane 4; 3) The RS
+ * gets a Start control character, but the start frame delimiter is not found in
+ * the correct location.
+ * @rmac_xgmii_ctrl_err_cnt: Maintains a count of unexpected or
+ * misplaced control characters occurring outside of normal data transmission
+ * (i.e. not included in RMAC_XGMII_DATA_ERR_CNT).
+ *
+ * @rmac_accepted_ip: Count of received IP datagrams that were passed to the
+ * system.
+ *
+ * @rmac_err_tcp: Count of received TCP segments containing errors. For example,
+ * bad TCP checksum.
+ *
+ * PCI (bus) Statistics:
+ * @rd_req_cnt: Counts the total number of read requests made by the device.
+ * @new_rd_req_cnt: Counts the requests made for a new read sequence (requests
+ * made for the same sequence after a retry or disconnect response are not
+ * counted).
+ * @new_rd_req_rtry_cnt: Counts the Retry responses received on the start of
+ * the new read sequences.
+ * @rd_rtry_cnt: Counts the Retry responses received for read requests.
+ * @wr_rtry_rd_ack_cnt: Increments whenever a read request is accepted by
+ * the target after a write request was terminated with retry.
+ * @wr_req_cnt: Counts the total number of Write requests made by the device.
+ * @new_wr_req_cnt: Counts the requests made for a new write sequence (requests
+ * made for the same sequence after a retry or disconnect response are not
+ * counted).
+ * @new_wr_req_rtry_cnt: Counts the Retry responses received on the start of
+ * the new write sequences.
+ *
+ * @wr_rtry_cnt: Counts the Retry responses received for write requests.
+ * @wr_disc_cnt: Write Disconnect. Counts the target initiated disconnects
+ * on write transactions.
+ * @rd_rtry_wr_ack_cnt: Increments whenever a write request is accepted by the
+ * target after a read request was terminated with retry.
+ *
+ * @txp_wr_cnt: Counts the host write transactions to the Tx Pointer
+ * FIFOs.
+ * @txd_rd_cnt: Count of the Transmit Descriptor (TxD) read requests.
+ * @txd_wr_cnt: Count of the TxD write requests.
+ * @rxd_rd_cnt: Count of the Receive Descriptor (RxD) read requests.
+ * @rxd_wr_cnt: Count of the RxD write requests.
+ * @txf_rd_cnt: Count of transmit frame read requests. This will not
+ * equal the number of frames transmitted, as frame data is typically spread
+ * across multiple PCI transactions.
+ * @rxf_wr_cnt: Count of receive frame write requests.
+ *
+ * @tmac_frms_oflow: tbd
+ * @tmac_data_octets_oflow: tbd
+ * @tmac_mcst_frms_oflow: tbd
+ * @tmac_bcst_frms_oflow: tbd
+ * @tmac_ttl_octets_oflow: tbd
+ * @tmac_ucst_frms_oflow: tbd
+ * @tmac_nucst_frms_oflow: tbd
+ * @tmac_any_err_frms_oflow: tbd
+ * @tmac_vlan_frms: tbd
+ * @tmac_vld_ip_oflow: tbd
+ * @tmac_drop_ip_oflow: tbd
+ * @tmac_icmp_oflow: tbd
+ * @tmac_rst_tcp_oflow: tbd
+ * @tmac_udp_oflow: tbd
+ * @reserved_10: tbd
+ * @tpa_unknown_protocol: tbd
+ * @tpa_parse_failure: tbd
+ * @rmac_vld_frms_oflow: tbd
+ * @rmac_data_octets_oflow: tbd
+ * @rmac_vld_mcst_frms_oflow: tbd
+ * @rmac_vld_bcst_frms_oflow: tbd
+ * @rmac_ttl_octets_oflow: tbd
+ * @rmac_accepted_ucst_frms_oflow: tbd
+ * @rmac_accepted_nucst_frms_oflow: tbd
+ * @rmac_discarded_frms_oflow: tbd
+ * @rmac_drop_events_oflow: tbd
+ * @rmac_usized_frms_oflow: tbd
+ * @rmac_osized_frms_oflow: tbd
+ * @rmac_frag_frms_oflow: tbd
+ * @rmac_jabber_frms_oflow: tbd
+ * @rmac_ip_oflow: tbd
+ * @rmac_drop_ip_oflow: tbd
+ * @rmac_icmp_oflow: tbd
+ * @rmac_udp_oflow: tbd
+ * @reserved_11: tbd
+ * @rmac_err_drp_udp_oflow: tbd
+ * @rmac_pause_cnt_oflow: tbd
+ * @rmac_ttl_1519_4095_frms: tbd
+ * @rmac_ttl_4096_8191_frms: tbd
+ * @rmac_ttl_8192_max_frms: tbd
+ * @rmac_ttl_gt_max_frms: tbd
+ * @rmac_osized_alt_frms: tbd
+ * @rmac_jabber_alt_frms: tbd
+ * @rmac_gt_max_alt_frms: tbd
+ * @rmac_vlan_frms: tbd
+ * @rmac_fcs_discard: tbd
+ * @rmac_len_discard: tbd
+ * @rmac_da_discard: tbd
+ * @rmac_pf_discard: tbd
+ * @rmac_rts_discard: tbd
+ * @rmac_wol_discard: tbd
+ * @rmac_red_discard: tbd
+ * @rmac_ingm_full_discard: tbd
+ * @rmac_accepted_ip_oflow: tbd
+ * @reserved_12: tbd
+ * @link_fault_cnt: TBD
+ * @reserved_13: tbd
+ * Xframe hardware statistics.
+ */
+typedef struct xge_hal_stats_hw_info_t {
+#ifdef XGE_OS_HOST_BIG_ENDIAN
+/* Tx MAC statistics counters. */
+ u32 tmac_frms;
+ u32 tmac_data_octets;
+ u64 tmac_drop_frms;
+ u32 tmac_mcst_frms;
+ u32 tmac_bcst_frms;
+ u64 tmac_pause_ctrl_frms;
+ u32 tmac_ttl_octets;
+ u32 tmac_ucst_frms;
+ u32 tmac_nucst_frms;
+ u32 tmac_any_err_frms;
+ u64 tmac_ttl_less_fb_octets;
+ u64 tmac_vld_ip_octets;
+ u32 tmac_vld_ip;
+ u32 tmac_drop_ip;
+ u32 tmac_icmp;
+ u32 tmac_rst_tcp;
+ u64 tmac_tcp;
+ u32 tmac_udp;
+ u32 reserved_0;
+
+/* Rx MAC Statistics counters. */
+ u32 rmac_vld_frms;
+ u32 rmac_data_octets;
+ u64 rmac_fcs_err_frms;
+ u64 rmac_drop_frms;
+ u32 rmac_vld_mcst_frms;
+ u32 rmac_vld_bcst_frms;
+ u32 rmac_in_rng_len_err_frms;
+ u32 rmac_out_rng_len_err_frms;
+ u64 rmac_long_frms;
+ u64 rmac_pause_ctrl_frms;
+ u64 rmac_unsup_ctrl_frms;
+ u32 rmac_ttl_octets;
+ u32 rmac_accepted_ucst_frms;
+ u32 rmac_accepted_nucst_frms;
+ u32 rmac_discarded_frms;
+ u32 rmac_drop_events;
+ u32 reserved_1;
+ u64 rmac_ttl_less_fb_octets;
+ u64 rmac_ttl_frms;
+ u64 reserved_2;
+ u32 reserved_3;
+ u32 rmac_usized_frms;
+ u32 rmac_osized_frms;
+ u32 rmac_frag_frms;
+ u32 rmac_jabber_frms;
+ u32 reserved_4;
+ u64 rmac_ttl_64_frms;
+ u64 rmac_ttl_65_127_frms;
+ u64 reserved_5;
+ u64 rmac_ttl_128_255_frms;
+ u64 rmac_ttl_256_511_frms;
+ u64 reserved_6;
+ u64 rmac_ttl_512_1023_frms;
+ u64 rmac_ttl_1024_1518_frms;
+ u32 reserved_7;
+ u32 rmac_ip;
+ u64 rmac_ip_octets;
+ u32 rmac_hdr_err_ip;
+ u32 rmac_drop_ip;
+ u32 rmac_icmp;
+ u32 reserved_8;
+ u64 rmac_tcp;
+ u32 rmac_udp;
+ u32 rmac_err_drp_udp;
+ u64 rmac_xgmii_err_sym;
+ u64 rmac_frms_q0;
+ u64 rmac_frms_q1;
+ u64 rmac_frms_q2;
+ u64 rmac_frms_q3;
+ u64 rmac_frms_q4;
+ u64 rmac_frms_q5;
+ u64 rmac_frms_q6;
+ u64 rmac_frms_q7;
+ u16 rmac_full_q0;
+ u16 rmac_full_q1;
+ u16 rmac_full_q2;
+ u16 rmac_full_q3;
+ u16 rmac_full_q4;
+ u16 rmac_full_q5;
+ u16 rmac_full_q6;
+ u16 rmac_full_q7;
+ u32 rmac_pause_cnt;
+ u32 reserved_9;
+ u64 rmac_xgmii_data_err_cnt;
+ u64 rmac_xgmii_ctrl_err_cnt;
+ u32 rmac_accepted_ip;
+ u32 rmac_err_tcp;
+
+/* PCI/PCI-X Read transaction statistics. */
+ u32 rd_req_cnt;
+ u32 new_rd_req_cnt;
+ u32 new_rd_req_rtry_cnt;
+ u32 rd_rtry_cnt;
+ u32 wr_rtry_rd_ack_cnt;
+
+/* PCI/PCI-X write transaction statistics. */
+ u32 wr_req_cnt;
+ u32 new_wr_req_cnt;
+ u32 new_wr_req_rtry_cnt;
+ u32 wr_rtry_cnt;
+ u32 wr_disc_cnt;
+ u32 rd_rtry_wr_ack_cnt;
+
+/* DMA Transaction statistics. */
+ u32 txp_wr_cnt;
+ u32 txd_rd_cnt;
+ u32 txd_wr_cnt;
+ u32 rxd_rd_cnt;
+ u32 rxd_wr_cnt;
+ u32 txf_rd_cnt;
+ u32 rxf_wr_cnt;
+
+/* Enhanced Herc statistics */
+ u32 tmac_frms_oflow;
+ u32 tmac_data_octets_oflow;
+ u32 tmac_mcst_frms_oflow;
+ u32 tmac_bcst_frms_oflow;
+ u32 tmac_ttl_octets_oflow;
+ u32 tmac_ucst_frms_oflow;
+ u32 tmac_nucst_frms_oflow;
+ u32 tmac_any_err_frms_oflow;
+ u64 tmac_vlan_frms;
+ u32 tmac_vld_ip_oflow;
+ u32 tmac_drop_ip_oflow;
+ u32 tmac_icmp_oflow;
+ u32 tmac_rst_tcp_oflow;
+ u32 tmac_udp_oflow;
+ u32 tpa_unknown_protocol;
+ u32 tpa_parse_failure;
+ u32 reserved_10;
+ u32 rmac_vld_frms_oflow;
+ u32 rmac_data_octets_oflow;
+ u32 rmac_vld_mcst_frms_oflow;
+ u32 rmac_vld_bcst_frms_oflow;
+ u32 rmac_ttl_octets_oflow;
+ u32 rmac_accepted_ucst_frms_oflow;
+ u32 rmac_accepted_nucst_frms_oflow;
+ u32 rmac_discarded_frms_oflow;
+ u32 rmac_drop_events_oflow;
+ u32 rmac_usized_frms_oflow;
+ u32 rmac_osized_frms_oflow;
+ u32 rmac_frag_frms_oflow;
+ u32 rmac_jabber_frms_oflow;
+ u32 rmac_ip_oflow;
+ u32 rmac_drop_ip_oflow;
+ u32 rmac_icmp_oflow;
+ u32 rmac_udp_oflow;
+ u32 rmac_err_drp_udp_oflow;
+ u32 rmac_pause_cnt_oflow;
+ u32 reserved_11;
+ u64 rmac_ttl_1519_4095_frms;
+ u64 rmac_ttl_4096_8191_frms;
+ u64 rmac_ttl_8192_max_frms;
+ u64 rmac_ttl_gt_max_frms;
+ u64 rmac_osized_alt_frms;
+ u64 rmac_jabber_alt_frms;
+ u64 rmac_gt_max_alt_frms;
+ u64 rmac_vlan_frms;
+ u32 rmac_fcs_discard;
+ u32 rmac_len_discard;
+ u32 rmac_da_discard;
+ u32 rmac_pf_discard;
+ u32 rmac_rts_discard;
+ u32 rmac_wol_discard;
+ u32 rmac_red_discard;
+ u32 rmac_ingm_full_discard;
+ u32 rmac_accepted_ip_oflow;
+ u32 reserved_12;
+ u32 link_fault_cnt;
+ u32 reserved_13;
+#else
+/* Tx MAC statistics counters. */
+ u32 tmac_data_octets;
+ u32 tmac_frms;
+ u64 tmac_drop_frms;
+ u32 tmac_bcst_frms;
+ u32 tmac_mcst_frms;
+ u64 tmac_pause_ctrl_frms;
+ u32 tmac_ucst_frms;
+ u32 tmac_ttl_octets;
+ u32 tmac_any_err_frms;
+ u32 tmac_nucst_frms;
+ u64 tmac_ttl_less_fb_octets;
+ u64 tmac_vld_ip_octets;
+ u32 tmac_drop_ip;
+ u32 tmac_vld_ip;
+ u32 tmac_rst_tcp;
+ u32 tmac_icmp;
+ u64 tmac_tcp;
+ u32 reserved_0;
+ u32 tmac_udp;
+
+/* Rx MAC Statistics counters. */
+ u32 rmac_data_octets;
+ u32 rmac_vld_frms;
+ u64 rmac_fcs_err_frms;
+ u64 rmac_drop_frms;
+ u32 rmac_vld_bcst_frms;
+ u32 rmac_vld_mcst_frms;
+ u32 rmac_out_rng_len_err_frms;
+ u32 rmac_in_rng_len_err_frms;
+ u64 rmac_long_frms;
+ u64 rmac_pause_ctrl_frms;
+ u64 rmac_unsup_ctrl_frms;
+ u32 rmac_accepted_ucst_frms;
+ u32 rmac_ttl_octets;
+ u32 rmac_discarded_frms;
+ u32 rmac_accepted_nucst_frms;
+ u32 reserved_1;
+ u32 rmac_drop_events;
+ u64 rmac_ttl_less_fb_octets;
+ u64 rmac_ttl_frms;
+ u64 reserved_2;
+ u32 rmac_usized_frms;
+ u32 reserved_3;
+ u32 rmac_frag_frms;
+ u32 rmac_osized_frms;
+ u32 reserved_4;
+ u32 rmac_jabber_frms;
+ u64 rmac_ttl_64_frms;
+ u64 rmac_ttl_65_127_frms;
+ u64 reserved_5;
+ u64 rmac_ttl_128_255_frms;
+ u64 rmac_ttl_256_511_frms;
+ u64 reserved_6;
+ u64 rmac_ttl_512_1023_frms;
+ u64 rmac_ttl_1024_1518_frms;
+ u32 rmac_ip;
+ u32 reserved_7;
+ u64 rmac_ip_octets;
+ u32 rmac_drop_ip;
+ u32 rmac_hdr_err_ip;
+ u32 reserved_8;
+ u32 rmac_icmp;
+ u64 rmac_tcp;
+ u32 rmac_err_drp_udp;
+ u32 rmac_udp;
+ u64 rmac_xgmii_err_sym;
+ u64 rmac_frms_q0;
+ u64 rmac_frms_q1;
+ u64 rmac_frms_q2;
+ u64 rmac_frms_q3;
+ u64 rmac_frms_q4;
+ u64 rmac_frms_q5;
+ u64 rmac_frms_q6;
+ u64 rmac_frms_q7;
+ u16 rmac_full_q3;
+ u16 rmac_full_q2;
+ u16 rmac_full_q1;
+ u16 rmac_full_q0;
+ u16 rmac_full_q7;
+ u16 rmac_full_q6;
+ u16 rmac_full_q5;
+ u16 rmac_full_q4;
+ u32 reserved_9;
+ u32 rmac_pause_cnt;
+ u64 rmac_xgmii_data_err_cnt;
+ u64 rmac_xgmii_ctrl_err_cnt;
+ u32 rmac_err_tcp;
+ u32 rmac_accepted_ip;
+
+/* PCI/PCI-X Read transaction statistics. */
+ u32 new_rd_req_cnt;
+ u32 rd_req_cnt;
+ u32 rd_rtry_cnt;
+ u32 new_rd_req_rtry_cnt;
+
+/* PCI/PCI-X Write/Read transaction statistics. */
+ u32 wr_req_cnt;
+ u32 wr_rtry_rd_ack_cnt;
+ u32 new_wr_req_rtry_cnt;
+ u32 new_wr_req_cnt;
+ u32 wr_disc_cnt;
+ u32 wr_rtry_cnt;
+
+/* PCI/PCI-X Write / DMA Transaction statistics. */
+ u32 txp_wr_cnt;
+ u32 rd_rtry_wr_ack_cnt;
+ u32 txd_wr_cnt;
+ u32 txd_rd_cnt;
+ u32 rxd_wr_cnt;
+ u32 rxd_rd_cnt;
+ u32 rxf_wr_cnt;
+ u32 txf_rd_cnt;
+
+/* Enhanced Herc statistics */
+ u32 tmac_data_octets_oflow;
+ u32 tmac_frms_oflow;
+ u32 tmac_bcst_frms_oflow;
+ u32 tmac_mcst_frms_oflow;
+ u32 tmac_ucst_frms_oflow;
+ u32 tmac_ttl_octets_oflow;
+ u32 tmac_any_err_frms_oflow;
+ u32 tmac_nucst_frms_oflow;
+ u64 tmac_vlan_frms;
+ u32 tmac_drop_ip_oflow;
+ u32 tmac_vld_ip_oflow;
+ u32 tmac_rst_tcp_oflow;
+ u32 tmac_icmp_oflow;
+ u32 tpa_unknown_protocol;
+ u32 tmac_udp_oflow;
+ u32 reserved_10;
+ u32 tpa_parse_failure;
+ u32 rmac_data_octets_oflow;
+ u32 rmac_vld_frms_oflow;
+ u32 rmac_vld_bcst_frms_oflow;
+ u32 rmac_vld_mcst_frms_oflow;
+ u32 rmac_accepted_ucst_frms_oflow;
+ u32 rmac_ttl_octets_oflow;
+ u32 rmac_discarded_frms_oflow;
+ u32 rmac_accepted_nucst_frms_oflow;
+ u32 rmac_usized_frms_oflow;
+ u32 rmac_drop_events_oflow;
+ u32 rmac_frag_frms_oflow;
+ u32 rmac_osized_frms_oflow;
+ u32 rmac_ip_oflow;
+ u32 rmac_jabber_frms_oflow;
+ u32 rmac_icmp_oflow;
+ u32 rmac_drop_ip_oflow;
+ u32 rmac_err_drp_udp_oflow;
+ u32 rmac_udp_oflow;
+ u32 reserved_11;
+ u32 rmac_pause_cnt_oflow;
+ u64 rmac_ttl_1519_4095_frms;
+ u64 rmac_ttl_4096_8191_frms;
+ u64 rmac_ttl_8192_max_frms;
+ u64 rmac_ttl_gt_max_frms;
+ u64 rmac_osized_alt_frms;
+ u64 rmac_jabber_alt_frms;
+ u64 rmac_gt_max_alt_frms;
+ u64 rmac_vlan_frms;
+ u32 rmac_len_discard;
+ u32 rmac_fcs_discard;
+ u32 rmac_pf_discard;
+ u32 rmac_da_discard;
+ u32 rmac_wol_discard;
+ u32 rmac_rts_discard;
+ u32 rmac_ingm_full_discard;
+ u32 rmac_red_discard;
+ u32 reserved_12;
+ u32 rmac_accepted_ip_oflow;
+ u32 reserved_13;
+ u32 link_fault_cnt;
+#endif
+} xge_hal_stats_hw_info_t;
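+
+/*
+ * Note on the two layouts above: the adapter returns the statistics
+ * block as big-endian 64-bit words, so on a little-endian host each
+ * pair of adjacent 32-bit counters (and each quad of 16-bit counters)
+ * within a 64-bit word reads back in swapped order; the #else branch
+ * mirrors the big-endian field order accordingly.
+ */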
+
+/**
+ * struct xge_hal_stats_channel_info_t - HAL channel statistics.
+ * @full_cnt: TBD
+ * @usage_max: TBD
+ * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
+ * @max_compl_per_intr_cnt: Maximum number of completions per interrupt.
+ * @avg_compl_per_intr_cnt: Average number of completions per interrupt.
+ * Note that a total number of completed descriptors
+ * for the given channel can be calculated as
+ * (@traffic_intr_cnt * @avg_compl_per_intr_cnt).
+ * @total_compl_cnt: Total completion count.
+ * @total_compl_cnt == (@traffic_intr_cnt * @avg_compl_per_intr_cnt).
+ * @total_posts: Total number of descriptor postings on the channel.
+ * Counts the number of xge_hal_ring_dtr_post()
+ * or xge_hal_fifo_dtr_post() calls by ULD, for ring and fifo
+ * channel, respectively.
+ * @total_posts_many: Total number of posts on the channel that involved
+ * more than one descriptor. Counts the number of
+ * xge_hal_fifo_dtr_post_many() calls performed by ULD.
+ * @total_buffers: Total number of buffers posted on the channel.
+ * @copied_frags: TBD
+ * @copied_buffers: TBD
+ * @avg_buffers_per_post: Average number of buffers transferred in a single
+ * post operation.
+ * Calculated as @total_buffers/@total_posts.
+ * @avg_buffer_size: Average buffer size transferred by a single post
+ * operation on a fifo channel. The counter is not supported for a ring
+ * channel. Calculated as a total number of transmitted octets divided
+ * by @total_buffers.
+ * @avg_post_size: Average amount of data transferred by a single post.
+ * Calculated as a total number of transmitted octets divided by
+ * @total_posts.
+ * @ring_bump_cnt: Ring "bump" count. Number of times the hardware could
+ * not post receive data (and had to continue keeping it on-board)
+ * because of unavailable receive descriptor(s).
+ * @total_posts_dtrs_many: Total number of posts on the channel that involved
+ * more than one descriptor.
+ * @total_posts_frags_many: Total number of fragments posted on the channel
+ * during post requests of multiple descriptors.
+ * @total_posts_dang_dtrs: Total number of posts on the channel involving
+ * dangling descriptors.
+ * @total_posts_dang_frags: Total number of dangling fragments posted on the
+ * channel during post requests containing multiple descriptors.
+ *
+ * HAL channel counters.
+ * See also: xge_hal_stats_device_info_t{}.
+ */
+typedef struct xge_hal_stats_channel_info_t {
+ u32 full_cnt;
+ u32 usage_max;
+ u32 reserve_free_swaps_cnt;
+ u32 avg_compl_per_intr_cnt;
+ u32 total_compl_cnt;
+ u32 total_posts;
+ u32 total_posts_many;
+ u32 total_buffers;
+ u32 copied_frags;
+ u32 copied_buffers;
+ u32 avg_buffers_per_post;
+ u32 avg_buffer_size;
+ u32 avg_post_size;
+ u32 ring_bump_cnt;
+ u32 total_posts_dtrs_many;
+ u32 total_posts_frags_many;
+ u32 total_posts_dang_dtrs;
+ u32 total_posts_dang_frags;
+} xge_hal_stats_channel_info_t;
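+
+/*
+ * Illustrative sketch (chinfo is assumed to be a filled-in
+ * xge_hal_stats_channel_info_t snapshot), deriving the average
+ * described for @avg_buffers_per_post above:
+ *
+ *	u32 avg_bufs = chinfo.total_posts ?
+ *	    chinfo.total_buffers / chinfo.total_posts : 0;
+ */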
+
+/**
+ * struct xge_hal_xpak_counter_t - HAL xpak error counters
+ * @excess_temp: excess transceiver_temperature count
+ * @excess_bias_current: excess laser_bias_current count
+ * @excess_laser_output: excess laser_output_power count
+ * @tick_period: tick count for each cycle
+ */
+typedef struct xge_hal_xpak_counter_t {
+ u32 excess_temp;
+ u32 excess_bias_current;
+ u32 excess_laser_output;
+ u32 tick_period;
+} xge_hal_xpak_counter_t;
+
+/**
+ * struct xge_hal_stats_xpak_t - HAL xpak stats
+ * @alarm_transceiver_temp_high: alarm_transceiver_temp_high count value
+ * @alarm_transceiver_temp_low : alarm_transceiver_temp_low count value
+ * @alarm_laser_bias_current_high: alarm_laser_bias_current_high count value
+ * @alarm_laser_bias_current_low: alarm_laser_bias_current_low count value
+ * @alarm_laser_output_power_high: alarm_laser_output_power_high count value
+ * @alarm_laser_output_power_low: alarm_laser_output_power_low count value
+ * @warn_transceiver_temp_high: warn_transceiver_temp_high count value
+ * @warn_transceiver_temp_low: warn_transceiver_temp_low count value
+ * @warn_laser_bias_current_high: warn_laser_bias_current_high count value
+ * @warn_laser_bias_current_low: warn_laser_bias_current_low count value
+ * @warn_laser_output_power_high: warn_laser_output_power_high count value
+ * @warn_laser_output_power_low: warn_laser_output_power_low count value
+ */
+typedef struct xge_hal_stats_xpak_t {
+ u16 alarm_transceiver_temp_high;
+ u16 alarm_transceiver_temp_low;
+ u16 alarm_laser_bias_current_high;
+ u16 alarm_laser_bias_current_low;
+ u16 alarm_laser_output_power_high;
+ u16 alarm_laser_output_power_low;
+ u16 warn_transceiver_temp_high;
+ u16 warn_transceiver_temp_low;
+ u16 warn_laser_bias_current_high;
+ u16 warn_laser_bias_current_low;
+ u16 warn_laser_output_power_high;
+ u16 warn_laser_output_power_low;
+} xge_hal_stats_xpak_t;
+
+
+
+/**
+ * struct xge_hal_stats_sw_err_t - HAL device error statistics.
+ * @sm_err_cnt: TBD
+ * @single_ecc_err_cnt: TBD
+ * @double_ecc_err_cnt: TBD
+ * @ecc_err_cnt: ECC error count.
+ * @parity_err_cnt: Parity error count.
+ * @serr_cnt: Number of exceptions indicated to the host via PCI SERR#.
+ * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
+ * (index) in this array reflects the transfer code type, for instance
+ * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
+ * Value rxd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
+ * (index) in this array reflects the transfer code type, for instance
+ * 0xA - "loss of link".
+ * Value txd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ * @stats_xpak: TBD
+ * @xpak_counter: TBD
+ */
+typedef struct xge_hal_stats_sw_err_t {
+ u32 sm_err_cnt;
+ u32 single_ecc_err_cnt;
+ u32 double_ecc_err_cnt;
+ u32 ecc_err_cnt;
+ u32 parity_err_cnt;
+ u32 serr_cnt;
+ u32 rxd_t_code_err_cnt[16];
+ u32 txd_t_code_err_cnt[16];
+ xge_hal_stats_xpak_t stats_xpak;
+ xge_hal_xpak_counter_t xpak_counter;
+} xge_hal_stats_sw_err_t;
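+
+/*
+ * Illustration only (not HAL code): how a driver might bump the
+ * per-transfer-code counters above. The array index is the t-code
+ * itself, as described for @rxd_t_code_err_cnt; the helper name and
+ * the XGE_HAL_EXAMPLE_CODE guard are hypothetical.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static void
+example_count_rx_tcode(xge_hal_stats_sw_err_t *sw_err, u8 t_code)
+{
+ /* 0x7 - invalid receive buffer size, 0x8 - ECC; 16 slots total. */
+ if (t_code < 16)
+  sw_err->rxd_t_code_err_cnt[t_code]++;
+}
+#endif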
+
+/**
+ * struct xge_hal_stats_device_info_t - HAL own per-device statistics.
+ *
+ * @rx_traffic_intr_cnt: TBD
+ * @tx_traffic_intr_cnt: TBD
+ * @txpic_intr_cnt: TBD
+ * @txdma_intr_cnt: TBD
+ * @txmac_intr_cnt: TBD
+ * @txxgxs_intr_cnt: TBD
+ * @rxpic_intr_cnt: TBD
+ * @rxdma_intr_cnt: TBD
+ * @rxmac_intr_cnt: TBD
+ * @rxxgxs_intr_cnt: TBD
+ * @mc_intr_cnt: TBD
+ * @not_traffic_intr_cnt: Number of times the host was interrupted
+ * without new completions.
+ * "Non-traffic interrupt counter".
+ * @not_xge_intr_cnt: TBD
+ * @traffic_intr_cnt: Number of traffic interrupts for the device.
+ * @total_intr_cnt: Total number of interrupts for the device:
+ * @total_intr_cnt == @traffic_intr_cnt +
+ * @not_traffic_intr_cnt
+ * @soft_reset_cnt: Number of times soft reset is done on this device.
+ * @rxufca_hi_adjust_cnt: TODO
+ * @rxufca_lo_adjust_cnt: TODO
+ * @bimodal_hi_adjust_cnt: TODO
+ * @bimodal_lo_adjust_cnt: TODO
+ *
+ * @tot_frms_lroised: TBD
+ * @tot_lro_sessions: TBD
+ * @lro_frm_len_exceed_cnt: TBD
+ * @lro_sg_exceed_cnt: TBD
+ * @lro_out_of_seq_pkt_cnt: TBD
+ * @lro_dup_pkt_cnt: TBD
+ *
+ * HAL per-device statistics.
+ * See also: xge_hal_stats_channel_info_t{}.
+ */
+typedef struct xge_hal_stats_device_info_t {
+ u32 rx_traffic_intr_cnt;
+ u32 tx_traffic_intr_cnt;
+ u32 txpic_intr_cnt;
+ u32 txdma_intr_cnt;
+ u32 pfc_err_cnt;
+ u32 tda_err_cnt;
+ u32 pcc_err_cnt;
+ u32 tti_err_cnt;
+ u32 lso_err_cnt;
+ u32 tpa_err_cnt;
+ u32 sm_err_cnt;
+ u32 txmac_intr_cnt;
+ u32 mac_tmac_err_cnt;
+ u32 txxgxs_intr_cnt;
+ u32 xgxs_txgxs_err_cnt;
+ u32 rxpic_intr_cnt;
+ u32 rxdma_intr_cnt;
+ u32 rc_err_cnt;
+ u32 rpa_err_cnt;
+ u32 rda_err_cnt;
+ u32 rti_err_cnt;
+ u32 rxmac_intr_cnt;
+ u32 mac_rmac_err_cnt;
+ u32 rxxgxs_intr_cnt;
+ u32 xgxs_rxgxs_err_cnt;
+ u32 mc_intr_cnt;
+ u32 not_traffic_intr_cnt;
+ u32 not_xge_intr_cnt;
+ u32 traffic_intr_cnt;
+ u32 total_intr_cnt;
+ u32 soft_reset_cnt;
+ u32 rxufca_hi_adjust_cnt;
+ u32 rxufca_lo_adjust_cnt;
+ u32 bimodal_hi_adjust_cnt;
+ u32 bimodal_lo_adjust_cnt;
+#ifdef XGE_HAL_CONFIG_LRO
+ u32 tot_frms_lroised;
+ u32 tot_lro_sessions;
+ u32 lro_frm_len_exceed_cnt;
+ u32 lro_sg_exceed_cnt;
+ u32 lro_out_of_seq_pkt_cnt;
+ u32 lro_dup_pkt_cnt;
+#endif
+} xge_hal_stats_device_info_t;
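+
+/*
+ * A quick consistency check implied by the field descriptions above
+ * (example only; the helper name and the XGE_HAL_EXAMPLE_CODE guard
+ * are hypothetical): total interrupts = traffic + non-traffic.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static int
+example_intr_counters_consistent(const xge_hal_stats_device_info_t *di)
+{
+ return (di->total_intr_cnt ==
+     di->traffic_intr_cnt + di->not_traffic_intr_cnt);
+}
+#endif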
+
+#ifdef XGEHAL_RNIC
+
+/**
+ * struct xge_hal_vp_statistics_t - Virtual Path Statistics
+ *
+ * @no_nces: Number of NCEs on Adapter in this VP
+ * @no_sqs: Number of SQs on Adapter in this VP
+ * @no_srqs: Number of SRQs on Adapter in this VP
+ * @no_cqrqs: Number of CQRQs on Adapter in this VP
+ * @no_tcp_sessions: Number of TCP sessions on Adapter in this VP
+ * @no_lro_sessions: Number of LRO sessions on Adapter in this VP
+ * @no_spdm_sessions: Number of SPDM sessions on Adapter in this VP
+ *
+ * This structure contains fields to keep statistics of virtual path
+ */
+typedef struct xge_hal_vp_statistics_t {
+ u32 no_nces;
+ u32 no_sqs;
+ u32 no_srqs;
+ u32 no_cqrqs;
+ u32 no_tcp_sessions;
+ u32 no_lro_sessions;
+ u32 no_spdm_sessions;
+} xge_hal_vp_statistics_t;
+
+#endif
+
+
+/* ========================== XFRAME ER STATISTICS ======================== */
+#define XGE_HAL_MAC_LINKS 3
+#define XGE_HAL_MAC_AGGREGATORS 2
+#define XGE_HAL_VPATHS 17
+/**
+ * struct xge_hal_stats_link_info_t - XGMAC statistics for a link
+ *
+ * @tx_frms: Count of transmitted MAC frames for the MAC link.
+ * @tx_ttl_eth_octets: Count of total octets of transmitted frames
+ * for the MAC link.
+ * @tx_data_octets: Count of data and padding octets of transmitted
+ * frames for the MAC link.
+ * @tx_mcst_frms: Count of multicast MAC frames for the MAC link.
+ * @tx_bcst_frms: Count of broadcast MAC frames for the MAC link.
+ * @tx_ucst_frms: Count of unicast MAC frames for the MAC link.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag
+ * for the MAC link.
+ * @tx_vld_ip: Count of transmitted IP datagrams for the MAC link.
+ * @tx_vld_ip_octets: Count of transmitted IP octets for the MAC link.
+ * @tx_icmp: Count of transmitted ICMP messages for the MAC link.
+ * @tx_tcp: Count of transmitted TCP segments for the MAC link.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST
+ * flag, for the MAC link.
+ * @tx_udp: Count of transmitted UDP datagrams for the MAC link.
+ * @tx_unknown_protocol: Count of transmitted packets of unknown
+ * protocol for the MAC link.
+ * @tx_parse_error: Count of transmitted packets with parsing errors
+ * for the MAC link.
+ * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames for the MAC
+ * link.
+ * @tx_lacpdu_frms: Count of LACPDUs transmitted for the MAC link.
+ * @tx_marker_pdu_frms: Count of Marker PDUs transmitted for the MAC
+ * link.
+ * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted
+ * for the MAC link.
+ * @tx_drop_ip: Count of dropped IP packets from the transmission path
+ * for the MAC link.
+ * @tx_xgmii_char1_match: Count of the number of transmitted XGMII
+ * characters that match the first pattern, for the MAC link.
+ * @tx_xgmii_char2_match: Count of the number of transmitted XGMII
+ * characters that match the second pattern, for the MAC link.
+ * @tx_xgmii_column1_match: Count of the number of transmitted XGMII
+ * columns that match the first pattern, for the MAC link.
+ * @tx_xgmii_column2_match: Count of the number of transmitted XGMII
+ * columns that match the second pattern, for the MAC link.
+ * @tx_drop_frms: Count of frames dropped due to internal errors during
+ * transmission for the MAC link.
+ * @tx_any_err_frms: Count of frames dropped due to any error during
+ * transmission for the MAC link.
+ * @rx_ttl_frms: Count of all received MAC frames for the MAC link.
+ * @rx_vld_frms: Count of all successfully received MAC frames for the
+ * MAC link.
+ * @rx_offld_frms: Count of all offloaded received MAC frames for the
+ * MAC link.
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not
+ * including framing characters, for the MAC link.
+ * @rx_data_octets: Count of data and padding octets of successfully
+ * received frames for the MAC link.
+ * @rx_offld_octets: Count of total octets, not including framing
+ * characters, of offloaded received frames for the MAC link.
+ * @rx_vld_mcst_frms: Count of successfully received multicast MAC
+ * frames for the MAC link.
+ * @rx_vld_bcst_frms: Count of successfully received broadcast MAC
+ * frames for the MAC link.
+ * @rx_accepted_ucst_frms: Count of successfully received unicast MAC
+ * frames for the MAC link.
+ * @rx_accepted_nucst_frms: Count of successfully received non-unicast
+ * MAC frames for the MAC link.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag for
+ * the MAC link.
+ * @rx_long_frms: Count of received frames that are longer than
+ * RX_MAX_PYLD_LEN + 18 bytes (+ 22 bytes if VLAN-tagged) for the MAC link.
+ * @rx_usized_frms: Count of received frames of length less than 64
+ * octets, for the MAC link.
+ * @rx_osized_frms: Count of received frames of length more than 1518
+ * octets for the MAC link.
+ * @rx_frag_frms: Count of received frames of length less than 64
+ * octets that had a bad FCS, for the MAC link.
+ * @rx_jabber_frms: Count of received frames of length more than 1518
+ * octets that had a bad FCS, for the MAC link.
+ * @rx_ttl_64_frms: Count of all received MAC frames with length of
+ * exactly 64 octets, for the MAC link.
+ * @rx_ttl_65_127_frms: Count of all received MAC frames with length
+ * of between 65 and 127 octets inclusive, for the MAC link.
+ * @rx_ttl_128_255_frms: Count of all received MAC frames with length
+ * of between 128 and 255 octets inclusive, for the MAC link.
+ * @rx_ttl_256_511_frms: Count of all received MAC frames with length
+ * of between 256 and 511 octets inclusive, for the MAC link.
+ * @rx_ttl_512_1023_frms: Count of all received MAC frames with length
+ * of between 512 and 1023 octets inclusive, for the MAC link.
+ * @rx_ttl_1024_1518_frms: Count of all received MAC frames with length
+ * of between 1024 and 1518 octets inclusive, for the MAC link.
+ * @rx_ttl_1519_4095_frms: Count of all received MAC frames with length
+ * of between 1519 and 4095 octets inclusive, for the MAC link.
+ * @rx_ttl_40956_8191_frms: Count of all received MAC frames with length
+ * of between 4096 and 8191 octets inclusive, for the MAC link.
+ * @rx_ttl_8192_max_frms: Count of all received MAC frames with length
+ * of between 8192 and RX_MAX_PYLD_LEN+18 octets inclusive, for the MAC link.
+ * @rx_ttl_gt_max_frms: Count of all received MAC frames with length
+ * exceeding RX_MAX_PYLD_LEN+18 octets, for the MAC link.
+ * @rx_ip: Count of received IP datagrams, for the MAC link.
+ * @rx_ip_octets: Count of the number of octets in received IP datagrams,
+ * for the MAC link.
+ * @rx_hdr_err_ip: Count of received IP datagrams that are discarded
+ * due to IP header errors, for the MAC link.
+ * @rx_icmp: Count of received ICMP messages for the MAC link.
+ * @rx_tcp: Count of received TCP segments for the MAC link.
+ * @rx_udp: Count of received UDP datagrams for the MAC link.
+ * @rx_err_tcp: Count of received TCP segments containing errors for
+ * the MAC link.
+ * @rx_pause_cnt: Count of the number of pause quanta that the MAC has
+ * been in the paused state, for the MAC link.
+ * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames for
+ * the MAC link.
+ * @rx_unsup_ctrl_frms: Count of received MAC control frames that do
+ * not contain the PAUSE opcode for the MAC link.
+ * @rx_in_rng_len_err_frms: Count of received frames with a length/type
+ * field value between 46 and 1500 inclusive, that does not match the number
+ * of data octets received, for the MAC link.
+ * @rx_out_rng_len_err_frms: Count of received frames with a length/type
+ * field value between 1501 and 1535 decimal, inclusive, for the MAC link.
+ * @rx_drop_frms: Count of dropped frames from the receive path for the
+ * MAC link.
+ * @rx_discarded_frms: Count of discarded frames from the receive path
+ * for the MAC link.
+ * @rx_drop_ip: Count of dropped IP datagrams from the receive path for
+ * the MAC link.
+ * @rx_err_drp_udp: Count of dropped UDP datagrams from the receive path
+ * for the MAC link.
+ * @rx_lacpdu_frms: Count of valid LACPDUs received for the MAC link.
+ * @rx_marker_pdu_frms: Count of valid Marker PDUs received for the MAC
+ * link.
+ * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs
+ * received for the MAC link.
+ * @rx_unknown_pdu_frms: Count of unknown PDUs received for the MAC link.
+ * @rx_illegal_pdu_frms: Count of illegal PDUs received for the MAC link.
+ * @rx_fcs_discard: Count of received frames that were discarded because
+ * of FCS errors, for the MAC link.
+ * @rx_len_discard: Count of received frames that were discarded
+ * because of an invalid frame length, for the MAC link.
+ * @rx_pf_discard: Count of received frames that were discarded for
+ * the MAC link.
+ * @rx_trash_discard: Count of received frames that were steered to the
+ * trash queue for the MAC link.
+ * @rx_rts_discard: Count of received frames that were discarded by RTS
+ * logic for the MAC link.
+ * @rx_wol_discard: Count of received frames that were discarded by WOL
+ * logic for the MAC link.
+ * @rx_red_discard: Count of received frames that were discarded by RED
+ * logic for the MAC link.
+ * @rx_ingm_full_discard: Count of received frames that were discarded
+ * because the internal ingress memory was full, for the MAC link.
+ * @rx_xgmii_data_err_cnt: Count of unexpected control characters
+ * during normal data transmission for the MAC link.
+ * @rx_xgmii_ctrl_err_cnt: Count of unexpected or misplaced control
+ * characters occurring between times of normal data transmission for
+ * the MAC link.
+ * @rx_xgmii_err_sym: Count of the number of symbol errors in the
+ * received XGMII data for the MAC link.
+ * @rx_xgmii_char1_match: Count of the number of XGMII characters
+ * that match the first pattern defined in MAC_STATS_RX_XGMII_CHAR_LINK_N.
+ * @rx_xgmii_char2_match: Count of the number of XGMII characters
+ * that match the second pattern defined in MAC_STATS_RX_XGMII_CHAR_LINK_N.
+ * @rx_xgmii_column1_match: Count of the number of XGMII columns
+ * that match a pattern defined in MAC_STATS_RX_XGMII_COLUMN1_LINK_N.
+ * @rx_xgmii_column2_match: Count of the number of XGMII columns
+ * that match a pattern defined in MAC_STATS_RX_XGMII_COLUMN1_LINK_N.
+ * @rx_local_fault: Count of the number of local faults for the MAC link.
+ * @rx_remote_fault: Count of the number of remote faults for the MAC
+ * link.
+ * @rx_queue_full: Count of the number of frames destined for a full
+ * queue for the MAC link.
+ */
+typedef struct xge_hal_stats_link_info_t {
+ u64 tx_frms;
+ u64 tx_ttl_eth_octets;
+ u64 tx_data_octets;
+ u64 tx_mcst_frms;
+ u64 tx_bcst_frms;
+ u64 tx_ucst_frms;
+ u64 tx_tagged_frms;
+ u64 tx_vld_ip;
+ u64 tx_vld_ip_octets;
+ u64 tx_icmp;
+ u64 tx_tcp;
+ u64 tx_rst_tcp;
+ u64 tx_udp;
+ u64 tx_unknown_protocol;
+ u64 tx_parse_error;
+ u64 tx_pause_ctrl_frms;
+ u64 tx_lacpdu_frms;
+ u64 tx_marker_pdu_frms;
+ u64 tx_marker_resp_pdu_frms;
+ u64 tx_drop_ip;
+ u64 tx_xgmii_char1_match;
+ u64 tx_xgmii_char2_match;
+ u64 tx_xgmii_column1_match;
+ u64 tx_xgmii_column2_match;
+ u64 tx_drop_frms;
+ u64 tx_any_err_frms;
+ u64 rx_ttl_frms;
+ u64 rx_vld_frms;
+ u64 rx_offld_frms;
+ u64 rx_ttl_eth_octets;
+ u64 rx_data_octets;
+ u64 rx_offld_octets;
+ u64 rx_vld_mcst_frms;
+ u64 rx_vld_bcst_frms;
+ u64 rx_accepted_ucst_frms;
+ u64 rx_accepted_nucst_frms;
+ u64 rx_tagged_frms;
+ u64 rx_long_frms;
+ u64 rx_usized_frms;
+ u64 rx_osized_frms;
+ u64 rx_frag_frms;
+ u64 rx_jabber_frms;
+ u64 rx_ttl_64_frms;
+ u64 rx_ttl_65_127_frms;
+ u64 rx_ttl_128_255_frms;
+ u64 rx_ttl_256_511_frms;
+ u64 rx_ttl_512_1023_frms;
+ u64 rx_ttl_1024_1518_frms;
+ u64 rx_ttl_1519_4095_frms;
+ u64 rx_ttl_40956_8191_frms;
+ u64 rx_ttl_8192_max_frms;
+ u64 rx_ttl_gt_max_frms;
+ u64 rx_ip;
+ u64 rx_ip_octets;
+ u64 rx_hdr_err_ip;
+ u64 rx_icmp;
+ u64 rx_tcp;
+ u64 rx_udp;
+ u64 rx_err_tcp;
+ u64 rx_pause_cnt;
+ u64 rx_pause_ctrl_frms;
+ u64 rx_unsup_ctrl_frms;
+ u64 rx_in_rng_len_err_frms;
+ u64 rx_out_rng_len_err_frms;
+ u64 rx_drop_frms;
+ u64 rx_discarded_frms;
+ u64 rx_drop_ip;
+ u64 rx_err_drp_udp;
+ u64 rx_lacpdu_frms;
+ u64 rx_marker_pdu_frms;
+ u64 rx_marker_resp_pdu_frms;
+ u64 rx_unknown_pdu_frms;
+ u64 rx_illegal_pdu_frms;
+ u64 rx_fcs_discard;
+ u64 rx_len_discard;
+ u64 rx_pf_discard;
+ u64 rx_trash_discard;
+ u64 rx_rts_discard;
+ u64 rx_wol_discard;
+ u64 rx_red_discard;
+ u64 rx_ingm_full_discard;
+ u64 rx_xgmii_data_err_cnt;
+ u64 rx_xgmii_ctrl_err_cnt;
+ u64 rx_xgmii_err_sym;
+ u64 rx_xgmii_char1_match;
+ u64 rx_xgmii_char2_match;
+ u64 rx_xgmii_column1_match;
+ u64 rx_xgmii_column2_match;
+ u64 rx_local_fault;
+ u64 rx_remote_fault;
+ u64 rx_queue_full;
+} xge_hal_stats_link_info_t;
+
+/**
+ * struct xge_hal_stats_aggr_info_t - XGMAC statistics for an aggregator
+ *
+ * @tx_frms: Count of data frames transmitted for the aggregator.
+ * @tx_mcst_frms: Count of multicast data frames transmitted for
+ * the aggregator.
+ * @tx_bcst_frms: Count of broadcast data frames transmitted for
+ * the aggregator.
+ * @tx_discarded_frms: Count of discarded data frames transmitted for
+ * the aggregator.
+ * @tx_errored_frms: Count of errored data frames transmitted for
+ * the aggregator.
+ * @rx_frms: Count of received frames for the aggregator.
+ * @rx_data_octets: Count of data and padding octets of frames received
+ * for the aggregator.
+ * @rx_mcst_frms: Count of multicast frames received for the aggregator.
+ * @rx_bcst_frms: Count of broadcast frames received for the aggregator.
+ * @rx_discarded_frms: Count of discarded frames received for the
+ * aggregator.
+ * @rx_errored_frms: Count of errored frames received for the aggregator.
+ * @rx_unknown_protocol_frms: Count of unknown protocol frames received
+ * for the aggregator.
+ */
+typedef struct xge_hal_stats_aggr_info_t {
+ u64 tx_frms;
+ u64 tx_mcst_frms;
+ u64 tx_bcst_frms;
+ u64 tx_discarded_frms;
+ u64 tx_errored_frms;
+ u64 rx_frms;
+ u64 rx_data_octets;
+ u64 rx_mcst_frms;
+ u64 rx_bcst_frms;
+ u64 rx_discarded_frms;
+ u64 rx_errored_frms;
+ u64 rx_unknown_protocol_frms;
+} xge_hal_stats_aggr_info_t;
+
+/**
+ * struct xge_hal_stats_vpath_info_t - XGMAC statistics for a vpath.
+ *
+ * @tx_frms: Count of transmitted MAC frames for the vpath.
+ * @tx_ttl_eth_octets: Count of total octets of transmitted frames
+ * for the vpath.
+ * @tx_data_octets: Count of data and padding octets of transmitted
+ * frames for the vpath.
+ * @tx_mcst_frms: Count of multicast MAC frames for the vpath.
+ * @tx_bcst_frms: Count of broadcast MAC frames for the vpath.
+ * @tx_ucst_frms: Count of unicast MAC frames for the vpath.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN
+ * tag for the vpath.
+ * @tx_vld_ip: Count of transmitted IP datagrams for the vpath.
+ * @tx_vld_ip_octets: Count of transmitted IP octets for the vpath.
+ * @tx_icmp: Count of transmitted ICMP messages for the vpath.
+ * @tx_tcp: Count of transmitted TCP segments for the vpath.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST
+ * flag, for the vpath.
+ * @tx_udp: Count of transmitted UDP datagrams for the vpath.
+ * @tx_unknown_protocol: Count of transmitted packets of unknown
+ * protocol for the vpath.
+ * @tx_parse_error: Count of transmitted packets with parsing errors
+ * for the vpath.
+ * @rx_ttl_frms: Count of all received MAC frames for the vpath.
+ * @rx_vld_frms: Count of all successfully received MAC frames for
+ * the vpath.
+ * @rx_offld_frms: Count of all offloaded received MAC frames for
+ * the vpath.
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not
+ * including framing characters for the vpath.
+ * @rx_data_octets: Count of data and padding octets of successfully
+ * received frames for the vpath.
+ * @rx_offld_octets: Count of total octets, not including framing
+ * characters, of offloaded received frames for the vpath.
+ * @rx_vld_mcst_frms: Count of successfully received multicast MAC
+ * frames for the vpath.
+ * @rx_vld_bcst_frms: Count of successfully received broadcast MAC
+ * frames for the vpath.
+ * @rx_accepted_ucst_frms: Count of successfully received unicast
+ * MAC frames for the vpath.
+ * @rx_accepted_nucst_frms: Count of successfully received
+ * non-unicast MAC frames for the vpath.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag
+ * for the vpath.
+ * @rx_long_frms: Count of received frames that are longer than
+ * RX_MAX_PYLD_LEN + 18 bytes (+ 22 bytes if VLAN-tagged) for the vpath.
+ * @rx_usized_frms: Count of received frames of length less than 64
+ * octets, for the vpath.
+ * @rx_osized_frms: Count of received frames of length more than
+ * 1518 octets for the vpath.
+ * @rx_frag_frms: Count of received frames of length less than 64
+ * octets that had bad FCS, for the vpath.
+ * @rx_jabber_frms: Count of received frames of length more than
+ * 1518 octets that had bad FCS, for the vpath.
+ * @rx_ttl_64_frms: Count of all received MAC frames with length of
+ * exactly 64 octets, for the vpath.
+ * @rx_ttl_65_127_frms: Count of all received MAC frames with length
+ * of between 65 and 127 octets inclusive, for the vpath.
+ * @rx_ttl_128_255_frms: Count of all received MAC frames with
+ * length of between 128 and 255 octets inclusive, for the vpath.
+ * @rx_ttl_256_511_frms: Count of all received MAC frames with
+ * length of between 256 and 511 octets inclusive, for the vpath.
+ * @rx_ttl_512_1023_frms: Count of all received MAC frames with
+ * length of between 512 and 1023 octets inclusive, for the vpath.
+ * @rx_ttl_1024_1518_frms: Count of all received MAC frames with
+ * length of between 1024 and 1518 octets inclusive, for the vpath.
+ * @rx_ttl_1519_4095_frms: Count of all received MAC frames with
+ * length of between 1519 and 4095 octets inclusive, for the vpath.
+ * @rx_ttl_40956_8191_frms: Count of all received MAC frames with length
+ * of between 4096 and 8191 octets inclusive, for the vpath.
+ * @rx_ttl_8192_max_frms: Count of all received MAC frames with
+ * length of between 8192 and RX_MAX_PYLD_LEN+18 octets inclusive, for the
+ * vpath.
+ * @rx_ttl_gt_max_frms: Count of all received MAC frames with length
+ * exceeding RX_MAX_PYLD_LEN+18 octets, for the vpath.
+ * @rx_ip: Count of received IP datagrams, for the vpath.
+ * @rx_accepted_ip: Count of received and accepted IP datagrams,
+ * for the vpath.
+ * @rx_ip_octets: Count of number of octets in received IP datagrams
+ * for the vpath.
+ * @rx_hdr_err_ip: Count of received IP datagrams that are discarded
+ * due to IP header errors, for the vpath.
+ * @rx_icmp: Count of received ICMP messages for the vpath.
+ * @rx_tcp: Count of received TCP segments for the vpath.
+ * @rx_udp: Count of received UDP datagrams for the vpath.
+ * @rx_err_tcp: Count of received TCP segments containing errors for
+ * the vpath.
+ * @rx_mpa_ok_frms: Count of received frames that pass the MPA
+ * checks for the vpath.
+ * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA
+ * CRC check for the vpath.
+ * @rx_mpa_mrk_fail_frms: Count of received frames that fail the
+ * MPA marker check for the vpath.
+ * @rx_mpa_len_fail_frms: Count of received frames that fail the MPA
+ * length check for the vpath.
+ * @rx_wol_frms: Count of received "magic packet" frames for
+ * the vpath.
+ */
+typedef struct xge_hal_stats_vpath_info_t {
+ u64 tx_frms;
+ u64 tx_ttl_eth_octets;
+ u64 tx_data_octets;
+ u64 tx_mcst_frms;
+ u64 tx_bcst_frms;
+ u64 tx_ucst_frms;
+ u64 tx_tagged_frms;
+ u64 tx_vld_ip;
+ u64 tx_vld_ip_octets;
+ u64 tx_icmp;
+ u64 tx_tcp;
+ u64 tx_rst_tcp;
+ u64 tx_udp;
+ u64 tx_unknown_protocol;
+ u64 tx_parse_error;
+ u64 rx_ttl_frms;
+ u64 rx_vld_frms;
+ u64 rx_offld_frms;
+ u64 rx_ttl_eth_octets;
+ u64 rx_data_octets;
+ u64 rx_offld_octets;
+ u64 rx_vld_mcst_frms;
+ u64 rx_vld_bcst_frms;
+ u64 rx_accepted_ucst_frms;
+ u64 rx_accepted_nucst_frms;
+ u64 rx_tagged_frms;
+ u64 rx_long_frms;
+ u64 rx_usized_frms;
+ u64 rx_osized_frms;
+ u64 rx_frag_frms;
+ u64 rx_jabber_frms;
+ u64 rx_ttl_64_frms;
+ u64 rx_ttl_65_127_frms;
+ u64 rx_ttl_128_255_frms;
+ u64 rx_ttl_256_511_frms;
+ u64 rx_ttl_512_1023_frms;
+ u64 rx_ttl_1024_1518_frms;
+ u64 rx_ttl_1519_4095_frms;
+ u64 rx_ttl_40956_8191_frms;
+ u64 rx_ttl_8192_max_frms;
+ u64 rx_ttl_gt_max_frms;
+ u64 rx_ip;
+ u64 rx_accepted_ip;
+ u64 rx_ip_octets;
+ u64 rx_hdr_err_ip;
+ u64 rx_icmp;
+ u64 rx_tcp;
+ u64 rx_udp;
+ u64 rx_err_tcp;
+ u64 rx_mpa_ok_frms;
+ u64 rx_mpa_crc_fail_frms;
+ u64 rx_mpa_mrk_fail_frms;
+ u64 rx_mpa_len_fail_frms;
+ u64 rx_wol_frms;
+} xge_hal_stats_vpath_info_t;
+
+/**
+ * struct xge_hal_stats_pcim_info_t - Contains PCIM statistics
+ *
+ * @link_info: PCIM links info for link 0, 1, and 2.
+ * @aggr_info: PCIM aggregators info for aggregator 0 and 1.
+ * See also: xge_hal_stats_link_info_t{}, xge_hal_stats_aggr_info_t{}.
+ */
+typedef struct xge_hal_stats_pcim_info_t {
+ xge_hal_stats_link_info_t link_info[XGE_HAL_MAC_LINKS];
+ xge_hal_stats_aggr_info_t aggr_info[XGE_HAL_MAC_AGGREGATORS];
+} xge_hal_stats_pcim_info_t;
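+
+/*
+ * Example sketch (hypothetical helper, guarded by the hypothetical
+ * XGE_HAL_EXAMPLE_CODE macro): aggregating one counter across the
+ * XGE_HAL_MAC_LINKS per-link blocks of an Xframe ER PCIM snapshot.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static u64
+example_total_tx_frms(const xge_hal_stats_pcim_info_t *pcim)
+{
+ u64 total = 0;
+ int i;
+
+ for (i = 0; i < XGE_HAL_MAC_LINKS; i++)
+  total += pcim->link_info[i].tx_frms;
+ return (total);
+}
+#endif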
+
+/**
+ * struct xge_hal_stats_t - Contains HAL per-device statistics,
+ * including hw.
+ * @devh: HAL device handle.
+ * @dma_addr: DMA address of the %hw_info. Given to the device to fill in
+ * the stats.
+ * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
+ * space.
+ * @hw_info_dma_acch: One more DMA handle used subsequently to free the
+ * DMA object. Note that this and the previous handle have
+ * physical meaning for Solaris; on Windows and Linux the
+ * corresponding value will simply be a pointer to the PCI device.
+ *
+ * @hw_info: Xframe statistics maintained by the hardware.
+ * @hw_info_saved: TBD
+ * @hw_info_latest: TBD
+ * @pcim_info: Xframe PCIM statistics maintained by the hardware.
+ * @pcim_info_saved: TBD
+ * @pcim_info_latest: TBD
+ * @sw_dev_info_stats: HAL's "soft" device informational statistics, e.g. number
+ * of completions per interrupt.
+ * @sw_dev_err_stats: HAL's "soft" device error statistics.
+ *
+ * @is_initialized: True, if all the subordinate structures are allocated and
+ * initialized.
+ * @is_enabled: True, if device stats collection is enabled.
+ *
+ * Structure-container of HAL per-device statistics. Note that per-channel
+ * statistics are kept in separate structures under HAL's fifo and ring
+ * channels.
+ * See also: xge_hal_stats_hw_info_t{}, xge_hal_stats_sw_err_t{},
+ * xge_hal_stats_device_info_t{}.
+ * See also: xge_hal_stats_channel_info_t{}.
+ */
+typedef struct xge_hal_stats_t {
+ /* handles */
+ xge_hal_device_h devh;
+ dma_addr_t dma_addr;
+ pci_dma_h hw_info_dmah;
+ pci_dma_acc_h hw_info_dma_acch;
+
+ /* HAL device hardware statistics */
+ xge_hal_stats_hw_info_t *hw_info;
+ xge_hal_stats_hw_info_t hw_info_saved;
+ xge_hal_stats_hw_info_t hw_info_latest;
+
+ /* HAL device hardware statistics for XFRAME ER */
+ xge_hal_stats_pcim_info_t *pcim_info;
+ xge_hal_stats_pcim_info_t *pcim_info_saved;
+ xge_hal_stats_pcim_info_t *pcim_info_latest;
+
+ /* HAL device "soft" stats */
+ xge_hal_stats_sw_err_t sw_dev_err_stats;
+ xge_hal_stats_device_info_t sw_dev_info_stats;
+
+ /* flags */
+ int is_initialized;
+ int is_enabled;
+} xge_hal_stats_t;
+
+/* ========================== STATS PRIVATE API ========================= */
+
+xge_hal_status_e __hal_stats_initialize(xge_hal_stats_t *stats,
+ xge_hal_device_h devh);
+
+void __hal_stats_terminate(xge_hal_stats_t *stats);
+
+void __hal_stats_enable(xge_hal_stats_t *stats);
+
+void __hal_stats_disable(xge_hal_stats_t *stats);
+
+void __hal_stats_soft_reset(xge_hal_device_h devh, int reset_all);
+
+/* ========================== STATS PUBLIC API ========================= */
+
+xge_hal_status_e xge_hal_stats_hw(xge_hal_device_h devh,
+ xge_hal_stats_hw_info_t **hw_info);
+
+xge_hal_status_e xge_hal_stats_pcim(xge_hal_device_h devh,
+ xge_hal_stats_pcim_info_t **pcim_info);
+
+xge_hal_status_e xge_hal_stats_device(xge_hal_device_h devh,
+ xge_hal_stats_device_info_t **device_info);
+
+xge_hal_status_e xge_hal_stats_channel(xge_hal_channel_h channelh,
+ xge_hal_stats_channel_info_t **channel_info);
+
+xge_hal_status_e xge_hal_stats_reset(xge_hal_device_h devh);
+
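+/*
+ * Putting the public API together -- a hedged usage sketch, not part of
+ * the HAL. Assumes an initialized device handle; xge_os_printf() comes
+ * from the OS pal layer. The function name and the XGE_HAL_EXAMPLE_CODE
+ * guard are hypothetical.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static void
+example_snapshot_stats(xge_hal_device_h devh)
+{
+ xge_hal_stats_hw_info_t *hw = NULL;
+ xge_hal_stats_device_info_t *sw = NULL;
+
+ /* Both calls return pointers into HAL-owned storage; no copies. */
+ if (xge_hal_stats_hw(devh, &hw) != XGE_HAL_OK ||
+     xge_hal_stats_device(devh, &sw) != XGE_HAL_OK)
+  return;
+
+ xge_os_printf("soft resets: %u, traffic intrs: %u",
+     sw->soft_reset_cnt, sw->traffic_intr_cnt);
+
+ /* Zero both the hardware and the software counters. */
+ (void) xge_hal_stats_reset(devh);
+}
+#endif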
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_STATS_H */
diff --git a/sys/dev/nxge/include/xgehal-types.h b/sys/dev/nxge/include/xgehal-types.h
new file mode 100644
index 0000000..ec1942b
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal-types.h
@@ -0,0 +1,626 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-types.h
+ *
+ * Description: HAL commonly used types and enumerations
+ *
+ * Created: 19 May 2004
+ */
+
+#ifndef XGE_HAL_TYPES_H
+#define XGE_HAL_TYPES_H
+
+#include <dev/nxge/include/xge-os-pal.h>
+
+__EXTERN_BEGIN_DECLS
+
+/*
+ * BIT(loc) - set bit at offset
+ */
+#define BIT(loc) (0x8000000000000000ULL >> (loc))
+
+/*
+ * vBIT(val, loc, sz) - set bits at offset
+ */
+#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
+#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
+
+/*
+ * bVALx(bits, loc) - Get the value of x bits at location
+ */
+#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1)
+#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3)
+#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7)
+#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF)
+#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F)
+#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F)
+#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F)
+#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF)
+#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF)
+#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF)
+#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF)
+#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF)
+#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF)
+#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF)
+#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF)
+#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF)
+#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF)
+#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF)
+#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF)
+#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF)
+#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF)
+#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF)
+#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF)
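+
+/*
+ * Note on bit numbering: Xframe registers treat bit 0 as the most
+ * significant bit of the 64-bit word, which is why BIT() shifts
+ * 0x8000000000000000ULL right. Worked values from the macros above:
+ *
+ *  BIT(0)                    == 0x8000000000000000ULL
+ *  vBIT(0x5, 0, 4)           == 0x5000000000000000ULL
+ *  bVAL4(vBIT(0x5, 0, 4), 0) == 0x5
+ */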
+
+#define XGE_HAL_BASE_INF 100
+#define XGE_HAL_BASE_ERR 200
+#define XGE_HAL_BASE_BADCFG 300
+
+#define XGE_HAL_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
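+
+/*
+ * Example helpers (hypothetical, guarded by the hypothetical
+ * XGE_HAL_EXAMPLE_CODE macro): the xge_hal_status_e values below are
+ * laid out as BASE + offset, so a code's band can be tested numerically.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static int
+example_status_is_err(int status)
+{
+ return (status >= XGE_HAL_BASE_ERR && status < XGE_HAL_BASE_BADCFG);
+}
+
+static int
+example_status_is_badcfg(int status)
+{
+ return (status >= XGE_HAL_BASE_BADCFG);
+}
+#endif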
+
+/**
+ * enum xge_hal_status_e - HAL return codes.
+ * @XGE_HAL_OK: Success.
+ * @XGE_HAL_FAIL: Failure.
+ * @XGE_HAL_COMPLETIONS_REMAIN: There are more completions on a channel.
+ * (specific to polling mode completion processing).
+ * @XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS: No more completed
+ * descriptors. See xge_hal_fifo_dtr_next_completed().
+ * @XGE_HAL_INF_OUT_OF_DESCRIPTORS: Out of descriptors. Channel
+ * descriptors are reserved (via xge_hal_fifo_dtr_reserve(),
+ * xge_hal_ring_dtr_reserve()) and not yet freed (via
+ * xge_hal_fifo_dtr_free(), xge_hal_ring_dtr_free()).
+ * @XGE_HAL_INF_CHANNEL_IS_NOT_READY: Channel is not ready for
+ * operation.
+ * @XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING: Indicates that the host needs
+ * to poll until PIO is executed.
+ * @XGE_HAL_INF_STATS_IS_NOT_READY: Cannot retrieve statistics because
+ * HAL and/or device is not yet initialized.
+ * @XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS: No descriptors left to
+ * reserve. Internal use only.
+ * @XGE_HAL_INF_IRQ_POLLING_CONTINUE: Returned by the ULD channel
+ * callback when instructed to exit descriptor processing loop
+ * prematurely. Typical usage: polling mode of processing completed
+ * descriptors.
+ * Upon getting one of the LRO status codes below, the LL driver shall
+ * 1) initialize the LRO struct with the mbuf if sg_num == 1;
+ * 2) else update m_data_ptr_of_mbuf to the TCP pointer and
+ * append the new mbuf to the tail of the mbuf chain in the LRO struct.
+ *
+ * @XGE_HAL_INF_LRO_BEGIN: Returned by ULD LRO module, when new LRO is
+ * being initiated.
+ * @XGE_HAL_INF_LRO_CONT: Returned by ULD LRO module, when new frame
+ * is appended at the end of existing LRO.
+ * @XGE_HAL_INF_LRO_UNCAPABLE: Returned by ULD LRO module, when new
+ * frame is not LRO capable.
+ * @XGE_HAL_INF_LRO_END_1: Returned by ULD LRO module, when new frame
+ * triggers LRO flush.
+ * @XGE_HAL_INF_LRO_END_2: Returned by ULD LRO module, when new
+ * frame triggers LRO flush. Lro frame should be flushed first then
+ * new frame should be flushed next.
+ * @XGE_HAL_INF_LRO_END_3: Returned by ULD LRO module, when new
+ * frame triggers close of current LRO session and opening of new LRO session
+ * with the frame.
+ * @XGE_HAL_INF_LRO_SESSIONS_XCDED: Returned by ULD LRO module, when no
+ * more LRO sessions can be added.
+ * @XGE_HAL_INF_NOT_ENOUGH_HW_CQES: TBD
+ * @XGE_HAL_ERR_DRIVER_NOT_INITIALIZED: HAL is not initialized.
+ * @XGE_HAL_ERR_OUT_OF_MEMORY: Out of memory (for example, when
+ * allocating descriptors).
+ * @XGE_HAL_ERR_CHANNEL_NOT_FOUND: xge_hal_channel_open will return this
+ * error if corresponding channel is not configured.
+ * @XGE_HAL_ERR_WRONG_IRQ: Returned by HAL's ISR when the latter is
+ * invoked not because of the Xframe-generated interrupt.
+ * @XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES: Returned when user tries to
+ * configure more than XGE_HAL_MAX_MAC_ADDRESSES mac addresses.
+ * @XGE_HAL_ERR_BAD_DEVICE_ID: Unknown device PCI ID.
+ * @XGE_HAL_ERR_OUT_ALIGNED_FRAGS: Too many unaligned fragments
+ * in a scatter-gather list.
+ * @XGE_HAL_ERR_DEVICE_NOT_INITIALIZED: Device is not initialized.
+ * Typically means wrong sequence of API calls.
+ * @XGE_HAL_ERR_SWAPPER_CTRL: Error during device initialization: failed
+ * to set the Xframe byte swapper in accordance with the host
+ * endianness.
+ * @XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT: Failed to restore the device to
+ * a "quiescent" state.
+ * @XGE_HAL_ERR_INVALID_MTU_SIZE: Returned when MTU size specified by
+ * caller is not in the (64, 9600) range.
+ * @XGE_HAL_ERR_OUT_OF_MAPPING: Failed to map DMA-able memory.
+ * @XGE_HAL_ERR_BAD_SUBSYSTEM_ID: Bad PCI subsystem ID. (Currently we
+ * check for zero/non-zero only.)
+ * @XGE_HAL_ERR_INVALID_BAR_ID: Invalid BAR ID. Xframe supports two Base
+ * Address Register Spaces: BAR0 (id=0) and BAR1 (id=1).
+ * @XGE_HAL_ERR_INVALID_OFFSET: Invalid offset. Example, attempt to read
+ * register value (with offset) outside of the BAR0 space.
+ * @XGE_HAL_ERR_INVALID_DEVICE: Invalid device. The HAL device handle
+ * (passed by ULD) is invalid.
+ * @XGE_HAL_ERR_OUT_OF_SPACE: Out-of-provided-buffer-space. Returned by
+ * management "get" routines when the retrieved information does
+ * not fit into the provided buffer.
+ * @XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE: Invalid bit size.
+ * @XGE_HAL_ERR_VERSION_CONFLICT: Upper-layer driver and HAL (versions)
+ * are not compatible.
+ * @XGE_HAL_ERR_INVALID_MAC_ADDRESS: Invalid MAC address.
+ * @XGE_HAL_ERR_SPDM_NOT_ENABLED: SPDM support is not enabled.
+ * @XGE_HAL_ERR_SPDM_TABLE_FULL: SPDM table is full.
+ * @XGE_HAL_ERR_SPDM_INVALID_ENTRY: Invalid SPDM entry.
+ * @XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND: Unable to locate the entry in the
+ * SPDM table.
+ * @XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT: Local SPDM table is not in
+ * sync with the actual one.
+ * @XGE_HAL_ERR_INVALID_PCI_INFO: Invalid or unrecognized PCI frequency,
+ * and/or width, and/or mode (Xframe-II only, see UG on PCI_INFO register).
+ * @XGE_HAL_ERR_CRITICAL: Critical error. Returned by HAL APIs
+ * (including xge_hal_device_handle_tcode()) on: ECC, parity, SERR.
+ * Also returned when PIO read does not go through ("all-foxes")
+ * because of "slot-freeze".
+ * @XGE_HAL_ERR_RESET_FAILED: Failed to soft-reset the device.
+ * Returned by xge_hal_device_reset(). One circumstance when it could
+ * happen: slot freeze by the system (see @XGE_HAL_ERR_CRITICAL).
+ * @XGE_HAL_ERR_TOO_MANY: This error is returned if the maximum number
+ * of sessions or queues is already allocated.
+ * @XGE_HAL_ERR_PKT_DROP: TBD
+ * @XGE_HAL_BADCFG_TX_URANGE_A: Invalid Tx link utilization range A. See
+ * the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_UFC_A: Invalid frame count for Tx link utilization
+ * range A. See the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_URANGE_B: Invalid Tx link utilization range B. See
+ * the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_UFC_B: Invalid frame count for Tx link utilization
+ * range B. See the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_URANGE_C: Invalid Tx link utilization range C. See
+ * the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_UFC_C: Invalid frame count for Tx link utilization
+ * range C. See the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_UFC_D: Invalid frame count for Tx link utilization
+ * range D. See the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_TIMER_VAL: Invalid Tx timer value. See the
+ * structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_TX_TIMER_CI_EN: Invalid Tx timer continuous interrupt
+ * enable. See the structure xge_hal_tti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_URANGE_A: Invalid Rx link utilization range A. See
+ * the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_UFC_A: Invalid frame count for Rx link utilization
+ * range A. See the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_URANGE_B: Invalid Rx link utilization range B. See
+ * the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_UFC_B: Invalid frame count for Rx link utilization
+ * range B. See the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_URANGE_C: Invalid Rx link utilization range C. See
+ * the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_UFC_C: Invalid frame count for Rx link utilization
+ * range C. See the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_UFC_D: Invalid frame count for Rx link utilization
+ * range D. See the structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RX_TIMER_VAL: Invalid Rx timer value. See the
+ * structure xge_hal_rti_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH: Invalid initial fifo queue
+ * length. See the structure xge_hal_fifo_queue_t for valid values.
+ * @XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH: Invalid fifo queue max length.
+ * See the structure xge_hal_fifo_queue_t for valid values.
+ * @XGE_HAL_BADCFG_FIFO_QUEUE_INTR: Invalid fifo queue interrupt mode.
+ * See the structure xge_hal_fifo_queue_t for valid values.
+ * @XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS: Invalid Initial number of
+ * RxD blocks for the ring. See the structure xge_hal_ring_queue_t for
+ * valid values.
+ * @XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS: Invalid maximum number of RxD
+ * blocks for the ring. See the structure xge_hal_ring_queue_t for
+ * valid values.
+ * @XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE: Invalid ring buffer mode. See
+ * the structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_RING_QUEUE_SIZE: Invalid ring queue size. See the
+ * structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_BACKOFF_INTERVAL_US: Invalid backoff timer interval
+ * for the ring. See the structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_MAX_FRM_LEN: Invalid ring max frame length. See the
+ * structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_RING_PRIORITY: Invalid ring priority. See the
+ * structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_TMAC_UTIL_PERIOD: Invalid tmac util period. See the
+ * structure xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RMAC_UTIL_PERIOD: Invalid rmac util period. See the
+ * structure xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RMAC_BCAST_EN: Invalid rmac broadcast enable. See the
+ * structure xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RMAC_HIGH_PTIME: Invalid rmac pause time. See the
+ * structure xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3: Invalid threshold for pause
+ * frame generation for queues 0 through 3. See the structure
+ * xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7:Invalid threshold for pause
+ * frame generation for queues 4 through 7. See the structure
+ * xge_hal_mac_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_FIFO_FRAGS: Invalid fifo max fragments length. See
+ * the structure xge_hal_fifo_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD: Invalid fifo reserve
+ * threshold. See the structure xge_hal_fifo_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE: Invalid fifo descriptors memblock
+ * size. See the structure xge_hal_fifo_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE: Invalid ring descriptors memblock
+ * size. See the structure xge_hal_ring_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_MAX_MTU: Invalid max mtu for the device. See the
+ * structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_ISR_POLLING_CNT: Invalid isr polling count. See the
+ * structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_LATENCY_TIMER: Invalid Latency timer. See the
+ * structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_MAX_SPLITS_TRANS: Invalid maximum number of pci-x
+ * split transactions. See the structure xge_hal_device_config_t{} for valid
+ * values.
+ * @XGE_HAL_BADCFG_MMRB_COUNT: Invalid mmrb count. See the structure
+ * xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_SHARED_SPLITS: Invalid number of outstanding split
+ * transactions that is shared by Tx and Rx requests. See the structure
+ * xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_STATS_REFRESH_TIME: Invalid time interval for
+ * automatic statistics transfer to the host. See the structure
+ * xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_PCI_FREQ_MHERZ: Invalid pci clock frequency. See the
+ * structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_PCI_MODE: Invalid pci mode. See the structure
+ * xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_INTR_MODE: Invalid interrupt mode. See the structure
+ * xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_SCHED_TIMER_US: Invalid scheduled timer interval to
+ * generate interrupt. See the structure xge_hal_device_config_t{}
+ * for valid values.
+ * @XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT: Invalid scheduled timer one
+ * shot. See the structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL: Invalid driver queue initial
+ * size. See the structure xge_hal_driver_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_QUEUE_SIZE_MAX: Invalid driver queue max size. See
+ * the structure xge_hal_driver_config_t{} for valid values.
+ * @XGE_HAL_BADCFG_RING_RTH_EN: Invalid value of RTH-enable. See
+ * the structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS: Invalid value configured for
+ * indicate_max_pkts variable.
+ * @XGE_HAL_BADCFG_TX_TIMER_AC_EN: Invalid value for Tx timer
+ * auto-cancel. See xge_hal_tti_config_t{}.
+ * @XGE_HAL_BADCFG_RX_TIMER_AC_EN: Invalid value for Rx timer
+ * auto-cancel. See xge_hal_rti_config_t{}.
+ * @XGE_HAL_BADCFG_RXUFCA_INTR_THRES: TODO
+ * @XGE_HAL_BADCFG_RXUFCA_LO_LIM: TODO
+ * @XGE_HAL_BADCFG_RXUFCA_HI_LIM: TODO
+ * @XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD: TODO
+ * @XGE_HAL_BADCFG_TRACEBUF_SIZE: Bad configuration: the size of the circular
+ * (in memory) trace buffer is either too large or too small. See the
+ * corresponding header file or README for the acceptable range.
+ * @XGE_HAL_BADCFG_LINK_VALID_CNT: Bad configuration: the link-valid
+ * counter cannot have the specified value. Note that the link-valid
+ * counting is done only at device-open time, to determine with the
+ * specified certainty that the link is up. See the corresponding
+ * header file or README for the acceptable range.
+ * See also @XGE_HAL_BADCFG_LINK_RETRY_CNT.
+ * @XGE_HAL_BADCFG_LINK_RETRY_CNT: Bad configuration: the specified
+ * link-up retry count is out of the valid range. Note that the link-up
+ * retry counting is done only at device-open time.
+ * See also xge_hal_device_config_t{}.
+ * @XGE_HAL_BADCFG_LINK_STABILITY_PERIOD: Invalid link stability period.
+ * @XGE_HAL_BADCFG_DEVICE_POLL_MILLIS: Invalid device poll interval.
+ * @XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN: TBD
+ * @XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN: TBD
+ * @XGE_HAL_BADCFG_MEDIA: TBD
+ * @XGE_HAL_BADCFG_NO_ISR_EVENTS: TBD
+ * See the structure xge_hal_device_config_t{} for valid values.
+ * @XGE_HAL_EOF_TRACE_BUF: End of the circular (in memory) trace buffer.
+ * Returned by xge_hal_mgmt_trace_read(), when user tries to read the trace
+ * past the buffer limits. Used to enable user to load the trace in two
+ * or more reads.
+ * @XGE_HAL_BADCFG_RING_RTS_MAC_EN: Invalid value of RTS_MAC_EN enable. See
+ * the structure xge_hal_ring_queue_t for valid values.
+ * @XGE_HAL_BADCFG_LRO_SG_SIZE: Invalid value of LRO scatter-gather size.
+ * See the structure xge_hal_device_config_t for valid values.
+ * @XGE_HAL_BADCFG_LRO_FRM_LEN: Invalid value of LRO frame length.
+ * See the structure xge_hal_device_config_t for valid values.
+ * @XGE_HAL_BADCFG_WQE_NUM_ODS: TBD
+ * @XGE_HAL_BADCFG_BIMODAL_INTR: Invalid value to configure bimodal
+ * interrupts.
+ * @XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US: TBD
+ * @XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US: TBD
+ * @XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED: TBD
+ * @XGE_HAL_BADCFG_RTS_QOS_EN: TBD
+ * @XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR: TBD
+ * @XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR: TBD
+ * @XGE_HAL_BADCFG_RTS_PORT_EN: TBD
+ * @XGE_HAL_BADCFG_RING_RTS_PORT_EN: TBD
+ * @XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP: TBD
+ *
+ * Enumerates status and error codes returned by HAL public
+ * API functions.
+ */
+typedef enum xge_hal_status_e {
+ XGE_HAL_OK = 0,
+ XGE_HAL_FAIL = 1,
+ XGE_HAL_COMPLETIONS_REMAIN = 2,
+
+ XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS = XGE_HAL_BASE_INF + 1,
+ XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2,
+ XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3,
+ XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4,
+ XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5,
+ XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6,
+ XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7,
+ XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8,
+ XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9,
+ XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10,
+ XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11,
+ XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12,
+ XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13,
+ XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14,
+ XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15,
+ XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1,
+ XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4,
+ XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5,
+ XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6,
+ XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7,
+ XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8,
+ XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9,
+ XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10,
+ XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11,
+ XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12,
+ XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13,
+ XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14,
+ XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15,
+ XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16,
+ XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17,
+ XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18,
+ XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19,
+ XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20,
+ XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21,
+ XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22,
+ XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23,
+ XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24,
+ XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25,
+ XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26,
+ XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT= XGE_HAL_BASE_ERR + 27,
+ XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28,
+ XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29,
+ XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30,
+ XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32,
+ XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33,
+
+ XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1,
+ XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2,
+ XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3,
+ XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4,
+ XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5,
+ XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6,
+ XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8,
+ XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9,
+ XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10,
+ XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11,
+ XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12,
+ XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13,
+ XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14,
+ XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15,
+ XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16,
+ XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17,
+ XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18,
+ XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19,
+ XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH = XGE_HAL_BASE_BADCFG + 20,
+ XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21,
+ XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG + 22,
+ XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23,
+ XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24,
+ XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25,
+ XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26,
+ XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27,
+ XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28,
+ XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29,
+ XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30,
+ XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31,
+ XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32,
+ XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG + 33,
+ XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34,
+ XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35,
+ XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37,
+ XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38,
+ XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39,
+ XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40,
+ XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41,
+ XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42,
+ XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43,
+ XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44,
+ XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45,
+ XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46,
+ XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47,
+ XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48,
+ XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49,
+ XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50,
+ XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51,
+ XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52,
+ XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53,
+ XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54,
+ XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55,
+ XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56,
+ XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57,
+ XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58,
+ XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59,
+ XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60,
+ XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61,
+ XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62,
+ XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63,
+ XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64,
+ XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65,
+ XGE_HAL_BADCFG_DEVICE_POLL_MILLIS = XGE_HAL_BASE_BADCFG + 66,
+ XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67,
+ XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68,
+ XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69,
+ XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70,
+ XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71,
+ XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72,
+ XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73,
+ XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74,
+ XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75,
+ XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76,
+ XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77,
+ XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78,
+ XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79,
+ XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80,
+ XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 81,
+ XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82,
+ XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83,
+ XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84,
+ XGE_HAL_EOF_TRACE_BUF = -1
+} xge_hal_status_e;
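+
+/*
+ * Sketch (example only) of how a link-layer driver might dispatch on
+ * the LRO status codes documented above. The producer of the status
+ * code and the XGE_HAL_EXAMPLE_CODE guard are assumptions, not HAL API.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+static void
+example_handle_lro_status(xge_hal_status_e status)
+{
+ switch (status) {
+ case XGE_HAL_INF_LRO_BEGIN:
+  /* Initialize a new LRO session with this frame. */
+  break;
+ case XGE_HAL_INF_LRO_CONT:
+  /* Append the frame to the session's mbuf chain. */
+  break;
+ case XGE_HAL_INF_LRO_UNCAPABLE:
+  /* Frame cannot be aggregated; indicate it up as-is. */
+  break;
+ case XGE_HAL_INF_LRO_END_1:
+ case XGE_HAL_INF_LRO_END_2:
+ case XGE_HAL_INF_LRO_END_3:
+  /* Flush the session; END_2/END_3 order the frame flush too. */
+  break;
+ default:
+  break;
+ }
+}
+#endif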
+
+#define XGE_HAL_ETH_ALEN 6
+typedef u8 macaddr_t[XGE_HAL_ETH_ALEN];
+
+#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100
+
+/* frames sizes */
+#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14
+#define XGE_HAL_HEADER_802_2_SIZE 3
+#define XGE_HAL_HEADER_SNAP_SIZE 5
+#define XGE_HAL_HEADER_VLAN_SIZE 4
+#define XGE_HAL_MAC_HEADER_MAX_SIZE \
+ (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \
+ XGE_HAL_HEADER_802_2_SIZE + \
+ XGE_HAL_HEADER_SNAP_SIZE)
+
+#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64)
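+
+/*
+ * Example (illustrative): the worst-case on-wire frame size for a given
+ * MTU is the MTU plus the largest L2 header, an optional VLAN tag, and
+ * the 4-byte FCS. The FCS constant is an assumption made here; it is
+ * not defined by this header. The guard macro is hypothetical.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+#define EXAMPLE_FCS_SIZE 4
+#define EXAMPLE_MAX_FRAME_LEN(mtu) \
+ ((mtu) + XGE_HAL_MAC_HEADER_MAX_SIZE + \
+  XGE_HAL_HEADER_VLAN_SIZE + EXAMPLE_FCS_SIZE)
+#endif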
+
+/* 32bit alignments */
+#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2
+#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2
+#define XGE_HAL_HEADER_802_2_ALIGN 3
+#define XGE_HAL_HEADER_SNAP_ALIGN 1
+
+#define XGE_HAL_L3_CKSUM_OK 0xFFFF
+#define XGE_HAL_L4_CKSUM_OK 0xFFFF
+#define XGE_HAL_MIN_MTU 46
+#define XGE_HAL_MAX_MTU 9600
+#define XGE_HAL_DEFAULT_MTU 1500
+
+#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920
+
+#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */
+#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */
+
+#define XGE_HAL_MAX_MSIX_MESSAGES 64
+#define XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR (XGE_HAL_MAX_MSIX_MESSAGES * 2)
+/* Highest level interrupt blocks */
+#define XGE_HAL_TX_PIC_INTR (0x0001<<0)
+#define XGE_HAL_TX_DMA_INTR (0x0001<<1)
+#define XGE_HAL_TX_MAC_INTR (0x0001<<2)
+#define XGE_HAL_TX_XGXS_INTR (0x0001<<3)
+#define XGE_HAL_TX_TRAFFIC_INTR (0x0001<<4)
+#define XGE_HAL_RX_PIC_INTR (0x0001<<5)
+#define XGE_HAL_RX_DMA_INTR (0x0001<<6)
+#define XGE_HAL_RX_MAC_INTR (0x0001<<7)
+#define XGE_HAL_RX_XGXS_INTR (0x0001<<8)
+#define XGE_HAL_RX_TRAFFIC_INTR (0x0001<<9)
+#define XGE_HAL_MC_INTR (0x0001<<10)
+#define XGE_HAL_SCHED_INTR (0x0001<<11)
+#define XGE_HAL_ALL_INTRS (XGE_HAL_TX_PIC_INTR | \
+ XGE_HAL_TX_DMA_INTR | \
+ XGE_HAL_TX_MAC_INTR | \
+ XGE_HAL_TX_XGXS_INTR | \
+ XGE_HAL_TX_TRAFFIC_INTR | \
+ XGE_HAL_RX_PIC_INTR | \
+ XGE_HAL_RX_DMA_INTR | \
+ XGE_HAL_RX_MAC_INTR | \
+ XGE_HAL_RX_XGXS_INTR | \
+ XGE_HAL_RX_TRAFFIC_INTR | \
+ XGE_HAL_MC_INTR | \
+ XGE_HAL_SCHED_INTR)
+#define XGE_HAL_GEN_MASK_INTR (0x0001<<12)
+
+/* Interrupt masks for the general interrupt mask register */
+#define XGE_HAL_ALL_INTRS_DIS 0xFFFFFFFFFFFFFFFFULL
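+
+/*
+ * Example (illustrative): the per-block bits above compose with OR. A
+ * driver interested only in traffic interrupts could build the mask
+ * below; how the mask is applied is device-API specific and not shown.
+ * The guard macro is hypothetical.
+ */
+#ifdef XGE_HAL_EXAMPLE_CODE
+#define EXAMPLE_TRAFFIC_ONLY_INTRS \
+ (XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR)
+#endif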
+
+#define XGE_HAL_TXPIC_INT_M BIT(0)
+#define XGE_HAL_TXDMA_INT_M BIT(1)
+#define XGE_HAL_TXMAC_INT_M BIT(2)
+#define XGE_HAL_TXXGXS_INT_M BIT(3)
+#define XGE_HAL_TXTRAFFIC_INT_M BIT(8)
+#define XGE_HAL_PIC_RX_INT_M BIT(32)
+#define XGE_HAL_RXDMA_INT_M BIT(33)
+#define XGE_HAL_RXMAC_INT_M BIT(34)
+#define XGE_HAL_MC_INT_M BIT(35)
+#define XGE_HAL_RXXGXS_INT_M BIT(36)
+#define XGE_HAL_RXTRAFFIC_INT_M BIT(40)
+
+/* MSI level Interrupts */
+#define XGE_HAL_MAX_MSIX_VECTORS (16)
+
+typedef struct xge_hal_ipv4 {
+	u32 addr;
+} xge_hal_ipv4;
+
+typedef struct xge_hal_ipv6 {
+	u64 addr[2];
+} xge_hal_ipv6;
+
+typedef union xge_hal_ipaddr_t {
+	xge_hal_ipv4 ipv4;
+	xge_hal_ipv6 ipv6;
+} xge_hal_ipaddr_t;
+
+/* DMA level Interrupts */
+#define XGE_HAL_TXDMA_PFC_INT_M BIT(0)
+
+/* PFC block interrupts */
+#define XGE_HAL_PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */
+
+/* basic handles */
+typedef void* xge_hal_device_h;
+typedef void* xge_hal_dtr_h;
+typedef void* xge_hal_channel_h;
+#ifdef XGEHAL_RNIC
+typedef void* xge_hal_towi_h;
+typedef void* xge_hal_hw_wqe_h;
+typedef void* xge_hal_hw_cqe_h;
+typedef void* xge_hal_lro_wqe_h;
+typedef void* xge_hal_lro_cqe_h;
+typedef void* xge_hal_up_msg_h;
+typedef void* xge_hal_down_msg_h;
+typedef void* xge_hal_channel_callback_fh;
+typedef void* xge_hal_msg_queueh;
+typedef void* xge_hal_pblist_h;
+#endif
+/*
+ * I2C device id. Used in I2C control register for accessing EEPROM device
+ * memory.
+ */
+#define XGE_DEV_ID 5
+
+typedef enum xge_hal_xpak_alarm_type_e {
+ XGE_HAL_XPAK_ALARM_EXCESS_TEMP = 1,
+ XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT = 2,
+ XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT = 3,
+} xge_hal_xpak_alarm_type_e;
+
+
+__EXTERN_END_DECLS
+
+#endif /* XGE_HAL_TYPES_H */
diff --git a/sys/dev/nxge/include/xgehal.h b/sys/dev/nxge/include/xgehal.h
new file mode 100644
index 0000000..4c3c08a
--- /dev/null
+++ b/sys/dev/nxge/include/xgehal.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal.h
+ *
+ * Description: Consolidated header. Upper layers should include it to
+ * avoid include order problems.
+ *
+ * Created: 14 May 2004
+ */
+
+#ifndef XGE_HAL_H
+#define XGE_HAL_H
+
+#include <dev/nxge/include/xge-defs.h>
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xge-debug.h>
+#include <dev/nxge/include/xgehal-types.h>
+#include <dev/nxge/include/xgehal-driver.h>
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xgehal-device.h>
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-fifo.h>
+#include <dev/nxge/include/xgehal-ring.h>
+#include <dev/nxge/include/xgehal-mgmt.h>
+
+#endif /* XGE_HAL_H */
diff --git a/sys/dev/nxge/xge-osdep.h b/sys/dev/nxge/xge-osdep.h
new file mode 100644
index 0000000..b2c448d
--- /dev/null
+++ b/sys/dev/nxge/xge-osdep.h
@@ -0,0 +1,758 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * xge-osdep.h
+ *
+ * Platform-dependent "glue" code
+ */
+
+#ifndef XGE_OSDEP_H
+#define XGE_OSDEP_H
+
+/******************************************
+ * Includes and defines
+ ******************************************/
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/stddef.h>
+#include <sys/types.h>
+#include <sys/sockio.h>
+#include <sys/proc.h>
+#include <sys/endian.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/clock.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pci_private.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_var.h>
+#include <net/bpf.h>
+#include <net/if_types.h>
+
+
+#define XGE_OS_PLATFORM_64BIT
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define XGE_OS_HOST_BIG_ENDIAN 1
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define XGE_OS_HOST_LITTLE_ENDIAN 1
+#endif
+
+#define XGE_HAL_USE_5B_MODE 1
+#define XGE_HAL_PROCESS_LINK_INT_IN_ISR 1
+#define OS_NETSTACK_BUF struct mbuf *
+#define XGE_LL_IP_FAST_CSUM(hdr, len) 0
+
+#define xge_os_ntohs ntohs
+#define xge_os_ntohl ntohl
+#define xge_os_htons htons
+#define xge_os_htonl htonl
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+typedef struct busresources {
+	bus_space_tag_t bus_tag; /* Bus space tag */
+ bus_space_handle_t bus_handle; /* Bus handle */
+ struct resource *bar_start_addr;/* BAR start address */
+} busresource_t;
+
+typedef struct xge_dma_alloc {
+ bus_addr_t dma_phyaddr; /* Physical Address */
+ caddr_t dma_viraddr; /* Virtual Address */
+ bus_dma_tag_t dma_tag; /* DMA Tag */
+ bus_dmamap_t dma_map; /* DMA Map */
+ bus_dma_segment_t dma_segment; /* DMA Segment */
+ bus_size_t dma_size; /* Size */
+ int dma_nseg; /* Maximum scatter-gather segs. */
+} xdma;
+
+struct xge_dma_mbuf {
+ bus_addr_t dma_phyaddr; /* Physical Address */
+ bus_dmamap_t dma_map; /* DMA Map */
+};
+
+typedef struct pci_info {
+ device_t device; /* Device */
+ struct resource *regmap0; /* Resource for BAR0 */
+ struct resource *regmap1; /* Resource for BAR1 */
+ void *bar0resource; /* BAR0 tag and handle */
+ void *bar1resource; /* BAR1 tag and handle */
+} pci_info_t;
+
+
+/******************************************
+ * Fixed size primitive types
+ ******************************************/
+#define u8 uint8_t
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+#define ulong_t unsigned long
+#define uint unsigned int
+#define ptrdiff_t ptrdiff_t
+typedef bus_addr_t dma_addr_t;
+typedef struct mtx spinlock_t;
+typedef pci_info_t *pci_dev_h;
+typedef busresource_t *pci_reg_h;
+typedef struct xge_dma_alloc pci_dma_h;
+typedef struct resource *pci_irq_h;
+typedef pci_info_t *pci_cfg_h;
+typedef struct xge_dma_alloc pci_dma_acc_h;
+
+/******************************************
+ * "libc" functionality
+ ******************************************/
+#define xge_os_memzero(addr, size) bzero(addr, size)
+#define xge_os_memcpy(dst, src, size) bcopy(src, dst, size)
+#define xge_os_memcmp memcmp
+#define xge_os_strcpy strcpy
+#define xge_os_strlen strlen
+#define xge_os_snprintf snprintf
+#define xge_os_sprintf sprintf
+#define xge_os_printf(fmt...) { \
+ printf(fmt); \
+ printf("\n"); \
+}
+
+#define xge_os_vaprintf(fmt) { \
+	va_list va; \
+	va_start(va, fmt); \
+	vprintf(fmt, va); \
+	printf("\n"); \
+	va_end(va); \
+}
+
+#define xge_os_vasprintf(buf, fmt) { \
+	va_list va; \
+	va_start(va, fmt); \
+	(void) vsprintf(buf, fmt, va); \
+	va_end(va); \
+}
+
+#define xge_os_timestamp(buf) { \
+	struct timeval current_time; \
+	microtime(&current_time); \
+	sprintf(buf, "%08li.%08li: ", current_time.tv_sec, \
+	    current_time.tv_usec); \
+}
+
+#define xge_os_println xge_os_printf
+
+/******************************************
+ * Synchronization Primitives
+ ******************************************/
+/* Initialize the spin lock */
+#define xge_os_spin_lock_init(lockp, ctxh) \
+ if(mtx_initialized(lockp) == 0) { \
+ mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \
+ }
+
+/* Initialize the spin lock (IRQ version) */
+#define xge_os_spin_lock_init_irq(lockp, ctxh) \
+ if(mtx_initialized(lockp) == 0) { \
+ mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \
+ }
+
+/* Destroy the lock */
+#define xge_os_spin_lock_destroy(lockp, ctxh) \
+ if(mtx_initialized(lockp) != 0) { \
+ mtx_destroy(lockp); \
+ }
+
+/* Destroy the lock (IRQ version) */
+#define xge_os_spin_lock_destroy_irq(lockp, ctxh) \
+ if(mtx_initialized(lockp) != 0) { \
+ mtx_destroy(lockp); \
+ }
+
+/* Acquire the lock */
+#define xge_os_spin_lock(lockp) \
+ if(mtx_owned(lockp) == 0) mtx_lock(lockp)
+
+/* Release the lock */
+#define xge_os_spin_unlock(lockp) mtx_unlock(lockp)
+
+/* Acquire the lock (IRQ version) */
+#define xge_os_spin_lock_irq(lockp, flags) { \
+ flags = MTX_QUIET; \
+ if(mtx_owned(lockp) == 0) mtx_lock_flags(lockp, flags); \
+}
+
+/* Release the lock (IRQ version) */
+#define xge_os_spin_unlock_irq(lockp, flags) { \
+ flags = MTX_QUIET; \
+ mtx_unlock_flags(lockp, flags); \
+}
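+
+/*
+ * Illustrative usage sketch (editorial note, not part of the driver):
+ * guarding a short critical section with the wrappers above. "xlock"
+ * and "my_irqh" are hypothetical names.
+ */
+#if 0
+	spinlock_t xlock;
+	unsigned long flags = 0;
+
+	xge_os_spin_lock_init_irq(&xlock, my_irqh);
+	xge_os_spin_lock_irq(&xlock, flags);
+	/* ... touch state shared with the interrupt handler ... */
+	xge_os_spin_unlock_irq(&xlock, flags);
+	xge_os_spin_lock_destroy_irq(&xlock, my_irqh);
+#endif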
+
+/* Write memory barrier */
+#define xge_os_wmb()
+
+/* Delay (in micro seconds) */
+#define xge_os_udelay(us) DELAY(us)
+
+/* Delay (in milli seconds) */
+#define xge_os_mdelay(ms) DELAY((ms) * 1000)
+
+/* Compare and exchange */
+//#define xge_os_cmpxchg(targetp, cmd, newval)
+
+/******************************************
+ * Misc primitives
+ ******************************************/
+#define xge_os_unlikely(x) (x)
+#define xge_os_prefetch(x) (x=x)
+#define xge_os_prefetchw(x) (x=x)
+#define xge_os_bug(fmt...) printf(fmt)
+#define xge_os_htohs ntohs
+#define xge_os_ntohl ntohl
+#define xge_os_htons htons
+#define xge_os_htonl htonl
+
+/******************************************
+ * Compiler Stuffs
+ ******************************************/
+#define __xge_os_attr_cacheline_aligned
+#define __xge_os_cacheline_size 32
+
+/******************************************
+ * Memory Primitives
+ ******************************************/
+#define XGE_OS_INVALID_DMA_ADDR ((dma_addr_t)0)
+
+/******************************************
+ * xge_os_malloc - Allocate non DMA-able memory.
+ * @pdev: Device context.
+ * @size: Size to allocate.
+ *
+ * Allocate @size bytes of memory. This allocation can sleep, and
+ * therefore it requires process context. In other words,
+ * xge_os_malloc() cannot be called from the interrupt context.
+ * Use xge_os_free() to free the allocated block.
+ *
+ * Returns: Pointer to allocated memory, NULL - on failure.
+ *
+ * See also: xge_os_free().
+ ******************************************/
+static inline void *
+xge_os_malloc(pci_dev_h pdev, unsigned long size) {
+	void *vaddr = malloc((size), M_DEVBUF, M_NOWAIT);
+	if (vaddr == NULL)
+		return (NULL);
+	xge_os_memzero(vaddr, size);
+	XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, file, line);
+	return (vaddr);
+}
+
+/******************************************
+ * xge_os_free - Free non DMA-able memory.
+ * @pdev: Device context.
+ * @vaddr: Address of the allocated memory block.
+ * @size: Some OS's require to provide size on free
+ *
+ * Free the memory area obtained via xge_os_malloc().
+ * This call may also sleep, and therefore it cannot be used inside
+ * interrupt.
+ *
+ * See also: xge_os_malloc().
+ ******************************************/
+static inline void
+xge_os_free(pci_dev_h pdev, const void *vaddr, unsigned long size) {
+ XGE_OS_MEMORY_CHECK_FREE(vaddr, size);
+ free(__DECONST(void *, vaddr), M_DEVBUF);
+}
+
+static void
+xge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) {
+ if(error) return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+/******************************************
+ * xge_os_dma_malloc - Allocate DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @size: Size (in bytes) to allocate.
+ * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
+ * XGE_OS_DMA_STREAMING,
+ * XGE_OS_DMA_CONSISTENT
+ * Note that the last two flags are mutually exclusive.
+ * @p_dmah: Handle used to map the memory onto the corresponding device memory
+ * space. See xge_os_dma_map(). The handle is an out-parameter
+ * returned by the function.
+ * @p_dma_acch: One more DMA handle used subsequently to free the
+ * DMA object (via xge_os_dma_free()).
+ *
+ * Allocate DMA-able contiguous memory block of the specified @size.
+ * This memory can be subsequently freed using xge_os_dma_free().
+ * Note: can be used inside interrupt context.
+ *
+ * Returns: Pointer to allocated memory(DMA-able), NULL on failure.
+ *
+ ******************************************/
+static inline void *
+xge_os_dma_malloc(pci_dev_h pdev, unsigned long size, int dma_flags,
+ pci_dma_h *p_dmah, pci_dma_acc_h *p_dma_acch) {
+ int retValue = bus_dma_tag_create(
+ bus_get_dma_tag(pdev->device), /* Parent */
+	    PAGE_SIZE, /* Alignment */
+ 0, /* Bounds */
+ BUS_SPACE_MAXADDR, /* Low Address */
+ BUS_SPACE_MAXADDR, /* High Address */
+ NULL, /* Filter */
+ NULL, /* Filter arg */
+ size, /* Max Size */
+ 1, /* n segments */
+ size, /* max segment size */
+ BUS_DMA_ALLOCNOW, /* Flags */
+ NULL, /* lockfunction */
+ NULL, /* lock arg */
+ &p_dmah->dma_tag); /* DMA tag */
+ if(retValue != 0) {
+	    xge_os_printf("bus_dma_tag_create failed");
+ goto fail_1;
+ }
+ p_dmah->dma_size = size;
+ retValue = bus_dmamem_alloc(p_dmah->dma_tag,
+ (void **)&p_dmah->dma_viraddr, BUS_DMA_NOWAIT, &p_dmah->dma_map);
+ if(retValue != 0) {
+	    xge_os_printf("bus_dmamem_alloc failed");
+ goto fail_2;
+ }
+ return(p_dmah->dma_viraddr);
+
+fail_2: bus_dma_tag_destroy(p_dmah->dma_tag);
+fail_1: return(NULL);
+}
+
+/******************************************
+ * xge_os_dma_free - Free previously allocated DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @vaddr: Virtual address of the DMA-able memory.
+ * @p_dma_acch: DMA handle used to free the resource.
+ * @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc().
+ *
+ * Free DMA-able memory originally allocated by xge_os_dma_malloc().
+ * Note: can be used inside interrupt.
+ * See also: xge_os_dma_malloc().
+ ******************************************/
+static inline void
+xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
+ pci_dma_acc_h *p_dma_acch, pci_dma_h *p_dmah) {
+ XGE_OS_MEMORY_CHECK_FREE(p_dmah->dma_viraddr, size);
+ bus_dmamem_free(p_dmah->dma_tag, p_dmah->dma_viraddr, p_dmah->dma_map);
+ bus_dma_tag_destroy(p_dmah->dma_tag);
+ p_dmah->dma_map = NULL;
+ p_dmah->dma_tag = NULL;
+ p_dmah->dma_viraddr = NULL;
+ return;
+}
+
+/******************************************
+ * IO/PCI/DMA Primitives
+ ******************************************/
+#define XGE_OS_DMA_DIR_TODEVICE 0
+#define XGE_OS_DMA_DIR_FROMDEVICE 1
+#define XGE_OS_DMA_DIR_BIDIRECTIONAL 2
+
+/******************************************
+ * xge_os_pci_read8 - Read one byte from device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of the result.
+ *
+ * Read byte value from the specified @cfgh PCI configuration space at the
+ * specified offset = @where.
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_read8(pdev, cfgh, where, val) \
+ (*(val) = pci_read_config(pdev->device, where, 1))
+
+/******************************************
+ * xge_os_pci_write8 - Write one byte into device PCI configuration.
+ * @pdev: Device context. Some OSs require device context to perform
+ * PIO and/or config space IO.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write byte value into the specified PCI configuration space
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_write8(pdev, cfgh, where, val) \
+ pci_write_config(pdev->device, where, val, 1)
+
+/******************************************
+ * xge_os_pci_read16 - Read 16bit word from device PCI configuration.
+ * @pdev: Device context.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of the 16bit result.
+ *
+ * Read 16bit value from the specified PCI configuration space at the
+ * specified offset.
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_read16(pdev, cfgh, where, val) \
+ (*(val) = pci_read_config(pdev->device, where, 2))
+
+/******************************************
+ * xge_os_pci_write16 - Write 16bit word into device PCI configuration.
+ * @pdev: Device context.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write 16bit value into the specified @offset in PCI
+ * configuration space.
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_write16(pdev, cfgh, where, val) \
+ pci_write_config(pdev->device, where, val, 2)
+
+/******************************************
+ * xge_os_pci_read32 - Read 32bit word from device PCI configuration.
+ * @pdev: Device context.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Address of 32bit result.
+ *
+ * Read 32bit value from the specified PCI configuration space at the
+ * specified offset.
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_read32(pdev, cfgh, where, val) \
+ (*(val) = pci_read_config(pdev->device, where, 4))
+
+/******************************************
+ * xge_os_pci_write32 - Write 32bit word into device PCI configuration.
+ * @pdev: Device context.
+ * @cfgh: PCI configuration space handle.
+ * @where: Offset in the PCI configuration space.
+ * @val: Value to write.
+ *
+ * Write 32bit value into the specified @offset in PCI
+ * configuration space.
+ * Returns: 0 - success, non-zero - failure.
+ ******************************************/
+#define xge_os_pci_write32(pdev, cfgh, where, val) \
+ pci_write_config(pdev->device, where, val, 4)
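+
+/*
+ * Illustrative usage sketch (editorial note, not part of the driver):
+ * reading the 16-bit vendor ID at configuration offset 0. "pdev" is a
+ * hypothetical pci_info_t pointer; the cfgh argument is unused in the
+ * FreeBSD implementation of these macros.
+ */
+#if 0
+	u16 vendor_id;
+
+	xge_os_pci_read16(pdev, NULL, 0 /* PCIR_VENDOR */, &vendor_id);
+#endif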
+
+/******************************************
+ * xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 1 byte value read from the specified (mapped) memory space address.
+ ******************************************/
+static inline u8
+xge_os_pio_mem_read8(pci_dev_h pdev, pci_reg_h regh, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ return bus_space_read_1(tag, handle, (caddr_t)(addr) - addrss);
+}
+
+/******************************************
+ * xge_os_pio_mem_write8 - Write 1 byte into device memory mapped
+ * space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write byte value into the specified (mapped) device memory space.
+ ******************************************/
+static inline void
+xge_os_pio_mem_write8(pci_dev_h pdev, pci_reg_h regh, u8 val, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ bus_space_write_1(tag, handle, (caddr_t)(addr) - addrss, val);
+}
+
+/******************************************
+ * xge_os_pio_mem_read16 - Read 16bit from device memory mapped space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 16bit value read from the specified (mapped) memory space address.
+ ******************************************/
+static inline u16
+xge_os_pio_mem_read16(pci_dev_h pdev, pci_reg_h regh, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ return bus_space_read_2(tag, handle, (caddr_t)(addr) - addrss);
+}
+
+/******************************************
+ * xge_os_pio_mem_write16 - Write 16bit into device memory mapped space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 16bit value into the specified (mapped) device memory space.
+ ******************************************/
+static inline void
+xge_os_pio_mem_write16(pci_dev_h pdev, pci_reg_h regh, u16 val, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ bus_space_write_2(tag, handle, (caddr_t)(addr) - addrss, val);
+}
+
+/******************************************
+ * xge_os_pio_mem_read32 - Read 32bit from device memory mapped space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 32bit value read from the specified (mapped) memory space address.
+ ******************************************/
+static inline u32
+xge_os_pio_mem_read32(pci_dev_h pdev, pci_reg_h regh, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ return bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss);
+}
+
+/******************************************
+ * xge_os_pio_mem_write32 - Write 32bit into device memory space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 32bit value into the specified (mapped) device memory space.
+ ******************************************/
+static inline void
+xge_os_pio_mem_write32(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
+{
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+ bus_space_write_4(tag, handle, (caddr_t)(addr) - addrss, val);
+}
+
+/******************************************
+ * xge_os_pio_mem_read64 - Read 64bit from device memory mapped space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @addr: Address in device memory space.
+ *
+ * Returns: 64bit value read from the specified (mapped) memory space address.
+ ******************************************/
+static inline u64
+xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr)
+{
+ u64 value1, value2;
+
+ bus_space_tag_t tag =
+ (bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
+ bus_space_handle_t handle =
+ (bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
+ caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
+
+ value1 = bus_space_read_4(tag, handle, (caddr_t)(addr) + 4 - addrss);
+ value1 <<= 32;
+ value2 = bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss);
+ value1 |= value2;
+ return value1;
+}
+
+/******************************************
+ * xge_os_pio_mem_write64 - Write 64bit into device memory space.
+ * @pdev: Device context.
+ * @regh: PCI configuration space handle.
+ * @val: Value to write.
+ * @addr: Address in device memory space.
+ *
+ * Write 64bit value into the specified (mapped) device memory space.
+ ******************************************/
+static inline void
+xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr)
+{
+ u32 vall = val & 0xffffffff;
+ xge_os_pio_mem_write32(pdev, regh, vall, addr);
+ xge_os_pio_mem_write32(pdev, regh, val >> 32, ((caddr_t)(addr) + 4));
+}
+
+/******************************************
+ * xge_os_flush_bridge - Flush posted PCI writes by reading back from
+ * device memory space. Takes the same arguments as
+ * xge_os_pio_mem_read64().
+ ******************************************/
+#define xge_os_flush_bridge xge_os_pio_mem_read64
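+
+/*
+ * Illustrative usage sketch (editorial note, not part of the driver):
+ * posting a 64-bit register write and flushing it through the PCI
+ * bridge by reading the register back. "bar0", "reg_addr" and "val"
+ * are hypothetical names.
+ */
+#if 0
+	xge_os_pio_mem_write64(pdev, bar0, val, reg_addr);
+	(void) xge_os_flush_bridge(pdev, bar0, reg_addr);
+#endif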
+
+/******************************************
+ * xge_os_dma_map - Map DMA-able memory block to, or from, or
+ * to-and-from device.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @vaddr: Virtual address of the DMA-able memory.
+ * @size: Size (in bytes) to be mapped.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
+ * XGE_OS_DMA_STREAMING,
+ * XGE_OS_DMA_CONSISTENT
+ * Note that the last two flags are mutually exclusive.
+ *
+ * Map a single memory block.
+ *
+ * Returns: DMA address of the memory block,
+ * XGE_OS_INVALID_DMA_ADDR on failure.
+ *
+ * See also: xge_os_dma_malloc(), xge_os_dma_unmap(),
+ * xge_os_dma_sync().
+ ******************************************/
+static inline dma_addr_t
+xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size,
+ int dir, int dma_flags)
+{
+ int retValue =
+ bus_dmamap_load(dmah.dma_tag, dmah.dma_map, dmah.dma_viraddr,
+ dmah.dma_size, xge_dmamap_cb, &dmah.dma_phyaddr, BUS_DMA_NOWAIT);
+ if(retValue != 0) {
+	    xge_os_printf("bus_dmamap_load failed");
+ return XGE_OS_INVALID_DMA_ADDR;
+ }
+ dmah.dma_size = size;
+ return dmah.dma_phyaddr;
+}
+
+/******************************************
+ * xge_os_dma_unmap - Unmap DMA-able memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
+ * @size: Size (in bytes) to be unmapped.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ *
+ * Unmap a single DMA-able memory block that was previously mapped
+ * using xge_os_dma_map().
+ * See also: xge_os_dma_malloc(), xge_os_dma_map().
+ ******************************************/
+static inline void
+xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
+ size_t size, int dir)
+{
+ bus_dmamap_unload(dmah.dma_tag, dmah.dma_map);
+ return;
+}
+
+/******************************************
+ * xge_os_dma_sync - Synchronize mapped memory.
+ * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
+ * @dmah: DMA handle used to map the memory block. Obtained via
+ * xge_os_dma_malloc().
+ * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
+ * @dma_offset: Offset from start of the block. Used by Solaris only.
+ * @length: Size of the block.
+ * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
+ *
+ * Make physical and CPU memory consistent for a single
+ * streaming mode DMA translation.
+ * This API compiles to NOP on cache-coherent platforms.
+ * On non cache-coherent platforms, depending on the direction
+ * of the "sync" operation, this API will effectively
+ * either invalidate CPU cache (that might contain old data),
+ * or flush CPU cache to update physical memory.
+ * See also: xge_os_dma_malloc(), xge_os_dma_map(),
+ * xge_os_dma_unmap().
+ ******************************************/
+static inline void
+xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
+ u64 dma_offset, size_t length, int dir)
+{
+ bus_dmasync_op_t syncop;
+ switch(dir) {
+ case XGE_OS_DMA_DIR_TODEVICE:
+ syncop = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTWRITE;
+ break;
+
+ case XGE_OS_DMA_DIR_FROMDEVICE:
+ syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD;
+ break;
+
+	    case XGE_OS_DMA_DIR_BIDIRECTIONAL:
+	    default:
+	        syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE;
+	        break;
+	}
+ bus_dmamap_sync(dmah.dma_tag, dmah.dma_map, syncop);
+ return;
+}
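+
+/*
+ * Illustrative usage sketch (editorial note, not part of the driver):
+ * the typical lifecycle of a DMA-able block through the wrappers above.
+ * "my_pdev" is a hypothetical pci_info_t pointer.
+ */
+#if 0
+	pci_dma_h dmah;
+	pci_dma_acc_h dma_acch;
+	void *vaddr;
+	dma_addr_t paddr;
+
+	vaddr = xge_os_dma_malloc(my_pdev, 4096, XGE_OS_DMA_CONSISTENT,
+	    &dmah, &dma_acch);
+	if (vaddr != NULL) {
+		paddr = xge_os_dma_map(my_pdev, dmah, vaddr, 4096,
+		    XGE_OS_DMA_DIR_TODEVICE, 0);
+		if (paddr != XGE_OS_INVALID_DMA_ADDR) {
+			/* ... fill vaddr, hand paddr to the device ... */
+			xge_os_dma_sync(my_pdev, dmah, paddr, 0, 4096,
+			    XGE_OS_DMA_DIR_TODEVICE);
+			xge_os_dma_unmap(my_pdev, dmah, paddr, 4096,
+			    XGE_OS_DMA_DIR_TODEVICE);
+		}
+		xge_os_dma_free(my_pdev, vaddr, 4096, &dma_acch, &dmah);
+	}
+#endif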
+
+#endif /* XGE_OSDEP_H */
diff --git a/sys/dev/nxge/xgehal/xge-queue.c b/sys/dev/nxge/xgehal/xge-queue.c
new file mode 100644
index 0000000..925f44f
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xge-queue.c
@@ -0,0 +1,460 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xge-queue.c
+ *
+ * Description: serialized event queue
+ *
+ * Created: 7 June 2004
+ */
+
+#include <dev/nxge/include/xge-queue.h>
+
+/**
+ * xge_queue_item_data - Get item's data.
+ * @item: Queue item.
+ *
+ * Returns: item data (variable size). Note that xge_queue_t
+ * contains items comprised of a fixed xge_queue_item_t "header"
+ * and a variable size data. This function returns the variable
+ * user-defined portion of the queue item.
+ */
+void* xge_queue_item_data(xge_queue_item_t *item)
+{
+ return (char *)item + sizeof(xge_queue_item_t);
+}
+
+/*
+ * __queue_consume - (Lockless) dequeue an item from the specified queue.
+ *
+ * @queue: Event queue.
+ * See xge_queue_consume().
+ */
+static xge_queue_status_e
+__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
+{
+ int real_size;
+ xge_queue_item_t *elem;
+
+ if (xge_list_is_empty(&queue->list_head))
+ return XGE_QUEUE_IS_EMPTY;
+
+ elem = (xge_queue_item_t *)queue->list_head.next;
+ if (elem->data_size > data_max_size)
+ return XGE_QUEUE_NOT_ENOUGH_SPACE;
+
+ xge_list_remove(&elem->item);
+ real_size = elem->data_size + sizeof(xge_queue_item_t);
+ if (queue->head_ptr == elem) {
+ queue->head_ptr = (char *)queue->head_ptr + real_size;
+ xge_debug_queue(XGE_TRACE,
+ "event_type: %d removing from the head: "
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
+ elem->event_type,
+ (u64)(ulong_t)queue->start_ptr,
+ (u64)(ulong_t)queue->head_ptr,
+ (u64)(ulong_t)queue->tail_ptr,
+ (u64)(ulong_t)queue->end_ptr,
+ (u64)(ulong_t)elem,
+ real_size);
+ } else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
+ queue->tail_ptr = (char *)queue->tail_ptr - real_size;
+ xge_debug_queue(XGE_TRACE,
+ "event_type: %d removing from the tail: "
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
+ elem->event_type,
+ (u64)(ulong_t)queue->start_ptr,
+ (u64)(ulong_t)queue->head_ptr,
+ (u64)(ulong_t)queue->tail_ptr,
+ (u64)(ulong_t)queue->end_ptr,
+ (u64)(ulong_t)elem,
+ real_size);
+ } else {
+ xge_debug_queue(XGE_TRACE,
+ "event_type: %d removing from the list: "
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
+ elem->event_type,
+ (u64)(ulong_t)queue->start_ptr,
+ (u64)(ulong_t)queue->head_ptr,
+ (u64)(ulong_t)queue->tail_ptr,
+ (u64)(ulong_t)queue->end_ptr,
+ (u64)(ulong_t)elem,
+ real_size);
+ }
+ xge_assert(queue->tail_ptr >= queue->head_ptr);
+ xge_assert(queue->tail_ptr >= queue->start_ptr &&
+ queue->tail_ptr <= queue->end_ptr);
+ xge_assert(queue->head_ptr >= queue->start_ptr &&
+ queue->head_ptr < queue->end_ptr);
+ xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
+ xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
+ elem->data_size);
+
+ if (xge_list_is_empty(&queue->list_head)) {
+ /* reset buffer pointers just to be clean */
+ queue->head_ptr = queue->tail_ptr = queue->start_ptr;
+ }
+ return XGE_QUEUE_OK;
+}
+
+/**
+ * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
+ * into the specified queue.
+ * @queueh: Queue handle.
+ * @event_type: Event type. One of the enumerated event types
+ * that both consumer and producer "understand".
+ * For an example, please refer to xge_hal_event_e.
+ * @context: Opaque (void*) "context", for instance event producer object.
+ * @is_critical: For critical event, e.g. ECC.
+ * @data_size: Size of the @data.
+ * @data: User data of variable @data_size that is _copied_ into
+ * the new queue item (see xge_queue_item_t{}). Upon return
+ * from the call the @data memory can be re-used or released.
+ *
+ * Enqueue a new item.
+ *
+ * Returns: XGE_QUEUE_OK - success.
+ * XGE_QUEUE_IS_FULL - Queue is full.
+ * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
+ *
+ * See also: xge_queue_item_t{}, xge_queue_consume().
+ */
+xge_queue_status_e
+xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
+ int is_critical, const int data_size, void *data)
+{
+ xge_queue_t *queue = (xge_queue_t *)queueh;
+ int real_size = data_size + sizeof(xge_queue_item_t);
+ xge_queue_item_t *elem;
+ unsigned long flags = 0;
+
+ xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);
+
+ xge_os_spin_lock_irq(&queue->lock, flags);
+
+ if (is_critical && !queue->has_critical_event) {
+ unsigned char item_buf[sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
+ xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
+ xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
+
+ while (__queue_consume(queue,
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
+ item) != XGE_QUEUE_IS_EMPTY)
+ ; /* do nothing */
+ }
+
+try_again:
+ if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
+ elem = (xge_queue_item_t *) queue->tail_ptr;
+ queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
+ xge_debug_queue(XGE_TRACE,
+ "event_type: %d adding to the tail: "
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
+ event_type,
+ (u64)(ulong_t)queue->start_ptr,
+ (u64)(ulong_t)queue->head_ptr,
+ (u64)(ulong_t)queue->tail_ptr,
+ (u64)(ulong_t)queue->end_ptr,
+ (u64)(ulong_t)elem,
+ real_size);
+ } else if ((char *)queue->head_ptr - real_size >=
+ (char *)queue->start_ptr) {
+ elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
+ queue->head_ptr = elem;
+ xge_debug_queue(XGE_TRACE,
+ "event_type: %d adding to the head: "
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" length %d",
+ event_type,
+ (u64)(ulong_t)queue->start_ptr,
+ (u64)(ulong_t)queue->head_ptr,
+ (u64)(ulong_t)queue->tail_ptr,
+ (u64)(ulong_t)queue->end_ptr,
+ real_size);
+ } else {
+ xge_queue_status_e status;
+
+ if (queue->pages_current >= queue->pages_max) {
+ xge_os_spin_unlock_irq(&queue->lock, flags);
+ return XGE_QUEUE_IS_FULL;
+ }
+
+ if (queue->has_critical_event) {
+ xge_os_spin_unlock_irq(&queue->lock, flags);
+ return XGE_QUEUE_IS_FULL;
+ }
+
+ /* grow */
+ status = __io_queue_grow(queueh);
+ if (status != XGE_QUEUE_OK) {
+ xge_os_spin_unlock_irq(&queue->lock, flags);
+ return status;
+ }
+
+ goto try_again;
+ }
+ xge_assert(queue->tail_ptr >= queue->head_ptr);
+ xge_assert(queue->tail_ptr >= queue->start_ptr &&
+ queue->tail_ptr <= queue->end_ptr);
+ xge_assert(queue->head_ptr >= queue->start_ptr &&
+ queue->head_ptr < queue->end_ptr);
+ elem->data_size = data_size;
+ elem->event_type = (xge_hal_event_e) event_type;
+ elem->is_critical = is_critical;
+ if (is_critical)
+ queue->has_critical_event = 1;
+ elem->context = context;
+ xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
+ xge_list_insert_before(&elem->item, &queue->list_head);
+ xge_os_spin_unlock_irq(&queue->lock, flags);
+
+ /* no lock taken! */
+ queue->queued_func(queue->queued_data, event_type);
+
+ return XGE_QUEUE_OK;
+}
+
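+/*
+ * Illustrative usage sketch (editorial note, not part of the HAL):
+ * producing one event and draining the queue. The event type, callback
+ * and context names here are hypothetical.
+ */
+#if 0
+	xge_queue_h qh;
+	u32 payload = 0xdead;
+	unsigned char buf[sizeof(xge_queue_item_t) +
+	    XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
+	xge_queue_item_t *it = (xge_queue_item_t *)(void *)buf;
+
+	qh = xge_queue_create(my_pdev, my_irqh, 1, 4, my_queued_cb, my_data);
+	(void) xge_queue_produce(qh, MY_EVENT_TYPE, my_ctx,
+	    0 /* non-critical */, sizeof(payload), &payload);
+	/* later, typically from the callback or a taskqueue: */
+	while (xge_queue_consume(qh, XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
+	    it) == XGE_QUEUE_OK) {
+		/* process xge_queue_item_data(it) */
+	}
+	xge_queue_destroy(qh);
+#endif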
+
+/**
+ * xge_queue_create - Create protected first-in-first-out queue.
+ * @pdev: PCI device handle.
+ * @irqh: PCI device IRQ handle.
+ * @pages_initial: Number of pages to be initially allocated at the
+ * time of queue creation.
+ * @pages_max: Max number of pages that can be allocated in the queue.
+ * @queued: Optional callback function to be called each time a new item is
+ * added to the queue.
+ * @queued_data: Argument to the callback function.
+ *
+ * Create protected (fifo) queue.
+ *
+ * Returns: Pointer to xge_queue_t structure,
+ * NULL - on failure.
+ *
+ * See also: xge_queue_item_t{}, xge_queue_destroy().
+ */
+xge_queue_h
+xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
+ int pages_max, xge_queued_f queued, void *queued_data)
+{
+ xge_queue_t *queue;
+
+ if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
+ return NULL;
+
+ queue->queued_func = queued;
+ queue->queued_data = queued_data;
+ queue->pdev = pdev;
+ queue->irqh = irqh;
+ queue->pages_current = pages_initial;
+ queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
+ XGE_QUEUE_BUF_SIZE);
+ if (queue->start_ptr == NULL) {
+ xge_os_free(pdev, queue, sizeof(xge_queue_t));
+ return NULL;
+ }
+ queue->head_ptr = queue->tail_ptr = queue->start_ptr;
+ queue->end_ptr = (char *)queue->start_ptr +
+ queue->pages_current * XGE_QUEUE_BUF_SIZE;
+ xge_os_spin_lock_init_irq(&queue->lock, irqh);
+ queue->pages_initial = pages_initial;
+ queue->pages_max = pages_max;
+ xge_list_init(&queue->list_head);
+
+ return queue;
+}
+
+/**
+ * xge_queue_destroy - Destroy xge_queue_t object.
+ * @queueh: Queue handle.
+ *
+ * Destroy the specified xge_queue_t object.
+ *
+ * See also: xge_queue_item_t{}, xge_queue_create().
+ */
+void xge_queue_destroy(xge_queue_h queueh)
+{
+ xge_queue_t *queue = (xge_queue_t *)queueh;
+ xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
+ if (!xge_list_is_empty(&queue->list_head)) {
+ xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
+ XGE_OS_LLXFMT, (u64)(ulong_t)queue);
+ }
+ xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
+ XGE_QUEUE_BUF_SIZE);
+
+ xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
+}
+
+/*
+ * __io_queue_grow - Dynamically increases the size of the queue.
+ * @queueh: Queue handle.
+ *
+ * This function is called when there is no slot available in the queue
+ * to accommodate the newly received event.
+ * Note that the queue cannot grow beyond the max size specified for the
+ * queue.
+ *
+ * Returns XGE_QUEUE_OK: On success.
+ * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
+ */
+xge_queue_status_e
+__io_queue_grow(xge_queue_h queueh)
+{
+ xge_queue_t *queue = (xge_queue_t *)queueh;
+ void *newbuf, *oldbuf;
+ xge_list_t *item;
+ xge_queue_item_t *elem;
+
+ xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
+ (u64)(ulong_t)queue, queue->pages_current);
+
+ newbuf = xge_os_malloc(queue->pdev,
+ (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
+ if (newbuf == NULL)
+ return XGE_QUEUE_OUT_OF_MEMORY;
+
+ xge_os_memcpy(newbuf, queue->start_ptr,
+ queue->pages_current * XGE_QUEUE_BUF_SIZE);
+ oldbuf = queue->start_ptr;
+
+ /* adjust queue sizes */
+ queue->start_ptr = newbuf;
+ queue->end_ptr = (char *)newbuf +
+ (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
+ queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
+ (char *)oldbuf);
+ queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
+ (char *)oldbuf);
+ xge_assert(!xge_list_is_empty(&queue->list_head));
+ queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
+ ((char *)queue->list_head.next - (char *)oldbuf));
+ queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
+ ((char *)queue->list_head.prev - (char *)oldbuf));
+ /* adjust queue list */
+ xge_list_for_each(item, &queue->list_head) {
+ elem = xge_container_of(item, xge_queue_item_t, item);
+ if (elem->item.next != &queue->list_head) {
+ elem->item.next =
+ (xge_list_t*)(void *)((char *)newbuf +
+ ((char *)elem->item.next - (char *)oldbuf));
+ }
+ if (elem->item.prev != &queue->list_head) {
+ elem->item.prev =
+ (xge_list_t*) (void *)((char *)newbuf +
+ ((char *)elem->item.prev - (char *)oldbuf));
+ }
+ }
+ xge_os_free(queue->pdev, oldbuf,
+ queue->pages_current * XGE_QUEUE_BUF_SIZE);
+ queue->pages_current++;
+
+ return XGE_QUEUE_OK;
+}
+
+/**
+ * xge_queue_consume - Dequeue an item from the specified queue.
+ * @queueh: Queue handle.
+ * @data_max_size: Maximum expected size of the item.
+ * @item: Memory area into which the item is _copied_ upon return
+ * from the function.
+ *
+ * Dequeue an item from the queue. The caller is required to provide
+ * enough space for the item.
+ *
+ * Returns: XGE_QUEUE_OK - success.
+ * XGE_QUEUE_IS_EMPTY - Queue is empty.
+ * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
+ * is too small to accommodate an item from the queue.
+ *
+ * See also: xge_queue_item_t{}, xge_queue_produce().
+ */
+xge_queue_status_e
+xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
+{
+ xge_queue_t *queue = (xge_queue_t *)queueh;
+ unsigned long flags = 0;
+ xge_queue_status_e status;
+
+ xge_os_spin_lock_irq(&queue->lock, flags);
+ status = __queue_consume(queue, data_max_size, item);
+ xge_os_spin_unlock_irq(&queue->lock, flags);
+
+ return status;
+}
+
+
+/**
+ * xge_queue_flush - Flush, or empty, the queue.
+ * @queueh: Queue handle.
+ *
+ * Flush the queue, i.e. make it empty by consuming all events
+ * without invoking the event processing logic (callbacks, etc.)
+ */
+void xge_queue_flush(xge_queue_h queueh)
+{
+ unsigned char item_buf[sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
+ xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
+ xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
+
+ /* flush queue by consuming all enqueued items */
+ while (xge_queue_consume(queueh,
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
+ item) != XGE_QUEUE_IS_EMPTY) {
+		xge_debug_queue(XGE_TRACE, "item 0x"XGE_OS_LLXFMT"(%d) flushed",
+		    (u64)(ulong_t)item, item->event_type);
+ }
+ (void) __queue_get_reset_critical (queueh);
+}
+
+/*
+ * __queue_get_reset_critical - Check for critical events in the queue.
+ * @qh: Queue handle.
+ *
+ * Check for critical event(s) in the queue, and reset the
+ * "has-critical-event" flag upon return.
+ * Returns: 1 - if the queue contains at least one critical event.
+ * 0 - If there are no critical events in the queue.
+ */
+int __queue_get_reset_critical(xge_queue_h qh)
+{
+ xge_queue_t* queue = (xge_queue_t*)qh;
+ int c = queue->has_critical_event;
+
+ queue->has_critical_event = 0;
+ return c;
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-channel-fp.c b/sys/dev/nxge/xgehal/xgehal-channel-fp.c
new file mode 100644
index 0000000..0417ca0
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-channel-fp.c
@@ -0,0 +1,299 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-channel-fp.c
+ *
+ * Description: HAL channel object functionality (fast path)
+ *
+ * Created: 10 June 2004
+ */
+
+#ifdef XGE_DEBUG_FP
+#include <dev/nxge/include/xgehal-channel.h>
+#endif
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+{
+ void **tmp_arr;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ unsigned long flags = 0;
+#endif
+
+ if (channel->reserve_length - channel->reserve_top >
+ channel->reserve_threshold) {
+
+_alloc_after_swap:
+ *dtrh = channel->reserve_arr[--channel->reserve_length];
+
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, "
+ "channel %d:%d:%d, reserve_idx %d",
+ (unsigned long long)(ulong_t)*dtrh,
+ channel->type, channel->post_qid,
+ channel->compl_qid, channel->reserve_length);
+
+ return XGE_HAL_OK;
+ }
+
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_lock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_lock(&channel->free_lock);
+#endif
+
+ /* switch between empty and full arrays */
+
+ /* the idea behind such a design is that by having free and reserved
+ * arrays separated we basically separated irq and non-irq parts.
+ * i.e. no additional lock need to be done when we free a resource */
+
+ if (channel->reserve_initial - channel->free_length >
+ channel->reserve_threshold) {
+
+ tmp_arr = channel->reserve_arr;
+ channel->reserve_arr = channel->free_arr;
+ channel->reserve_length = channel->reserve_initial;
+ channel->free_arr = tmp_arr;
+ channel->reserve_top = channel->free_length;
+ channel->free_length = channel->reserve_initial;
+
+ channel->stats.reserve_free_swaps_cnt++;
+
+ xge_debug_channel(XGE_TRACE,
+ "switch on channel %d:%d:%d, reserve_length %d, "
+ "free_length %d", channel->type, channel->post_qid,
+ channel->compl_qid, channel->reserve_length,
+ channel->free_length);
+
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_unlock(&channel->free_lock);
+#endif
+
+ goto _alloc_after_swap;
+ }
+
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_unlock(&channel->free_lock);
+#endif
+
+ xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!",
+ channel->type, channel->post_qid,
+ channel->compl_qid);
+
+ channel->stats.full_cnt++;
+
+ *dtrh = NULL;
+ return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
+}
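+
+/*
+ * Illustrative trace (editorial note, not part of the HAL) of the
+ * two-array scheme above, assuming reserve_initial = 4 and
+ * reserve_threshold = 0: once the producer drains reserve_arr
+ * (reserve_length == 0) and the consumer has freed two descriptors into
+ * free_arr (free_length == 2), the next allocation swaps the arrays and
+ * hands both freed descriptors back to the producer under a single lock
+ * acquisition, instead of locking on every alloc/free pair.
+ */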
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int offset)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ /* restore a previously allocated dtrh at current offset and update
+ * the available reserve length accordingly. If dtrh is null just
+ * update the reserve length, only */
+
+ if (dtrh) {
+ channel->reserve_arr[channel->reserve_length + offset] = dtrh;
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for "
+ "channel %d:%d:%d, offset %d at reserve index %d, ",
+ (unsigned long long)(ulong_t)dtrh, channel->type,
+ channel->post_qid, channel->compl_qid, offset,
+ channel->reserve_length + offset);
+ }
+ else {
+ channel->reserve_length += offset;
+ xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
+ "for offset %d, new reserve_length %d, free length %d",
+ channel->type, channel->post_qid, channel->compl_qid,
+ offset, channel->reserve_length, channel->free_length);
+ }
+}
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;
+
+ xge_assert(channel->work_arr[channel->post_index] == NULL);
+
+ channel->work_arr[channel->post_index++] = dtrh;
+
+ /* wrap-around */
+ if (channel->post_index == channel->length)
+ channel->post_index = 0;
+}
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ xge_assert(channel->work_arr);
+ xge_assert(channel->compl_index < channel->length);
+
+ *dtrh = channel->work_arr[channel->compl_index];
+}
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_complete(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ channel->work_arr[channel->compl_index] = NULL;
+
+ /* wrap-around */
+ if (++channel->compl_index == channel->length)
+ channel->compl_index = 0;
+
+ channel->stats.total_compl_cnt++;
+}
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ channel->free_arr[--channel->free_length] = dtrh;
+
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, "
+ "channel %d:%d:%d, new free_length %d",
+ (unsigned long long)(ulong_t)dtrh,
+ channel->type, channel->post_qid,
+ channel->compl_qid, channel->free_length);
+}
+
+/**
+ * xge_hal_channel_dtr_count
+ * @channelh: Channel handle. Obtained via xge_hal_channel_open().
+ *
+ * Retrieve the number of DTRs available. This function cannot be called
+ * from the data path.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_dtr_count(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ return ((channel->reserve_length - channel->reserve_top) +
+ (channel->reserve_initial - channel->free_length) -
+ channel->reserve_threshold);
+}
+
+/**
+ * xge_hal_channel_userdata - Get user-specified channel context.
+ * @channelh: Channel handle. Obtained via xge_hal_channel_open().
+ *
+ * Returns: per-channel "user data", which can be any ULD-defined context.
+ * The %userdata "gets" into the channel at open time
+ * (see xge_hal_channel_open()).
+ *
+ * See also: xge_hal_channel_open().
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
+xge_hal_channel_userdata(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ return channel->userdata;
+}
+
+/**
+ * xge_hal_channel_id - Get channel ID.
+ * @channelh: Channel handle. Obtained via xge_hal_channel_open().
+ *
+ * Returns: channel ID. For link layer channel id is the number
+ * in the range from 0 to 7 that identifies hardware ring or fifo,
+ * depending on the channel type.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_id(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ return channel->post_qid;
+}
+
+/**
+ * xge_hal_check_alignment - Check buffer alignment and calculate the
+ * "misaligned" portion.
+ * @dma_pointer: DMA address of the buffer.
+ * @size: Buffer size, in bytes.
+ * @alignment: Alignment "granularity" (see below), in bytes.
+ * @copy_size: Maximum number of bytes to "extract" from the buffer
+ * (in order to post it as a separate scatter-gather entry). See below.
+ *
+ * Check buffer alignment and calculate "misaligned" portion, if exists.
+ * The buffer is considered aligned if its address is multiple of
+ * the specified @alignment. If this is the case,
+ * xge_hal_check_alignment() returns zero.
+ * Otherwise, xge_hal_check_alignment() uses the last argument,
+ * @copy_size,
+ * to calculate the size to "extract" from the buffer. The @copy_size
+ * may or may not be equal @alignment. The difference between these two
+ * arguments is that the @alignment is used to make the decision: aligned
+ * or not aligned. While the @copy_size is used to calculate the portion
+ * of the buffer to "extract", i.e. to post as a separate entry in the
+ * transmit descriptor. For example, the combination
+ * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes.
+ *
+ * Note: @copy_size should be a multiple of @alignment. In many practical
+ * cases @copy_size and @alignment will probably be equal.
+ *
+ * See also: xge_hal_fifo_dtr_buffer_set_aligned().
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
+ int copy_size)
+{
+ int misaligned_size;
+
+ misaligned_size = (int)(dma_pointer & (alignment - 1));
+ if (!misaligned_size) {
+ return 0;
+ }
+
+ if (size > copy_size) {
+ misaligned_size = (int)(dma_pointer & (copy_size - 1));
+ misaligned_size = copy_size - misaligned_size;
+ } else {
+ misaligned_size = size;
+ }
+
+ return misaligned_size;
+}
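+
+/*
+ * Worked example (editorial note, not part of the HAL): with
+ * dma_pointer = 0x1002, size = 1500, alignment = 8 and copy_size = 64,
+ * 0x1002 & 7 == 2, so the buffer is misaligned. Since size > copy_size,
+ * the function returns 64 - (0x1002 & 63) = 62: posting the first 62
+ * bytes separately leaves the remainder at 0x1040, which is 64-byte
+ * (and hence 8-byte) aligned.
+ */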
+
diff --git a/sys/dev/nxge/xgehal/xgehal-channel.c b/sys/dev/nxge/xgehal/xgehal-channel.c
new file mode 100644
index 0000000..dad39f2
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-channel.c
@@ -0,0 +1,759 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-channel.c
+ *
+ * Description: chipset channel abstraction
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-fifo.h>
+#include <dev/nxge/include/xgehal-ring.h>
+#include <dev/nxge/include/xgehal-device.h>
+#include <dev/nxge/include/xgehal-regs.h>
+#ifdef XGEHAL_RNIC
+#include <dev/nxge/include/xgehal-types.h>
+#include "xgehal-iov.h"
+#endif
+
+/*
+ * __hal_channel_dtr_next_reservelist
+ *
+ * Walk through all available DTRs.
+ */
+static xge_hal_status_e
+__hal_channel_dtr_next_reservelist(xge_hal_channel_h channelh,
+ xge_hal_dtr_h *dtrh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ if (channel->reserve_top >= channel->reserve_length) {
+ return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
+ }
+
+ *dtrh = channel->reserve_arr[channel->reserve_top++];
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_channel_dtr_next_freelist
+ *
+ * Walk through the "freed" DTRs.
+ */
+static xge_hal_status_e
+__hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ if (channel->reserve_initial == channel->free_length) {
+ return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
+ }
+
+ *dtrh = channel->free_arr[channel->free_length++];
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_channel_dtr_next_not_completed - Get the _next_ posted but
+ * not completed descriptor.
+ *
+ * Walk through the "not completed" DTRs.
+ */
+static xge_hal_status_e
+__hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh,
+ xge_hal_dtr_h *dtrh)
+{
+#ifndef XGEHAL_RNIC
+ xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
+#endif
+
+ __hal_channel_dtr_try_complete(channelh, dtrh);
+ if (*dtrh == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+#ifndef XGEHAL_RNIC
+ rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
+	xge_assert(rxdp->host_control != 0);
+#endif
+
+ __hal_channel_dtr_complete(channelh);
+
+ return XGE_HAL_OK;
+}
+
+xge_hal_channel_t*
+__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
+#ifdef XGEHAL_RNIC
+ u32 vp_id,
+#endif
+ xge_hal_channel_type_e type)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_channel_t *channel;
+ int size = 0;
+
+ switch(type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
+ post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
+ size = sizeof(xge_hal_fifo_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
+ post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
+ size = sizeof(xge_hal_ring_t);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ size = sizeof(__hal_sq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ size = sizeof(__hal_srq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ size = sizeof(__hal_cqrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ size = sizeof(__hal_umq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ size = sizeof(__hal_dmq_t);
+ break;
+#endif
+	default:
+		xge_assert(size);
+		break;
+	}
+
+	/* allocate the channel object */
+ channel = (xge_hal_channel_t *) xge_os_malloc(hldev->pdev, size);
+ if (channel == NULL) {
+ return NULL;
+ }
+ xge_os_memzero(channel, size);
+
+ channel->pdev = hldev->pdev;
+ channel->regh0 = hldev->regh0;
+ channel->regh1 = hldev->regh1;
+ channel->type = type;
+ channel->devh = devh;
+#ifdef XGEHAL_RNIC
+ channel->vp_id = vp_id;
+#endif
+ channel->post_qid = post_qid;
+ channel->compl_qid = 0;
+
+ return channel;
+}
+
+void __hal_channel_free(xge_hal_channel_t *channel)
+{
+ int size = 0;
+
+ xge_assert(channel->pdev);
+
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ size = sizeof(xge_hal_fifo_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ size = sizeof(xge_hal_ring_t);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ size = sizeof(__hal_sq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ size = sizeof(__hal_srq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ size = sizeof(__hal_cqrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ size = sizeof(__hal_umq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ size = sizeof(__hal_dmq_t);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(size);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ xge_os_free(channel->pdev, channel, size);
+}
+
+xge_hal_status_e
+__hal_channel_initialize (xge_hal_channel_h channelh,
+ xge_hal_channel_attr_t *attr, void **reserve_arr,
+ int reserve_initial, int reserve_max, int reserve_threshold)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev;
+
+ hldev = (xge_hal_device_t *)channel->devh;
+
+ channel->dtr_term = attr->dtr_term;
+ channel->dtr_init = attr->dtr_init;
+ channel->callback = attr->callback;
+ channel->userdata = attr->userdata;
+ channel->flags = attr->flags;
+ channel->per_dtr_space = attr->per_dtr_space;
+
+ channel->reserve_arr = reserve_arr;
+ channel->reserve_initial = reserve_initial;
+ channel->reserve_max = reserve_max;
+ channel->reserve_length = channel->reserve_initial;
+ channel->reserve_threshold = reserve_threshold;
+ channel->reserve_top = 0;
+ channel->saved_arr = (void **) xge_os_malloc(hldev->pdev,
+ sizeof(void*)*channel->reserve_max);
+ if (channel->saved_arr == NULL) {
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ xge_os_memzero(channel->saved_arr, sizeof(void*)*channel->reserve_max);
+ channel->free_arr = channel->saved_arr;
+ channel->free_length = channel->reserve_initial;
+ channel->work_arr = (void **) xge_os_malloc(hldev->pdev,
+ sizeof(void*)*channel->reserve_max);
+ if (channel->work_arr == NULL) {
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ xge_os_memzero(channel->work_arr,
+ sizeof(void*)*channel->reserve_max);
+ channel->post_index = 0;
+ channel->compl_index = 0;
+ channel->length = channel->reserve_initial;
+
+ channel->orig_arr = (void **) xge_os_malloc(hldev->pdev,
+ sizeof(void*)*channel->reserve_max);
+ if (channel->orig_arr == NULL)
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+
+ xge_os_memzero(channel->orig_arr, sizeof(void*)*channel->reserve_max);
+
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_lock_init_irq(&channel->free_lock, hldev->irqh);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_lock_init(&channel->free_lock, hldev->pdev);
+#endif
+
+ return XGE_HAL_OK;
+}
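+
+/*
+ * Bookkeeping summary (inferred from the code above and the fast-path
+ * code in xgehal-channel-fp.c): reserve_arr[reserve_top..reserve_length-1]
+ * holds descriptors available for reservation, free_arr collects
+ * completed/freed descriptors awaiting reuse, work_arr tracks descriptors
+ * posted to the hardware (bounded by post_index/compl_index), and
+ * orig_arr preserves the initial layout so that a
+ * XGE_HAL_CHANNEL_RESET_ONLY reopen can restore it.
+ */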
+
+void __hal_channel_terminate(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev;
+
+ hldev = (xge_hal_device_t *)channel->devh;
+
+ xge_assert(channel->pdev);
+ /* undo changes made at channel_initialize() */
+ if (channel->work_arr) {
+ xge_os_free(channel->pdev, channel->work_arr,
+ sizeof(void*)*channel->reserve_max);
+ channel->work_arr = NULL;
+ }
+
+ if (channel->saved_arr) {
+ xge_os_free(channel->pdev, channel->saved_arr,
+ sizeof(void*)*channel->reserve_max);
+ channel->saved_arr = NULL;
+ }
+
+ if (channel->orig_arr) {
+ xge_os_free(channel->pdev, channel->orig_arr,
+ sizeof(void*)*channel->reserve_max);
+ channel->orig_arr = NULL;
+ }
+
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_lock_destroy_irq(&channel->free_lock, hldev->irqh);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_lock_destroy(&channel->free_lock, hldev->pdev);
+#endif
+}
+
+/**
+ * xge_hal_channel_open - Open communication channel.
+ * @devh: HAL device, pointer to xge_hal_device_t structure.
+ * @attr: Contains attributes required to open
+ * the channel.
+ * @channelh: The channel handle. On success (XGE_HAL_OK) HAL fills
+ * this "out" parameter with a valid channel handle.
+ * @reopen: See xge_hal_channel_reopen_e{}.
+ *
+ * Open communication channel with the device.
+ *
+ * HAL uses (persistent) channel configuration to allocate both channel
+ * and Xframe Tx and Rx descriptors.
+ * Notes:
+ * 1) The channel config data is fed into HAL prior to
+ * xge_hal_channel_open().
+ *
+ * 2) The corresponding hardware queues must be already configured and
+ * enabled.
+ *
+ * 3) Either down or up queue may be omitted, in which case the channel
+ * is treated as _unidirectional_.
+ *
+ * 4) Post and completion queue may be the same, in which case the channel
+ * is said to have "in-band completions".
+ *
+ * Note that the free_channels list is not protected, i.e. the caller must
+ * provide a safe context.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_CHANNEL_NOT_FOUND - Unable to locate the channel.
+ * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
+ *
+ * See also: xge_hal_channel_attr_t{}.
+ * Usage: See ex_open{}.
+ */
+xge_hal_status_e
+xge_hal_channel_open(xge_hal_device_h devh,
+ xge_hal_channel_attr_t *attr,
+ xge_hal_channel_h *channelh,
+ xge_hal_channel_reopen_e reopen)
+{
+ xge_list_t *item;
+ int i;
+ xge_hal_status_e status = XGE_HAL_OK;
+ xge_hal_channel_t *channel = NULL;
+ xge_hal_device_t *device = (xge_hal_device_t *)devh;
+
+ xge_assert(device);
+ xge_assert(attr);
+
+ *channelh = NULL;
+
+#ifdef XGEHAL_RNIC
+ if((attr->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (attr->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
+ /* find channel */
+ xge_list_for_each(item, &device->free_channels) {
+ xge_hal_channel_t *tmp;
+
+ tmp = xge_container_of(item, xge_hal_channel_t, item);
+ if (tmp->type == attr->type &&
+ tmp->post_qid == attr->post_qid &&
+ tmp->compl_qid == attr->compl_qid) {
+ channel = tmp;
+ break;
+ }
+ }
+
+ if (channel == NULL) {
+ return XGE_HAL_ERR_CHANNEL_NOT_FOUND;
+ }
+
+#ifdef XGEHAL_RNIC
+	} else {
+ channel = __hal_channel_allocate(devh, attr->post_qid,
+#ifdef XGEHAL_RNIC
+ attr->vp_id,
+#endif
+ attr->type);
+ if (channel == NULL) {
+ xge_debug_device(XGE_ERR,
+ "__hal_channel_allocate failed");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ }
+#endif
+
+#ifndef XGEHAL_RNIC
+ xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING));
+#endif
+
+#ifdef XGEHAL_RNIC
+ if((reopen == XGE_HAL_CHANNEL_OC_NORMAL) ||
+ ((channel->type != XGE_HAL_CHANNEL_TYPE_FIFO) &&
+ (channel->type != XGE_HAL_CHANNEL_TYPE_RING))) {
+#else
+ if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
+#endif
+ /* allocate memory, initialize pointers, etc */
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ status = __hal_fifo_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ status = __hal_ring_open(channel, attr);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ status = __hal_sq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ status = __hal_srq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ status = __hal_cqrq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ status = __hal_umq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ status = __hal_dmq_open(channel, attr);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ status = XGE_HAL_FAIL;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (status == XGE_HAL_OK) {
+ for (i = 0; i < channel->reserve_initial; i++) {
+ channel->orig_arr[i] =
+ channel->reserve_arr[i];
+ }
+		} else {
+			return status;
+		}
+ } else {
+ xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
+
+ for (i = 0; i < channel->reserve_initial; i++) {
+ channel->reserve_arr[i] = channel->orig_arr[i];
+ channel->free_arr[i] = NULL;
+ }
+ channel->free_length = channel->reserve_initial;
+ channel->reserve_length = channel->reserve_initial;
+ channel->reserve_top = 0;
+ channel->post_index = 0;
+ channel->compl_index = 0;
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ status = __hal_ring_initial_replenish(channel,
+ reopen);
+ if (status != XGE_HAL_OK)
+ return status;
+ }
+ }
+
+ /* move channel to the open state list */
+
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ xge_list_remove(&channel->item);
+ xge_list_insert(&channel->item, &device->fifo_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ xge_list_remove(&channel->item);
+ xge_list_insert(&channel->item, &device->ring_channels);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ xge_list_insert(&channel->item,
+ &device->virtual_paths[attr->vp_id].sq_channels);
+ device->virtual_paths[attr->vp_id].stats.no_sqs++;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ xge_list_insert(&channel->item,
+ &device->virtual_paths[attr->vp_id].srq_channels);
+ device->virtual_paths[attr->vp_id].stats.no_srqs++;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ xge_list_insert(&channel->item,
+ &device->virtual_paths[attr->vp_id].cqrq_channels);
+ device->virtual_paths[attr->vp_id].stats.no_cqrqs++;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ xge_list_init(&channel->item);
+ device->virtual_paths[attr->vp_id].umq_channelh = channel;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_list_init(&channel->item);
+ device->virtual_paths[attr->vp_id].dmq_channelh = channel;
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
+ channel->type == XGE_HAL_CHANNEL_TYPE_RING);
+ break;
+#endif
+ default:
+ break;
+ }
+ channel->is_open = 1;
+	/*
+	 * The magic checks the argument validity; it has to be
+	 * removed before 03/01/2005.
+	 */
+ channel->magic = XGE_HAL_MAGIC;
+
+ *channelh = channel;
+
+ return XGE_HAL_OK;
+}
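+
+/*
+ * Minimal usage sketch (hypothetical; the field values and the rx_compl
+ * callback are assumptions made for this example -- see also ex_open{}):
+ *
+ *	xge_hal_channel_attr_t attr;
+ *	xge_hal_channel_h channelh;
+ *
+ *	attr.type      = XGE_HAL_CHANNEL_TYPE_RING;
+ *	attr.post_qid  = 0;
+ *	attr.compl_qid = 0;
+ *	attr.callback  = rx_compl;
+ *	attr.userdata  = lldev;
+ *	if (xge_hal_channel_open(devh, &attr, &channelh,
+ *	    XGE_HAL_CHANNEL_OC_NORMAL) != XGE_HAL_OK)
+ *		handle the error;
+ */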
+
+/**
+ * xge_hal_channel_abort - Abort the channel.
+ * @channelh: Channel handle.
+ * @reopen: See xge_hal_channel_reopen_e{}.
+ *
+ * Terminate (via xge_hal_channel_dtr_term_f{}) all channel descriptors.
+ * Currently used internally only by HAL, as part of its
+ * xge_hal_channel_close() and xge_hal_channel_open() in case
+ * of fatal error.
+ *
+ * See also: xge_hal_channel_dtr_term_f{}.
+ */
+void xge_hal_channel_abort(xge_hal_channel_h channelh,
+ xge_hal_channel_reopen_e reopen)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_dtr_h dtr;
+#ifdef XGE_OS_MEMORY_CHECK
+ int check_cnt = 0;
+#endif
+ int free_length_sav;
+ int reserve_top_sav;
+
+ if (channel->dtr_term == NULL) {
+ return;
+ }
+
+ free_length_sav = channel->free_length;
+ while (__hal_channel_dtr_next_freelist(channelh, &dtr) == XGE_HAL_OK) {
+#ifdef XGE_OS_MEMORY_CHECK
+#ifdef XGE_DEBUG_ASSERT
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
+ } else {
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+			xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtr)->allocated);
+ }
+ }
+#endif
+ check_cnt++;
+#endif
+ channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_FREED,
+ channel->userdata, reopen);
+ }
+ channel->free_length = free_length_sav;
+
+ while (__hal_channel_dtr_next_not_completed(channelh, &dtr) ==
+ XGE_HAL_OK) {
+#ifdef XGE_OS_MEMORY_CHECK
+#ifdef XGE_DEBUG_ASSERT
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ xge_assert(__hal_fifo_txdl_priv(dtr)->allocated);
+ } else {
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+			xge_assert(__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtr)
+				   ->allocated);
+ }
+ }
+#endif
+ check_cnt++;
+#endif
+ channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED,
+ channel->userdata, reopen);
+	}
+
+ reserve_top_sav = channel->reserve_top;
+ while (__hal_channel_dtr_next_reservelist(channelh, &dtr) ==
+ XGE_HAL_OK) {
+#ifdef XGE_OS_MEMORY_CHECK
+#ifdef XGE_DEBUG_ASSERT
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
+ } else {
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+			xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtr)->allocated);
+ }
+ }
+#endif
+ check_cnt++;
+#endif
+ channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_AVAIL,
+ channel->userdata, reopen);
+ }
+ channel->reserve_top = reserve_top_sav;
+
+ xge_assert(channel->reserve_length ==
+ (channel->free_length + channel->reserve_top));
+
+#ifdef XGE_OS_MEMORY_CHECK
+ xge_assert(check_cnt == channel->reserve_initial);
+#endif
+}
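+
+/*
+ * Callback sketch (hypothetical): a minimal descriptor-terminate callback
+ * matching the (channelh, dtrh, state, userdata, reopen) invocations
+ * above. The my_free_buffer() helper and the xge_hal_dtr_state_e type
+ * name are assumptions for illustration.
+ *
+ *	static void
+ *	my_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ *	    xge_hal_dtr_state_e state, void *userdata,
+ *	    xge_hal_channel_reopen_e reopen)
+ *	{
+ *		if (state == XGE_HAL_DTR_STATE_POSTED)
+ *			my_free_buffer(dtrh, userdata);
+ *	}
+ */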
+
+/**
+ * xge_hal_channel_close - Close communication channel.
+ * @channelh: The channel handle.
+ * @reopen: See xge_hal_channel_reopen_e{}.
+ *
+ * Close the previously opened channel and deallocate the associated
+ * resources. The channel must be open, otherwise an assert is generated.
+ * Note that the free_channels list is not protected, i.e. the caller must
+ * provide a safe context.
+ */
+void xge_hal_channel_close(xge_hal_channel_h channelh,
+ xge_hal_channel_reopen_e reopen)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev;
+ xge_list_t *item;
+#ifdef XGEHAL_RNIC
+ u32 vp_id;
+#endif
+ xge_assert(channel);
+ xge_assert(channel->type < XGE_HAL_CHANNEL_TYPE_MAX);
+
+ hldev = (xge_hal_device_t *)channel->devh;
+ channel->is_open = 0;
+ channel->magic = XGE_HAL_DEAD;
+
+#ifdef XGEHAL_RNIC
+ vp_id = channel->vp_id;
+
+ if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
+ /* sanity check: make sure channel is not in free list */
+ xge_list_for_each(item, &hldev->free_channels) {
+ xge_hal_channel_t *tmp;
+
+ tmp = xge_container_of(item, xge_hal_channel_t, item);
+ xge_assert(!tmp->is_open);
+ if (channel == tmp) {
+ return;
+ }
+ }
+#ifdef XGEHAL_RNIC
+ }
+#endif
+
+ xge_hal_channel_abort(channel, reopen);
+
+#ifndef XGEHAL_RNIC
+ xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING));
+#endif
+
+ if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
+ /* de-allocate */
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ __hal_fifo_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ __hal_ring_close(channelh);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ __hal_sq_close(channelh);
+ hldev->virtual_paths[vp_id].stats.no_sqs--;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ __hal_srq_close(channelh);
+ hldev->virtual_paths[vp_id].stats.no_srqs--;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ __hal_cqrq_close(channelh);
+ hldev->virtual_paths[vp_id].stats.no_cqrqs--;
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ __hal_umq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ __hal_dmq_close(channelh);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
+ channel->type == XGE_HAL_CHANNEL_TYPE_RING);
+ break;
+#endif
+ default:
+ break;
+ }
+	} else {
+		xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
+	}
+
+ /* move channel back to free state list */
+ xge_list_remove(&channel->item);
+#ifdef XGEHAL_RNIC
+ if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
+ xge_list_insert(&channel->item, &hldev->free_channels);
+
+ if (xge_list_is_empty(&hldev->fifo_channels) &&
+ xge_list_is_empty(&hldev->ring_channels)) {
+ /* clear msix_idx in case of following HW reset */
+ hldev->reset_needed_after_close = 1;
+ }
+#ifdef XGEHAL_RNIC
+	} else {
+ __hal_channel_free(channel);
+ }
+#endif
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-config.c b/sys/dev/nxge/xgehal/xgehal-config.c
new file mode 100644
index 0000000..45a82e9
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-config.c
@@ -0,0 +1,761 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-config.c
+ *
+ * Description: configuration functionality
+ *
+ * Created: 14 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-config.h>
+#include <dev/nxge/include/xge-debug.h>
+
+/*
+ * __hal_tti_config_check - Check tti configuration
+ * @new_config: tti configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_tti_config_check (xge_hal_tti_config_t *new_config)
+{
+ if ((new_config->urange_a < XGE_HAL_MIN_TX_URANGE_A) ||
+ (new_config->urange_a > XGE_HAL_MAX_TX_URANGE_A)) {
+ return XGE_HAL_BADCFG_TX_URANGE_A;
+ }
+
+ if ((new_config->ufc_a < XGE_HAL_MIN_TX_UFC_A) ||
+ (new_config->ufc_a > XGE_HAL_MAX_TX_UFC_A)) {
+ return XGE_HAL_BADCFG_TX_UFC_A;
+ }
+
+ if ((new_config->urange_b < XGE_HAL_MIN_TX_URANGE_B) ||
+ (new_config->urange_b > XGE_HAL_MAX_TX_URANGE_B)) {
+ return XGE_HAL_BADCFG_TX_URANGE_B;
+ }
+
+ if ((new_config->ufc_b < XGE_HAL_MIN_TX_UFC_B) ||
+ (new_config->ufc_b > XGE_HAL_MAX_TX_UFC_B)) {
+ return XGE_HAL_BADCFG_TX_UFC_B;
+ }
+
+ if ((new_config->urange_c < XGE_HAL_MIN_TX_URANGE_C) ||
+ (new_config->urange_c > XGE_HAL_MAX_TX_URANGE_C)) {
+ return XGE_HAL_BADCFG_TX_URANGE_C;
+ }
+
+ if ((new_config->ufc_c < XGE_HAL_MIN_TX_UFC_C) ||
+ (new_config->ufc_c > XGE_HAL_MAX_TX_UFC_C)) {
+ return XGE_HAL_BADCFG_TX_UFC_C;
+ }
+
+ if ((new_config->ufc_d < XGE_HAL_MIN_TX_UFC_D) ||
+ (new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) {
+ return XGE_HAL_BADCFG_TX_UFC_D;
+ }
+
+ if ((new_config->timer_val_us < XGE_HAL_MIN_TX_TIMER_VAL) ||
+ (new_config->timer_val_us > XGE_HAL_MAX_TX_TIMER_VAL)) {
+ return XGE_HAL_BADCFG_TX_TIMER_VAL;
+ }
+
+ if ((new_config->timer_ci_en < XGE_HAL_MIN_TX_TIMER_CI_EN) ||
+ (new_config->timer_ci_en > XGE_HAL_MAX_TX_TIMER_CI_EN)) {
+ return XGE_HAL_BADCFG_TX_TIMER_CI_EN;
+ }
+
+ if ((new_config->timer_ac_en < XGE_HAL_MIN_TX_TIMER_AC_EN) ||
+ (new_config->timer_ac_en > XGE_HAL_MAX_TX_TIMER_AC_EN)) {
+ return XGE_HAL_BADCFG_TX_TIMER_AC_EN;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_rti_config_check - Check rti configuration
+ * @new_config: rti configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_rti_config_check (xge_hal_rti_config_t *new_config)
+{
+ if ((new_config->urange_a < XGE_HAL_MIN_RX_URANGE_A) ||
+ (new_config->urange_a > XGE_HAL_MAX_RX_URANGE_A)) {
+ return XGE_HAL_BADCFG_RX_URANGE_A;
+ }
+
+ if ((new_config->ufc_a < XGE_HAL_MIN_RX_UFC_A) ||
+ (new_config->ufc_a > XGE_HAL_MAX_RX_UFC_A)) {
+ return XGE_HAL_BADCFG_RX_UFC_A;
+ }
+
+ if ((new_config->urange_b < XGE_HAL_MIN_RX_URANGE_B) ||
+ (new_config->urange_b > XGE_HAL_MAX_RX_URANGE_B)) {
+ return XGE_HAL_BADCFG_RX_URANGE_B;
+ }
+
+ if ((new_config->ufc_b < XGE_HAL_MIN_RX_UFC_B) ||
+ (new_config->ufc_b > XGE_HAL_MAX_RX_UFC_B)) {
+ return XGE_HAL_BADCFG_RX_UFC_B;
+ }
+
+ if ((new_config->urange_c < XGE_HAL_MIN_RX_URANGE_C) ||
+ (new_config->urange_c > XGE_HAL_MAX_RX_URANGE_C)) {
+ return XGE_HAL_BADCFG_RX_URANGE_C;
+ }
+
+ if ((new_config->ufc_c < XGE_HAL_MIN_RX_UFC_C) ||
+ (new_config->ufc_c > XGE_HAL_MAX_RX_UFC_C)) {
+ return XGE_HAL_BADCFG_RX_UFC_C;
+ }
+
+ if ((new_config->ufc_d < XGE_HAL_MIN_RX_UFC_D) ||
+ (new_config->ufc_d > XGE_HAL_MAX_RX_UFC_D)) {
+ return XGE_HAL_BADCFG_RX_UFC_D;
+ }
+
+ if ((new_config->timer_val_us < XGE_HAL_MIN_RX_TIMER_VAL) ||
+ (new_config->timer_val_us > XGE_HAL_MAX_RX_TIMER_VAL)) {
+ return XGE_HAL_BADCFG_RX_TIMER_VAL;
+ }
+
+ if ((new_config->timer_ac_en < XGE_HAL_MIN_RX_TIMER_AC_EN) ||
+ (new_config->timer_ac_en > XGE_HAL_MAX_RX_TIMER_AC_EN)) {
+ return XGE_HAL_BADCFG_RX_TIMER_AC_EN;
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_fifo_queue_check - Check fifo queue configuration
+ * @new_config: fifo queue configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_fifo_queue_check (xge_hal_fifo_config_t *new_config,
+ xge_hal_fifo_queue_t *new_queue)
+{
+ int i;
+
+ if ((new_queue->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
+ (new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
+ return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH;
+ }
+
+ /* FIXME: queue "grow" feature is not supported.
+ * Use "initial" queue size as the "maximum";
+ * Remove the next line when fixed. */
+ new_queue->max = new_queue->initial;
+
+ if ((new_queue->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
+ (new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
+ return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
+ }
+
+ if (new_queue->max < new_config->reserve_threshold) {
+ return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
+ }
+
+ if ((new_queue->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) ||
+ (new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) {
+ return XGE_HAL_BADCFG_FIFO_QUEUE_INTR;
+ }
+
+ if ((new_queue->intr_vector < XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR) ||
+ (new_queue->intr_vector > XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR)) {
+ return XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR;
+ }
+
+ for(i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++) {
+ /*
+ * Validate the tti configuration parameters only if
+ * the TTI feature is enabled.
+ */
+ if (new_queue->tti[i].enabled) {
+ xge_hal_status_e status;
+
+ if ((status = __hal_tti_config_check(
+ &new_queue->tti[i])) != XGE_HAL_OK) {
+ return status;
+ }
+ }
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_ring_queue_check - Check ring queue configuration
+ * @new_config: ring queue configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_ring_queue_check (xge_hal_ring_queue_t *new_config)
+{
+
+ if ((new_config->initial < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
+ (new_config->initial > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
+ return XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS;
+ }
+
+ /* FIXME: queue "grow" feature is not supported.
+ * Use "initial" queue size as the "maximum";
+ * Remove the next line when fixed. */
+ new_config->max = new_config->initial;
+
+ if ((new_config->max < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
+ (new_config->max > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
+ return XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS;
+ }
+
+ if ((new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
+ (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_3) &&
+ (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)) {
+ return XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE;
+ }
+
+ /*
+ * Herc has less DRAM; the check is done later inside
+ * device_initialize()
+ */
+ if (((new_config->dram_size_mb < XGE_HAL_MIN_RING_QUEUE_SIZE) ||
+ (new_config->dram_size_mb > XGE_HAL_MAX_RING_QUEUE_SIZE_XENA)) &&
+ new_config->dram_size_mb != XGE_HAL_DEFAULT_USE_HARDCODE)
+ return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
+
+ if ((new_config->backoff_interval_us <
+ XGE_HAL_MIN_BACKOFF_INTERVAL_US) ||
+ (new_config->backoff_interval_us >
+ XGE_HAL_MAX_BACKOFF_INTERVAL_US)) {
+ return XGE_HAL_BADCFG_BACKOFF_INTERVAL_US;
+ }
+
+ if ((new_config->max_frm_len < XGE_HAL_MIN_MAX_FRM_LEN) ||
+ (new_config->max_frm_len > XGE_HAL_MAX_MAX_FRM_LEN)) {
+ return XGE_HAL_BADCFG_MAX_FRM_LEN;
+ }
+
+ if ((new_config->priority < XGE_HAL_MIN_RING_PRIORITY) ||
+ (new_config->priority > XGE_HAL_MAX_RING_PRIORITY)) {
+ return XGE_HAL_BADCFG_RING_PRIORITY;
+ }
+
+ if ((new_config->rth_en < XGE_HAL_MIN_RING_RTH_EN) ||
+ (new_config->rth_en > XGE_HAL_MAX_RING_RTH_EN)) {
+ return XGE_HAL_BADCFG_RING_RTH_EN;
+ }
+
+ if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_MAC_EN) ||
+ (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_MAC_EN)) {
+ return XGE_HAL_BADCFG_RING_RTS_MAC_EN;
+ }
+
+ if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
+ (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
+ return XGE_HAL_BADCFG_RING_RTS_PORT_EN;
+ }
+
+ if ((new_config->intr_vector < XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR) ||
+ (new_config->intr_vector > XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR)) {
+ return XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR;
+ }
+
+ if (new_config->indicate_max_pkts <
+ XGE_HAL_MIN_RING_INDICATE_MAX_PKTS ||
+ new_config->indicate_max_pkts >
+ XGE_HAL_MAX_RING_INDICATE_MAX_PKTS) {
+ return XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS;
+ }
+
+ return __hal_rti_config_check(&new_config->rti);
+}
+
+/*
+ * __hal_mac_config_check - Check mac configuration
+ * @new_config: mac configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_mac_config_check (xge_hal_mac_config_t *new_config)
+{
+ if ((new_config->tmac_util_period < XGE_HAL_MIN_TMAC_UTIL_PERIOD) ||
+ (new_config->tmac_util_period > XGE_HAL_MAX_TMAC_UTIL_PERIOD)) {
+ return XGE_HAL_BADCFG_TMAC_UTIL_PERIOD;
+ }
+
+ if ((new_config->rmac_util_period < XGE_HAL_MIN_RMAC_UTIL_PERIOD) ||
+ (new_config->rmac_util_period > XGE_HAL_MAX_RMAC_UTIL_PERIOD)) {
+ return XGE_HAL_BADCFG_RMAC_UTIL_PERIOD;
+ }
+
+ if ((new_config->rmac_bcast_en < XGE_HAL_MIN_RMAC_BCAST_EN) ||
+ (new_config->rmac_bcast_en > XGE_HAL_MAX_RMAC_BCAST_EN)) {
+ return XGE_HAL_BADCFG_RMAC_BCAST_EN;
+ }
+
+ if ((new_config->rmac_pause_gen_en < XGE_HAL_MIN_RMAC_PAUSE_GEN_EN) ||
+	    (new_config->rmac_pause_gen_en > XGE_HAL_MAX_RMAC_PAUSE_GEN_EN)) {
+ return XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN;
+ }
+
+ if ((new_config->rmac_pause_rcv_en < XGE_HAL_MIN_RMAC_PAUSE_RCV_EN) ||
+	    (new_config->rmac_pause_rcv_en > XGE_HAL_MAX_RMAC_PAUSE_RCV_EN)) {
+ return XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN;
+ }
+
+ if ((new_config->rmac_pause_time < XGE_HAL_MIN_RMAC_HIGH_PTIME) ||
+ (new_config->rmac_pause_time > XGE_HAL_MAX_RMAC_HIGH_PTIME)) {
+ return XGE_HAL_BADCFG_RMAC_HIGH_PTIME;
+ }
+
+ if ((new_config->media < XGE_HAL_MIN_MEDIA) ||
+ (new_config->media > XGE_HAL_MAX_MEDIA)) {
+ return XGE_HAL_BADCFG_MEDIA;
+ }
+
+ if ((new_config->mc_pause_threshold_q0q3 <
+ XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3) ||
+ (new_config->mc_pause_threshold_q0q3 >
+ XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3)) {
+ return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3;
+ }
+
+ if ((new_config->mc_pause_threshold_q4q7 <
+ XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7) ||
+ (new_config->mc_pause_threshold_q4q7 >
+ XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7)) {
+ return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_fifo_config_check - Check fifo configuration
+ * @new_config: fifo configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_fifo_config_check (xge_hal_fifo_config_t *new_config)
+{
+ int i;
+ int total_fifo_length = 0;
+
+	/*
+	 * Recompute max_frags to be a multiple of 4, which means
+	 * a multiple of 128 bytes for the TxDL.
+	 */
+ new_config->max_frags = ((new_config->max_frags + 3) >> 2) << 2;
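+	/* e.g. max_frags values 5..8 all become 8: ((5 + 3) >> 2) << 2 == 8 */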
+
+ if ((new_config->max_frags < XGE_HAL_MIN_FIFO_FRAGS) ||
+ (new_config->max_frags > XGE_HAL_MAX_FIFO_FRAGS)) {
+ return XGE_HAL_BADCFG_FIFO_FRAGS;
+ }
+
+ if ((new_config->reserve_threshold <
+ XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD) ||
+ (new_config->reserve_threshold >
+ XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD)) {
+ return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
+ }
+
+ if ((new_config->memblock_size < XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE) ||
+ (new_config->memblock_size > XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE)) {
+ return XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE;
+ }
+
+ for(i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ xge_hal_status_e status;
+
+ if (!new_config->queue[i].configured)
+ continue;
+
+ if ((status = __hal_fifo_queue_check(new_config,
+ &new_config->queue[i])) != XGE_HAL_OK) {
+ return status;
+ }
+
+ total_fifo_length += new_config->queue[i].max;
+ }
+
+	if (total_fifo_length > XGE_HAL_MAX_FIFO_QUEUE_LENGTH) {
+ return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_ring_config_check - Check ring configuration
+ * @new_config: Ring configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+static xge_hal_status_e
+__hal_ring_config_check (xge_hal_ring_config_t *new_config)
+{
+ int i;
+
+ if ((new_config->memblock_size < XGE_HAL_MIN_RING_MEMBLOCK_SIZE) ||
+ (new_config->memblock_size > XGE_HAL_MAX_RING_MEMBLOCK_SIZE)) {
+ return XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE;
+ }
+
+ for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_status_e status;
+
+ if (!new_config->queue[i].configured)
+ continue;
+
+ if ((status = __hal_ring_queue_check(&new_config->queue[i]))
+ != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_device_config_check_common - Check device configuration.
+ * @new_config: Device configuration information
+ *
+ * Check part of configuration that is common to
+ * Xframe-I and Xframe-II.
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ *
+ * See also: __hal_device_config_check_xena().
+ */
+xge_hal_status_e
+__hal_device_config_check_common (xge_hal_device_config_t *new_config)
+{
+ xge_hal_status_e status;
+
+ if ((new_config->mtu < XGE_HAL_MIN_MTU) ||
+ (new_config->mtu > XGE_HAL_MAX_MTU)) {
+ return XGE_HAL_BADCFG_MAX_MTU;
+ }
+
+ if ((new_config->bimodal_interrupts < XGE_HAL_BIMODAL_INTR_MIN) ||
+ (new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) {
+ return XGE_HAL_BADCFG_BIMODAL_INTR;
+ }
+
+ if (new_config->bimodal_interrupts &&
+ ((new_config->bimodal_timer_lo_us < XGE_HAL_BIMODAL_TIMER_LO_US_MIN) ||
+ (new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) {
+ return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US;
+ }
+
+ if (new_config->bimodal_interrupts &&
+ ((new_config->bimodal_timer_hi_us < XGE_HAL_BIMODAL_TIMER_HI_US_MIN) ||
+ (new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) {
+ return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US;
+ }
+
+ if ((new_config->no_isr_events < XGE_HAL_NO_ISR_EVENTS_MIN) ||
+ (new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) {
+ return XGE_HAL_BADCFG_NO_ISR_EVENTS;
+ }
+
+ if ((new_config->isr_polling_cnt < XGE_HAL_MIN_ISR_POLLING_CNT) ||
+ (new_config->isr_polling_cnt > XGE_HAL_MAX_ISR_POLLING_CNT)) {
+ return XGE_HAL_BADCFG_ISR_POLLING_CNT;
+ }
+
+ if (new_config->latency_timer &&
+ new_config->latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
+ if ((new_config->latency_timer < XGE_HAL_MIN_LATENCY_TIMER) ||
+ (new_config->latency_timer > XGE_HAL_MAX_LATENCY_TIMER)) {
+ return XGE_HAL_BADCFG_LATENCY_TIMER;
+ }
+ }
+
+ if (new_config->max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
+ if ((new_config->max_splits_trans <
+ XGE_HAL_ONE_SPLIT_TRANSACTION) ||
+ (new_config->max_splits_trans >
+ XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION))
+ return XGE_HAL_BADCFG_MAX_SPLITS_TRANS;
+ }
+
+	if (new_config->mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
+ if ((new_config->mmrb_count < XGE_HAL_MIN_MMRB_COUNT) ||
+ (new_config->mmrb_count > XGE_HAL_MAX_MMRB_COUNT)) {
+ return XGE_HAL_BADCFG_MMRB_COUNT;
+ }
+ }
+
+ if ((new_config->shared_splits < XGE_HAL_MIN_SHARED_SPLITS) ||
+ (new_config->shared_splits > XGE_HAL_MAX_SHARED_SPLITS)) {
+ return XGE_HAL_BADCFG_SHARED_SPLITS;
+ }
+
+ if (new_config->stats_refresh_time_sec !=
+ XGE_HAL_STATS_REFRESH_DISABLE) {
+ if ((new_config->stats_refresh_time_sec <
+ XGE_HAL_MIN_STATS_REFRESH_TIME) ||
+ (new_config->stats_refresh_time_sec >
+ XGE_HAL_MAX_STATS_REFRESH_TIME)) {
+ return XGE_HAL_BADCFG_STATS_REFRESH_TIME;
+ }
+ }
+
+ if ((new_config->intr_mode != XGE_HAL_INTR_MODE_IRQLINE) &&
+ (new_config->intr_mode != XGE_HAL_INTR_MODE_MSI) &&
+ (new_config->intr_mode != XGE_HAL_INTR_MODE_MSIX)) {
+ return XGE_HAL_BADCFG_INTR_MODE;
+ }
+
+ if ((new_config->sched_timer_us < XGE_HAL_SCHED_TIMER_MIN) ||
+ (new_config->sched_timer_us > XGE_HAL_SCHED_TIMER_MAX)) {
+ return XGE_HAL_BADCFG_SCHED_TIMER_US;
+ }
+
+ if ((new_config->sched_timer_one_shot !=
+ XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE) &&
+ (new_config->sched_timer_one_shot !=
+ XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE)) {
+ return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT;
+ }
+
+	/*
+	 * Check the adaptive schema parameters. Note that two
+	 * configuration variables need to be enabled in the ULD:
+	 *
+	 * a) sched_timer_us should not be zero;
+	 * b) rxufca_hi_lim should not be equal to rxufca_lo_lim.
+	 *
+	 * The code below checks for those conditions.
+	 */
+ if (new_config->sched_timer_us &&
+ new_config->rxufca_hi_lim != new_config->rxufca_lo_lim) {
+ if ((new_config->rxufca_intr_thres <
+ XGE_HAL_RXUFCA_INTR_THRES_MIN) ||
+ (new_config->rxufca_intr_thres >
+ XGE_HAL_RXUFCA_INTR_THRES_MAX)) {
+ return XGE_HAL_BADCFG_RXUFCA_INTR_THRES;
+ }
+
+ if ((new_config->rxufca_hi_lim < XGE_HAL_RXUFCA_HI_LIM_MIN) ||
+ (new_config->rxufca_hi_lim > XGE_HAL_RXUFCA_HI_LIM_MAX)) {
+ return XGE_HAL_BADCFG_RXUFCA_HI_LIM;
+ }
+
+ if ((new_config->rxufca_lo_lim < XGE_HAL_RXUFCA_LO_LIM_MIN) ||
+ (new_config->rxufca_lo_lim > XGE_HAL_RXUFCA_LO_LIM_MAX) ||
+ (new_config->rxufca_lo_lim > new_config->rxufca_hi_lim)) {
+ return XGE_HAL_BADCFG_RXUFCA_LO_LIM;
+ }
+
+ if ((new_config->rxufca_lbolt_period <
+ XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN) ||
+ (new_config->rxufca_lbolt_period >
+ XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX)) {
+ return XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD;
+ }
+ }
+
+ if ((new_config->link_valid_cnt < XGE_HAL_LINK_VALID_CNT_MIN) ||
+ (new_config->link_valid_cnt > XGE_HAL_LINK_VALID_CNT_MAX)) {
+ return XGE_HAL_BADCFG_LINK_VALID_CNT;
+ }
+
+ if ((new_config->link_retry_cnt < XGE_HAL_LINK_RETRY_CNT_MIN) ||
+ (new_config->link_retry_cnt > XGE_HAL_LINK_RETRY_CNT_MAX)) {
+ return XGE_HAL_BADCFG_LINK_RETRY_CNT;
+ }
+
+ if (new_config->link_valid_cnt > new_config->link_retry_cnt)
+ return XGE_HAL_BADCFG_LINK_VALID_CNT;
+
+ if (new_config->link_stability_period != XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->link_stability_period <
+ XGE_HAL_MIN_LINK_STABILITY_PERIOD) ||
+ (new_config->link_stability_period >
+ XGE_HAL_MAX_LINK_STABILITY_PERIOD)) {
+ return XGE_HAL_BADCFG_LINK_STABILITY_PERIOD;
+ }
+ }
+
+ if (new_config->device_poll_millis !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->device_poll_millis <
+ XGE_HAL_MIN_DEVICE_POLL_MILLIS) ||
+ (new_config->device_poll_millis >
+ XGE_HAL_MAX_DEVICE_POLL_MILLIS)) {
+ return XGE_HAL_BADCFG_DEVICE_POLL_MILLIS;
+ }
+ }
+
+ if ((new_config->rts_port_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
+ (new_config->rts_port_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
+ return XGE_HAL_BADCFG_RTS_PORT_EN;
+ }
+
+ if ((new_config->rts_qos_en < XGE_HAL_RTS_QOS_DISABLE) ||
+ (new_config->rts_qos_en > XGE_HAL_RTS_QOS_ENABLE)) {
+ return XGE_HAL_BADCFG_RTS_QOS_EN;
+ }
+
+#if defined(XGE_HAL_CONFIG_LRO)
+ if (new_config->lro_sg_size !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) ||
+ (new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) {
+ return XGE_HAL_BADCFG_LRO_SG_SIZE;
+ }
+ }
+
+ if (new_config->lro_frm_len !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) ||
+ (new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) {
+ return XGE_HAL_BADCFG_LRO_FRM_LEN;
+ }
+ }
+#endif
+
+ if ((status = __hal_ring_config_check(&new_config->ring))
+ != XGE_HAL_OK) {
+ return status;
+ }
+
+ if ((status = __hal_mac_config_check(&new_config->mac)) !=
+ XGE_HAL_OK) {
+ return status;
+ }
+
+ if ((status = __hal_fifo_config_check(&new_config->fifo)) !=
+ XGE_HAL_OK) {
+ return status;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_config_check_xena - Check Xframe-I configuration
+ * @new_config: Device configuration.
+ *
+ * Check part of configuration that is relevant only to Xframe-I.
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ *
+ * See also: __hal_device_config_check_common().
+ */
+xge_hal_status_e
+__hal_device_config_check_xena (xge_hal_device_config_t *new_config)
+{
+ if ((new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_33) &&
+ (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) &&
+ (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) &&
+ (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) &&
+ (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) &&
+ (new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) {
+ return XGE_HAL_BADCFG_PCI_FREQ_MHERZ;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_config_check_herc - Check device configuration
+ * @new_config: Device configuration.
+ *
+ * Check part of configuration that is relevant only to Xframe-II.
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ *
+ * See also: __hal_device_config_check_common().
+ */
+xge_hal_status_e
+__hal_device_config_check_herc (xge_hal_device_config_t *new_config)
+{
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_driver_config_check - Check HAL configuration
+ * @new_config: Driver configuration information
+ *
+ * Returns: XGE_HAL_OK - success,
+ * otherwise one of the xge_hal_status_e{} enumerated error codes.
+ */
+xge_hal_status_e
+__hal_driver_config_check (xge_hal_driver_config_t *new_config)
+{
+ if ((new_config->queue_size_initial <
+ XGE_HAL_MIN_QUEUE_SIZE_INITIAL) ||
+ (new_config->queue_size_initial >
+ XGE_HAL_MAX_QUEUE_SIZE_INITIAL)) {
+ return XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL;
+ }
+
+ if ((new_config->queue_size_max < XGE_HAL_MIN_QUEUE_SIZE_MAX) ||
+ (new_config->queue_size_max > XGE_HAL_MAX_QUEUE_SIZE_MAX)) {
+ return XGE_HAL_BADCFG_QUEUE_SIZE_MAX;
+ }
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ if ((new_config->tracebuf_size < XGE_HAL_MIN_CIRCULAR_ARR) ||
+ (new_config->tracebuf_size > XGE_HAL_MAX_CIRCULAR_ARR)) {
+ return XGE_HAL_BADCFG_TRACEBUF_SIZE;
+ }
+ if ((new_config->tracebuf_timestamp_en < XGE_HAL_MIN_TIMESTAMP_EN) ||
+ (new_config->tracebuf_timestamp_en > XGE_HAL_MAX_TIMESTAMP_EN)) {
+ return XGE_HAL_BADCFG_TRACEBUF_SIZE;
+ }
+#endif
+
+ return XGE_HAL_OK;
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-device-fp.c b/sys/dev/nxge/xgehal/xgehal-device-fp.c
new file mode 100644
index 0000000..5e2faf1
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-device-fp.c
@@ -0,0 +1,1432 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-device-fp.c
+ *
+ * Description: HAL device object functionality (fast path)
+ *
+ * Created: 10 June 2004
+ */
+
+#ifdef XGE_DEBUG_FP
+#include <dev/nxge/include/xgehal-device.h>
+#endif
+
+#include <dev/nxge/include/xgehal-ring.h>
+#include <dev/nxge/include/xgehal-fifo.h>
+
+/**
+ * xge_hal_device_bar0 - Get BAR0 mapped address.
+ * @hldev: HAL device handle.
+ *
+ * Returns: BAR0 address of the specified device.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_bar0(xge_hal_device_t *hldev)
+{
+ return hldev->bar0;
+}
+
+/**
+ * xge_hal_device_isrbar0 - Get ISR BAR0 mapped address.
+ * @hldev: HAL device handle.
+ *
+ * Returns: ISR BAR0 address of the specified device.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_isrbar0(xge_hal_device_t *hldev)
+{
+ return hldev->isrbar0;
+}
+
+/**
+ * xge_hal_device_bar1 - Get BAR1 mapped address.
+ * @hldev: HAL device handle.
+ *
+ * Returns: BAR1 address of the specified device.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_bar1(xge_hal_device_t *hldev)
+{
+ return hldev->bar1;
+}
+
+/**
+ * xge_hal_device_bar0_set - Set BAR0 mapped address.
+ * @hldev: HAL device handle.
+ * @bar0: BAR0 mapped address.
+ * * Set BAR0 address in the HAL device object.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
+{
+ xge_assert(bar0);
+ hldev->bar0 = bar0;
+}
+
+/**
+ * xge_hal_device_isrbar0_set - Set ISR BAR0 mapped address.
+ * @hldev: HAL device handle.
+ * @isrbar0: BAR0 mapped address.
+ *
+ * Set the ISR BAR0 address in the HAL device object.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
+{
+ xge_assert(isrbar0);
+ hldev->isrbar0 = isrbar0;
+}
+
+/**
+ * xge_hal_device_bar1_set - Set BAR1 mapped address.
+ * @hldev: HAL device handle.
+ * @channelh: Channel handle.
+ * @bar1: BAR1 mapped address.
+ *
+ * Set BAR1 address for the given channel.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
+ char *bar1)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+
+ xge_assert(bar1);
+ xge_assert(fifo);
+
+	/* Initialize the BAR1 address of this FIFO's hardware pair,
+	 * which holds the FIFO queue pointer and the FIFO control
+	 * word. */
+ fifo->hw_pair =
+ (xge_hal_fifo_hw_pair_t *) (bar1 +
+ (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
+ hldev->bar1 = bar1;
+}
+
+
+/**
+ * xge_hal_device_rev - Get Device revision number.
+ * @hldev: HAL device handle.
+ *
+ * Returns: Device revision number
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
+xge_hal_device_rev(xge_hal_device_t *hldev)
+{
+ return hldev->revision;
+}
+
+
+/**
+ * xge_hal_device_begin_irq - Begin IRQ processing.
+ * @hldev: HAL device handle.
+ * @reason: "Reason" for the interrupt, the value of Xframe's
+ * general_int_status register.
+ *
+ * The function performs two actions: it first checks whether the interrupt
+ * was raised by this device (shared IRQ case), and then masks the device
+ * interrupts.
+ *
+ * Note:
+ * xge_hal_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are potentially possible.
+ * It is the responsibility of the ULD to make sure that only one
+ * xge_hal_device_continue_irq() runs at a time.
+ *
+ * Returns: 0, if the interrupt is not "ours" (note that in this case the
+ * device remains enabled).
+ * Otherwise, xge_hal_device_begin_irq() returns the 64bit general adapter
+ * status.
+ * See also: xge_hal_device_handle_irq()
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &isrbar0->general_int_status);
+ if (xge_os_unlikely(!val64)) {
+ /* not Xframe interrupt */
+ hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++;
+ *reason = 0;
+ return XGE_HAL_ERR_WRONG_IRQ;
+ }
+
+ if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
+ u64 adapter_status =
+ xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->adapter_status);
+ if (adapter_status == XGE_HAL_ALL_FOXES) {
+ (void) xge_queue_produce(hldev->queueh,
+ XGE_HAL_EVENT_SLOT_FREEZE,
+ hldev,
+ 1, /* critical: slot freeze */
+ sizeof(u64),
+ (void*)&adapter_status);
+ *reason = 0;
+ return XGE_HAL_ERR_CRITICAL;
+ }
+ }
+
+ *reason = val64;
+
+ /* separate fast path, i.e. no errors */
+ if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
+ hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
+ return XGE_HAL_OK;
+ }
+ if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
+ hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
+ return XGE_HAL_OK;
+ }
+
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
+ status = __hal_device_handle_txpic(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
+ status = __hal_device_handle_txdma(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
+ status = __hal_device_handle_txmac(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
+ status = __hal_device_handle_txxgxs(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
+ status = __hal_device_handle_rxpic(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
+ status = __hal_device_handle_rxdma(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
+ status = __hal_device_handle_rxmac(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
+ status = __hal_device_handle_rxxgxs(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
+ xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
+ status = __hal_device_handle_mc(hldev, val64);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ return XGE_HAL_OK;
+}
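+
+/*
+ * Illustrative ISR sketch (hypothetical, ULD side): a typical fast-path
+ * handler built from the begin/clear/poll primitives in this file. The
+ * got_rx/got_tx bookkeeping is an assumption made for the example.
+ *
+ *	u64 reason;
+ *	int got_rx = 0, got_tx = 0;
+ *
+ *	if (xge_hal_device_begin_irq(hldev, &reason) != XGE_HAL_OK)
+ *		return;		(not "our" interrupt, or error queued)
+ *	if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
+ *		xge_hal_device_clear_rx(hldev);
+ *		(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
+ *	}
+ *	if (reason & XGE_HAL_GEN_INTR_TXTRAFFIC) {
+ *		xge_hal_device_clear_tx(hldev);
+ *		(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
+ *	}
+ */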
+
+/**
+ * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
+ * condition that has caused the RX interrupt.
+ * @hldev: HAL device handle.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Rx interrupt.
+ * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
+ * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_clear_rx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->rx_traffic_int);
+}
+
+/**
+ * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
+ * condition that has caused the TX interrupt.
+ * @hldev: HAL device handle.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx interrupt.
+ * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
+ * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_clear_tx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->tx_traffic_int);
+}
+
+/**
+ * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
+ * descriptors and process the same.
+ * @channel: HAL channel.
+ * @got_rx: Buffer to return the flag, set if a receive interrupt has occurred
+ *
+ * The function polls the Rx channel for completed descriptors and calls
+ * the upper-layer driver (ULD) via the supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_tx_channel()
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
+{
+ xge_hal_status_e ret = XGE_HAL_OK;
+ xge_hal_dtr_h first_dtrh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
+ u8 t_code;
+ int got_bytes;
+
+ /* for each opened rx channel */
+ got_bytes = *got_rx = 0;
+ ((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
+ channel->poll_bytes = 0;
+ if ((ret = xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
+ &t_code)) == XGE_HAL_OK) {
+ if (channel->callback(channel, first_dtrh,
+ t_code, channel->userdata) != XGE_HAL_OK) {
+ (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
+ got_bytes += channel->poll_bytes + 1;
+ ret = XGE_HAL_COMPLETIONS_REMAIN;
+ } else {
+ (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
+ got_bytes += channel->poll_bytes + 1;
+ }
+ }
+
+ if (*got_rx) {
+ hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
+		hldev->irq_workload_rxcnt[channel->post_qid]++;
+ }
+ hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;
+
+ return ret;
+}
+
+/**
+ * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
+ * descriptors and process the same.
+ * @channel: HAL channel.
+ * @got_tx: Buffer to return the flag, set if a transmit interrupt has occurred
+ *
+ * The function polls the Tx channel for completed descriptors and calls
+ * the upper-layer driver (ULD) via the supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_rx_channel().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
+{
+ xge_hal_dtr_h first_dtrh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
+ u8 t_code;
+ int got_bytes;
+
+ /* poll the given tx channel */
+ got_bytes = *got_tx = 0;
+ channel->poll_bytes = 0;
+ if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
+ &t_code) == XGE_HAL_OK) {
+ if (channel->callback(channel, first_dtrh,
+ t_code, channel->userdata) != XGE_HAL_OK) {
+ (*got_tx)++;
+ got_bytes += channel->poll_bytes + 1;
+ return XGE_HAL_COMPLETIONS_REMAIN;
+ }
+ (*got_tx)++;
+ got_bytes += channel->poll_bytes + 1;
+ }
+
+ if (*got_tx) {
+ hldev->irq_workload_txd[channel->post_qid] += *got_tx;
+ hldev->irq_workload_txcnt[channel->post_qid] ++;
+ }
+ hldev->irq_workload_txlen[channel->post_qid] += got_bytes;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
+ * descriptors and process the same.
+ * @hldev: HAL device handle.
+ * @got_rx: Buffer to return flag set if receive is ready
+ *
+ * The function polls the Rx channels for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
+{
+ xge_list_t *item;
+ xge_hal_channel_t *channel;
+
+ /* for each opened rx channel */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ if (hldev->terminating)
+ return XGE_HAL_OK;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ (void) xge_hal_device_poll_rx_channel(channel, got_rx);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_poll_tx_channels - Poll Tx channels for completed
+ * descriptors and process the same.
+ * @hldev: HAL device handle.
+ * @got_tx: Buffer to return flag set if transmit is ready
+ *
+ * The function polls the Tx channels for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
+{
+ xge_list_t *item;
+ xge_hal_channel_t *channel;
+
+ /* for each opened tx channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ if (hldev->terminating)
+ return XGE_HAL_OK;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ (void) xge_hal_device_poll_tx_channel(channel, got_tx);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_mask_tx - Mask Tx interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Mask Tx device interrupts.
+ *
+ * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
+ * xge_hal_device_clear_tx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_tx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->tx_traffic_mask);
+}
+
+/**
+ * xge_hal_device_mask_rx - Mask Rx interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Mask Rx device interrupts.
+ *
+ * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
+ * xge_hal_device_clear_rx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_rx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->rx_traffic_mask);
+}
+
+/**
+ * xge_hal_device_mask_all - Mask all device interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Mask all device interrupts.
+ *
+ * See also: xge_hal_device_unmask_all()
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_all(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->general_int_mask);
+}
+
+/**
+ * xge_hal_device_unmask_tx - Unmask Tx interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Unmask Tx device interrupts.
+ *
+ * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->tx_traffic_mask);
+}
+
+/**
+ * xge_hal_device_unmask_rx - Unmask Rx interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Unmask Rx device interrupts.
+ *
+ * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->rx_traffic_mask);
+}
+
+/**
+ * xge_hal_device_unmask_all - Unmask all device interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Unmask all device interrupts.
+ *
+ * See also: xge_hal_device_mask_all()
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_unmask_all(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->general_int_mask);
+}
+
+
+/**
+ * xge_hal_device_continue_irq - Continue handling IRQ: process all
+ * completed descriptors.
+ * @hldev: HAL device handle.
+ *
+ * Process completed descriptors and unmask the device interrupts.
+ *
+ * The xge_hal_device_continue_irq() walks all open channels
+ * and calls upper-layer driver (ULD) via supplied completion
+ * callback. Note that the completion callback is specified at channel open
+ * time, see xge_hal_channel_open().
+ *
+ * Note that xge_hal_device_continue_irq() is part of the _fast_ path.
+ * To optimize the processing, the function does _not_ check for
+ * errors and alarms.
+ *
+ * The latter is done in a polling fashion, via xge_hal_device_poll().
+ *
+ * Returns: XGE_HAL_OK.
+ *
+ * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
+ * xge_hal_ring_dtr_next_completed(),
+ * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_continue_irq(xge_hal_device_t *hldev)
+{
+ int got_rx = 1, got_tx = 1;
+ int isr_polling_cnt = hldev->config.isr_polling_cnt;
+ int count = 0;
+
+ do
+ {
+ if (got_rx)
+ (void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
+ if (got_tx && hldev->tti_enabled)
+ (void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
+
+ if (!got_rx && !got_tx)
+ break;
+
+ count += (got_rx + got_tx);
+ } while (isr_polling_cnt--);
+
+ if (!count)
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_handle_irq - Handle device IRQ.
+ * @hldev: HAL device handle.
+ *
+ * Perform the complete handling of the line interrupt. The function
+ * performs two calls.
+ * First it uses xge_hal_device_begin_irq() to check the reason for
+ * the interrupt and mask the device interrupts.
+ * Second, it calls xge_hal_device_continue_irq() to process all
+ * completed descriptors and re-enable the interrupts.
+ *
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device.
+ *
+ * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_handle_irq(xge_hal_device_t *hldev)
+{
+ u64 reason;
+ xge_hal_status_e status;
+
+ xge_hal_device_mask_all(hldev);
+
+ status = xge_hal_device_begin_irq(hldev, &reason);
+ if (status != XGE_HAL_OK) {
+ xge_hal_device_unmask_all(hldev);
+ return status;
+ }
+
+ if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
+ xge_hal_device_clear_rx(hldev);
+ }
+
+ status = xge_hal_device_continue_irq(hldev);
+
+ xge_hal_device_clear_tx(hldev);
+
+ xge_hal_device_unmask_all(hldev);
+
+ return status;
+}
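+
+/*
+ * A minimal line-interrupt handler sketch (illustrative only; the ISR
+ * registration, locking, and the ULD entry point name are assumptions
+ * and OS-specific):
+ *
+ *	void
+ *	xge_uld_isr(void *arg)
+ *	{
+ *		xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
+ *
+ *		if (xge_hal_device_handle_irq(hldev) ==
+ *		    XGE_HAL_ERR_WRONG_IRQ)
+ *			return;
+ *	}
+ *
+ * A XGE_HAL_ERR_WRONG_IRQ return means the (shared) IRQ was raised by
+ * another device and can be ignored.
+ */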
+
+#if defined(XGE_HAL_CONFIG_LRO)
+
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
+{
+
+ /* Match Source address field */
+ if ((lro->ip_hdr->saddr != ip->saddr))
+ return XGE_HAL_FAIL;
+
+ /* Match Destination address field */
+ if ((lro->ip_hdr->daddr != ip->daddr))
+ return XGE_HAL_FAIL;
+
+ /* Match Source Port field */
+ if ((lro->tcp_hdr->source != tcp->source))
+ return XGE_HAL_FAIL;
+
+ /* Match Destination Port field */
+ if ((lro->tcp_hdr->dest != tcp->dest))
+ return XGE_HAL_FAIL;
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_tcp_seg_len: Compute the TCP segment (payload) length.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * returns: TCP segment length.
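+ *
+ * A worked example (values assumed for illustration): tot_len 1500
+ * with a 20-byte IP header (version_ihl 0x45, IHL 5 words) and a
+ * 20-byte TCP header (data offset 5 words) gives
+ * 1500 - 20 - 20 = 1460 bytes of payload.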
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
+__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
+{
+ u16 ret;
+
+ ret = (xge_os_ntohs(ip->tot_len) -
+ ((ip->version_ihl & 0x0F)<<2) -
+ ((tcp->doff_res)>>2));
+ return (ret);
+}
+
+/*
+ * __hal_ip_lro_capable: Finds whether ip is lro capable.
+ * @ip: ip header.
+ * @ext_info: descriptor info.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_ip_lro_capable(iplro_t *ip,
+ xge_hal_dtr_info_t *ext_info)
+{
+
+#ifdef XGE_LL_DEBUG_DUMP_PKT
+ {
+ u16 i;
+ u8 ch, *iph = (u8 *)ip;
+
+ xge_debug_ring(XGE_TRACE, "Dump Ip:" );
+ for (i =0; i < 40; i++) {
+ ch = ntohs(*((u8 *)(iph + i)) );
+ printf("i:%d %02x, ",i,ch);
+ }
+ }
+#endif
+
+ if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
+ xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl);
+ return XGE_HAL_FAIL;
+ }
+
+ if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
+ xge_debug_ring(XGE_ERR, "IP fragmented");
+ return XGE_HAL_FAIL;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_tcp_lro_capable: Finds whether tcp is lro capable.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
+{
+#ifdef XGE_LL_DEBUG_DUMP_PKT
+ {
+ u8 ch;
+ u16 i;
+
+ xge_debug_ring(XGE_TRACE, "Dump Tcp:" );
+ for (i =0; i < 20; i++) {
+ ch = ntohs(*((u8 *)((u8 *)tcp + i)) );
+ xge_os_printf("i:%d %02x, ",i,ch);
+ }
+ }
+#endif
+ if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
+ (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
+ goto _exit_fail;
+
+ *ts_off = -1;
+ if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
+ u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
+ u16 off = 20; /* Start of tcp options */
+ int i, diff;
+
+ /* Can the packet contain a timestamp? */
+ if (tcp_hdr_len < 32) {
+ /*
+ * If the session is not opened, we can consider
+ * this packet for LRO
+ */
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ goto _exit_fail;
+ }
+
+ /* Ignore No-operation 0x1 */
+ while (((u8 *)tcp)[off] == 0x1)
+ off++;
+
+ /* Next option == Timestamp */
+ if (((u8 *)tcp)[off] != 0x8) {
+ /*
+ * If the session is not opened, we can consider
+ * this packet for LRO
+ */
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ goto _exit_fail;
+ }
+
+ *ts_off = off;
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ /*
+ * Now the session is opened. If the LRO frame doesn't
+ * have time stamp, we cannot consider current packet for
+ * LRO.
+ */
+ if (lro->ts_off == -1) {
+ xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ /*
+ * If the difference is greater than three, then there are
+ * more options possible.
+ * else, there are two cases:
+ * case 1: remaining are padding bytes.
+ * case 2: remaining can contain options or padding
+ */
+ off += ((u8 *)tcp)[off+1];
+ diff = tcp_hdr_len - off;
+ if (diff > 3) {
+ /*
+ * Probably contains more options.
+ */
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ for (i = 0; i < diff; i++) {
+ u8 byte = ((u8 *)tcp)[off+i];
+
+ /* Ignore No-operation 0x1 */
+ if ((byte == 0x0) || (byte == 0x1))
+ continue;
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ /*
+ * Update the time stamp of LRO frame.
+ */
+ xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
+ (char *)((char *)tcp + (*ts_off) + 2), 8);
+ }
+
+ return XGE_HAL_OK;
+
+_exit_fail:
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+
+}
+
+/*
+ * __hal_lro_capable: Finds whether frame is lro capable.
+ * @buffer: Ethernet frame.
+ * @ip: ip frame.
+ * @tcp: tcp frame.
+ * @ext_info: Descriptor info.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_lro_capable( u8 *buffer,
+ iplro_t **ip,
+ tcplro_t **tcp,
+ xge_hal_dtr_info_t *ext_info)
+{
+ u8 ip_off, ip_length;
+
+ if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
+ xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto);
+ return XGE_HAL_FAIL;
+ }
+
+ if ( !*ip )
+ {
+#ifdef XGE_LL_DEBUG_DUMP_PKT
+ {
+ u8 ch;
+ u16 i;
+
+ xge_os_printf("Dump Eth:" );
+ for (i =0; i < 60; i++) {
+ ch = ntohs(*((u8 *)(buffer + i)) );
+ xge_os_printf("i:%d %02x, ",i,ch);
+ }
+ }
+#endif
+
+ switch (ext_info->frame) {
+ case XGE_HAL_FRAME_TYPE_DIX:
+ ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
+ break;
+ case XGE_HAL_FRAME_TYPE_LLC:
+ ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
+ XGE_HAL_HEADER_802_2_SIZE);
+ break;
+ case XGE_HAL_FRAME_TYPE_SNAP:
+ ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
+ XGE_HAL_HEADER_SNAP_SIZE);
+ break;
+ default: /* XGE_HAL_FRAME_TYPE_IPX, etc. */
+ return XGE_HAL_FAIL;
+ }
+
+
+ if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
+ ip_off += XGE_HAL_HEADER_VLAN_SIZE;
+ }
+
+ /* Grab ip, tcp headers */
+ *ip = (iplro_t *)((char*)buffer + ip_off);
+ } /* !*ip */
+
+ ip_length = (u8)((*ip)->version_ihl & 0x0F);
+ ip_length = ip_length << 2;
+ *tcp = (tcplro_t *)((char *)*ip + ip_length);
+
+ xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
+ " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
+ (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp);
+
+ return XGE_HAL_OK;
+
+}
+
+
+/*
+ * __hal_open_lro_session: Open a new LRO session.
+ * @buffer: Ethernet frame.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @ext_info: Descriptor info.
+ * @hldev: Hal context.
+ * @ring_lro: LRO descriptor per rx ring.
+ * @slot: Bucket no.
+ * @tcp_seg_len: Length of tcp segment.
+ * @ts_off: time stamp offset in the packet.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, int slot,
+ u32 tcp_seg_len, int ts_off)
+{
+
+ lro_t *lro_new = &ring_lro->lro_pool[slot];
+
+ lro_new->in_use = 1;
+ lro_new->ll_hdr = buffer;
+ lro_new->ip_hdr = ip;
+ lro_new->tcp_hdr = tcp;
+ lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl(
+ tcp->seq);
+ lro_new->tcp_seq_num = tcp->seq;
+ lro_new->tcp_ack_num = tcp->ack_seq;
+ lro_new->sg_num = 1;
+ lro_new->total_length = xge_os_ntohs(ip->tot_len);
+ lro_new->frags_len = 0;
+ lro_new->ts_off = ts_off;
+
+ hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
+ hldev->stats.sw_dev_info_stats.tot_lro_sessions++;
+
+ *lro = ring_lro->lro_recent = lro_new;
+ return;
+}
+/*
+ * __hal_lro_get_free_slot: Get a free LRO bucket.
+ * @ring_lro: LRO descriptor per ring.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro)
+{
+ int i;
+
+ for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro_temp = &ring_lro->lro_pool[i];
+
+ if (!lro_temp->in_use)
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * __hal_get_lro_session: Gets matching LRO session or creates one.
+ * @eth_hdr: Ethernet header.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @ext_info: Descriptor info.
+ * @hldev: Hal context.
+ * @ring_lro: LRO descriptor per rx ring
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_get_lro_session (u8 *eth_hdr,
+ iplro_t *ip,
+ tcplro_t *tcp,
+ lro_t **lro,
+ xge_hal_dtr_info_t *ext_info,
+ xge_hal_device_t *hldev,
+ xge_hal_lro_desc_t *ring_lro,
+ lro_t **lro_end3 /* Valid only when ret=END_3 */)
+{
+ lro_t *lro_match;
+ int i, free_slot = -1;
+ u32 tcp_seg_len;
+ int ts_off = -1;
+
+ *lro = lro_match = NULL;
+ /*
+ * Compare the incoming frame with the lro session left from the
+ * previous call. There is a good chance that this incoming frame
+ * matches the lro session.
+ */
+ if (ring_lro->lro_recent && ring_lro->lro_recent->in_use) {
+ if (__hal_lro_check_for_session_match(ring_lro->lro_recent,
+ tcp, ip)
+ == XGE_HAL_OK)
+ lro_match = ring_lro->lro_recent;
+ }
+
+ if (!lro_match) {
+ /*
+ * Search in the pool of LROs for the session that matches
+ * the incoming frame.
+ */
+ for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro_temp = &ring_lro->lro_pool[i];
+
+ if (!lro_temp->in_use) {
+ if (free_slot == -1)
+ free_slot = i;
+ continue;
+ }
+
+ if (__hal_lro_check_for_session_match(lro_temp, tcp,
+ ip) == XGE_HAL_OK) {
+ lro_match = lro_temp;
+ break;
+ }
+ }
+ }
+
+
+ if (lro_match) {
+ /*
+ * Matching LRO Session found
+ */
+ *lro = lro_match;
+
+ if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
+ xge_debug_ring(XGE_ERR, "**retransmit **"
+ "found***");
+ hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
+ return XGE_HAL_INF_LRO_END_2;
+ }
+
+ if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info))
+ {
+ return XGE_HAL_INF_LRO_END_2;
+ }
+
+ if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
+ &ts_off)) {
+ /*
+ * Close the current session and open a new
+ * LRO session with this packet,
+ * provided it has tcp payload
+ */
+ tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
+ if (tcp_seg_len == 0)
+ {
+ return XGE_HAL_INF_LRO_END_2;
+ }
+
+ /* Get a free bucket */
+ free_slot = __hal_lro_get_free_slot(ring_lro);
+ if (free_slot == -1)
+ {
+ return XGE_HAL_INF_LRO_END_2;
+ }
+
+ /*
+ * Open a new LRO session
+ */
+ __hal_open_lro_session (eth_hdr, ip, tcp, lro_end3,
+ hldev, ring_lro, free_slot, tcp_seg_len,
+ ts_off);
+
+ return XGE_HAL_INF_LRO_END_3;
+ }
+
+ /*
+ * The frame is good, in-sequence, can be LRO-ed;
+ * take its (latest) ACK - unless it is a dupack.
+ * Note: to be exact need to check window size as well..
+ */
+ if (lro_match->tcp_ack_num == tcp->ack_seq &&
+ lro_match->tcp_seq_num == tcp->seq) {
+ hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
+ return XGE_HAL_INF_LRO_END_2;
+ }
+
+ lro_match->tcp_seq_num = tcp->seq;
+ lro_match->tcp_ack_num = tcp->ack_seq;
+ lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);
+
+ ring_lro->lro_recent = lro_match;
+
+ return XGE_HAL_INF_LRO_CONT;
+ }
+
+ /* ********** New Session ***************/
+ if (free_slot == -1)
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ xge_debug_ring(XGE_TRACE, "Creating lro session.");
+
+ /*
+ * Open a LRO session, provided the packet contains payload.
+ */
+ tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
+ if (tcp_seg_len == 0)
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ __hal_open_lro_session (eth_hdr, ip, tcp, lro, hldev, ring_lro, free_slot,
+ tcp_seg_len, ts_off);
+
+ return XGE_HAL_INF_LRO_BEGIN;
+}
+
+/*
+ * __hal_lro_under_optimal_thresh: Finds whether combined session is optimal.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @hldev: Hal context.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_lro_under_optimal_thresh (iplro_t *ip,
+ tcplro_t *tcp,
+ lro_t *lro,
+ xge_hal_device_t *hldev)
+{
+ if (!lro) return XGE_HAL_FAIL;
+
+ if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) >
+ hldev->config.lro_frm_len) {
+ xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
+ "max length %d ", hldev->config.lro_frm_len);
+ hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
+ return XGE_HAL_FAIL;
+ }
+
+ if (lro->sg_num == hldev->config.lro_sg_size) {
+ xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
+ "max sg %d ", hldev->config.lro_sg_size);
+ hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
+ return XGE_HAL_FAIL;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_collapse_ip_hdr: Collapses ip header.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @hldev: Hal context.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_collapse_ip_hdr ( iplro_t *ip,
+ tcplro_t *tcp,
+ lro_t *lro,
+ xge_hal_device_t *hldev)
+{
+
+ lro->total_length += __hal_tcp_seg_len(ip, tcp);
+
+ /* Maybe we have to handle timestamps or more options */
+
+ return XGE_HAL_OK;
+
+}
+
+/*
+ * __hal_collapse_tcp_hdr: Collapses tcp header.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @hldev: Hal context.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_collapse_tcp_hdr ( iplro_t *ip,
+ tcplro_t *tcp,
+ lro_t *lro,
+ xge_hal_device_t *hldev)
+{
+ lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
+ return XGE_HAL_OK;
+
+}
+
+/*
+ * __hal_append_lro: Appends new frame to existing LRO session.
+ * @ip: ip header.
+ * @tcp: IN tcp header, OUT tcp payload.
+ * @seg_len: tcp payload length.
+ * @lro: lro pointer
+ * @hldev: Hal context.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+__hal_append_lro(iplro_t *ip,
+ tcplro_t **tcp,
+ u32 *seg_len,
+ lro_t *lro,
+ xge_hal_device_t *hldev)
+{
+ (void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
+ (void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
+ /*
+ * Updating the mbuf chain is done in the LL driver. On successfully
+ * appending a new frame to an LRO session, xge_hal_accumulate_large_rx
+ * returns the TCP payload pointer and the TCP payload length to the
+ * LL driver, along with the "LRO frame appended" return code.
+ */
+
+ lro->sg_num++;
+ *seg_len = __hal_tcp_seg_len(ip, *tcp);
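+ /* Advance *tcp past the TCP header so that it points at the payload */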
+ *tcp = (tcplro_t *)((char *)*tcp + (((*tcp)->doff_res)>>2));
+
+ return XGE_HAL_OK;
+
+}
+
+/**
+ * xge_hal_lro_process_rx - LRO a given frame.
+ * @ring: rx ring number
+ * @eth_hdr: ethernet header.
+ * @ip_hdr: ip header (optional)
+ * @tcp: tcp header.
+ * @seglen: returned TCP payload length.
+ * @p_lro: lro pointer.
+ * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
+ * @hldev: HAL device.
+ * @lro_end3: for lro_end3 output
+ *
+ * LRO the newly received frame, i.e. attach it (if possible) to the
+ * already accumulated (i.e., already LRO-ed) received frames (if any),
+ * to form one super-sized frame for the subsequent processing
+ * by the stack.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
+ u32 *seglen, lro_t **p_lro,
+ xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+ lro_t **lro_end3)
+{
+ iplro_t *ip = (iplro_t *)ip_hdr;
+ xge_hal_status_e ret;
+ lro_t *lro;
+
+ xge_debug_ring(XGE_TRACE, "Entered accumu lro. ");
+ if (XGE_HAL_OK != __hal_lro_capable(eth_hdr, &ip, (tcplro_t **)tcp,
+ ext_info))
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ /*
+ * This function shall get matching LRO or else
+ * create one and return it
+ */
+ ret = __hal_get_lro_session(eth_hdr, ip, (tcplro_t *)*tcp,
+ p_lro, ext_info, hldev, &hldev->lro_desc[ring],
+ lro_end3);
+ xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ",ret);
+ lro = *p_lro;
+ if (XGE_HAL_INF_LRO_CONT == ret) {
+ if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
+ (tcplro_t *)*tcp, lro, hldev)) {
+ (void) __hal_append_lro(ip,(tcplro_t **) tcp, seglen,
+ lro, hldev);
+ hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
+
+ if (lro->sg_num >= hldev->config.lro_sg_size) {
+ hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
+ ret = XGE_HAL_INF_LRO_END_1;
+ }
+
+ } else ret = XGE_HAL_INF_LRO_END_2;
+ }
+
+ /*
+ * Since it's time to flush, update the IP header so that
+ * the frame can be sent up the stack
+ */
+ if ((ret == XGE_HAL_INF_LRO_END_1) ||
+ (ret == XGE_HAL_INF_LRO_END_2) ||
+ (ret == XGE_HAL_INF_LRO_END_3)) {
+ lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
+ lro->ip_hdr->check = xge_os_htons(0);
+ lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
+ (lro->ip_hdr->version_ihl & 0x0F));
+ lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
+ }
+
+ return (ret);
+}
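+
+/*
+ * Return-code handling in the ULD rx path (a summary; the exact
+ * buffer handling is driver-specific):
+ * - XGE_HAL_INF_LRO_BEGIN: the frame opened a new session; hold it.
+ * - XGE_HAL_INF_LRO_CONT: the frame was appended to *p_lro; hold it.
+ * - XGE_HAL_INF_LRO_END_1: the frame was appended and the session
+ *   reached its limits; flush *p_lro up the stack.
+ * - XGE_HAL_INF_LRO_END_2: the frame could not be appended; flush
+ *   *p_lro and hand the current frame up separately.
+ * - XGE_HAL_INF_LRO_END_3: flush *p_lro; *lro_end3 is a new session
+ *   opened with the current frame.
+ * - XGE_HAL_INF_LRO_UNCAPABLE: LRO does not apply; pass the frame up
+ *   unmodified.
+ */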
+
+/**
+ * xge_hal_accumulate_large_rx - LRO a given frame.
+ * @buffer: Ethernet frame.
+ * @tcp: tcp header.
+ * @seglen: returned TCP payload length.
+ * @p_lro: lro pointer.
+ * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
+ * @hldev: HAL device.
+ * @lro_end3: for lro_end3 output
+ *
+ * LRO the newly received frame, i.e. attach it (if possible) to the
+ * already accumulated (i.e., already LRO-ed) received frames (if any),
+ * to form one super-sized frame for the subsequent processing
+ * by the stack.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
+xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
+lro_t **p_lro, xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+lro_t **lro_end3)
+{
+ int ring = 0;
+ return xge_hal_lro_process_rx(ring, buffer, NULL, tcp, seglen, p_lro,
+ ext_info, hldev, lro_end3);
+}
+
+/**
+ * xge_hal_lro_close_session - Close an LRO session.
+ * @lro: LRO session.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+xge_hal_lro_close_session (lro_t *lro)
+{
+ lro->in_use = 0;
+}
+
+/**
+ * xge_hal_lro_next_session: Returns next LRO session in the list or NULL
+ * if none exists.
+ * @hldev: HAL Context.
+ * @ring: rx ring number.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring)
+{
+ xge_hal_lro_desc_t *ring_lro = &hldev->lro_desc[ring];
+ int i;
+ int start_idx = ring_lro->lro_next_idx;
+
+ for(i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro = &ring_lro->lro_pool[i];
+
+ if (!lro->in_use)
+ continue;
+
+ lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
+ lro->ip_hdr->check = xge_os_htons(0);
+ lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
+ (lro->ip_hdr->version_ihl & 0x0F));
+ ring_lro->lro_next_idx = i + 1;
+ return lro;
+ }
+
+ ring_lro->lro_next_idx = 0;
+ return NULL;
+
+}
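+
+/*
+ * A minimal session-flush sketch (illustrative only; "deliver" stands
+ * for the driver's send-up path):
+ *
+ *	lro_t *lro;
+ *
+ *	while ((lro = xge_hal_lro_next_session(hldev, ring)) != NULL) {
+ *		deliver the accumulated frame to the stack;
+ *		xge_hal_lro_close_session(lro);
+ *	}
+ */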
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_get_next_session(xge_hal_device_t *hldev)
+{
+ int ring = 0; /* assume default ring=0 */
+ return xge_hal_lro_next_session(hldev, ring);
+}
+#endif
diff --git a/sys/dev/nxge/xgehal/xgehal-device.c b/sys/dev/nxge/xgehal/xgehal-device.c
new file mode 100644
index 0000000..0ba7562
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-device.c
@@ -0,0 +1,7247 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-device.c
+ *
+ * Description: HAL device object functionality
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-device.h>
+#include <dev/nxge/include/xgehal-channel.h>
+#include <dev/nxge/include/xgehal-fifo.h>
+#include <dev/nxge/include/xgehal-ring.h>
+#include <dev/nxge/include/xgehal-driver.h>
+#include <dev/nxge/include/xgehal-mgmt.h>
+
+#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
+#define END_SIGN 0x0
+
+#ifdef XGE_HAL_HERC_EMULATION
+#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+#endif
+
+/*
+ * Jenkins hash key length (in bytes)
+ */
+#define XGE_HAL_JHASH_MSG_LEN 50
+
+/*
+ * mix(a,b,c) used in Jenkins hash algorithm
+ */
+#define mix(a,b,c) { \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
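+
+/*
+ * A minimal usage sketch of mix() (word names assumed for
+ * illustration), following Jenkins' lookup2 pattern of folding input
+ * words into a, b and c:
+ *
+ *	u32 a = 0x9e3779b9, b = 0x9e3779b9, c = initval;
+ *	a += word0; b += word1; c += word2;
+ *	mix(a, b, c);
+ *	(c now carries the mixed hash value)
+ */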
+
+
+/*
+ * __hal_device_event_queued
+ * @data: pointer to xge_hal_device_t structure
+ * @event_type: type of the queued event
+ *
+ * Called when a new event is successfully queued.
+ */
+void
+__hal_device_event_queued(void *data, int event_type)
+{
+ xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
+ if (g_xge_hal_driver->uld_callbacks.event_queued) {
+ g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
+ }
+}
+
+/*
+ * __hal_pio_mem_write32_upper
+ *
+ * Endian-aware implementation of xge_os_pio_mem_write32().
+ * Since Xframe has 64-bit registers, we differentiate between the
+ * upper and lower parts.
+ */
+void
+__hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
+{
+#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
+ xge_os_pio_mem_write32(pdev, regh, val, addr);
+#else
+ xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
+#endif
+}
+
+/*
+ * __hal_pio_mem_write32_lower
+ *
+ * Endian-aware implementation of xge_os_pio_mem_write32().
+ * Since Xframe has 64-bit registers, we differentiate between the
+ * upper and lower parts.
+ */
+void
+__hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
+ void *addr)
+{
+#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
+ xge_os_pio_mem_write32(pdev, regh, val,
+ (void *) ((char *)addr + 4));
+#else
+ xge_os_pio_mem_write32(pdev, regh, val, addr);
+#endif
+}
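+
+/*
+ * Together these helpers let a 64-bit register be written as two
+ * 32-bit halves regardless of host endianness, e.g. (as done for
+ * mac_cfg later in this file):
+ *
+ *	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ *		(u32)(val64 >> 32), &bar0->mac_cfg);
+ */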
+
+/*
+ * __hal_device_register_poll
+ * @hldev: pointer to xge_hal_device_t structure
+ * @reg: register to poll for
+ * @op: 0 - bit reset, 1 - bit set
+ * @mask: mask for logical "and" condition based on %op
+ * @max_millis: maximum time to try to poll in milliseconds
+ *
+ * Polls the given register until the masked bits reach the state
+ * selected by @op (cleared for op == 0, set for op == 1), or until
+ * @max_millis milliseconds elapse.
+ */
+xge_hal_status_e
+__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
+ int op, u64 mask, int max_millis)
+{
+ u64 val64;
+ int i = 0;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+
+ xge_os_udelay(10);
+
+ do {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
+ if (op == 0 && !(val64 & mask))
+ return XGE_HAL_OK;
+ else if (op == 1 && (val64 & mask) == mask)
+ return XGE_HAL_OK;
+ xge_os_udelay(100);
+ } while (++i <= 9);
+
+ do {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
+ if (op == 0 && !(val64 & mask))
+ return XGE_HAL_OK;
+ else if (op == 1 && (val64 & mask) == mask)
+ return XGE_HAL_OK;
+ xge_os_udelay(1000);
+ } while (++i < max_millis);
+
+ return ret;
+}
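+
+/*
+ * A minimal usage sketch (mirrors the command-memory polling done
+ * later in this file): wait until the strobe bit clears after a
+ * command has been issued:
+ *
+ *	if (__hal_device_register_poll(hldev, &bar0->tti_command_mem, 0,
+ *	    XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
+ *	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK)
+ *		(the command is still executing; retry or fail)
+ */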
+
+/*
+ * __hal_device_wait_quiescent
+ * @hldev: the device
+ * @hw_status: hw_status in case of error
+ *
+ * Will wait until device is quiescent for some blocks.
+ */
+static xge_hal_status_e
+__hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /* poll and wait first */
+#ifdef XGE_HAL_HERC_EMULATION
+ (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
+ (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
+ XGE_HAL_ADAPTER_STATUS_RDMA_READY |
+ XGE_HAL_ADAPTER_STATUS_PFC_READY |
+ XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
+ XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
+ XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
+ XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
+ XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
+ XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
+#else
+ (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
+ (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
+ XGE_HAL_ADAPTER_STATUS_RDMA_READY |
+ XGE_HAL_ADAPTER_STATUS_PFC_READY |
+ XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
+ XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
+ XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
+ XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
+ XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
+ XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
+ XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
+#endif
+
+ return xge_hal_device_status(hldev, hw_status);
+}
+
+/**
+ * xge_hal_device_is_slot_freeze
+ * @devh: the device
+ *
+ * Returns non-zero if the slot is frozen.
+ * The determination is made based on the adapter_status
+ * register which will never give all FFs, unless PCI read
+ * cannot go through.
+ */
+int
+xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u16 device_id;
+ u64 adapter_status =
+ xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+ xge_os_pci_read16(hldev->pdev,hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, device_id),
+ &device_id);
+#ifdef TX_DEBUG
+ if (adapter_status == XGE_HAL_ALL_FOXES)
+ {
+ u64 dummy;
+ dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pcc_enable);
+ printf(">>> Slot is frozen!\n");
+ brkpoint(0);
+ }
+#endif
+ return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
+}
+
+
+/*
+ * __hal_device_led_actifity_fix
+ * @hldev: pointer to xge_hal_device_t structure
+ *
+ * SXE-002: Configure link and activity LED to turn it off
+ */
+static void
+__hal_device_led_actifity_fix(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u16 subid;
+ u64 val64;
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
+
+ /*
+ * Herc adds a new beacon control register that Xena does not have.
+ * The beacon control register in Herc sits at the same offset as the
+ * gpio control register in Xena, so on Xena they are one and the
+ * same; the gpio control register itself sits at a different offset
+ * on Herc than on Xena.
+ * The current register map represents Herc (that is, it contains
+ * both the beacon and gpio control registers). When moving code from
+ * Xena to Herc, code that used the gpio control register for LED
+ * handling must use the beacon control register on Herc, while the
+ * rest of the gpio control users keep using the same register.
+ * For the LED handling below, the beacon control register has to be
+ * used on Herc. The same write is valid on Xena as well, since at
+ * this offset it represents Xena's gpio control register.
+ */
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->beacon_control);
+ val64 |= 0x0000800000000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->beacon_control);
+ val64 = 0x0411040400000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ (void *) ((u8 *)bar0 + 0x2700));
+ }
+}
+
+/* Constants for fixing the MAC address problem seen mostly on
+ * Alpha machines.
+ */
+static u64 xena_fix_mac[] = {
+ 0x0060000000000000ULL, 0x0060600000000000ULL,
+ 0x0040600000000000ULL, 0x0000600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0000600000000000ULL,
+ 0x0040600000000000ULL, 0x0060600000000000ULL,
+ END_SIGN
+};
+
+/*
+ * __hal_device_xena_fix_mac
+ * @hldev: HAL device handle.
+ *
+ * Fix for all "FFs" MAC address problems observed on Alpha platforms.
+ */
+static void
+__hal_device_xena_fix_mac(xge_hal_device_t *hldev)
+{
+ int i = 0;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /*
+ * Herc adds a new beacon control register that Xena does not have.
+ * The beacon control register in Herc sits at the same offset as the
+ * gpio control register in Xena, so on Xena they are one and the
+ * same; the gpio control register itself sits at a different offset
+ * on Herc than on Xena.
+ * In the code below (xena_fix_mac), the beacon control register has
+ * to be used on Xena, since at this offset it represents Xena's gpio
+ * control register. On Herc no change is required.
+ */
+ while (xena_fix_mac[i] != END_SIGN) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ xena_fix_mac[i++], &bar0->beacon_control);
+ xge_os_mdelay(1);
+ }
+}
+
+/*
+ * xge_hal_device_bcast_enable
+ * @hldev: HAL device handle.
+ *
+ * Enable receiving broadcasts.
+ * The host must first write the key into the RMAC_CFG_KEY
+ * register, and only then write the MAC_CFG register.
+ */
+void
+xge_hal_device_bcast_enable(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), &bar0->mac_cfg);
+
+ xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
+ (unsigned long long)val64,
+ hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
+}
+
+/*
+ * xge_hal_device_bcast_disable
+ * @hldev: HAL device handle.
+ *
+ * Disable receiving broadcasts.
+ * The host must first write the key into the RMAC_CFG_KEY
+ * register, and only then write the MAC_CFG register.
+ */
+void
+xge_hal_device_bcast_disable(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+
+ val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), &bar0->mac_cfg);
+
+ xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
+ (unsigned long long)val64,
+ hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
+}
+
+/*
+ * __hal_device_shared_splits_configure
+ * @hldev: HAL device handle.
+ *
+ * TxDMA will stop issuing read requests if the number of read
+ * splits exceeds the limit set by shared_splits.
+ */
+static void
+__hal_device_shared_splits_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pic_control);
+ val64 |=
+ XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->pic_control);
+ xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
+}
+
+/*
+ * __hal_device_rmac_padding_configure
+ * @hldev: HAL device handle.
+ *
+ * Configure RMAC frame padding. Depending on the configuration,
+ * the padding can be sent to the host or stripped by the MAC.
+ */
+static void
+__hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
+ val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
+ val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
+
+ /*
+ * If the RTH enable bit is not set, strip the FCS
+ */
+ if (!hldev->config.rth_en ||
+ !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
+ val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
+ }
+
+ val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
+ val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
+
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
+ xge_os_mdelay(1);
+
+ xge_debug_device(XGE_TRACE,
+ "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
+ (unsigned long long)val64);
+}
+
+/*
+ * __hal_device_pause_frames_configure
+ * @hldev: HAL device handle.
+ *
+ * Set Pause threshold.
+ *
+ * A pause frame is generated if the amount of data outstanding
+ * on any queue exceeds the fraction
+ * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 of the queue.
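+ *
+ * A worked example (threshold value assumed for illustration): with
+ * mc_pause_threshold_q0q3 set to 0x80, pause generation starts once
+ * roughly 128/256 = 50% of a queue is outstanding.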
+ */
+static void
+__hal_device_pause_frames_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ int i;
+ u64 val64;
+
+ switch (hldev->config.mac.media) {
+ case XGE_HAL_MEDIA_SR:
+ case XGE_HAL_MEDIA_SW:
+ val64=0xfffbfffbfffbfffbULL;
+ break;
+ case XGE_HAL_MEDIA_LR:
+ case XGE_HAL_MEDIA_LW:
+ val64=0xffbbffbbffbbffbbULL;
+ break;
+ case XGE_HAL_MEDIA_ER:
+ case XGE_HAL_MEDIA_EW:
+ default:
+ val64=0xffbbffbbffbbffbbULL;
+ break;
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->mc_pause_thresh_q0q3);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->mc_pause_thresh_q4q7);
+
+ /* Set the time value to be inserted in the pause frame generated
+ * by Xframe */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rmac_pause_cfg);
+ if (hldev->config.mac.rmac_pause_gen_en)
+ val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
+ else
+ val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
+ if (hldev->config.mac.rmac_pause_rcv_en)
+ val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
+ else
+ val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
+ val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
+ val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_pause_cfg);
+
+ val64 = 0;
+ for (i = 0; i<4; i++) {
+ val64 |=
+ (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
+ <<(i*2*8));
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_pause_thresh_q0q3);
+
+ val64 = 0;
+ for (i = 0; i<4; i++) {
+ val64 |=
+ (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
+ <<(i*2*8));
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_pause_thresh_q4q7);
+ xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
+}
+
+/*
+ * Herc's clock rate is doubled, unless the slot runs at 33MHz.
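+ *
+ * For example: on Xena, or on a Herc in a 33MHz slot, a 10us interval
+ * is returned unchanged; on a Herc in any faster known-frequency slot
+ * it becomes 20us.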
+ */
+unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
+ unsigned int time_ival)
+{
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+ return time_ival;
+
+ xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
+
+ if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
+ hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
+ time_ival *= 2;
+
+ return time_ival;
+}
+
+
+/*
+ * __hal_device_bus_master_disable
+ * @hldev: HAL device handle.
+ *
+ * Disable bus mastership.
+ */
+static void
+__hal_device_bus_master_disable (xge_hal_device_t *hldev)
+{
+ u16 cmd;
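+ /* Bus Master Enable is bit 2 (value 4) of the PCI command register */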
+ u16 bus_master = 4;
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
+ cmd &= ~bus_master;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
+}
+
+/*
+ * __hal_device_bus_master_enable
+ * @hldev: HAL device handle.
+ *
+ * Enable bus mastership.
+ */
+static void
+__hal_device_bus_master_enable (xge_hal_device_t *hldev)
+{
+ u16 cmd;
+ u16 bus_master = 4;
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
+
+ /* already enabled? do nothing */
+ if (cmd & bus_master)
+ return;
+
+ cmd |= bus_master;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
+}
+/*
+ * __hal_device_intr_mgmt
+ * @hldev: HAL device handle.
+ * @mask: mask indicating which Intr block must be modified.
+ * @flag: if true - enable, otherwise - disable interrupts.
+ *
+ * Disable or enable device interrupts. Mask is used to specify
+ * which hardware blocks should produce interrupts. For details
+ * please refer to Xframe User Guide.
+ */
+static void
+__hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64 = 0, temp64 = 0;
+ u64 gim, gim_saved;
+
+ gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->general_int_mask);
+
+ /* Top level interrupt classification */
+ /* PIC Interrupts */
+ if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
+ /* Enable PIC Intrs in the general intr mask register */
+ val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
+ if (flag) {
+ gim &= ~((u64) val64);
+ temp64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->pic_int_mask);
+
+ temp64 &= ~XGE_HAL_PIC_INT_TX;
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) ==
+ XGE_HAL_CARD_HERC) {
+ temp64 &= ~XGE_HAL_PIC_INT_MISC;
+ }
+#endif
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ temp64, &bar0->pic_int_mask);
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) ==
+ XGE_HAL_CARD_HERC) {
+ /*
+ * Unmask only Link Up interrupt
+ */
+ temp64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->misc_int_mask);
+ temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, temp64,
+ &bar0->misc_int_mask);
+ xge_debug_device(XGE_TRACE,
+ "unmask link up flag "XGE_OS_LLXFMT,
+ (unsigned long long)temp64);
+ }
+#endif
+ } else { /* flag == 0 */
+
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) ==
+ XGE_HAL_CARD_HERC) {
+ /*
+ * Mask both Link Up and Down interrupts
+ */
+ temp64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->misc_int_mask);
+ temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, temp64,
+ &bar0->misc_int_mask);
+ xge_debug_device(XGE_TRACE,
+ "mask link up/down flag "XGE_OS_LLXFMT,
+ (unsigned long long)temp64);
+ }
+#endif
+ /* Disable PIC Intrs in the general intr mask
+ * register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->pic_int_mask);
+ gim |= val64;
+ }
+ }
+
+ /* DMA Interrupts */
+ /* Enabling/Disabling Tx DMA interrupts */
+ if (mask & XGE_HAL_TX_DMA_INTR) {
+ /* Enable TxDMA Intrs in the general intr mask register */
+ val64 = XGE_HAL_TXDMA_INT_M;
+ if (flag) {
+ gim &= ~((u64) val64);
+ /* Enable all TxDMA interrupts */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->txdma_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->pfc_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->tda_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->pcc_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->tti_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->lso_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->tpa_err_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0, &bar0->sm_err_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable TxDMA Intrs in the general intr mask
+ * register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->txdma_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->pfc_err_mask);
+
+ gim |= val64;
+ }
+ }
+
+ /* Enabling/Disabling Rx DMA interrupts */
+ if (mask & XGE_HAL_RX_DMA_INTR) {
+ /* Enable RxDMA Intrs in the general intr mask register */
+ val64 = XGE_HAL_RXDMA_INT_M;
+ if (flag) {
+
+ gim &= ~((u64) val64);
+ /* All RxDMA block interrupts are disabled for now
+ * TODO */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->rxdma_int_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable RxDMA Intrs in the general intr mask
+ * register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->rxdma_int_mask);
+
+ gim |= val64;
+ }
+ }
+
+ /* MAC Interrupts */
+ /* Enabling/Disabling MAC interrupts */
+ if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
+ val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
+ if (flag) {
+
+ gim &= ~((u64) val64);
+
+ /* All MAC block error interrupts are disabled for now. */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable MAC Intrs in the general intr mask
+ * register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
+
+ gim |= val64;
+ }
+ }
+
+ /* XGXS Interrupts */
+ if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
+ val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
+ if (flag) {
+
+ gim &= ~((u64) val64);
+ /* All XGXS block error interrupts are disabled for now
+ * TODO */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable XGXS Intrs in the general intr mask register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
+
+ gim |= val64;
+ }
+ }
+
+ /* Memory Controller(MC) interrupts */
+ if (mask & XGE_HAL_MC_INTR) {
+ val64 = XGE_HAL_MC_INT_M;
+ if (flag) {
+
+ gim &= ~((u64) val64);
+
+ /* Enable all MC blocks error interrupts */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL, &bar0->mc_int_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable MC Intrs in the general intr mask
+ * register */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
+
+ gim |= val64;
+ }
+ }
+
+
+ /* Tx traffic interrupts */
+ if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
+ val64 = XGE_HAL_TXTRAFFIC_INT_M;
+ if (flag) {
+
+ gim &= ~((u64) val64);
+
+ /* Enable all the Tx side interrupts */
+ /* '0' Enables all 64 TX interrupt levels. */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
+ &bar0->tx_traffic_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable Tx Traffic Intrs in the general intr mask
+ * register. */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->tx_traffic_mask);
+ gim |= val64;
+ }
+ }
+
+ /* Rx traffic interrupts */
+ if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
+ val64 = XGE_HAL_RXTRAFFIC_INT_M;
+ if (flag) {
+ gim &= ~((u64) val64);
+ /* '0' Enables all 8 RX interrupt levels. */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
+ &bar0->rx_traffic_mask);
+
+ } else { /* flag == 0 */
+
+ /* Disable Rx Traffic Intrs in the general intr mask
+ * register.
+ */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_ALL_INTRS_DIS,
+ &bar0->rx_traffic_mask);
+
+ gim |= val64;
+ }
+ }
+
+ /* Sched Timer interrupt */
+ if (mask & XGE_HAL_SCHED_INTR) {
+ if (flag) {
+ temp64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->txpic_int_mask);
+ temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ temp64, &bar0->txpic_int_mask);
+
+ xge_hal_device_sched_timer(hldev,
+ hldev->config.sched_timer_us,
+ hldev->config.sched_timer_one_shot);
+ } else {
+ temp64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->txpic_int_mask);
+ temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ temp64, &bar0->txpic_int_mask);
+
+ xge_hal_device_sched_timer(hldev,
+ XGE_HAL_SCHED_TIMER_DISABLED,
+ XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
+ }
+ }
+
+ if (gim != gim_saved) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
+ &bar0->general_int_mask);
+ xge_debug_device(XGE_TRACE, "general_int_mask updated "
+ XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
+ (unsigned long long)gim_saved, (unsigned long long)gim);
+ }
+}
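+
+/*
+ * A minimal usage sketch (the mask choice is illustrative): enable
+ * Tx/Rx traffic interrupts, and later disable them again:
+ *
+ *	__hal_device_intr_mgmt(hldev,
+ *	    XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR, 1);
+ *	...
+ *	__hal_device_intr_mgmt(hldev,
+ *	    XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR, 0);
+ */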
+
+/*
+ * __hal_device_bimodal_configure
+ * @hldev: HAL device handle.
+ *
+ * Bimodal parameters initialization.
+ */
+static void
+__hal_device_bimodal_configure(xge_hal_device_t *hldev)
+{
+ int i;
+
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_tti_config_t *tti;
+ xge_hal_rti_config_t *rti;
+
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ rti = &hldev->config.ring.queue[i].rti;
+ tti = &hldev->bimodal_tti[i];
+
+ tti->enabled = 1;
+ tti->urange_a = hldev->bimodal_urange_a_en * 10;
+ tti->urange_b = 20;
+ tti->urange_c = 30;
+ tti->ufc_a = hldev->bimodal_urange_a_en * 8;
+ tti->ufc_b = 16;
+ tti->ufc_c = 32;
+ tti->ufc_d = 64;
+ tti->timer_val_us = hldev->bimodal_timer_val_us;
+ tti->timer_ac_en = 1;
+ tti->timer_ci_en = 0;
+
+ rti->urange_a = 10;
+ rti->urange_b = 20;
+ rti->urange_c = 30;
+ rti->ufc_a = 1; /* <= for netpipe type of tests */
+ rti->ufc_b = 4;
+ rti->ufc_c = 4;
+ rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
+ rti->timer_ac_en = 1;
+ rti->timer_val_us = 5; /* for optimal bus efficiency usage */
+ }
+}
+
+/*
+ * __hal_device_tti_apply
+ * @hldev: HAL device handle.
+ *
+ * apply TTI configuration.
+ */
+static xge_hal_status_e
+__hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
+ int num, int runtime)
+{
+ u64 val64, data1 = 0, data2 = 0;
+ xge_hal_pci_bar0_t *bar0;
+
+ if (runtime)
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ else
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (tti->timer_val_us) {
+ unsigned int tx_interval;
+
+ if (hldev->config.pci_freq_mherz) {
+ tx_interval = hldev->config.pci_freq_mherz *
+ tti->timer_val_us / 64;
+ tx_interval =
+ __hal_fix_time_ival_herc(hldev,
+ tx_interval);
+ } else {
+ tx_interval = tti->timer_val_us;
+ }
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
+ if (tti->timer_ac_en) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
+ }
+ if (tti->timer_ci_en) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ }
+
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
+ num, tx_interval, tti->timer_ci_en ?
+ "enabled": "disabled");
+ }
+ }
+
+ if (tti->urange_a ||
+ tti->urange_b ||
+ tti->urange_c ||
+ tti->ufc_a ||
+ tti->ufc_b ||
+ tti->ufc_c ||
+ tti->ufc_d ) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
+ XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
+ XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
+
+ data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
+ &bar0->tti_data1_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data1_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
+ &bar0->tti_data2_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data2_mem);
+ xge_os_wmb();
+
+ val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_TTI_CMD_MEM_OFFSET(num);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->tti_command_mem);
+
+ if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
+ 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+		/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
+ XGE_OS_LLXFMT, num,
+ (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data1_mem));
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_tti_configure
+ * @hldev: HAL device handle.
+ *
+ * TTI Initialization.
+ * Initialize Transmit Traffic Interrupt Scheme.
+ */
+static xge_hal_status_e
+__hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
+{
+ int i;
+
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
+
+ if (!hldev->config.fifo.queue[i].configured)
+ continue;
+
+ for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ xge_hal_status_e status;
+
+ if (!hldev->config.fifo.queue[i].tti[j].enabled)
+ continue;
+
+ /* at least some TTI enabled. Record it. */
+ hldev->tti_enabled = 1;
+
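+			/*
+			 * TTI slots are laid out per fifo: fifo i, tti j
+			 * occupies slot i * XGE_HAL_MAX_FIFO_TTI_NUM + j,
+			 * so e.g. tti 2 of fifo 1 lands in slot
+			 * XGE_HAL_MAX_FIFO_TTI_NUM + 2.
+			 */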
+ status = __hal_device_tti_apply(hldev,
+ &hldev->config.fifo.queue[i].tti[j],
+ i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
+ if (status != XGE_HAL_OK)
+ return status;
+ }
+ }
+
+ /* processing bimodal TTIs */
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_status_e status;
+
+ if (!hldev->bimodal_tti[i].enabled)
+ continue;
+
+ /* at least some bimodal TTI enabled. Record it. */
+ hldev->tti_enabled = 1;
+
+ status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
+ XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_rti_configure
+ * @hldev: HAL device handle.
+ *
+ * RTI Initialization.
+ * Initialize Receive Traffic Interrupt Scheme.
+ */
+xge_hal_status_e
+__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
+{
+ xge_hal_pci_bar0_t *bar0;
+ u64 val64, data1 = 0, data2 = 0;
+ int i;
+
+ if (runtime) {
+ /*
+		 * we don't want to re-configure RTI while bimodal
+		 * interrupts are in use; instead, reconfigure TTI
+		 * with the new RTI values.
+ */
+ if (hldev->config.bimodal_interrupts) {
+ __hal_device_bimodal_configure(hldev);
+ return __hal_device_tti_configure(hldev, 1);
+ }
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ } else
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
+
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+
+ if (rti->timer_val_us) {
+ unsigned int rx_interval;
+
+ if (hldev->config.pci_freq_mherz) {
+ rx_interval = hldev->config.pci_freq_mherz *
+ rti->timer_val_us / 8;
+ rx_interval =
+ __hal_fix_time_ival_herc(hldev,
+ rx_interval);
+ } else {
+ rx_interval = rti->timer_val_us;
+ }
+			data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
+ if (rti->timer_ac_en) {
+ data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
+ }
+ data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
+ }
+
+ if (rti->urange_a ||
+ rti->urange_b ||
+ rti->urange_c ||
+ rti->ufc_a ||
+ rti->ufc_b ||
+ rti->ufc_c ||
+ rti->ufc_d) {
+			data1 |= XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
+ XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
+ XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
+
+ data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
+ XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
+ XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
+ XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
+ &bar0->rti_data1_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data1_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
+ &bar0->rti_data2_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data2_mem);
+ xge_os_wmb();
+
+ val64 = XGE_HAL_RTI_CMD_MEM_WE |
+ XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
+ val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rti_command_mem);
+
+ if (!runtime && __hal_device_register_poll(hldev,
+ &bar0->rti_command_mem, 0,
+ XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+			/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE,
+ "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
+ i,
+ (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data1_mem));
+ }
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+/* Constants to be programmed into the Xena's registers to configure
+ * the XAUI. */
+static u64 default_xena_mdio_cfg[] = {
+ /* Reset PMA PLL */
+ 0xC001010000000000ULL, 0xC0010100000000E0ULL,
+ 0xC0010100008000E4ULL,
+ /* Remove Reset from PMA PLL */
+ 0xC001010000000000ULL, 0xC0010100000000E0ULL,
+ 0xC0010100000000E4ULL,
+ END_SIGN
+};
+
+static u64 default_herc_mdio_cfg[] = {
+ END_SIGN
+};
+
+static u64 default_xena_dtx_cfg[] = {
+ 0x8000051500000000ULL, 0x80000515000000E0ULL,
+ 0x80000515D93500E4ULL, 0x8001051500000000ULL,
+ 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515F21000E4ULL,
+ /* Set PADLOOPBACKN */
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515B20000E4ULL, 0x8003051500000000ULL,
+ 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
+ 0x8004051500000000ULL, 0x80040515000000E0ULL,
+ 0x80040515B20000E4ULL, 0x8005051500000000ULL,
+ 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
+ SWITCH_SIGN,
+ /* Remove PADLOOPBACKN */
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515F20000E4ULL, 0x8003051500000000ULL,
+ 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
+ 0x8004051500000000ULL, 0x80040515000000E0ULL,
+ 0x80040515F20000E4ULL, 0x8005051500000000ULL,
+ 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
+ END_SIGN
+};
+
+/*
+static u64 default_herc_dtx_cfg[] = {
+ 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
+ 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
+ 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+ 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+ 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+ END_SIGN
+};
+*/
+
+static u64 default_herc_dtx_cfg[] = {
+ 0x8000051536750000ULL, 0x80000515367500E0ULL,
+ 0x8000051536750004ULL, 0x80000515367500E4ULL,
+
+ 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+ 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+
+ 0x801205150D440000ULL, 0x801205150D4400E0ULL,
+ 0x801205150D440004ULL, 0x801205150D4400E4ULL,
+
+ 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+ END_SIGN
+};
+
+
+void
+__hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
+{
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(value>>32), reg);
+ xge_os_wmb();
+ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
+ (u32)value, reg);
+ xge_os_wmb();
+ xge_os_mdelay(1);
+}
+
+u64
+__hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
+{
+ u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ reg);
+ xge_os_mdelay(1);
+ return val64;
+}
+
+/*
+ * __hal_device_xaui_configure
+ * @hldev: HAL device handle.
+ *
+ * Configure XAUI Interface of Xena.
+ *
+ * To configure the Xena's XAUI, one has to write a series
+ * of 64 bit values into two registers in a particular
+ * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
+ * which is placed in the arrays of configuration values
+ * (default_dtx_cfg & default_mdio_cfg) at appropriate places
+ * to switch writing from one register to another. We continue
+ * writing these values until we encounter the 'END_SIGN' macro.
+ * For example, after making a series of 21 writes into the
+ * dtx_control register, 'SWITCH_SIGN' appears and hence we
+ * start writing into mdio_control until we encounter END_SIGN.
+ */
+static void
+__hal_device_xaui_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ int mdio_cnt = 0, dtx_cnt = 0;
+ u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ default_dtx_cfg = default_xena_dtx_cfg;
+ default_mdio_cfg = default_xena_mdio_cfg;
+ } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ default_dtx_cfg = default_herc_dtx_cfg;
+ default_mdio_cfg = default_herc_mdio_cfg;
+ } else {
+ xge_assert(default_dtx_cfg);
+ return;
+ }
+
+ do {
+ dtx_cfg:
+ while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
+ if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
+ dtx_cnt++;
+ goto mdio_cfg;
+ }
+ __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control);
+ dtx_cnt++;
+ }
+ mdio_cfg:
+ while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
+ if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+ mdio_cnt++;
+ goto dtx_cfg;
+ }
+ __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
+ &bar0->mdio_control);
+ mdio_cnt++;
+ }
+ } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
+ (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
+
+ xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
+}
+
+/*
+ * __hal_device_mac_link_util_set
+ * @hldev: HAL device handle.
+ *
+ * Set sampling rate to calculate link utilization.
+ */
+static void
+__hal_device_mac_link_util_set(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
+ hldev->config.mac.tmac_util_period) |
+ XGE_HAL_MAC_RX_LINK_UTIL_VAL(
+ hldev->config.mac.rmac_util_period);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mac_link_util);
+ xge_debug_device(XGE_TRACE, "%s",
+ "bandwidth link utilization configured");
+}
+
+/*
+ * __hal_device_set_swapper
+ * @hldev: HAL device handle.
+ *
+ * Set the Xframe's byte "swapper" in accordance with the
+ * endianness of the host.
+ */
+xge_hal_status_e
+__hal_device_set_swapper(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ /*
+	 * from the 32bit errata:
+ *
+ * The SWAPPER_CONTROL register determines how the adapter accesses
+ * host memory as well as how it responds to read and write requests
+ * from the host system. Writes to this register should be performed
+ * carefully, since the byte swappers could reverse the order of bytes.
+ * When configuring this register keep in mind that writes to the PIF
+ * read and write swappers could reverse the order of the upper and
+ * lower 32-bit words. This means that the driver may have to write
+ * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
+ * configure the entire register. */
+
+ /*
+	 * The device is set to a big endian format by default, so a
+	 * big endian driver need not set anything.
+ */
+
+#if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xffffffffffffffffULL, &bar0->swapper_ctrl);
+
+ val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
+
+ xge_os_wmb();
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->swapper_ctrl);
+
+ xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+#elif !defined(XGE_OS_HOST_BIG_ENDIAN)
+
+ /*
+	 * Initially we enable all bits to make the register accessible
+	 * to the driver, then we selectively enable only those bits that
+	 * we want to set; i.e., force the swapper to swap during the
+	 * first write, since the second write will overwrite it with
+	 * the final settings.
+ *
+ * Use only for little endian platforms.
+ */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xffffffffffffffffULL, &bar0->swapper_ctrl);
+ xge_os_wmb();
+ val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
+ XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
+ XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
+ XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
+ XGE_HAL_SWAPPER_CTRL_RTH_FE |
+ XGE_HAL_SWAPPER_CTRL_RTH_SE |
+ XGE_HAL_SWAPPER_CTRL_TXP_FE |
+ XGE_HAL_SWAPPER_CTRL_TXP_SE |
+ XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
+ XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
+ XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
+ XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
+ XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
+ XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
+ XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
+ XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
+ XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
+ XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
+ XGE_HAL_SWAPPER_CTRL_XMSI_FE |
+ XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
+
+ /*
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+ val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
+ } */
+ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
+ &bar0->swapper_ctrl);
+ xge_os_wmb();
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
+ &bar0->swapper_ctrl);
+ xge_os_wmb();
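+	/*
+	 * Per the 32-bit errata quoted above: write the upper 32 bits a
+	 * second time, since the first upper/lower pair may have gone
+	 * through the not-yet-configured PIF write swapper in reversed
+	 * word order.
+	 */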
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
+ &bar0->swapper_ctrl);
+ xge_debug_device(XGE_TRACE, "%s", "using little endian set");
+#endif
+
+	/* Verify that the endian settings took effect by reading a
+	 * feedback register. */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pif_rd_swapper_fb);
+ if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
+ xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
+ (unsigned long long) val64);
+ return XGE_HAL_ERR_SWAPPER_CTRL;
+ }
+
+ xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_rts_mac_configure - Configure RTS steering based on
+ * destination mac address.
+ * @hldev: HAL device handle.
+ *
+ */
+xge_hal_status_e
+__hal_device_rts_mac_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ if (!hldev->config.rts_mac_en) {
+ return XGE_HAL_OK;
+ }
+
+ /*
+	 * Set the receive traffic steering mode from the default (classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+ val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_rts_port_configure - Configure RTS steering based on
+ * destination or source port number.
+ * @hldev: HAL device handle.
+ *
+ */
+xge_hal_status_e
+__hal_device_rts_port_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int rnum;
+
+ if (!hldev->config.rts_port_en) {
+ return XGE_HAL_OK;
+ }
+
+ /*
+	 * Set the receive traffic steering mode from the default (classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+ val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+
+ /*
+ * Initiate port steering according to per-ring configuration
+ */
+ for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
+ int pnum;
+ xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum];
+
+ if (!queue->configured || queue->rts_port_en)
+ continue;
+
+ for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) {
+ xge_hal_rts_port_t *port = &queue->rts_ports[pnum];
+
+ /*
+ * Skip and clear empty ports
+ */
+ if (!port->num) {
+ /*
+ * Clear CAM memory
+ */
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, 0ULL,
+ &bar0->rts_pn_cam_data);
+
+ val64 = BIT(7) | BIT(15);
+ } else {
+ /*
+ * Assign new Port values according
+ * to configuration
+ */
+ val64 = vBIT(port->num,8,16) |
+ vBIT(rnum,37,3) | BIT(63);
+				if (port->src)
+					val64 |= BIT(47);
+				if (!port->udp)
+					val64 |= BIT(7);
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, val64,
+ &bar0->rts_pn_cam_data);
+
+ val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_pn_cam_ctrl);
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_pn_cam_ctrl, 0,
+ XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) !=
+ XGE_HAL_OK) {
+				/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+ }
+ }
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_rts_qos_configure - Configure RTS steering based on
+ * qos.
+ * @hldev: HAL device handle.
+ *
+ */
+xge_hal_status_e
+__hal_device_rts_qos_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int j, rx_ring_num;
+
+ if (!hldev->config.rts_qos_en) {
+ return XGE_HAL_OK;
+ }
+
+	/* First clear the RTS_DS_MEM_DATA for all 64 entries */
+	for (j = 0; j < 64; j++) {
+		val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
+
+		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+			       &bar0->rts_ds_mem_data);
+
+		val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
+			XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
+			XGE_HAL_RTS_DS_MEM_CTRL_OFFSET(j);
+
+		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+			       &bar0->rts_ds_mem_ctrl);
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_ds_mem_ctrl, 0,
+ XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+			/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ }
+
+ rx_ring_num = 0;
+ for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) {
+ if (hldev->config.ring.queue[j].configured)
+ rx_ring_num++;
+ }
+
+ switch (rx_ring_num) {
+ case 1:
+ val64 = 0x0;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 2:
+ val64 = 0x0001000100010001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001000100000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 3:
+ val64 = 0x0001020001020001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0200010200010200ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0102000102000102ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020001020001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0200010200000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 4:
+ val64 = 0x0001020300010203ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 5:
+ val64 = 0x0001020304000102ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0304000102030400ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0102030400010203ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0400010203040001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0203040000000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 6:
+ val64 = 0x0001020304050001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0203040500010203ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0405000102030405ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020304050001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0203040500000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 7:
+ val64 = 0x0001020304050600ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0102030405060001ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0203040506000102ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0304050600010203ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0405060000000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ case 8:
+ val64 = 0x0001020304050607ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
+ break;
+ }
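+
+	/*
+	 * A note on the rx_w_round_robin values above (an inference from
+	 * the patterns, not a hardware specification): each byte of the
+	 * five 64-bit registers appears to name the ring that wins the
+	 * next arbitration slot, so the tables simply repeat the cycle
+	 * 0..rx_ring_num-1 across the usable slots.  A hypothetical
+	 * helper that would generate such a slot table:
+	 *
+	 *	static void
+	 *	fill_rr_slots(u8 *slot, int nslots, int nrings)
+	 *	{
+	 *		int i;
+	 *		for (i = 0; i < nslots; i++)
+	 *			slot[i] = (u8)(i % nrings);
+	 *	}
+	 */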
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * xge_hal_device_rts_mac_enable
+ *
+ * @devh: HAL device handle.
+ * @index: index number where the MAC addr will be stored
+ * @macaddr: MAC address
+ *
+ * - Enable RTS steering for the given MAC address. This function has to be
+ * called with lock acquired.
+ *
+ * NOTE:
+ * 1. ULD has to call this function with the index value which
+ *	 satisfies the following condition:
+ * ring_num = (index % 8)
+ *	2. ULD also needs to make sure that the index is not
+ * occupied by any MAC address. If that index has any MAC address
+ * it will be overwritten and HAL will not check for it.
+ *
+ */
+xge_hal_status_e
+xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
+{
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
+ xge_hal_status_e status;
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+
+ /*
+ * Set the MAC address at the given location marked by index.
+ */
+ status = xge_hal_device_macaddr_set(hldev, index, macaddr);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "%s",
+ "Not able to set the mac addr");
+ return status;
+ }
+
+ return xge_hal_device_rts_section_enable(hldev, index);
+}
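+
+/*
+ * Usage sketch (hypothetical ULD code, illustrating the index
+ * constraint from the NOTE above): to steer a MAC address to ring 2,
+ * the caller must pick an index with (index % 8) == 2, e.g. 2, 10
+ * or 18:
+ *
+ *	if (xge_hal_device_rts_mac_enable(devh, 10, macaddr) !=
+ *	    XGE_HAL_OK)
+ *		xge_debug_device(XGE_ERR, "%s", "steering failed");
+ */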
+
+/*
+ * xge_hal_device_rts_mac_disable
+ * @hldev: HAL device handle.
+ * @index: index number where to disable the MAC addr
+ *
+ * Disable RTS Steering based on the MAC address.
+ * This function should be called with lock acquired.
+ *
+ */
+xge_hal_status_e
+xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
+{
+ xge_hal_status_e status;
+ u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ xge_debug_ll(XGE_TRACE, "the index value is %d ", index);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+
+ /*
+ * Disable MAC address @ given index location
+ */
+ status = xge_hal_device_macaddr_set(hldev, index, macaddr);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "%s",
+ "Not able to set the mac addr");
+ return status;
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_device_rth_it_configure - Configure RTH for the device
+ * @hldev: HAL device handle.
+ *
+ * Using IT (Indirection Table).
+ */
+xge_hal_status_e
+__hal_device_rth_it_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int rings[XGE_HAL_MAX_RING_NUM]={0};
+ int rnum;
+ int rmax;
+ int buckets_num;
+ int bucket;
+
+ if (!hldev->config.rth_en) {
+ return XGE_HAL_OK;
+ }
+
+ /*
+	 * Set the receive traffic steering mode from the default (classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+ val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+
+ buckets_num = (1 << hldev->config.rth_bucket_size);
+
+ rmax=0;
+ for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
+ if (hldev->config.ring.queue[rnum].configured &&
+ hldev->config.ring.queue[rnum].rth_en)
+ rings[rmax++] = rnum;
+ }
+
+ rnum = 0;
+ /* for starters: fill in all the buckets with rings "equally" */
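+	/*
+	 * Example: with rth_bucket_size = 2 (four buckets) and only
+	 * rings 0 and 3 RTH-enabled, the indirection table becomes
+	 * {0, 3, 0, 3}.
+	 */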
+ for (bucket = 0; bucket < buckets_num; bucket++) {
+
+ if (rnum == rmax)
+ rnum = 0;
+
+ /* write data */
+ val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
+ XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_map_mem_data);
+
+ /* execute */
+ val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_map_mem_ctrl);
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_rth_map_mem_ctrl, 0,
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ rnum++;
+ }
+
+ val64 = XGE_HAL_RTS_RTH_EN;
+ val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
+ val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
+ XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
+ XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+
+ xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
+ hldev->config.rth_bucket_size);
+
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_spdm_entry_add - Add a new entry to the SPDM table.
+ *
+ * Note:
+ * This function should be called with the spdm_lock held.
+ *
+ * See also: xge_hal_spdm_entry_add, xge_hal_spdm_entry_remove.
+ */
+static xge_hal_status_e
+__hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
+ xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
+ u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ u64 spdm_line_arr[8];
+ u8 line_no;
+
+ /*
+ * Clear the SPDM READY bit
+ */
+ val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rxpic_int_reg);
+
+ xge_debug_device(XGE_TRACE,
+ "L4 SP %x:DP %x: hash %x tgt_queue %d ",
+ l4_sp, l4_dp, jhash_value, tgt_queue);
+
+ xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
+
+ /*
+ * Construct the SPDM entry.
+ */
+ spdm_line_arr[0] = vBIT(l4_sp,0,16) |
+ vBIT(l4_dp,16,32) |
+ vBIT(tgt_queue,53,3) |
+ vBIT(is_tcp,59,1) |
+ vBIT(is_ipv4,63,1);
+
+
+ if (is_ipv4) {
+ spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
+ vBIT(dst_ip->ipv4.addr,32,32);
+
+ } else {
+ xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
+ xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
+ xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
+ xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
+ }
+
+ spdm_line_arr[7] = vBIT(jhash_value,0,32) |
+ BIT(63); /* entry enable bit */
+
+ /*
+ * Add the entry to the SPDM table
+ */
+ for(line_no = 0; line_no < 8; line_no++) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ spdm_line_arr[line_no],
+ (void *)((char *)hldev->spdm_mem_base +
+ (spdm_entry * 64) +
+ (line_no * 8)));
+ }
+
+ /*
+ * Wait for the operation to be completed.
+ */
+ if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
+ XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ /*
+ * Add this information to a local SPDM table. The purpose of
+	 * maintaining a local SPDM table is to avoid searching the
+	 * adapter SPDM table for entry lookups, which is very costly
+	 * in terms of time.
+ */
+ hldev->spdm_table[spdm_entry]->in_use = 1;
+ xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
+ sizeof(xge_hal_ipaddr_t));
+ xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
+ sizeof(xge_hal_ipaddr_t));
+ hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
+ hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
+ hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
+ hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
+ hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
+ hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
+ hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_rth_spdm_configure - Configure RTH for the device
+ * @hldev: HAL device handle.
+ *
+ * Using SPDM (Socket-Pair Direct Match).
+ */
+xge_hal_status_e
+__hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+ u8 spdm_bar_num;
+ u32 spdm_bar_offset;
+ int spdm_table_size;
+ int i;
+
+ if (!hldev->config.rth_spdm_en) {
+ return XGE_HAL_OK;
+ }
+
+ /*
+ * Retrieve the base address of SPDM Table.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->spdm_bir_offset);
+
+ spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
+ spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
+
+
+ /*
+ * spdm_bar_num specifies the PCI bar num register used to
+ * address the memory space. spdm_bar_offset specifies the offset
+	 * of the SPDM memory within the bar num memory space.
+ */
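+	/*
+	 * The "* 8" scaling below suggests the offset is expressed in
+	 * units of 64-bit words (an inference from this code, not from
+	 * the hardware specification).
+	 */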
+ switch (spdm_bar_num) {
+ case 0:
+ {
+ hldev->spdm_mem_base = (char *)bar0 +
+ (spdm_bar_offset * 8);
+ break;
+ }
+ case 1:
+ {
+ char *bar1 = (char *)hldev->bar1;
+ hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
+ break;
+ }
+ default:
+		xge_assert(spdm_bar_num == 0 || spdm_bar_num == 1);
+ }
+
+ /*
+ * Retrieve the size of SPDM table(number of entries).
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->spdm_structure);
+ hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
+
+
+ spdm_table_size = hldev->spdm_max_entries *
+ sizeof(xge_hal_spdm_entry_t);
+ if (hldev->spdm_table == NULL) {
+ void *mem;
+
+ /*
+ * Allocate memory to hold the copy of SPDM table.
+ */
+ if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
+ xge_os_malloc(
+ hldev->pdev,
+ (sizeof(xge_hal_spdm_entry_t *) *
+ hldev->spdm_max_entries))) == NULL) {
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+		if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) {
+ xge_os_free(hldev->pdev, hldev->spdm_table,
+ (sizeof(xge_hal_spdm_entry_t *) *
+ hldev->spdm_max_entries));
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ xge_os_memzero(mem, spdm_table_size);
+ for (i = 0; i < hldev->spdm_max_entries; i++) {
+ hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
+ ((char *)mem +
+ i * sizeof(xge_hal_spdm_entry_t));
+ }
+ xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
+ } else {
+ /*
+ * We are here because the host driver tries to
+ * do a soft reset on the device.
+ * Since the device soft reset clears the SPDM table, copy
+ * the entries from the local SPDM table to the actual one.
+ */
+ xge_os_spin_lock(&hldev->spdm_lock);
+ for (i = 0; i < hldev->spdm_max_entries; i++) {
+ xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
+
+ if (spdm_entry->in_use) {
+ if (__hal_spdm_entry_add(hldev,
+ &spdm_entry->src_ip,
+ &spdm_entry->dst_ip,
+ spdm_entry->l4_sp,
+ spdm_entry->l4_dp,
+ spdm_entry->is_tcp,
+ spdm_entry->is_ipv4,
+ spdm_entry->tgt_queue,
+ spdm_entry->jhash_value,
+ spdm_entry->spdm_entry)
+ != XGE_HAL_OK) {
+					/* Log a warning */
+ xge_debug_device(XGE_ERR,
+ "SPDM table update from local"
+ " memory failed");
+ }
+ }
+ }
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ }
+
+ /*
+	 * Set the receive traffic steering mode from the default (classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rts_ctrl);
+ val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+
+ /*
+ * We may not need to configure rts_rth_jhash_cfg register as the
+ * default values are good enough to calculate the hash.
+ */
+
+ /*
+ * As of now, set all the rth mask registers to zero. TODO.
+ */
+ for(i = 0; i < 5; i++) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0, &bar0->rts_rth_hash_mask[i]);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0, &bar0->rts_rth_hash_mask_5);
+
+ if (hldev->config.rth_spdm_use_l4) {
+ val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_rth_status);
+ }
+
+ val64 = XGE_HAL_RTS_RTH_EN;
+ val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_pci_init
+ * @hldev: HAL device handle.
+ *
+ * Initialize certain PCI/PCI-X configuration registers
+ * with recommended values. Save config space for future hw resets.
+ */
+static void
+__hal_device_pci_init(xge_hal_device_t *hldev)
+{
+ int i, pcisize = 0;
+ u16 cmd = 0;
+ u8 val;
+
+	/* Store the PCI device ID and revision for future reference, wherein
+	 * we decide the Xena revision using the PCI subsystem ID */
+ xge_os_pci_read16(hldev->pdev,hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, device_id),
+ &hldev->device_id);
+ xge_os_pci_read8(hldev->pdev,hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, revision),
+ &hldev->revision);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ pcisize = XGE_HAL_PCISIZE_HERC;
+ else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+ pcisize = XGE_HAL_PCISIZE_XENA;
+
+ /* save original PCI config space to restore it on device_terminate() */
+ for (i = 0; i < pcisize; i++) {
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
+ (u32*)&hldev->pci_config_space_bios + i);
+ }
+
+	/* Set the PErr Response and SERR bits in the PCI command register. */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
+ cmd |= 0x140;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
+
+	/* Set the user-specified value for the PCI Latency Timer */
+ if (hldev->config.latency_timer &&
+ hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
+ xge_os_pci_write8(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t,
+ latency_timer),
+ (u8)hldev->config.latency_timer);
+ }
+ /* Read back latency timer to reflect it into user level */
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
+ hldev->config.latency_timer = val;
+
+ /* Enable Data Parity Error Recovery in PCI-X command register. */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
+ cmd |= 1;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
+
+ /* Set MMRB count in PCI-X command register. */
+ if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
+ cmd &= 0xFFF3;
+ cmd |= hldev->config.mmrb_count << 2;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
+ cmd);
+ }
+ /* Read back MMRB count to reflect it into user level */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
+ &cmd);
+ cmd &= 0x000C;
+ hldev->config.mmrb_count = cmd>>2;
+
+ /* Setting Maximum outstanding splits based on system type. */
+ if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
+ &cmd);
+ cmd &= 0xFF8F;
+ cmd |= hldev->config.max_splits_trans << 4;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
+ cmd);
+ }
+
+ /* Read back max split trans to reflect it into user level */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
+ cmd &= 0x0070;
+ hldev->config.max_splits_trans = cmd>>4;
+
+ /* Forcibly disabling relaxed ordering capability of the card. */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
+ cmd &= 0xFFFD;
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
+
+ /* save PCI config space for future resets */
+ for (i = 0; i < pcisize; i++) {
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
+ (u32*)&hldev->pci_config_space + i);
+ }
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
+ hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+ /* Upper limit of the MSI number enabled by the system */
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, msi_control),
+ &hldev->msi_mask);
+ hldev->msi_mask &= 0x70;
+ if (!hldev->msi_mask)
+ return;
+ hldev->msi_mask >>= 4;
+ /*
+		 * Two raised to this number gives the number
+		 * of MSIs enabled.
+ */
+ hldev->msi_mask = (0x1 << hldev->msi_mask);
+ /*
+ * If 32 MSIs are enabled, then MSI numbers range from 0 - 31.
+ */
+ hldev->msi_mask -= 1;
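+		/*
+		 * Worked example: a Multiple Message Enable field of 3
+		 * means 2^3 = 8 MSIs are enabled, so msi_mask ends up
+		 * as 0x7 and the valid MSI numbers are 0..7.
+		 */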
+ }
+}
+
+/*
+ * __hal_device_pci_info_get - Get PCI bus information such as width,
+ * frequency, and mode.
+ * @devh: HAL device handle.
+ * @pci_mode: pointer to a variable of enumerated type
+ * xge_hal_pci_mode_e{}.
+ * @bus_frequency: pointer to a variable of enumerated type
+ * xge_hal_pci_bus_frequency_e{}.
+ * @bus_width: pointer to a variable of enumerated type
+ * xge_hal_pci_bus_width_e{}.
+ *
+ * Get pci mode, frequency, and PCI bus width.
+ *
+ * Returns: one of the xge_hal_status_e{} enumerated types.
+ * XGE_HAL_OK - for success.
+ * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
+ * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
+ *
+ * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e,
+ * xge_hal_pci_bus_width_e.
+ */
+static xge_hal_status_e
+__hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
+ xge_hal_pci_bus_frequency_e *bus_frequency,
+ xge_hal_pci_bus_width_e *bus_width)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_status_e rc_status = XGE_HAL_OK;
+ xge_hal_card_e card_id = xge_hal_device_check_id (devh);
+
+#ifdef XGE_HAL_HERC_EMULATION
+ hldev->config.pci_freq_mherz =
+ XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
+ *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
+#else
+ if (card_id == XGE_HAL_CARD_HERC) {
+ xge_hal_pci_bar0_t *bar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pci_info);
+ if (XGE_HAL_PCI_32_BIT & pci_info)
+ *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
+ else
+ *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
+ switch((pci_info & XGE_HAL_PCI_INFO)>>60)
+ {
+ case XGE_HAL_PCI_33MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
+ *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
+ break;
+ case XGE_HAL_PCI_66MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
+ *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M1_66MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
+ *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M1_100MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
+ *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M1_133MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
+ *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M2_66MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
+ *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M2_100MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
+ *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M2_133MHZ_MODE:
+ *bus_frequency =
+ XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
+ *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
+ break;
+ case XGE_HAL_PCIX_M1_RESERVED:
+ case XGE_HAL_PCIX_M1_66MHZ_NS:
+ case XGE_HAL_PCIX_M1_100MHZ_NS:
+ case XGE_HAL_PCIX_M1_133MHZ_NS:
+ case XGE_HAL_PCIX_M2_RESERVED:
+ case XGE_HAL_PCIX_533_RESERVED:
+ default:
+ rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
+ xge_debug_device(XGE_ERR,
+ "invalid pci info "XGE_OS_LLXFMT,
+ (unsigned long long)pci_info);
+ break;
+ }
+ if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
+ xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
+ "%d frequency %d", *pci_mode, *bus_width,
+ *bus_frequency);
+ if (hldev->config.pci_freq_mherz ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ hldev->config.pci_freq_mherz = *bus_frequency;
+ }
+ }
+	/* for XENA we report the PCI mode only; PCI bus frequency and bus
+	 * width are set to unknown */
+ else if (card_id == XGE_HAL_CARD_XENA) {
+ u32 pcix_status;
+ u8 dev_num, bus_num;
+ /* initialize defaults for XENA */
+ *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
+ *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
+ &pcix_status);
+ dev_num = (u8)((pcix_status & 0xF8) >> 3);
+ bus_num = (u8)((pcix_status & 0xFF00) >> 8);
+ if (dev_num == 0 && bus_num == 0)
+ *pci_mode = XGE_HAL_PCI_BASIC_MODE;
+ else
+ *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
+ xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
+ if (hldev->config.pci_freq_mherz ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ /*
+			 * There is no way to detect the bus frequency on
+			 * Xena, so in the case of automatic configuration
+			 * we can only assume 133MHz.
+ */
+ hldev->config.pci_freq_mherz =
+ XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
+ }
+ } else if (card_id == XGE_HAL_CARD_TITAN) {
+ *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
+ *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
+ if (hldev->config.pci_freq_mherz ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ hldev->config.pci_freq_mherz = *bus_frequency;
+ }
+	} else {
+ rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
+ xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
+ }
+#endif
+
+ return rc_status;
+}
+
+/*
+ * __hal_device_handle_link_up_ind
+ * @hldev: HAL device handle.
+ *
+ * Link up indication handler. The function is invoked by HAL when
+ * Xframe indicates that the link is up for a programmable amount of time.
+ */
+static int
+__hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ /*
+ * If the previous link state is not down, return.
+	 * If the link is already up, ignore the indication.
+ if (hldev->link_state == XGE_HAL_LINK_UP) {
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
+ val64 = xge_os_pio_mem_read64(
+ hldev->pdev, hldev->regh0,
+ &bar0->misc_int_mask);
+ val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_int_mask);
+ }
+#endif
+ xge_debug_device(XGE_TRACE,
+ "link up indication while link is up, ignoring..");
+ return 0;
+ }
+
+	/* Now re-enable it, since the hardware turned it off due to noise */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 |= XGE_HAL_ADAPTER_CNTL_EN;
+	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clear ECC_EN */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ /* Turn on the Laser */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
+ XGE_HAL_ADAPTER_LED_ON);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+ if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
+ xge_debug_device(XGE_TRACE, "%s",
+				"failed to transition the link to up...");
+ return 0;
+ }
+ else {
+ /*
+ * Mask the Link Up interrupt and unmask the Link Down
+ * interrupt.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_int_mask);
+ val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->misc_int_mask);
+ xge_debug_device(XGE_TRACE, "calling link up..");
+ hldev->link_state = XGE_HAL_LINK_UP;
+
+ /* notify ULD */
+ if (g_xge_hal_driver->uld_callbacks.link_up) {
+ g_xge_hal_driver->uld_callbacks.link_up(
+ hldev->upper_layer_info);
+ }
+ return 1;
+ }
+ }
+#endif
+ xge_os_mdelay(1);
+ if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
+ (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
+ XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
+
+ /* notify ULD */
+ (void) xge_queue_produce_context(hldev->queueh,
+ XGE_HAL_EVENT_LINK_IS_UP,
+ hldev);
+		/* link is up after being enabled */
+ return 1;
+ } else {
+ xge_debug_device(XGE_TRACE, "%s",
+			"failed to transition the link to up...");
+ return 0;
+ }
+}
+
+/*
+ * __hal_device_handle_link_down_ind
+ * @hldev: HAL device handle.
+ *
+ * Link down indication handler. The function is invoked by HAL when
+ * Xframe indicates that the link is down.
+ */
+static int
+__hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ /*
+ * If the previous link state is not up, return.
+	 * If the link is already down, ignore the indication.
+ if (hldev->link_state == XGE_HAL_LINK_DOWN) {
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
+ val64 = xge_os_pio_mem_read64(
+ hldev->pdev, hldev->regh0,
+ &bar0->misc_int_mask);
+ val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
+ val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_int_mask);
+ }
+#endif
+ xge_debug_device(XGE_TRACE,
+ "link down indication while link is down, ignoring..");
+ return 0;
+ }
+ xge_os_mdelay(1);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+
+ /* try to debounce the link only if the adapter is enabled. */
+ if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
+ if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
+ (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
+ XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
+ xge_debug_device(XGE_TRACE,
+ "link is actually up (possible noisy link?), ignoring.");
+ return(0);
+ }
+ }
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ /* turn off LED */
+ val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /*
+ * Mask the Link Down interrupt and unmask the Link up
+ * interrupt
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_int_mask);
+ val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
+ val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->misc_int_mask);
+
+ /* link is down */
+ xge_debug_device(XGE_TRACE, "calling link down..");
+ hldev->link_state = XGE_HAL_LINK_DOWN;
+
+ /* notify ULD */
+ if (g_xge_hal_driver->uld_callbacks.link_down) {
+ g_xge_hal_driver->uld_callbacks.link_down(
+ hldev->upper_layer_info);
+ }
+ return 1;
+ }
+#endif
+ /* notify ULD */
+ (void) xge_queue_produce_context(hldev->queueh,
+ XGE_HAL_EVENT_LINK_IS_DOWN,
+ hldev);
+ /* link is down */
+ return 1;
+}
+
+/*
+ * __hal_device_handle_link_state_change
+ * @hldev: HAL device handle.
+ *
+ * Link state change handler. The function is invoked by HAL when
+ * Xframe indicates link state change condition. The code here makes sure to
+ * 1) ignore redundant state change indications;
+ * 2) execute link-up sequence, and handle the failure to bring the link up;
+ * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
+ * upper-layer driver (ULD).
+ */
+static int
+__hal_device_handle_link_state_change(xge_hal_device_t *hldev)
+{
+ u64 hw_status;
+ int hw_link_state;
+ int retcode;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int i = 0;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+
+	/* If the adapter is not enabled but the HAL thinks we are in the
+	 * up state, then transition to the down state.
+ */
+ if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
+ (hldev->link_state == XGE_HAL_LINK_UP) ) {
+ return(__hal_device_handle_link_down_ind(hldev));
+ }
+
+ do {
+ xge_os_mdelay(1);
+ (void) xge_hal_device_status(hldev, &hw_status);
+ hw_link_state = (hw_status &
+ (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
+ XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
+
+ /* check if the current link state is still considered
+		 * to be changed; this makes sure that it is not just
+		 * noise that needs to be filtered out */
+ if (hldev->link_state == hw_link_state)
+ break;
+ } while (i++ < hldev->config.link_valid_cnt);
+
+	/* If the current link state is the same as the previous, just return */
+ if (hldev->link_state == hw_link_state)
+ retcode = 0;
+ /* detected state change */
+ else if (hw_link_state == XGE_HAL_LINK_UP)
+ retcode = __hal_device_handle_link_up_ind(hldev);
+ else
+ retcode = __hal_device_handle_link_down_ind(hldev);
+ return retcode;
+}
+
+/*
+ * __hal_device_handle_serr
+ * @hldev: HAL device handle.
+ *
+ * System error (SERR) handler: counts the error, optionally dumps the
+ * device state, and queues an XGE_HAL_EVENT_SERR event for the ULD.
+ */
+static void
+__hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
+{
+ hldev->stats.sw_dev_err_stats.serr_cnt++;
+ if (hldev->config.dump_on_serr) {
+#ifdef XGE_HAL_USE_MGMT_AUX
+ (void) xge_hal_aux_device_dump(hldev);
+#endif
+ }
+
+ (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
+ 1, sizeof(u64), (void *)&value);
+
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
+ (unsigned long long) value);
+}
+
+/*
+ * __hal_device_handle_eccerr
+ * @hldev: HAL device handle.
+ *
+ * ECC error handler: optionally dumps the device state and, on Xena,
+ * queues an XGE_HAL_EVENT_ECCERR event for the ULD.
+ */
+static void
+__hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
+{
+ if (hldev->config.dump_on_eccerr) {
+#ifdef XGE_HAL_USE_MGMT_AUX
+ (void) xge_hal_aux_device_dump(hldev);
+#endif
+ }
+
+	/* Herc is smart enough to recover on its own! */
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ (void) xge_queue_produce(hldev->queueh,
+ XGE_HAL_EVENT_ECCERR, hldev,
+ 1, sizeof(u64), (void *)&value);
+ }
+
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
+ (unsigned long long) value);
+}
+
+/*
+ * __hal_device_handle_parityerr
+ * @hldev: HAL device handle.
+ *
+ * Parity error handler: optionally dumps the device state and queues
+ * an XGE_HAL_EVENT_PARITYERR event for the ULD.
+ */
+static void
+__hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
+{
+ if (hldev->config.dump_on_parityerr) {
+#ifdef XGE_HAL_USE_MGMT_AUX
+ (void) xge_hal_aux_device_dump(hldev);
+#endif
+ }
+ (void) xge_queue_produce_context(hldev->queueh,
+ XGE_HAL_EVENT_PARITYERR, hldev);
+
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
+ (unsigned long long) value);
+}
+
+/*
+ * __hal_device_handle_targetabort
+ * @hldev: HAL device handle.
+ *
+ * Target abort handler: queues an XGE_HAL_EVENT_TARGETABORT event for
+ * the ULD.
+ */
+static void
+__hal_device_handle_targetabort(xge_hal_device_t *hldev)
+{
+ (void) xge_queue_produce_context(hldev->queueh,
+ XGE_HAL_EVENT_TARGETABORT, hldev);
+}
+
+
+/*
+ * __hal_device_hw_initialize
+ * @hldev: HAL device handle.
+ *
+ * Initialize Xframe hardware.
+ */
+static xge_hal_status_e
+__hal_device_hw_initialize(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ xge_hal_status_e status;
+ u64 val64;
+
+ /* Set proper endian settings and verify the same by reading the PIF
+ * Feed-back register. */
+ status = __hal_device_set_swapper(hldev);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ /* update the pci mode, frequency, and width */
+ if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
+ &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
+ hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
+ hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
+ hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
+ /*
+ * FIXME: this cannot happen.
+		 * But if it does happen, we cannot simply continue.
+ */
+ xge_debug_device(XGE_ERR, "unable to get pci info");
+ }
+
+ if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
+ (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
+ (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
+ /* PCI optimization: set TxReqTimeOut
+ * register (0x800+0x120) to 0x1ff or
+ * something close to this.
+ * Note: not to be used for PCI-X! */
+
+ val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
+ val64 |= XGE_HAL_TXREQTO_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->txreqtimeout);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
+ &bar0->read_retry_delay);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
+ &bar0->write_retry_delay);
+
+ xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
+ }
+
+ if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
+ hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {
+
+ /* Optimizing for PCI-X 266/250 */
+
+ val64 = XGE_HAL_TXREQTO_VAL(0x7F);
+ val64 |= XGE_HAL_TXREQTO_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->txreqtimeout);
+
+ xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes");
+ }
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
+ &bar0->read_retry_delay);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
+ &bar0->write_retry_delay);
+ }
+
+	/* added this to set the number of bytes used to update the
+	   lso_bytes_sent field returned in TxD0 */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pic_control_2);
+ val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
+ val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->pic_control_2);
+ /* added this to clear the EOI_RESET field while leaving XGXS_RESET
+ * in reset, then a 1-second delay */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
+ xge_os_mdelay(1000);
+
+ /* Clear the XGXS_RESET field of the SW_RESET register in order to
+ * release the XGXS from reset. Its reset value is 0xA5; write 0x00
+ * to activate the XGXS. The core requires a minimum 500 us reset.*/
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
+ (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->sw_reset);
+ xge_os_mdelay(1);
+
+ /* read registers in all blocks */
+ (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_int_mask);
+ (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_int_mask);
+ (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xgxs_int_mask);
+
+	/* set the default MTU and steer based on length; always add 22
+	 * extra bytes for length-based steering to work */
+	__hal_ring_mtu_set(hldev, hldev->config.mtu+22);
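+	/*
+	 * The 22 extra bytes presumably cover the 14-byte Ethernet
+	 * header, a 4-byte VLAN tag and the 4-byte FCS (an inference,
+	 * not documented here).
+	 */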
+
+ if (hldev->config.mac.rmac_bcast_en) {
+ xge_hal_device_bcast_enable(hldev);
+ } else {
+ xge_hal_device_bcast_disable(hldev);
+ }
+
+#ifndef XGE_HAL_HERC_EMULATION
+ __hal_device_xaui_configure(hldev);
+#endif
+	__hal_device_mac_link_util_set(hldev);
+
+ /*
+ * Keep its PCI REQ# line asserted during a write
+ * transaction up to the end of the transaction
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_control);
+
+ val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_control);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_control);
+
+ val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_control);
+ }
+
+ /*
+	 * In bimodal-interrupt mode all Rx traffic interrupts go to
+	 * TTI, so we need to adjust the RTI settings and use an
+	 * adaptive TTI timer. We must make sure RTI is configured to
+	 * a sane value that will not disrupt the bimodal behavior.
+ */
+ if (hldev->config.bimodal_interrupts) {
+ int i;
+
+		/* force polling_cnt to be "0", otherwise
+		 * IRQ workload statistics will be skewed. This could
+		 * be handled in the TXPIC handler later. */
+ hldev->config.isr_polling_cnt = 0;
+ hldev->config.sched_timer_us = 10000;
+
+ /* disable all TTI < 56 */
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
+ if (!hldev->config.fifo.queue[i].configured)
+ continue;
+ for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ if (hldev->config.fifo.queue[i].tti[j].enabled)
+ hldev->config.fifo.queue[i].tti[j].enabled = 0;
+ }
+ }
+
+ /* now configure bimodal interrupts */
+ __hal_device_bimodal_configure(hldev);
+ }
+
+ status = __hal_device_tti_configure(hldev, 0);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ status = __hal_device_rti_configure(hldev, 0);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ status = __hal_device_rth_it_configure(hldev);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ status = __hal_device_rth_spdm_configure(hldev);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ status = __hal_device_rts_mac_configure(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
+ return status;
+ }
+
+ status = __hal_device_rts_port_configure(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
+ return status;
+ }
+
+ status = __hal_device_rts_qos_configure(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
+ return status;
+ }
+
+ __hal_device_pause_frames_configure(hldev);
+ __hal_device_rmac_padding_configure(hldev);
+ __hal_device_shared_splits_configure(hldev);
+
+	/* make sure all interrupts are disabled at this point */
+ __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
+
+ /* SXE-008 Transmit DMA arbitration issue */
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
+ hldev->revision < 4) {
+ xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
+ XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
+ &bar0->pcc_enable);
+ }
+ __hal_fifo_hw_initialize(hldev);
+ __hal_ring_hw_initialize(hldev);
+
+ if (__hal_device_wait_quiescent(hldev, &val64)) {
+ return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+
+ if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
+ XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
+ XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
+ return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+
+ xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
+ (unsigned long long)(ulong_t)hldev);
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
+ hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
+ /*
+ * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
+ * is disabled.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pic_control);
+ val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->pic_control);
+ }
+
+ hldev->hw_is_initialized = 1;
+ hldev->terminating = 0;
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_reset - Reset device only.
+ * @hldev: HAL device handle.
+ *
+ * Reset the device, and subsequently restore
+ * the previously saved PCI configuration space.
+ */
+#define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
+static xge_hal_status_e
+__hal_device_reset(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ int i, j, swap_done, pcisize = 0;
+ u64 val64, rawval = 0ULL;
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ if ( hldev->bar2 ) {
+ u64 *msix_vector_table = (u64 *)hldev->bar2;
+
+ /* 2 64bit words for each entry */
+ for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
+ i++) {
+ hldev->msix_vector_table[i] =
+ xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh2, &msix_vector_table[i]);
+ }
+ }
+ }
+ }
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pif_rd_swapper_fb);
+ swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
+
+ if (swap_done) {
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
+ } else {
+ u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
+#if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
+ /* swap it */
+ val = (((val & (u32)0x000000ffUL) << 24) |
+ ((val & (u32)0x0000ff00UL) << 8) |
+ ((val & (u32)0x00ff0000UL) >> 8) |
+ ((val & (u32)0xff000000UL) >> 24));
+#endif
+ xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
+ &bar0->sw_reset);
+ }
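+	/*
+	 * Worked example of the open-coded swap above (the input value is
+	 * illustrative; the real constant is XGE_HAL_SW_RESET_ALL). If the
+	 * upper 32 bits of the reset key were val = 0xA5A5A500, then:
+	 *
+	 *   (val & 0x000000ff) << 24 = 0x00000000
+	 *   (val & 0x0000ff00) <<  8 = 0x00A50000
+	 *   (val & 0x00ff0000) >>  8 = 0x0000A500
+	 *   (val & 0xff000000) >> 24 = 0x000000A5
+	 *
+	 * OR-ing the lanes gives 0x00A5A5A5: the byte order is reversed
+	 * before the PIO write to compensate for the not-yet-configured
+	 * hardware swapper.
+	 */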
+
+ pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
+ XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
+
+ xge_os_mdelay(20); /* Wait for 20 ms after reset */
+
+ {
+ /* Poll for no more than 1 second */
+ for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
+ {
+ for (j = 0; j < pcisize; j++) {
+ xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
+ *((u32*)&hldev->pci_config_space + j));
+ }
+
+ xge_os_pci_read16(hldev->pdev,hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, device_id),
+ &hldev->device_id);
+
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
+ break;
+ xge_os_mdelay(20);
+ }
+ }
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
+ {
+ xge_debug_device(XGE_ERR, "device reset failed");
+ return XGE_HAL_ERR_RESET_FAILED;
+ }
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ int cnt = 0;
+
+ rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
+ pcisize = XGE_HAL_PCISIZE_HERC;
+ xge_os_mdelay(1);
+ do {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->sw_reset);
+ if (val64 != rawval) {
+ break;
+ }
+ cnt++;
+ xge_os_mdelay(1); /* Wait for 1ms before retry */
+ } while(cnt < 20);
+ } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
+ pcisize = XGE_HAL_PCISIZE_XENA;
+ xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
+ }
+
+ /* Restore MSI-X vector table */
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ if ( hldev->bar2 ) {
+ /*
+ * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
+ * 98: PBATable 00000404 ( BIR:4 Offset:0x400 )
+ */
+ u64 *msix_vector_table = (u64 *)hldev->bar2;
+
+ /* 2 64bit words for each entry */
+ for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
+ i++) {
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh2,
+ hldev->msix_vector_table[i],
+ &msix_vector_table[i]);
+ }
+ }
+ }
+ }
+
+ hldev->link_state = XGE_HAL_LINK_DOWN;
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->sw_reset);
+
+ if (val64 != rawval) {
+ xge_debug_device(XGE_ERR, "device has not been reset "
+ "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64, (unsigned long long)rawval);
+ return XGE_HAL_ERR_RESET_FAILED;
+ }
+
+ hldev->hw_is_initialized = 0;
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_poll - General private routine to poll the device.
+ * @hldev: HAL device handle.
+ *
+ * Returns: one of the xge_hal_status_e{} enumerated types.
+ * XGE_HAL_OK - for success.
+ * XGE_HAL_ERR_CRITICAL - when a critical error is encountered.
+ */
+static xge_hal_status_e
+__hal_device_poll(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0;
+ u64 err_reg;
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /* Handling SERR errors by forcing a H/W reset. */
+ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->serr_source);
+ if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
+ __hal_device_handle_serr(hldev, "serr_source", err_reg);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+
+ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_int_reg);
+
+ if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
+ hldev->stats.sw_dev_err_stats.parity_err_cnt++;
+ __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+#endif
+ {
+
+ /* Handling link status change error Intr */
+ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_rmac_err_reg);
+ if (__hal_device_handle_link_state_change(hldev))
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err_reg, &bar0->mac_rmac_err_reg);
+ }
+
+ if (hldev->inject_serr != 0) {
+ err_reg = hldev->inject_serr;
+ hldev->inject_serr = 0;
+ __hal_device_handle_serr(hldev, "inject_serr", err_reg);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+
+ if (hldev->inject_ecc != 0) {
+ err_reg = hldev->inject_ecc;
+ hldev->inject_ecc = 0;
+ hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
+ __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+
+ if (hldev->inject_bad_tcode != 0) {
+ u8 t_code = hldev->inject_bad_tcode;
+ xge_hal_channel_t channel;
+ xge_hal_fifo_txd_t txd;
+ xge_hal_ring_rxd_1_t rxd;
+
+ channel.devh = hldev;
+
+ if (hldev->inject_bad_tcode_for_chan_type ==
+ XGE_HAL_CHANNEL_TYPE_FIFO) {
+ channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
+
+ } else {
+ channel.type = XGE_HAL_CHANNEL_TYPE_RING;
+ }
+
+ hldev->inject_bad_tcode = 0;
+
+ if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
+ return xge_hal_device_handle_tcode(&channel, &txd,
+ t_code);
+ else
+ return xge_hal_device_handle_tcode(&channel, &rxd,
+ t_code);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_verify_pcc_idle - Verify that all enabled PCCs are IDLE.
+ * @hldev: HAL device handle.
+ * @adp_status: Adapter Status value
+ * Usage: See xge_hal_device_enable{}.
+ */
+xge_hal_status_e
+__hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
+{
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
+ hldev->revision < 4) {
+ /*
+ * For Xena 1,2,3 we enable only 4 PCCs Due to
+ * SXE-008 (Transmit DMA arbitration issue)
+ */
+ if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
+ != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
+ xge_debug_device(XGE_TRACE, "%s",
+ "PCC is not IDLE after adapter enabled!");
+ return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+ } else {
+ if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
+ XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
+ xge_debug_device(XGE_TRACE, "%s",
+ "PCC is not IDLE after adapter enabled!");
+ return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+ }
+ return XGE_HAL_OK;
+}
+
+static void
+__hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
+{
+ int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
+ int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
+ int iwl_cnt, i;
+
+#define _HIST_SIZE 50 /* 0.5 sec history */
+#define _HIST_ADJ_TIMER 1
+#define _STEP 2
+
+ static int bytes_avg_history[_HIST_SIZE] = {0};
+ static int d_avg_history[_HIST_SIZE] = {0};
+ static int history_idx = 0;
+ static int pstep = 1;
+ static int hist_adj_timer = 0;
+
+ /*
+ * tval - current value of this bimodal timer
+ */
+ tval = hldev->bimodal_tti[ring_no].timer_val_us;
+
+ /*
+	 * d - how many interrupts we have received since the last
+ * bimodal timer tick.
+ */
+ d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
+ hldev->bimodal_intr_cnt;
+
+ /* advance bimodal interrupt counter */
+ hldev->bimodal_intr_cnt =
+ hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
+
+ /*
+	 * iwl_cnt - how many workload interrupts were counted since the last
+ * bimodal timer tick.
+ */
+ iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
+ hldev->irq_workload_rxcnt[ring_no] : 1);
+ iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
+ hldev->irq_workload_txcnt[ring_no] : 1);
+ iwl_cnt = iwl_rxcnt + iwl_txcnt;
+ iwl_cnt = iwl_cnt; /* just to remove the lint warning */
+
+ /*
+ * we need to take hldev->config.isr_polling_cnt into account
+	 * but for some reason this line causes GCC to produce wrong
+	 * code on Solaris. As of now, if bimodal_interrupts is configured,
+	 * hldev->config.isr_polling_cnt is forced to be "0".
+ *
+ * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
+
+ /*
+	 * iwl_avg - how many RXDs have been processed on average since
+	 * the last bimodal timer tick. This indirectly reflects
+	 * CPU utilization.
+ */
+ iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
+ iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
+ iwl_avg = iwl_rxavg + iwl_txavg;
+ iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
+
+ /*
+	 * len_avg - how many bytes have been processed on average since
+	 * the last bimodal timer tick, i.e. the average frame size.
+ */
+ len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
+ (hldev->irq_workload_rxd[ring_no] ?
+ hldev->irq_workload_rxd[ring_no] : 1);
+ len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
+ (hldev->irq_workload_txd[ring_no] ?
+ hldev->irq_workload_txd[ring_no] : 1);
+ len_avg = len_rxavg + len_txavg;
+ if (len_avg < 60)
+ len_avg = 60;
+
+ /* align on low boundary */
+ if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
+ tval = hldev->config.bimodal_timer_lo_us;
+
+ /* reset faster */
+ if (iwl_avg == 1) {
+ tval = hldev->config.bimodal_timer_lo_us;
+ /* reset history */
+ for (i = 0; i < _HIST_SIZE; i++)
+ bytes_avg_history[i] = d_avg_history[i] = 0;
+ history_idx = 0;
+ pstep = 1;
+ hist_adj_timer = 0;
+ }
+
+	/* always try to adjust the timer toward the best throughput value */
+ bytes_avg = iwl_avg * len_avg;
+ history_idx %= _HIST_SIZE;
+ bytes_avg_history[history_idx] = bytes_avg;
+ d_avg_history[history_idx] = d;
+ history_idx++;
+ d_hist = bytes_hist = 0;
+ for (i = 0; i < _HIST_SIZE; i++) {
+ /* do not re-configure until history is gathered */
+ if (!bytes_avg_history[i]) {
+ tval = hldev->config.bimodal_timer_lo_us;
+ goto _end;
+ }
+ bytes_hist += bytes_avg_history[i];
+ d_hist += d_avg_history[i];
+ }
+ bytes_hist /= _HIST_SIZE;
+ d_hist /= _HIST_SIZE;
+
+// xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
+// d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
+// d_hist*bytes_hist, pstep);
+
+ /* make an adaptive step */
+ if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
+ pstep = !pstep;
+ hist_adj_timer = 0;
+ }
+
+ if (pstep &&
+ (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
+ tval += _STEP;
+ hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
+ } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
+ tval -= _STEP;
+ hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
+ }
+
+ /* enable TTI range A for better latencies */
+ hldev->bimodal_urange_a_en = 0;
+ if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
+ hldev->bimodal_urange_a_en = 1;
+
+_end:
+ /* reset workload statistics counters */
+ hldev->irq_workload_rxcnt[ring_no] = 0;
+ hldev->irq_workload_rxd[ring_no] = 0;
+ hldev->irq_workload_rxlen[ring_no] = 0;
+ hldev->irq_workload_txcnt[ring_no] = 0;
+ hldev->irq_workload_txd[ring_no] = 0;
+ hldev->irq_workload_txlen[ring_no] = 0;
+
+ /* reconfigure TTI56 + ring_no with new timer value */
+ hldev->bimodal_timer_val_us = tval;
+ (void) __hal_device_rti_configure(hldev, 1);
+}
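+/*
+ * Numeric sketch of the adaptive step above (the bounds are hypothetical;
+ * see bimodal_timer_lo_us/hi_us in the device configuration). Suppose the
+ * configured range is 24..104 us and tval is currently 50 us. While the
+ * throughput metric d * bytes_avg stays at or above its 0.5 sec history
+ * average, pstep keeps its direction and tval keeps stepping by _STEP
+ * (2 us) toward one bound, bumping bimodal_hi_adjust_cnt or
+ * bimodal_lo_adjust_cnt. Once the metric falls below the history average
+ * for more than _HIST_ADJ_TIMER ticks, pstep flips and tval walks back
+ * the other way.
+ */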
+
+static void
+__hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
+{
+ int ufc, ic, i;
+
+ ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
+ ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
+
+ /* urange_a adaptive coalescing */
+ if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
+ if (ic > hldev->rxufca_intr_thres) {
+ if (ufc < hldev->config.rxufca_hi_lim) {
+ ufc += 1;
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
+ hldev->config.ring.queue[i].rti.ufc_a = ufc;
+ (void) __hal_device_rti_configure(hldev, 1);
+ hldev->stats.sw_dev_info_stats.
+ rxufca_hi_adjust_cnt++;
+ }
+ hldev->rxufca_intr_thres = ic +
+ hldev->config.rxufca_intr_thres; /* def: 30 */
+ } else {
+ if (ufc > hldev->config.rxufca_lo_lim) {
+ ufc -= 1;
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
+ hldev->config.ring.queue[i].rti.ufc_a = ufc;
+ (void) __hal_device_rti_configure(hldev, 1);
+ hldev->stats.sw_dev_info_stats.
+ rxufca_lo_adjust_cnt++;
+ }
+ }
+ hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
+ hldev->config.rxufca_lbolt_period;
+ }
+ hldev->rxufca_lbolt++;
+}
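+/*
+ * Sketch of the urange_a adaptation above (limits are hypothetical; see
+ * rxufca_lo_lim/hi_lim in the device configuration). Say lo_lim = 1,
+ * hi_lim = 16 and rxufca_intr_thres = 30 (the default noted above): on
+ * every rxufca_lbolt_period expiry the Rx interrupt count is sampled; if
+ * more than 30 new interrupts arrived, ufc_a is raised by one (up to 16)
+ * so that more frames are coalesced per interrupt, otherwise it is
+ * lowered (down to 1) to favor latency. Either branch re-programs the
+ * RTI through __hal_device_rti_configure(hldev, 1).
+ */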
+
+/*
+ * __hal_device_handle_mc - Handle MC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mc_int_status);
+ if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
+ return XGE_HAL_OK;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mc_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->mc_err_reg);
+
+ if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
+ val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
+ val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
+ (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
+ (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
+ val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
+ hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
+ hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
+ }
+
+ if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
+ val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
+ val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
+ (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
+ (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
+ val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
+ val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
+ hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
+ hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
+ }
+
+ if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
+ hldev->stats.sw_dev_err_stats.sm_err_cnt++;
+ }
+
+	/* these two should result in a device reset */
+ if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
+ val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
+ __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64;
+
+ if (reason & XGE_HAL_PIC_INT_FLSH) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->flsh_int_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->flsh_int_reg);
+ /* FIXME: handle register */
+ }
+ if (reason & XGE_HAL_PIC_INT_MDIO) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mdio_int_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->mdio_int_reg);
+ /* FIXME: handle register */
+ }
+ if (reason & XGE_HAL_PIC_INT_IIC) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->iic_int_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->iic_int_reg);
+ /* FIXME: handle register */
+ }
+ if (reason & XGE_HAL_PIC_INT_MISC) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &isrbar0->misc_int_reg);
+#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /* Check for Link interrupts. If both Link Up/Down
+ * bits are set, clear both and check adapter status
+ */
+ if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
+ (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
+ u64 temp64;
+
+ xge_debug_device(XGE_TRACE,
+ "both link up and link down detected "XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
+ XGE_HAL_MISC_INT_REG_LINK_UP_INT);
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, temp64,
+ &isrbar0->misc_int_reg);
+ }
+ else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
+ xge_debug_device(XGE_TRACE,
+ "link up call request, misc_int "XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+ __hal_device_handle_link_up_ind(hldev);
+ }
+ else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
+ xge_debug_device(XGE_TRACE,
+ "link down request, misc_int "XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+ __hal_device_handle_link_down_ind(hldev);
+ }
+ } else
+#endif
+ {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->misc_int_reg);
+ }
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_txpic - Handle TxPIC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_status_e status = XGE_HAL_OK;
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ volatile u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->pic_int_status);
+ if ( val64 & (XGE_HAL_PIC_INT_FLSH |
+ XGE_HAL_PIC_INT_MDIO |
+ XGE_HAL_PIC_INT_IIC |
+ XGE_HAL_PIC_INT_MISC) ) {
+ status = __hal_device_handle_pic(hldev, val64);
+ xge_os_wmb();
+ }
+
+ if (!(val64 & XGE_HAL_PIC_INT_TX))
+ return status;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->txpic_int_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->txpic_int_reg);
+ xge_os_wmb();
+
+ if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
+ int i;
+
+ if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
+ g_xge_hal_driver->uld_callbacks.sched_timer(
+ hldev, hldev->upper_layer_info);
+ /*
+		 * This feature implements adaptive receive interrupt
+		 * coalescing. It is disabled by default. To enable it,
+		 * set hldev->config.rxufca_lo_lim to a value different
+		 * from hldev->config.rxufca_hi_lim.
+		 *
+		 * We are using a HW timer for this feature, so the
+		 * user needs to configure hldev->config.rxufca_lbolt_period,
+		 * which is essentially a time slice of that timer.
+		 *
+		 * For those familiar with Linux, lbolt is the jiffies
+		 * counter of this timer, i.e. the timer tick.
+ */
+ if (hldev->config.rxufca_lo_lim !=
+ hldev->config.rxufca_hi_lim &&
+ hldev->config.rxufca_lo_lim != 0) {
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (hldev->config.ring.queue[i].rti.urange_a)
+ __hal_update_rxufca(hldev, i);
+ }
+ }
+
+ /*
+ * This feature implements adaptive TTI timer re-calculation
+		 * based on host utilization, the number of interrupts
+		 * processed, the number of RXDs per tick, and the average
+		 * packet length per tick.
+ */
+ if (hldev->config.bimodal_interrupts) {
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (hldev->bimodal_tti[i].enabled)
+ __hal_update_bimodal(hldev, i);
+ }
+ }
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_txdma - Handle TxDMA interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, temp64, err;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->txdma_int_status);
+ if (val64 & XGE_HAL_TXDMA_PFC_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->pfc_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->pfc_err_reg);
+ hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
+ temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
+ |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
+ |XGE_HAL_PFC_PCIX_ERR;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_TDA_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->tda_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->tda_err_reg);
+ hldev->stats.sw_dev_info_stats.tda_err_cnt++;
+ temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
+ |XGE_HAL_TDA_SM1_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_PCC_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->pcc_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->pcc_err_reg);
+ hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
+ temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
+ |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
+ |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
+ |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
+ |XGE_HAL_PCC_7_LSO_OV_ERR;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_TTI_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->tti_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->tti_err_reg);
+ hldev->stats.sw_dev_info_stats.tti_err_cnt++;
+ temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_LSO_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->lso_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->lso_err_reg);
+ hldev->stats.sw_dev_info_stats.lso_err_cnt++;
+ temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
+ |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_TPA_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->tpa_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->tpa_err_reg);
+ hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
+ temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_TXDMA_SM_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->sm_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->sm_err_reg);
+ hldev->stats.sw_dev_info_stats.sm_err_cnt++;
+ temp64 = XGE_HAL_SM_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+
+ return XGE_HAL_OK;
+
+reset : xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ return XGE_HAL_OK;
+}
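+/*
+ * Recovery note: the reset label above runs the full recovery cycle -
+ * xge_hal_device_reset(), then xge_hal_device_enable(), then
+ * xge_hal_device_intr_enable() - whenever one of the fatal alarm bits is
+ * set; the alarm handlers below repeat the same pattern for their own
+ * fatal conditions.
+ */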
+
+/*
+ * __hal_device_handle_txmac - Handle TxMAC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, temp64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mac_int_status);
+ if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
+ return XGE_HAL_OK;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mac_tmac_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->mac_tmac_err_reg);
+ hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
+ temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
+ if (val64 & temp64) {
+ xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, temp64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->xgxs_int_status);
+ if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
+ return XGE_HAL_OK;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->xgxs_txgxs_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->xgxs_txgxs_err_reg);
+ hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;
+ temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
+ if (val64 & temp64) {
+ xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
+{
+ /* FIXME: handle register */
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, err, temp64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->rxdma_int_status);
+ if (val64 & XGE_HAL_RXDMA_RC_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->rc_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->rc_err_reg);
+ hldev->stats.sw_dev_info_stats.rc_err_cnt++;
+ temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
+ |XGE_HAL_RC_PRCn_SM_ERR_ALARM
+ |XGE_HAL_RC_FTC_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_RXDMA_RPA_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->rpa_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->rpa_err_reg);
+ hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
+ temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_RXDMA_RDA_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->rda_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->rda_err_reg);
+ hldev->stats.sw_dev_info_stats.rda_err_cnt++;
+ temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
+ |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
+ |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
+ |XGE_HAL_RDA_RXD_ECC_DB_SERR;
+ if (val64 & temp64)
+ goto reset;
+ }
+ if (val64 & XGE_HAL_RXDMA_RTI_INT) {
+ err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->rti_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ err, &isrbar0->rti_err_reg);
+ hldev->stats.sw_dev_info_stats.rti_err_cnt++;
+ temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
+ if (val64 & temp64)
+ goto reset;
+ }
+
+ return XGE_HAL_OK;
+
+reset : xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, temp64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mac_int_status);
+ if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
+ return XGE_HAL_OK;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->mac_rmac_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->mac_rmac_err_reg);
+ hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
+ temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
+ if (val64 & temp64) {
+ xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
+ * @hldev: HAL device handle.
+ * @reason: interrupt reason
+ */
+xge_hal_status_e
+__hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
+{
+ xge_hal_pci_bar0_t *isrbar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ u64 val64, temp64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->xgxs_int_status);
+ if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
+ return XGE_HAL_OK;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->xgxs_rxgxs_err_reg);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &isrbar0->xgxs_rxgxs_err_reg);
+ hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;
+ temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
+ if (val64 & temp64) {
+ xge_hal_device_reset(hldev);
+ xge_hal_device_enable(hldev);
+ xge_hal_device_intr_enable(hldev);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_enable - Enable device.
+ * @hldev: HAL device handle.
+ *
+ * Enable the specified device: bring up the link/interface.
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
+ * to a "quiescent" state.
+ *
+ * See also: xge_hal_status_e{}.
+ *
+ * Usage: See ex_open{}.
+ */
+xge_hal_status_e
+xge_hal_device_enable(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ u64 adp_status;
+ int i, j;
+
+ if (!hldev->hw_is_initialized) {
+ xge_hal_status_e status;
+
+ status = __hal_device_hw_initialize(hldev);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ }
+
+ /*
+ * Not needed in most cases, i.e.
+ * when device_disable() is followed by reset -
+ * the latter copies back PCI config space, along with
+ * the bus mastership - see __hal_device_reset().
+	 * However, there are (and may in the future be) other cases,
+	 * and doing it here does not hurt.
+ */
+ __hal_device_bus_master_enable(hldev);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /*
+ * Configure the link stability period.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_control);
+ if (hldev->config.link_stability_period !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+
+ val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
+ hldev->config.link_stability_period);
+ } else {
+ /*
+ * Use the link stability period 1 ms as default
+ */
+ val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
+ XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_control);
+
+ /*
+ * Clearing any possible Link up/down interrupts that
+ * could have popped up just before Enabling the card.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->misc_int_reg);
+ if (val64) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->misc_int_reg);
+ xge_debug_device(XGE_TRACE, "%s","link state cleared");
+ }
+ } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ /*
+ * Clearing any possible Link state change interrupts that
+ * could have popped up just before Enabling the card.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_rmac_err_reg);
+ if (val64) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->mac_rmac_err_reg);
+ xge_debug_device(XGE_TRACE, "%s", "link state cleared");
+ }
+ }
+
+ if (__hal_device_wait_quiescent(hldev, &val64)) {
+ return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+
+ /* Enabling Laser. */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ /* let link establish */
+ xge_os_mdelay(1);
+
+	/* set link down until the poll() routine brings it up (maybe) */
+ hldev->link_state = XGE_HAL_LINK_DOWN;
+
+	/* If link is UP (adapter is connected) then enable the adapter */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+ if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
+ } else {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
+ XGE_HAL_ADAPTER_LED_ON );
+ }
+
+ val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */
+ val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
+ xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ /* We spin here waiting for the Link to come up.
+ * This is the fix for the Link being unstable after the reset. */
+ i = 0;
+ j = 0;
+ do
+ {
+ adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+
+ /* Read the adapter control register for Adapter_enable bit */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
+ (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
+ j++;
+ if (j >= hldev->config.link_valid_cnt) {
+ if (xge_hal_device_status(hldev, &adp_status) ==
+ XGE_HAL_OK) {
+ if (__hal_verify_pcc_idle(hldev,
+ adp_status) != XGE_HAL_OK) {
+ return
+ XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+ xge_debug_device(XGE_TRACE,
+ "adp_status: "XGE_OS_LLXFMT
+ ", link is up on "
+ "adapter enable!",
+ (unsigned long long)adp_status);
+ val64 = xge_os_pio_mem_read64(
+ hldev->pdev,
+ hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64|
+ (XGE_HAL_ADAPTER_EOI_TX_ON |
+ XGE_HAL_ADAPTER_LED_ON );
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, val64,
+ &bar0->adapter_control);
+ xge_os_mdelay(1);
+
+ val64 = xge_os_pio_mem_read64(
+ hldev->pdev,
+ hldev->regh0,
+ &bar0->adapter_control);
+ break; /* out of the retry loop */
+ } else {
+ return
+ XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+ }
+ } else {
+ j = 0; /* Reset the count */
+ /* Turn on the Laser */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
+ xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
+ val64, &bar0->adapter_control);
+
+ xge_os_mdelay(1);
+
+			/* Now re-enable it, since noise may have caused
+			 * the hardware to turn it off */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 |= XGE_HAL_ADAPTER_CNTL_EN;
+ val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+ }
+ xge_os_mdelay(1); /* Sleep for 1 msec */
+ i++;
+ } while (i < hldev->config.link_retry_cnt);
+
+ __hal_device_led_actifity_fix(hldev);
+
+#ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
+	/* Here we are performing a soft reset on the XGXS to force the
+	 * link down. Since the link is already up, we will get a link
+	 * state change poll notification after the adapter is enabled. */
+
+ __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
+ &bar0->dtx_control);
+ (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
+
+ __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
+ &bar0->dtx_control);
+ (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
+
+ __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
+ &bar0->dtx_control);
+ (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
+
+	xge_os_mdelay(100); /* Sleep for 100 msec */
+#else
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+#endif
+ {
+ /*
+ * With some switches the link state change interrupt does not
+ * occur even though the xgxs reset is done as per SPN-006. So,
+ * poll the adapter status register and check if the link state
+ * is ok.
+ */
+ adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+ if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
+ {
+ xge_debug_device(XGE_TRACE, "%s",
+ "enable device causing link state change ind..");
+ (void) __hal_device_handle_link_state_change(hldev);
+ }
+ }
+
+ if (hldev->config.stats_refresh_time_sec !=
+ XGE_HAL_STATS_REFRESH_DISABLE)
+ __hal_stats_enable(&hldev->stats);
+
+ return XGE_HAL_OK;
+}
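+/*
+ * Minimal bring-up sketch (cf. the ex_open{} example referenced above);
+ * error handling is elided and the intermediate ULD steps are
+ * assumptions:
+ *
+ *	if (xge_hal_device_enable(hldev) != XGE_HAL_OK)
+ *		return;			// adapter failed to come up
+ *	// ... open fifo/ring channels, replenish RxDs ...
+ *	xge_hal_device_intr_enable(hldev);	// interrupts come last
+ */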
+
+/**
+ * xge_hal_device_disable - Disable Xframe adapter.
+ * @hldev: Device handle.
+ *
+ * Disable this device. To gracefully reset the adapter, the host should:
+ *
+ * - call xge_hal_device_disable();
+ *
+ * - call xge_hal_device_intr_disable();
+ *
+ * - close all opened channels and clean up outstanding resources;
+ *
+ * - do some work (error recovery, change mtu, reset, etc);
+ *
+ * - call xge_hal_device_enable();
+ *
+ * - open channels, replenish RxDs, etc.
+ *
+ * - call xge_hal_device_intr_enable().
+ *
+ * Note: Disabling the device does _not_ include disabling of interrupts.
+ * After disabling, the device stops receiving new frames, but frames
+ * that were already in the pipe will keep coming for a few milliseconds.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
+ * a "quiescent" state.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_device_disable(xge_hal_device_t *hldev)
+{
+ xge_hal_status_e status = XGE_HAL_OK;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
+ status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+
+ if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
+ XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
+ XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
+ status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
+ }
+
+ if (hldev->config.stats_refresh_time_sec !=
+ XGE_HAL_STATS_REFRESH_DISABLE)
+ __hal_stats_disable(&hldev->stats);
+#ifdef XGE_DEBUG_ASSERT
+ else
+ xge_assert(!hldev->stats.is_enabled);
+#endif
+
+#ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
+ __hal_device_bus_master_disable(hldev);
+#endif
+
+ return status;
+}
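+/*
+ * The graceful reset sequence described in the comment above, as a
+ * sketch (locking and ULD-specific teardown are omitted):
+ *
+ *	(void) xge_hal_device_disable(hldev);
+ *	xge_hal_device_intr_disable(hldev);
+ *	// close channels, reclaim outstanding descriptors ...
+ *	(void) xge_hal_device_reset(hldev);
+ *	(void) xge_hal_device_enable(hldev);
+ *	// re-open channels, replenish RxDs ...
+ *	xge_hal_device_intr_enable(hldev);
+ */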
+
+/**
+ * xge_hal_device_reset - Reset device.
+ * @hldev: HAL device handle.
+ *
+ * Soft-reset the device, reset the device stats except reset_cnt.
+ *
+ * After reset is done, will try to re-initialize HW.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
+ * XGE_HAL_ERR_RESET_FAILED - Reset failed.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_device_reset(xge_hal_device_t *hldev)
+{
+ xge_hal_status_e status;
+
+ /* increment the soft reset counter */
+ u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
+
+ xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
+
+ if (!hldev->is_initialized)
+ return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
+
+ /* actual "soft" reset of the adapter */
+ status = __hal_device_reset(hldev);
+
+ /* reset all stats including saved */
+ __hal_stats_soft_reset(hldev, 1);
+
+ /* increment reset counter */
+ hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
+
+ /* re-initialize rxufca_intr_thres */
+ hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
+
+ hldev->reset_needed_after_close = 0;
+
+ return status;
+}
+
+/**
+ * xge_hal_device_status - Check whether Xframe hardware is ready for
+ * operation.
+ * @hldev: HAL device handle.
+ * @hw_status: Xframe status register. Returned by HAL.
+ *
+ * Check whether Xframe hardware is ready for operation.
+ * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
+ * hardware functional blocks.
+ *
+ * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
+ * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
+ *
+ * See also: xge_hal_status_e{}.
+ * Usage: See ex_open{}.
+ */
+xge_hal_status_e
+xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 tmp64;
+
+ tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+
+ *hw_status = tmp64;
+
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
+ xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
+ xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
+ xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
+ xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
+ xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
+ xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
+ xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
+ return XGE_HAL_FAIL;
+ }
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
+ xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
+ return XGE_HAL_FAIL;
+ }
+#ifndef XGE_HAL_HERC_EMULATION
+ /*
+	 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore
+	 * the P_PLL_LOCK bit in the adapter_status register will
+ * not be asserted.
+ */
+ if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
+ xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
+ hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
+ xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
+ return XGE_HAL_FAIL;
+ }
+#endif
+
+ return XGE_HAL_OK;
+}
+
+void
+__hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
+{
+ u16 msi_control_reg;
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t,
+ msi_control), &msi_control_reg);
+
+ if (flag)
+ msi_control_reg |= 0x1;
+ else
+ msi_control_reg &= ~0x1;
+
+ xge_os_pci_write16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t,
+ msi_control), msi_control_reg);
+}
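+/*
+ * Note: bit 0 of the MSI message-control word is the PCI-defined "MSI
+ * Enable" bit, so the |= 0x1 / &= ~0x1 above toggles MSI message
+ * generation for the whole function.
+ */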
+
+void
+__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
+ xge_hal_channel_t *channel, int flag)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_mask_reg);
+
+ if (flag)
+ val64 &= ~(1LL << ( 63 - channel->msix_idx ));
+ else
+ val64 |= (1LL << ( 63 - channel->msix_idx ));
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->xmsi_mask_reg);
+}
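+/*
+ * Illustration of the xmsi_mask_reg bit math above: MSI-X vector N maps
+ * to mask bit (63 - N), so vector 1 corresponds to (1LL << 62). A set
+ * bit masks (disables) the vector; hence flag != 0 clears the bit to
+ * unmask it, and flag == 0 sets the bit to mask it.
+ */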
+
+/**
+ * xge_hal_device_intr_enable - Enable Xframe interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Enable Xframe interrupts. The function is to be executed the last in
+ * Xframe initialization sequence.
+ *
+ * See also: xge_hal_device_intr_disable()
+ */
+void
+xge_hal_device_intr_enable(xge_hal_device_t *hldev)
+{
+ xge_list_t *item;
+ u64 val64;
+
+ /* PRC initialization and configuration */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ xge_hal_channel_h channel;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ __hal_ring_prc_enable(channel);
+ }
+
+ /* enable traffic only interrupts */
+ if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
+ /*
+		 * make sure all interrupts are disabled if MSI
+		 * is enabled.
+ */
+ __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
+ } else {
+ /*
+ * Enable the Tx traffic interrupts only if the TTI feature is
+ * enabled.
+ */
+ val64 = 0;
+ if (hldev->tti_enabled)
+ val64 = XGE_HAL_TX_TRAFFIC_INTR;
+
+ if (!hldev->config.bimodal_interrupts)
+ val64 |= XGE_HAL_RX_TRAFFIC_INTR;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+ val64 |= XGE_HAL_RX_TRAFFIC_INTR;
+
+ val64 |=XGE_HAL_TX_PIC_INTR |
+ XGE_HAL_MC_INTR |
+ XGE_HAL_TX_DMA_INTR |
+ (hldev->config.sched_timer_us !=
+ XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
+ __hal_device_intr_mgmt(hldev, val64, 1);
+ }
+
+ /*
+ * Enable MSI-X interrupts
+ */
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /*
+ * To enable MSI-X, MSI also needs to be enabled,
+ * due to a bug in the herc NIC.
+ */
+ __hal_device_msi_intr_endis(hldev, 1);
+ }
+
+
+ /* Enable the MSI-X interrupt for each configured channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ xge_hal_channel_t *channel;
+
+ channel = xge_container_of(item,
+ xge_hal_channel_t, item);
+
+ /* 0 vector is reserved for alarms */
+ if (!channel->msix_idx)
+ continue;
+
+ __hal_device_msix_intr_endis(hldev, channel, 1);
+ }
+
+ xge_list_for_each(item, &hldev->ring_channels) {
+ xge_hal_channel_t *channel;
+
+ channel = xge_container_of(item,
+ xge_hal_channel_t, item);
+
+ /* 0 vector is reserved for alarms */
+ if (!channel->msix_idx)
+ continue;
+
+ __hal_device_msix_intr_endis(hldev, channel, 1);
+ }
+ }
+
+ xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
+}
+
+
+/**
+ * xge_hal_device_intr_disable - Disable Xframe interrupts.
+ * @hldev: HAL device handle.
+ *
+ * Disable Xframe interrupts.
+ *
+ * See also: xge_hal_device_intr_enable()
+ */
+void
+xge_hal_device_intr_disable(xge_hal_device_t *hldev)
+{
+ xge_list_t *item;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /*
+ * To disable MSI-X, MSI also needs to be disabled,
+ * due to a bug in the herc NIC.
+ */
+ __hal_device_msi_intr_endis(hldev, 0);
+ }
+
+ /* Disable the MSI-X interrupt for each configured channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ xge_hal_channel_t *channel;
+
+ channel = xge_container_of(item,
+ xge_hal_channel_t, item);
+
+ /* 0 vector is reserved for alarms */
+ if (!channel->msix_idx)
+ continue;
+
+ __hal_device_msix_intr_endis(hldev, channel, 0);
+
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
+ &bar0->tx_traffic_mask);
+
+ xge_list_for_each(item, &hldev->ring_channels) {
+ xge_hal_channel_t *channel;
+
+ channel = xge_container_of(item,
+ xge_hal_channel_t, item);
+
+ /* 0 vector is reserved for alarms */
+ if (!channel->msix_idx)
+ continue;
+
+ __hal_device_msix_intr_endis(hldev, channel, 0);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
+ &bar0->rx_traffic_mask);
+ }
+
+ /*
+ * Disable traffic only interrupts.
+ * Tx traffic interrupts are used only if the TTI feature is
+ * enabled.
+ */
+ val64 = 0;
+ if (hldev->tti_enabled)
+ val64 = XGE_HAL_TX_TRAFFIC_INTR;
+
+ val64 |= XGE_HAL_RX_TRAFFIC_INTR |
+ XGE_HAL_TX_PIC_INTR |
+ XGE_HAL_MC_INTR |
+ (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
+ XGE_HAL_SCHED_INTR : 0);
+ __hal_device_intr_mgmt(hldev, val64, 0);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &bar0->general_int_mask);
+
+
+ /* disable all configured PRCs */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ xge_hal_channel_h channel;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ __hal_ring_prc_disable(channel);
+ }
+
+ xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
+}
+
+
+/**
+ * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
+ * @hldev: HAL device handle.
+ *
+ * Enable Xframe multicast addresses.
+ * Returns: XGE_HAL_OK on success.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable the mcast
+ * feature within the timeout period.
+ *
+ * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0;
+ int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
+
+ if (hldev == NULL)
+ return XGE_HAL_ERR_INVALID_DEVICE;
+
+ if (hldev->mcast_refcnt)
+ return XGE_HAL_OK;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
+
+ hldev->mcast_refcnt = 1;
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /* Enable all Multicast addresses */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
+ &bar0->rmac_addr_data0_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
+ &bar0->rmac_addr_data1_mem);
+ val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_addr_cmd_mem);
+
+ if (__hal_device_register_poll(hldev,
+ &bar0->rmac_addr_cmd_mem, 0,
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+		/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ return XGE_HAL_OK;
+}
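+/*
+ * How the "enable all multicast" entry above works (our reading of the
+ * DATA0/DATA1 pair): DATA0 carries the match address 01:02:03:04:05:06
+ * and DATA1 the per-bit mask 0xfeffffffffff, leaving only the low bit of
+ * the first octet - the Ethernet group/multicast bit - significant, so
+ * every multicast destination address matches this entry.
+ */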
+
+/**
+ * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
+ * @hldev: HAL device handle.
+ *
+ * Disable Xframe multicast addresses.
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable the mcast
+ * feature within the timeout period.
+ *
+ * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0;
+ int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
+
+ if (hldev == NULL)
+ return XGE_HAL_ERR_INVALID_DEVICE;
+
+ if (hldev->mcast_refcnt == 0)
+ return XGE_HAL_OK;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
+
+ hldev->mcast_refcnt = 0;
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /* Disable all Multicast addresses */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
+ &bar0->rmac_addr_data0_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
+ &bar0->rmac_addr_data1_mem);
+
+ val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_addr_cmd_mem);
+
+ if (__hal_device_register_poll(hldev,
+ &bar0->rmac_addr_cmd_mem, 0,
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+		/* the upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_promisc_enable - Enable promiscuous mode.
+ * @hldev: HAL device handle.
+ *
+ * Enable promiscuous mode of Xframe operation.
+ *
+ * See also: xge_hal_device_promisc_disable().
+ */
+void
+xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0;
+
+ xge_assert(hldev);
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (!hldev->is_promisc) {
+ /* Put the NIC into promiscuous mode */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_CFG_KEY(0x4C0D),
+ &bar0->rmac_cfg_key);
+
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32),
+ &bar0->mac_cfg);
+
+ hldev->is_promisc = 1;
+ xge_debug_device(XGE_TRACE,
+ "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
+ (unsigned long long)val64);
+ }
+}
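+/*
+ * Note on the sequence above: the XGE_HAL_RMAC_CFG_KEY(0x4C0D) write is,
+ * as we understand it, a write-enable key for mac_cfg, which is why it
+ * precedes the 32-bit upper-half update carrying RMAC_PROM_ENABLE. The
+ * same key-then-write pattern appears in promisc_disable() below.
+ */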
+
+/**
+ * xge_hal_device_promisc_disable - Disable promiscuous mode.
+ * @hldev: HAL device handle.
+ *
+ * Disable promiscuous mode of Xframe operation.
+ *
+ * See also: xge_hal_device_promisc_enable().
+ */
+void
+xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
+{
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0;
+
+ xge_assert(hldev);
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (hldev->is_promisc) {
+ /* Remove the NIC from promiscuous mode */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_CFG_KEY(0x4C0D),
+ &bar0->rmac_cfg_key);
+
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32),
+ &bar0->mac_cfg);
+
+ hldev->is_promisc = 0;
+ xge_debug_device(XGE_TRACE,
+ "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
+ (unsigned long long)val64);
+ }
+}
+
+/**
+ * xge_hal_device_macaddr_get - Get MAC addresses.
+ * @hldev: HAL device handle.
+ * @index: MAC address index, in the range from 0 to
+ * XGE_HAL_MAX_MAC_ADDRESSES - 1.
+ * @macaddr: MAC address. Returned by HAL.
+ *
+ * Retrieve one of the stored MAC addresses by reading non-volatile
+ * memory on the chip.
+ *
+ * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
+ * address within the timeout.
+ * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
+ *
+ * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
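+ *
+ * Usage (a minimal sketch; my_hldev is a placeholder for an
+ * initialized device handle):
+ *
+ *     macaddr_t mac;
+ *
+ *     if (xge_hal_device_macaddr_get(my_hldev, 0, &mac) == XGE_HAL_OK) {
+ *         ... mac[0]..mac[5] now hold the station address ...
+ *     }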
+ */
+xge_hal_status_e
+xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
+ macaddr_t *macaddr)
+{
+ xge_hal_pci_bar0_t *bar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int i;
+
+ if (hldev == NULL) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+ }
+
+#ifdef XGE_HAL_HERC_EMULATION
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
+ &bar0->rmac_addr_data0_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
+ &bar0->rmac_addr_data1_mem);
+ val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_addr_cmd_mem);
+
+ /* poll until done */
+ __hal_device_register_poll(hldev,
+ &bar0->rmac_addr_cmd_mem, 0,
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
+
+#endif
+
+ val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_addr_cmd_mem);
+
+ if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ /* upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rmac_addr_data0_mem);
+ for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
+ (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
+ }
+
+#ifdef XGE_HAL_HERC_EMULATION
+ for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
+ (*macaddr)[i] = (u8)0;
+ }
+ (*macaddr)[1] = (u8)1;
+
+#endif
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_macaddr_set - Set MAC address.
+ * @hldev: HAL device handle.
+ * @index: MAC address index, in the range from 0 to
+ * XGE_HAL_MAX_MAC_ADDRESSES - 1.
+ * @macaddr: New MAC address to configure.
+ *
+ * Configure one of the available MAC address "slots".
+ *
+ * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
+ * address within the timeout.
+ * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
+ *
+ * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
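+ *
+ * Usage (a minimal sketch; the address and the slot index are
+ * arbitrary, my_hldev is a placeholder):
+ *
+ *     macaddr_t mac = {0x00, 0x0c, 0xfc, 0x00, 0x00, 0x01};
+ *
+ *     status = xge_hal_device_macaddr_set(my_hldev, 1, mac);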
+ */
+xge_hal_status_e
+xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
+ macaddr_t macaddr)
+{
+ xge_hal_pci_bar0_t *bar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64, temp64;
+ int i;
+
+ if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+
+ temp64 = 0;
+ for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
+ temp64 |= macaddr[i];
+ temp64 <<= 8;
+ }
+ temp64 >>= 8;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
+ &bar0->rmac_addr_data0_mem);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
+ &bar0->rmac_addr_data1_mem);
+
+ val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rmac_addr_cmd_mem);
+
+ if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
+ XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ /* upper layer may need to retry */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_macaddr_clear - Clear MAC address.
+ * @hldev: HAL device handle.
+ * @index: MAC address index, in the range from 0 to
+ * XGE_HAL_MAX_MAC_ADDRESSES - 1.
+ *
+ * Clear one of the available MAC address "slots".
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to clear the mac
+ * address within the timeout.
+ * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
+ *
+ * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
+{
+ xge_hal_status_e status;
+ u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+ status = xge_hal_device_macaddr_set(hldev, index, macaddr);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "%s",
+ "Not able to set the mac addr");
+ return status;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_macaddr_find - Finds index in the rmac table.
+ * @hldev: HAL device handle.
+ * @wanted: Wanted MAC address.
+ *
+ * Returns: index of the matching entry in the RMAC address table,
+ * or -1 if the address is not found.
+ *
+ * See also: xge_hal_device_macaddr_set().
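+ *
+ * Usage (a minimal sketch; my_hldev and mac are placeholders):
+ *
+ *     int slot = xge_hal_device_macaddr_find(my_hldev, mac);
+ *     if (slot < 0)
+ *         ... the address is not configured on the adapter ...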
+ */
+int
+xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
+{
+ int i;
+
+ if (hldev == NULL) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
+ macaddr_t macaddr;
+ (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
+ if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * xge_hal_device_mtu_set - Set MTU.
+ * @hldev: HAL device handle.
+ * @new_mtu: New MTU size to configure.
+ *
+ * Set new MTU value. Example, to use jumbo frames:
+ * xge_hal_device_mtu_set(my_device, 9600);
+ *
+ * Returns: XGE_HAL_OK on success.
+ * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
+ * register.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
+ * schemes.
+ * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
+ * a "quiescent" state.
+ */
+xge_hal_status_e
+xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
+{
+ xge_hal_status_e status;
+
+ /*
+ * reset needed if 1) new MTU differs, and
+ * 2a) device was closed or
+ * 2b) device is being upped for first time.
+ */
+ if (hldev->config.mtu != new_mtu) {
+ if (hldev->reset_needed_after_close ||
+ !hldev->mtu_first_time_set) {
+ status = xge_hal_device_reset(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_TRACE, "%s",
+ "fatal: can not reset the device");
+ return status;
+ }
+ }
+ /* store the new MTU in device, reset will use it */
+ hldev->config.mtu = new_mtu;
+ xge_debug_device(XGE_TRACE, "new MTU %d applied",
+ new_mtu);
+ }
+
+ if (!hldev->mtu_first_time_set)
+ hldev->mtu_first_time_set = 1;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_initialize - Initialize Xframe device.
+ * @hldev: HAL device handle.
+ * @attr: pointer to xge_hal_device_attr_t structure
+ * @device_config: Configuration to be _applied_ to the device,
+ * For the Xframe configuration "knobs" please
+ * refer to xge_hal_device_config_t and Xframe
+ * User Guide.
+ *
+ * Initialize Xframe device. Note that all the arguments of this public API
+ * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with
+ * OS to find new Xframe device, locate its PCI and memory spaces.
+ *
+ * When done, the ULD allocates sizeof(xge_hal_device_t) bytes and calls
+ * this API to let HAL perform Xframe hardware initialization.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
+ * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
+ * valid.
+ * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
+ * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
+ * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address is not valid.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
+ * address within the timeout, or TTI/RTI initialization failed.
+ * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
+ * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Device is not quiescent.
+ *
+ * See also: xge_hal_device_terminate(), xge_hal_status_e{}
+ * xge_hal_device_attr_t{}.
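+ *
+ * Usage (a highly simplified sketch of the attach path; the attr
+ * fields are assumed to be filled in by OS-specific probe code):
+ *
+ *     xge_hal_device_attr_t attr;
+ *     xge_hal_device_config_t config;
+ *     xge_hal_device_t *hldev;
+ *     xge_hal_status_e status;
+ *
+ *     ... fill in attr.pdev, attr.bar0, attr.regh0, etc. ...
+ *     hldev = xge_os_malloc(attr.pdev, sizeof(xge_hal_device_t));
+ *     status = xge_hal_device_initialize(hldev, &attr, &config);
+ *     if (status != XGE_HAL_OK)
+ *         ... fail the attach ...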
+ */
+xge_hal_status_e
+xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
+ xge_hal_device_config_t *device_config)
+{
+ int i;
+ xge_hal_status_e status;
+ xge_hal_channel_t *channel;
+ u16 subsys_device;
+ u16 subsys_vendor;
+ int total_dram_size, ring_auto_dram_cfg, left_dram_size;
+ int total_dram_size_max = 0;
+
+ xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
+ (unsigned long long)(ulong_t)hldev);
+
+ /* sanity check */
+ if (g_xge_hal_driver == NULL ||
+ !g_xge_hal_driver->is_initialized) {
+ return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
+ }
+
+ xge_os_memzero(hldev, sizeof(xge_hal_device_t));
+
+ /*
+ * validate a common part of Xframe-I/II configuration
+ * (and run check_card() later, once PCI inited - see below)
+ */
+ status = __hal_device_config_check_common(device_config);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ /* apply config */
+ xge_os_memcpy(&hldev->config, device_config,
+ sizeof(xge_hal_device_config_t));
+
+ /* save original attr */
+ xge_os_memcpy(&hldev->orig_attr, attr,
+ sizeof(xge_hal_device_attr_t));
+
+ /* initialize rxufca_intr_thres */
+ hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
+
+ hldev->regh0 = attr->regh0;
+ hldev->regh1 = attr->regh1;
+ hldev->regh2 = attr->regh2;
+ hldev->isrbar0 = hldev->bar0 = attr->bar0;
+ hldev->bar1 = attr->bar1;
+ hldev->bar2 = attr->bar2;
+ hldev->pdev = attr->pdev;
+ hldev->irqh = attr->irqh;
+ hldev->cfgh = attr->cfgh;
+
+ /* set initial bimodal timer for bimodal adaptive schema */
+ hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
+
+ hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
+ g_xge_hal_driver->config.queue_size_initial,
+ g_xge_hal_driver->config.queue_size_max,
+ __hal_device_event_queued, hldev);
+ if (hldev->queueh == NULL)
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+
+ hldev->magic = XGE_HAL_MAGIC;
+
+ xge_assert(hldev->regh0);
+ xge_assert(hldev->regh1);
+ xge_assert(hldev->bar0);
+ xge_assert(hldev->bar1);
+ xge_assert(hldev->pdev);
+ xge_assert(hldev->irqh);
+ xge_assert(hldev->cfgh);
+
+ /* initialize some PCI/PCI-X fields of this PCI device. */
+ __hal_device_pci_init(hldev);
+
+ /*
+ * initialize lists to properly handle a potential
+ * terminate request
+ */
+ xge_list_init(&hldev->free_channels);
+ xge_list_init(&hldev->fifo_channels);
+ xge_list_init(&hldev->ring_channels);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ /* fixups for xena */
+ hldev->config.rth_en = 0;
+ hldev->config.rth_spdm_en = 0;
+ hldev->config.rts_mac_en = 0;
+ total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
+
+ status = __hal_device_config_check_xena(device_config);
+ if (status != XGE_HAL_OK) {
+ xge_hal_device_terminate(hldev);
+ return status;
+ }
+ if (hldev->config.bimodal_interrupts == 1) {
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
+ } else if (hldev->config.bimodal_interrupts ==
+ XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.bimodal_interrupts = 0;
+ } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ /* fixups for herc */
+ total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
+ status = __hal_device_config_check_herc(device_config);
+ if (status != XGE_HAL_OK) {
+ xge_hal_device_terminate(hldev);
+ return status;
+ }
+ if (hldev->config.bimodal_interrupts ==
+ XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.bimodal_interrupts = 1;
+ } else {
+ xge_debug_device(XGE_ERR,
+ "detected unknown device_id 0x%x", hldev->device_id);
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_BAD_DEVICE_ID;
+ }
+
+#ifdef XGEHAL_RNIC
+
+ if(__hal_blockpool_create(hldev,&hldev->block_pool,
+ XGE_HAL_BLOCKPOOL_SIZE) != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "block pool: __hal_blockpool_create failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ if(__hal_regpool_create(hldev,&hldev->reg_pool,
+ XGE_HAL_REGPOOL_SIZE) != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "reg pool: __hal_regpool_create failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ for(i = 0; i < XGE_HAL_MAX_VIRTUAL_PATHS; i++) {
+ if(__hal_vp_initialize(hldev, i, &device_config->vp_config[i])
+ != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "virtual Paths: __hal_vp_initialize failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ }
+
+#endif
+
+ /* allocate and initialize FIFO types of channels according to
+ * configuration */
+ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ if (!device_config->fifo.queue[i].configured)
+ continue;
+
+ channel = __hal_channel_allocate(hldev, i,
+#ifdef XGEHAL_RNIC
+ 0,
+#endif
+ XGE_HAL_CHANNEL_TYPE_FIFO);
+ if (channel == NULL) {
+ xge_debug_device(XGE_ERR,
+ "fifo: __hal_channel_allocate failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ /* add new channel to the device */
+ xge_list_insert(&channel->item, &hldev->free_channels);
+ }
+
+ /*
+ * automatic DRAM adjustment
+ */
+ total_dram_size = 0;
+ ring_auto_dram_cfg = 0;
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!device_config->ring.queue[i].configured)
+ continue;
+ if (device_config->ring.queue[i].dram_size_mb ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ ring_auto_dram_cfg++;
+ continue;
+ }
+ total_dram_size += device_config->ring.queue[i].dram_size_mb;
+ }
+ left_dram_size = total_dram_size_max - total_dram_size;
+ if (left_dram_size < 0 ||
+ (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) {
+ xge_debug_device(XGE_ERR,
+ "ring config: exceeded DRAM size %d MB",
+ total_dram_size_max);
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
+ }
+
+ /*
+ * allocate and initialize RING types of channels according to
+ * configuration
+ */
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!device_config->ring.queue[i].configured)
+ continue;
+
+ if (device_config->ring.queue[i].dram_size_mb ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ hldev->config.ring.queue[i].dram_size_mb =
+ device_config->ring.queue[i].dram_size_mb =
+ left_dram_size / ring_auto_dram_cfg;
+ }
+
+ channel = __hal_channel_allocate(hldev, i,
+#ifdef XGEHAL_RNIC
+ 0,
+#endif
+ XGE_HAL_CHANNEL_TYPE_RING);
+ if (channel == NULL) {
+ xge_debug_device(XGE_ERR,
+ "ring: __hal_channel_allocate failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ /* add new channel to the device */
+ xge_list_insert(&channel->item, &hldev->free_channels);
+ }
+
+ /* get subsystem IDs */
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
+ &subsys_device);
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
+ &subsys_vendor);
+ xge_debug_device(XGE_TRACE,
+ "subsystem_id %04x:%04x",
+ subsys_vendor, subsys_device);
+
+ /* reset device initially */
+ (void) __hal_device_reset(hldev);
+
+ /* set host endianness first, to assure proper operation */
+ status = __hal_device_set_swapper(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "__hal_device_set_swapper failed");
+ xge_hal_device_terminate(hldev);
+ (void) __hal_device_reset(hldev);
+ return status;
+ }
+
+#ifndef XGE_HAL_HERC_EMULATION
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+ __hal_device_xena_fix_mac(hldev);
+#endif
+
+ /* MAC address initialization.
+ * For now only one mac address will be read and used. */
+ status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "xge_hal_device_macaddr_get failed");
+ xge_hal_device_terminate(hldev);
+ return status;
+ }
+
+ if (hldev->macaddr[0][0] == 0xFF &&
+ hldev->macaddr[0][1] == 0xFF &&
+ hldev->macaddr[0][2] == 0xFF &&
+ hldev->macaddr[0][3] == 0xFF &&
+ hldev->macaddr[0][4] == 0xFF &&
+ hldev->macaddr[0][5] == 0xFF) {
+ xge_debug_device(XGE_ERR,
+ "xge_hal_device_macaddr_get returns all FFs");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
+ }
+
+ xge_debug_device(XGE_TRACE,
+ "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
+ hldev->macaddr[0][0], hldev->macaddr[0][1],
+ hldev->macaddr[0][2], hldev->macaddr[0][3],
+ hldev->macaddr[0][4], hldev->macaddr[0][5]);
+
+ status = __hal_stats_initialize(&hldev->stats, hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "__hal_stats_initialize failed");
+ xge_hal_device_terminate(hldev);
+ return status;
+ }
+
+ status = __hal_device_hw_initialize(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "__hal_device_hw_initialize failed");
+ xge_hal_device_terminate(hldev);
+ return status;
+ }
+ hldev->dump_buf = (char *)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
+ if (hldev->dump_buf == NULL) {
+ xge_debug_device(XGE_ERR,
+ "dump_buf: xge_os_malloc failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+
+ /* Xena-only: need to serialize fifo posts across all device fifos */
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
+#endif
+ /* Getting VPD data */
+ __hal_device_get_vpd_data(hldev);
+
+ hldev->is_initialized = 1;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_terminating - Mark the device as 'terminating'.
+ * @devh: HAL device handle.
+ *
+ * Mark the device as 'terminating', i.e. about to be terminated. Can be
+ * used to serialize termination with other running processes/contexts.
+ *
+ * See also: xge_hal_device_terminate().
+ */
+void
+xge_hal_device_terminating(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ hldev->terminating = 1;
+}
+
+/**
+ * xge_hal_device_terminate - Terminate Xframe device.
+ * @hldev: HAL device handle.
+ *
+ * Terminate HAL device.
+ *
+ * See also: xge_hal_device_initialize().
+ */
+void
+xge_hal_device_terminate(xge_hal_device_t *hldev)
+{
+#ifdef XGEHAL_RNIC
+ int i;
+#endif
+ xge_assert(g_xge_hal_driver != NULL);
+ xge_assert(hldev != NULL);
+ xge_assert(hldev->magic == XGE_HAL_MAGIC);
+
+ xge_queue_flush(hldev->queueh);
+
+ hldev->terminating = 1;
+ hldev->is_initialized = 0;
+ hldev->in_poll = 0;
+ hldev->magic = XGE_HAL_DEAD;
+
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
+#endif
+
+ xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
+ (unsigned long long)(ulong_t)hldev);
+
+ xge_assert(xge_list_is_empty(&hldev->fifo_channels));
+ xge_assert(xge_list_is_empty(&hldev->ring_channels));
+
+ if (hldev->stats.is_initialized) {
+ __hal_stats_terminate(&hldev->stats);
+ }
+
+ /* close if open and free all channels */
+ while (!xge_list_is_empty(&hldev->free_channels)) {
+ xge_hal_channel_t *channel = (xge_hal_channel_t*)
+ hldev->free_channels.next;
+
+ xge_assert(!channel->is_open);
+ xge_list_remove(&channel->item);
+ __hal_channel_free(channel);
+ }
+
+ if (hldev->queueh) {
+ xge_queue_destroy(hldev->queueh);
+ }
+
+ if (hldev->spdm_table) {
+ xge_os_free(hldev->pdev,
+ hldev->spdm_table[0],
+ (sizeof(xge_hal_spdm_entry_t) *
+ hldev->spdm_max_entries));
+ xge_os_free(hldev->pdev,
+ hldev->spdm_table,
+ (sizeof(xge_hal_spdm_entry_t *) *
+ hldev->spdm_max_entries));
+ xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
+ hldev->spdm_table = NULL;
+ }
+
+ if (hldev->dump_buf) {
+ xge_os_free(hldev->pdev, hldev->dump_buf,
+ XGE_HAL_DUMP_BUF_SIZE);
+ hldev->dump_buf = NULL;
+ }
+
+ if (hldev->device_id != 0) {
+ int j, pcisize;
+
+ pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
+ XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
+ for (j = 0; j < pcisize; j++) {
+ xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
+ *((u32*)&hldev->pci_config_space_bios + j));
+ }
+ }
+#ifdef XGEHAL_RNIC
+
+ for(i = 0; i < XGE_HAL_MAX_VIRTUAL_PATHS; i++) {
+ __hal_vp_terminate(hldev, i);
+ }
+
+ __hal_blockpool_destroy(&hldev->block_pool);
+
+ __hal_regpool_destroy(&hldev->reg_pool);
+#endif
+
+}
+/**
+ * __hal_device_get_vpd_data - Get VPD data.
+ * @hldev: HAL device handle.
+ *
+ * Get the product name and serial number from the VPD capability
+ * structure.
+ */
+void
+__hal_device_get_vpd_data(xge_hal_device_t *hldev)
+{
+ u8 * vpd_data;
+ u8 data;
+ int index = 0, count, fail = 0;
+ u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;
+
+ xge_os_strcpy((char *) hldev->vpd_data.product_name,
+ "10 Gigabit Ethernet Adapter");
+ xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");
+
+ vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE);
+ if ( vpd_data == 0 )
+ return;
+
+ for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
+ xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
+ xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
+ for (count = 0; count < 5; count++ ) {
+ xge_os_mdelay(2);
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
+ if (data == XGE_HAL_VPD_READ_COMPLETE)
+ break;
+ }
+
+ if (count >= 5) {
+ xge_os_printf("ERR, Reading VPD data failed");
+ fail = 1;
+ break;
+ }
+
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
+ (u32 *)&vpd_data[index]);
+ }
+
+ if(!fail) {
+
+ /* read serial number of adapter */
+ for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
+ if ((vpd_data[count] == 'S') &&
+ (vpd_data[count + 1] == 'N') &&
+ (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
+ memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
+ memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
+ vpd_data[count + 2]);
+ break;
+ }
+ }
+
+ if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
+ memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
+ memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
+ }
+
+ }
+
+ xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE);
+}
+
+
+/**
+ * xge_hal_device_handle_tcode - Handle transfer code.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Xframe user guide)
+ * "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor, see xge_hal_fifo_dtr_next_completed() and
+ * xge_hal_ring_dtr_next_completed().
+ * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
+ *
+ * Returns: one of the xge_hal_status_e{} enumerated types.
+ * XGE_HAL_OK - for success.
+ * XGE_HAL_ERR_CRITICAL - when a critical error is encountered.
+ * XGE_HAL_ERR_PKT_DROP - when the descriptor should be dropped.
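+ *
+ * Usage (a sketch of a receive completion loop; my_channel, dtr and
+ * t_code are placeholders, and XGE_HAL_RXD_T_CODE_OK is assumed to
+ * denote a clean completion):
+ *
+ *     xge_hal_dtr_h dtr;
+ *     u8 t_code;
+ *
+ *     while (xge_hal_ring_dtr_next_completed(my_channel, &dtr,
+ *         &t_code) == XGE_HAL_OK) {
+ *         if (t_code != XGE_HAL_RXD_T_CODE_OK &&
+ *             xge_hal_device_handle_tcode(my_channel, dtr,
+ *             t_code) != XGE_HAL_OK) {
+ *             ... drop this descriptor ...
+ *             continue;
+ *         }
+ *         ... process the received frame ...
+ *     }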
+ */
+xge_hal_status_e
+xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh, u8 t_code)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
+
+ if (t_code > 15) {
+ xge_os_printf("invalid t_code %d", t_code);
+ return XGE_HAL_OK;
+ }
+
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;
+
+#if defined(XGE_HAL_DEBUG_BAD_TCODE)
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
+ XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
+ txdp->control_1, txdp->control_2, txdp->buffer_pointer,
+ txdp->host_control);
+#endif
+
+ /* handle link "down" immediately without going through
+ * xge_hal_device_poll() routine. */
+ if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
+ /* link is down */
+ if (hldev->link_state != XGE_HAL_LINK_DOWN) {
+ xge_hal_pci_bar0_t *bar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ hldev->link_state = XGE_HAL_LINK_DOWN;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->adapter_control);
+
+ /* turn off LED */
+ val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ g_xge_hal_driver->uld_callbacks.link_down(
+ hldev->upper_layer_info);
+ }
+ } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
+ t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
+ __hal_device_handle_targetabort(hldev);
+ return XGE_HAL_ERR_CRITICAL;
+ }
+ return XGE_HAL_ERR_PKT_DROP;
+ } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;
+
+#if defined(XGE_HAL_DEBUG_BAD_TCODE)
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
+ ":"XGE_OS_LLXFMT, rxdp->control_1,
+ rxdp->control_2, rxdp->buffer0_ptr,
+ rxdp->host_control);
+#endif
+ if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
+ hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
+ __hal_device_handle_eccerr(hldev, "rxd_t_code",
+ (u64)t_code);
+ return XGE_HAL_ERR_CRITICAL;
+ } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
+ t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
+ hldev->stats.sw_dev_err_stats.parity_err_cnt++;
+ __hal_device_handle_parityerr(hldev, "rxd_t_code",
+ (u64)t_code);
+ return XGE_HAL_ERR_CRITICAL;
+ /* do not drop if detected unknown IPv6 extension */
+ } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
+ return XGE_HAL_ERR_PKT_DROP;
+ }
+ }
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_link_state - Get link state.
+ * @devh: HAL device handle.
+ * @ls: Link state, see xge_hal_device_link_state_e{}.
+ *
+ * Get link state.
+ * Returns: XGE_HAL_OK.
+ * See also: xge_hal_device_link_state_e{}.
+ */
+xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
+ xge_hal_device_link_state_e *ls)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ xge_assert(ls != NULL);
+ *ls = hldev->link_state;
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_device_sched_timer - Configure scheduled device interrupt.
+ * @devh: HAL device handle.
+ * @interval_us: Time interval, in microseconds.
+ * Unlike transmit and receive interrupts,
+ * the scheduled interrupt is generated independently of
+ * traffic, but purely based on time.
+ * @one_shot: 1 - generate scheduled interrupt only once.
+ * 0 - generate scheduled interrupt periodically at the specified
+ * @interval_us interval.
+ *
+ * (Re-)configure the scheduled interrupt. Can be called at runtime to
+ * change the setting, to generate one-shot interrupts based on resource
+ * and/or traffic conditions, or for other purposes.
+ * See also: xge_hal_device_config_t{}.
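+ *
+ * Usage (an illustrative sketch; my_hldev is a placeholder):
+ *
+ *     ... fire a single scheduled interrupt 500us from now ...
+ *     xge_hal_device_sched_timer(my_hldev, 500, 1);
+ *
+ *     ... turn the scheduled interrupt off ...
+ *     xge_hal_device_sched_timer(my_hldev, 0, 0);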
+ */
+void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
+ int one_shot)
+{
+ u64 val64;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 =
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ unsigned int interval = hldev->config.pci_freq_mherz * interval_us;
+
+ interval = __hal_fix_time_ival_herc(hldev, interval);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->scheduled_int_ctrl);
+ if (interval) {
+ val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
+ val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
+ if (one_shot) {
+ val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
+ }
+ val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
+ } else {
+ val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->scheduled_int_ctrl);
+
+ xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
+ (unsigned long long)val64,
+ interval ? "enabled" : "disabled");
+}
+
+/**
+ * xge_hal_device_check_id - Verify device ID.
+ * @devh: HAL device handle.
+ *
+ * Verify device ID.
+ * Returns: one of the xge_hal_card_e{} enumerated types.
+ * See also: xge_hal_card_e{}.
+ */
+xge_hal_card_e
+xge_hal_device_check_id(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ switch (hldev->device_id) {
+ case XGE_PCI_DEVICE_ID_XENA_1:
+ case XGE_PCI_DEVICE_ID_XENA_2:
+ return XGE_HAL_CARD_XENA;
+ case XGE_PCI_DEVICE_ID_HERC_1:
+ case XGE_PCI_DEVICE_ID_HERC_2:
+ return XGE_HAL_CARD_HERC;
+ case XGE_PCI_DEVICE_ID_TITAN_1:
+ case XGE_PCI_DEVICE_ID_TITAN_2:
+ return XGE_HAL_CARD_TITAN;
+ default:
+ return XGE_HAL_CARD_UNKNOWN;
+ }
+}
+
+/**
+ * xge_hal_device_pci_info_get - Get PCI bus information such as width,
+ * frequency, and mode from previously stored values.
+ * @devh: HAL device handle.
+ * @pci_mode: pointer to a variable of enumerated type
+ * xge_hal_pci_mode_e{}.
+ * @bus_frequency: pointer to a variable of enumerated type
+ * xge_hal_pci_bus_frequency_e{}.
+ * @bus_width: pointer to a variable of enumerated type
+ * xge_hal_pci_bus_width_e{}.
+ *
+ * Get pci mode, frequency, and PCI bus width.
+ * Returns: one of the xge_hal_status_e{} enumerated types.
+ * XGE_HAL_OK - for success.
+ * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle.
+ * See also: xge_hal_pci_mode_e{}, xge_hal_pci_bus_frequency_e{},
+ * xge_hal_pci_bus_width_e{}.
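+ *
+ * Usage (a minimal sketch; my_hldev is a placeholder):
+ *
+ *     xge_hal_pci_mode_e mode;
+ *     xge_hal_pci_bus_frequency_e freq;
+ *     xge_hal_pci_bus_width_e width;
+ *
+ *     if (xge_hal_device_pci_info_get(my_hldev, &mode, &freq,
+ *         &width) == XGE_HAL_OK)
+ *         ... report the negotiated bus parameters ...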
+ */
+xge_hal_status_e
+xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
+ xge_hal_pci_bus_frequency_e *bus_frequency,
+ xge_hal_pci_bus_width_e *bus_width)
+{
+ xge_hal_status_e rc_status;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
+ rc_status = XGE_HAL_ERR_INVALID_DEVICE;
+ xge_debug_device(XGE_ERR,
+ "xge_hal_device_pci_info_get error, rc %d for device %p",
+ rc_status, hldev);
+
+ return rc_status;
+ }
+
+ *pci_mode = hldev->pci_mode;
+ *bus_frequency = hldev->bus_frequency;
+ *bus_width = hldev->bus_width;
+ rc_status = XGE_HAL_OK;
+ return rc_status;
+}
+
+/**
+ * xge_hal_reinitialize_hw
+ * @hldev: private member of the device structure.
+ *
+ * This function will soft reset the NIC and re-initialize all the
+ * I/O registers to the values they had after its initial initialization
+ * through the probe function.
+ */
+int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
+{
+ (void) xge_hal_device_reset(hldev);
+ if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
+ xge_hal_device_terminate(hldev);
+ (void) __hal_device_reset(hldev);
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ * __hal_read_spdm_entry_line
+ * @hldev: pointer to xge_hal_device_t structure
+ * @spdm_line: spdm line in the spdm entry to be read.
+ * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
+ * @spdm_line_val: Contains the value stored in the spdm line.
+ *
+ * The SPDM table contains up to a maximum of 256 spdm entries.
+ * Each spdm entry contains 8 lines and each line stores 8 bytes.
+ * This function reads the spdm line (addressed by @spdm_line)
+ * of the spdm entry (addressed by @spdm_entry) in
+ * the SPDM table.
+ */
+xge_hal_status_e
+__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
+ u16 spdm_entry, u64 *spdm_line_val)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
+ XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
+ XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_spdm_mem_ctrl);
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_rth_spdm_mem_ctrl, 0,
+ XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rts_rth_spdm_mem_data);
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_get_free_spdm_entry
+ * @hldev: pointer to xge_hal_device_t structure
+ * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
+ *
+ * This function returns the index of an unused spdm entry in the SPDM
+ * table.
+ */
+static xge_hal_status_e
+__hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
+{
+ xge_hal_status_e status;
+ u64 spdm_line_val=0;
+
+ /*
+ * Search in the local SPDM table for a free slot.
+ */
+ *spdm_entry = 0;
+ for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
+ if (!hldev->spdm_table[*spdm_entry]->in_use) {
+ break;
+ }
+ }
+
+ if (*spdm_entry >= hldev->spdm_max_entries) {
+ return XGE_HAL_ERR_SPDM_TABLE_FULL;
+ }
+
+ /*
+ * Make sure that the corresponding spdm entry in the SPDM
+ * table is free.
+ * Seventh line of the spdm entry contains information about
+ * whether the entry is free or not.
+ */
+ if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
+ &spdm_line_val)) != XGE_HAL_OK) {
+ return status;
+ }
+
+ /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
+ if ((spdm_line_val & BIT(63))) {
+ /*
+ * Log a warning
+ */
+ xge_debug_device(XGE_ERR, "Local SPDM table is not "
+ "consistent with the actual one for the spdm "
+ "entry %d", *spdm_entry);
+ return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+/*
+ * __hal_calc_jhash - Calculate Jenkins hash.
+ * @msg: Jenkins hash algorithm key.
+ * @length: Length of the key.
+ * @golden_ratio: Jenkins hash golden ratio.
+ * @init_value: Jenkins hash initial value.
+ *
+ * This function implements the Jenkins based algorithm used for the
+ * calculation of the RTH hash.
+ * Returns: Jenkins hash value.
+ *
+ */
+static u32
+__hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
+{
+
+ register u32 a,b,c,len;
+
+ /*
+ * Set up the internal state
+ */
+ len = length;
+ a = b = golden_ratio; /* the golden ratio; an arbitrary value */
+ c = init_value; /* the previous hash value */
+
+ /* handle most of the key */
+ while (len >= 12)
+ {
+ a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
+ + ((u32)msg[3]<<24));
+ b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
+ + ((u32)msg[7]<<24));
+ c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
+ + ((u32)msg[11]<<24));
+ mix(a,b,c);
+ msg += 12; len -= 12;
+ }
+
+ /* handle the last 11 bytes */
+ c += length;
+ switch(len) /* all the case statements fall through */
+ {
+ case 11: c+= ((u32)msg[10]<<24);
+ case 10: c+= ((u32)msg[9]<<16);
+ case 9 : c+= ((u32)msg[8]<<8);
+ /* the first byte of c is reserved for the length */
+ case 8 : b+= ((u32)msg[7]<<24);
+ case 7 : b+= ((u32)msg[6]<<16);
+ case 6 : b+= ((u32)msg[5]<<8);
+ case 5 : b+= msg[4];
+ case 4 : a+= ((u32)msg[3]<<24);
+ case 3 : a+= ((u32)msg[2]<<16);
+ case 2 : a+= ((u32)msg[1]<<8);
+ case 1 : a+= msg[0];
+ /* case 0: nothing left to add */
+ }
+
+ mix(a,b,c);
+
+ /* report the result */
+ return c;
+}
+
+
+/**
+ * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
+ * @devh: HAL device handle.
+ * @src_ip: Source ip address(IPv4/IPv6).
+ * @dst_ip: Destination ip address(IPv4/IPv6).
+ * @l4_sp: L4 source port.
+ * @l4_dp: L4 destination port.
+ * @is_tcp: Set to 1, if the protocol is TCP.
+ * 0, if the protocol is UDP.
+ * @is_ipv4: Set to 1, if the protocol is IPv4.
+ * 0, if the protocol is IPv6.
+ * @tgt_queue: Target queue to route the receive packet.
+ *
+ * This function adds a new entry to the SPDM table.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry within
+ * the timeout.
+ * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
+ * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
+ *
+ * See also: xge_hal_spdm_entry_remove{}.
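+ *
+ * Usage (an illustrative sketch; the addresses, ports and target ring
+ * are arbitrary, my_hldev is a placeholder):
+ *
+ *     xge_hal_ipaddr_t sip, dip;
+ *
+ *     sip.ipv4.addr = 0x0a000001; ... 10.0.0.1 ...
+ *     dip.ipv4.addr = 0x0a000002; ... 10.0.0.2 ...
+ *     status = xge_hal_spdm_entry_add(my_hldev, &sip, &dip,
+ *         80, 32768, 1, 1, 2); ... TCP, IPv4, route to ring 2 ...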
+ */
+xge_hal_status_e
+xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
+ xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
+ u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
+{
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u32 jhash_value;
+ u32 jhash_init_val;
+ u32 jhash_golden_ratio;
+ u64 val64;
+ int off;
+ u16 spdm_entry;
+ u8 msg[XGE_HAL_JHASH_MSG_LEN];
+ int ipaddr_len;
+ xge_hal_status_e status;
+
+
+ if (!hldev->config.rth_spdm_en) {
+ return XGE_HAL_ERR_SPDM_NOT_ENABLED;
+ }
+
+ if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
+ (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
+ return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
+ }
+
+
+ /*
+ * Calculate the jenkins hash.
+ */
+ /*
+ * Create the Jenkins hash algorithm key.
+ * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
+ * use L4 information. Otherwise key = {L3SA, L3DA}.
+ */
+
+ if (is_ipv4) {
+ ipaddr_len = 4; /* in bytes */
+ } else {
+ ipaddr_len = 16;
+ }
+
+ /*
+ * Jenkins hash algorithm expects the key in the big endian
+ * format. Since key is the byte array, memcpy won't work in the
+ * case of little endian. So, the current code extracts each
+ * byte starting from MSB and store it in the key.
+ */
+ if (is_ipv4) {
+ for (off = 0; off < ipaddr_len; off++) {
+ u32 mask = vBIT32(0xff,(off*8),8);
+ int shift = 32-(off+1)*8;
+ msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
+ msg[off+ipaddr_len] =
+ (u8)((dst_ip->ipv4.addr & mask) >> shift);
+ }
+ } else {
+ for (off = 0; off < ipaddr_len; off++) {
+ int loc = off % 8;
+ u64 mask = vBIT(0xff,(loc*8),8);
+ int shift = 64-(loc+1)*8;
+
+ msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
+ >> shift);
+ msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
+ & mask) >> shift);
+ }
+ }
+
+ off = (2*ipaddr_len);
+
+ if (hldev->config.rth_spdm_use_l4) {
+ msg[off] = (u8)((l4_sp & 0xff00) >> 8);
+ msg[off + 1] = (u8)(l4_sp & 0xff);
+ msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
+ msg[off + 3] = (u8)(l4_dp & 0xff);
+ off += 4;
+ }
+
+ /*
+ * Calculate jenkins hash for this configuration
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0,
+ &bar0->rts_rth_jhash_cfg);
+ jhash_golden_ratio = (u32)(val64 >> 32);
+ jhash_init_val = (u32)(val64 & 0xffffffff);
+
+ jhash_value = __hal_calc_jhash(msg, off,
+ jhash_golden_ratio,
+ jhash_init_val);
+
+ xge_os_spin_lock(&hldev->spdm_lock);
+
+ /*
+ * Locate a free slot in the SPDM table. To avoid a search in the
+ * actual SPDM table, which is very expensive in terms of time,
+ * we are maintaining a local copy of the table and the search for
+ * the free entry is performed in the local table.
+ */
+ if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
+ != XGE_HAL_OK) {
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return status;
+ }
+
+ /*
+ * Add this entry to the SPDM table
+ */
+ status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
+ is_tcp, is_ipv4, tgt_queue,
+ jhash_value, /* calculated jhash */
+ spdm_entry);
+
+ xge_os_spin_unlock(&hldev->spdm_lock);
+
+ return status;
+}
+
+/**
+ * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
+ * @devh: HAL device handle.
+ * @src_ip: Source ip address(IPv4/IPv6).
+ * @dst_ip: Destination ip address(IPv4/IPv6).
+ * @l4_sp: L4 source port.
+ * @l4_dp: L4 destination port.
+ * @is_tcp: Set to 1, if the protocol is TCP.
+ * 0, if the protocol is UDP.
+ * @is_ipv4: Set to 1, if the protocol is IPv4.
+ * 0, if the protocol is IPv6.
+ *
+ * This function removes an entry from the SPDM table.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
+ * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry within
+ * the timeout.
+ * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
+ * table.
+ *
+ * See also: xge_hal_spdm_entry_add{}.
+ */
+xge_hal_status_e
+xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
+ xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
+ u8 is_tcp, u8 is_ipv4)
+{
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ u16 spdm_entry;
+ xge_hal_status_e status;
+ u64 spdm_line_arr[8];
+ u8 line_no;
+ u8 spdm_is_tcp;
+ u8 spdm_is_ipv4;
+ u16 spdm_l4_sp;
+ u16 spdm_l4_dp;
+
+ if (!hldev->config.rth_spdm_en) {
+ return XGE_HAL_ERR_SPDM_NOT_ENABLED;
+ }
+
+ xge_os_spin_lock(&hldev->spdm_lock);
+
+ /*
+ * Poll the rxpic_int_reg register until spdm ready bit is set or
+ * timeout happens.
+ */
+ if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
+ XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+
+ /* upper layer may need to retry */
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ /*
+ * Clear the SPDM READY bit.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rxpic_int_reg);
+ val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rxpic_int_reg);
+
+ /*
+ * Search in the local SPDM table to get the index of the
+ * corresponding entry in the SPDM table.
+ */
+ spdm_entry = 0;
+ for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
+ if ((!hldev->spdm_table[spdm_entry]->in_use) ||
+ (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
+ (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
+ (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
+ (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
+ continue;
+ }
+
+ /*
+ * Compare the src/dst IP addresses of source and target
+ */
+ if (is_ipv4) {
+ if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
+ != src_ip->ipv4.addr) ||
+ (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
+ != dst_ip->ipv4.addr)) {
+ continue;
+ }
+ } else {
+ if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
+ != src_ip->ipv6.addr[0]) ||
+ (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
+ != src_ip->ipv6.addr[1]) ||
+ (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
+ != dst_ip->ipv6.addr[0]) ||
+ (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
+ != dst_ip->ipv6.addr[1])) {
+ continue;
+ }
+ }
+ break;
+ }
+
+ if (spdm_entry >= hldev->spdm_max_entries) {
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
+ }
+
+ /*
+ * Retrieve the corresponding entry from the SPDM table and
+ * make sure that the data is consistent.
+ */
+ for(line_no = 0; line_no < 8; line_no++) {
+
+ /*
+ * SPDM line 2,3,4 are valid only for IPv6 entry.
+ * SPDM line 5 & 6 are reserved. We don't have to
+ * read these entries in the above cases.
+ */
+ if (((is_ipv4) &&
+ ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
+ (line_no == 5) ||
+ (line_no == 6)) {
+ continue;
+ }
+
+ if ((status = __hal_read_spdm_entry_line(
+ hldev,
+ line_no,
+ spdm_entry,
+ &spdm_line_arr[line_no]))
+ != XGE_HAL_OK) {
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return status;
+ }
+ }
+
+ /*
+ * Seventh line of the spdm entry contains the entry_enable
+ * bit. Make sure that the entry_enable bit of this spdm entry
+ * is set.
+ * To remove an entry from the SPDM table, reset this
+ * bit.
+ */
+ if (!(spdm_line_arr[7] & BIT(63))) {
+ /*
+ * Log a warning
+ */
+ xge_debug_device(XGE_ERR, "Local SPDM table is not "
+ "consistent with the actual one for the spdm "
+ "entry %d ", spdm_entry);
+ goto err_exit;
+ }
+
+ /*
+ * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
+ * table and compare them.
+ */
+ spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
+ spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
+ spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
+ spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);
+
+
+ if ((spdm_is_tcp != is_tcp) ||
+ (spdm_is_ipv4 != is_ipv4) ||
+ (spdm_l4_sp != l4_sp) ||
+ (spdm_l4_dp != l4_dp)) {
+ /*
+ * Log a warning
+ */
+ xge_debug_device(XGE_ERR, "Local SPDM table is not "
+ "consistent with the actual one for the spdm "
+ "entry %d ", spdm_entry);
+ goto err_exit;
+ }
+
+ if (is_ipv4) {
+ /* Upper 32 bits of spdm_line(64 bit) contains the
+ * src IPv4 address. Lower 32 bits of spdm_line
+ * contains the destination IPv4 address.
+ */
+ u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
+ u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);
+
+ if ((temp_src_ip != src_ip->ipv4.addr) ||
+ (temp_dst_ip != dst_ip->ipv4.addr)) {
+ xge_debug_device(XGE_ERR, "Local SPDM table is not "
+ "consistent with the actual one for the spdm "
+ "entry %d ", spdm_entry);
+ goto err_exit;
+ }
+
+ } else {
+ /*
+ * SPDM line 1 & 2 contains the src IPv6 address.
+ * SPDM line 3 & 4 contains the dst IPv6 address.
+ */
+ if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
+ (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
+ (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
+ (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
+
+ /*
+ * Log a warning
+ */
+ xge_debug_device(XGE_ERR, "Local SPDM table is not "
+ "consistent with the actual one for the spdm "
+ "entry %d ", spdm_entry);
+ goto err_exit;
+ }
+ }
+
+ /*
+ * Reset the entry_enable bit to zero
+ */
+ spdm_line_arr[7] &= ~BIT(63);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ spdm_line_arr[7],
+ (void *)((char *)hldev->spdm_mem_base +
+ (spdm_entry * 64) + (7 * 8)));
+
+ /*
+ * Wait for the operation to be completed.
+ */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rxpic_int_reg, 1,
+ XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ /*
+ * Make the corresponding spdm entry in the local SPDM table
+ * available for future use.
+ */
+ hldev->spdm_table[spdm_entry]->in_use = 0;
+ xge_os_spin_unlock(&hldev->spdm_lock);
+
+ return XGE_HAL_OK;
+
+err_exit:
+ xge_os_spin_unlock(&hldev->spdm_lock);
+ return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
+}
+
+/*
+ * __hal_device_rti_set
+ * @ring_qid: The post_qid of the ring.
+ * @channel: HAL channel of the ring.
+ *
+ * This function stores the RTI value associated for the MSI and
+ * also unmasks this particular RTI in the rti_mask register.
+ */
+static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
+ hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
+ channel->rti = (u8)ring_qid;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rx_traffic_mask);
+ val64 &= ~BIT(ring_qid);
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, val64,
+ &bar0->rx_traffic_mask);
+}
+
+/*
+ * __hal_device_tti_set
+ * @fifo_qid: The post_qid of the FIFO.
+ * @channel: HAL channel of the FIFO.
+ *
+ * This function stores the TTI value associated for the MSI and
+ * also unmasks this particular TTI in the tti_mask register.
+ */
+static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
+ hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
+ channel->tti = (u8)fifo_qid;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->tx_traffic_mask);
+ val64 &= ~BIT(fifo_qid);
+ xge_os_pio_mem_write64(hldev->pdev,
+ hldev->regh0, val64,
+ &bar0->tx_traffic_mask);
+}
+
+/**
+ * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
+ * FIFO for a given MSI.
+ * @channelh: HAL channel handle.
+ * @msi: MSI Number associated with the channel.
+ * @msi_msg: The MSI message associated with the MSI number above.
+ *
+ * This API will associate a given channel (either Ring or FIFO) with the
+ * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in
+ * the hardware to indicate this association.
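+ *
+ * Usage (a minimal sketch; my_channel, the MSI number and the MSI
+ * message are placeholders supplied by the OS MSI setup code):
+ *
+ *     status = xge_hal_channel_msi_set(my_channel, 1, my_msi_msg);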
+ */
+xge_hal_status_e
+xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ channel->msi_msg = msi_msg;
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ int ring = channel->post_qid;
+ xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
+ " MSI: %d", channel->msi_msg, ring, msi);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rx_mat);
+ val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_mat);
+ __hal_device_rti_set(ring, channel);
+ } else {
+ int fifo = channel->post_qid;
+ xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
+ " MSI: %d", channel->msi_msg, fifo, msi);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->tx_mat[0]);
+ val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->tx_mat[0]);
+ __hal_device_tti_set(fifo, channel);
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mask_msix - Mask an MSI-X interrupt.
+ * @devh: HAL device handle.
+ * @msi_id: MSI-X ID
+ *
+ * The function masks the msix interrupt for the given msi_id
+ *
+ * Returns: XGE_HAL_OK.
+ *
+ * See also: xge_hal_unmask_msix().
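+ *
+ * Usage (an illustrative sketch; my_hldev and the vector number are
+ * placeholders):
+ *
+ *     (void) xge_hal_mask_msix(my_hldev, 2);
+ *     ... service the events behind MSI-X vector 2 ...
+ *     (void) xge_hal_unmask_msix(my_hldev, 2);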
+ */
+xge_hal_status_e
+xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
+{
+ xge_hal_status_e status = XGE_HAL_OK;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ u32 *bar2 = (u32 *)hldev->bar2;
+ u32 val32;
+
+ xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
+
+ val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
+ val32 |= 1;
+ xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
+ return status;
+}
+
+/**
+ * xge_hal_unmask_msix - Unmask an MSI-X interrupt.
+ * @devh: HAL device handle.
+ * @msi_id: MSI-X ID
+ *
+ * The function unmasks the msix interrupt for the given msi_id
+ *
+ * Returns: XGE_HAL_OK.
+ *
+ * See also: xge_hal_mask_msix().
+ */
+xge_hal_status_e
+xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
+{
+ xge_hal_status_e status = XGE_HAL_OK;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ u32 *bar2 = (u32 *)hldev->bar2;
+ u32 val32;
+
+ xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
+
+ val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
+ val32 &= ~1;
+ xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
+ return status;
+}
+
+/*
+ * __hal_set_msix_vals
+ * @devh: HAL device handle.
+ * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
+ * Filled in by this function.
+ * @msix_address: 32bit MSI-X DMA address.
+ * Filled in by this function.
+ * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
+ * entry in the table of MSI-X (value, address) pairs.
+ *
+ * This function will program the hardware associating the given
+ * address/value combination to the specified msi number.
+ */
+static void __hal_set_msix_vals (xge_hal_device_h devh,
+ u32 *msix_value,
+ u64 *msix_addr,
+ int msix_idx)
+{
+ int cnt = 0;
+
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), &bar0->xmsi_access);
+ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
+ (u32)(val64), &bar0->xmsi_access);
+ do {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_access);
+ if (val64 & XGE_HAL_XMSI_STROBE)
+ break;
+ cnt++;
+ xge_os_mdelay(20);
+ } while(cnt < 5);
+ *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_data));
+ *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_address);
+}
+
+/**
+ * xge_hal_channel_msix_set - Associate MSI-X with a channel.
+ * @channelh: HAL channel handle.
+ * @msix_idx: index that corresponds to a particular (@msix_value,
+ * @msix_address) entry in the MSI-X table.
+ *
+ * This API associates a given channel (either Ring or FIFO) with the
+ * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
+ * to indicate this association.
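+ *
+ * Usage (a minimal sketch; my_channel is a placeholder for an open
+ * HAL channel, 2 is an arbitrary MSI-X table index):
+ *
+ *     status = xge_hal_channel_msix_set(my_channel, 2);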
+ */
+xge_hal_status_e
+xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ /* Currently Ring and RTI is one on one. */
+ int ring = channel->post_qid;
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rx_mat);
+ val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_mat);
+ __hal_device_rti_set(ring, channel);
+ hldev->config.ring.queue[channel->post_qid].intr_vector =
+ msix_idx;
+ } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ int fifo = channel->post_qid;
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->tx_mat[0]);
+ val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->tx_mat[0]);
+ __hal_device_tti_set(fifo, channel);
+ hldev->config.fifo.queue[channel->post_qid].intr_vector =
+ msix_idx;
+ }
+ channel->msix_idx = msix_idx;
+ __hal_set_msix_vals(hldev, &channel->msix_data,
+ &channel->msix_address,
+ channel->msix_idx);
+
+ return XGE_HAL_OK;
+}
+
+#if defined(XGE_HAL_CONFIG_LRO)
+/**
+ * xge_hal_lro_terminate - Terminate lro resources.
+ * @lro_scale: Amount of lro memory.
+ * @hldev: Hal device structure.
+ *
+ */
+void
+xge_hal_lro_terminate(u32 lro_scale,
+ xge_hal_device_t *hldev)
+{
+}
+
+/**
+ * xge_hal_lro_init - Initialize LRO resources.
+ * @lro_scale: Amount of LRO memory.
+ * @hldev: HAL device structure.
+ *
+ * Note: for the time being only one LRO session per device is used;
+ * the size will be increased later.
+ */
+
+xge_hal_status_e
+xge_hal_lro_init(u32 lro_scale,
+ xge_hal_device_t *hldev)
+{
+ int i;
+
+ if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
+
+ if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
+
+ for (i=0; i < XGE_HAL_MAX_RING_NUM; i++)
+ {
+ xge_os_memzero(hldev->lro_desc[i].lro_pool,
+ sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
+
+ hldev->lro_desc[i].lro_next_idx = 0;
+ hldev->lro_desc[i].lro_recent = NULL;
+ }
+
+ return XGE_HAL_OK;
+}
+#endif
+
+
+/**
+ * xge_hal_device_poll - HAL device "polling" entry point.
+ * @devh: HAL device.
+ *
+ * HAL "polling" entry point. Note that this is part of HAL public API.
+ * Upper-Layer driver _must_ periodically poll HAL via
+ * xge_hal_device_poll().
+ *
+ * HAL uses caller's execution context to serially process accumulated
+ * slow-path events, such as link state changes and hardware error
+ * indications.
+ *
+ * The rate of polling could be somewhere between 500us and 10ms,
+ * depending on requirements (e.g., the requirement to support fail-over
+ * could mean that 500us or even 100us polling interval need to be used).
+ *
+ * The need and motivation for external polling includes
+ *
+ * - remove the error-checking "burden" from the HAL interrupt handler
+ * (see xge_hal_device_handle_irq());
+ *
+ * - remove the potential source of portability issues by _not_
+ * implementing separate polling thread within HAL itself.
+ *
+ * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
+ * Usage: See ex_slow_path{}.
+ */
+void
+xge_hal_device_poll(xge_hal_device_h devh)
+{
+ unsigned char item_buf[sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
+ xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
+ xge_queue_status_e qstatus;
+ xge_hal_status_e hstatus;
+ int i = 0;
+ int queue_has_critical_event = 0;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
+
+_again:
+ if (!hldev->is_initialized ||
+ hldev->terminating ||
+ hldev->magic != XGE_HAL_MAGIC)
+ return;
+
+ if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
+ {
+ /*
+ * Wait for an hour before updating the XPAK stats.
+ */
+ hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
+ } else {
+ /*
+ * Log error messages for excess temperature, bias
+ * current, and laser output for three cycles.
+ */
+ __hal_updt_stats_xpak(hldev);
+ hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
+ }
+
+ if (!queue_has_critical_event)
+ queue_has_critical_event =
+ __queue_get_reset_critical(hldev->queueh);
+
+ hldev->in_poll = 1;
+ while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
+
+ qstatus = xge_queue_consume(hldev->queueh,
+ XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
+ item);
+ if (qstatus == XGE_QUEUE_IS_EMPTY)
+ break;
+
+ xge_debug_queue(XGE_TRACE,
+ "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
+ XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
+ (u64)(ulong_t)item->context);
+
+ if (!hldev->is_initialized ||
+ hldev->magic != XGE_HAL_MAGIC) {
+ hldev->in_poll = 0;
+ return;
+ }
+
+ switch (item->event_type) {
+ case XGE_HAL_EVENT_LINK_IS_UP: {
+ if (!queue_has_critical_event &&
+ g_xge_hal_driver->uld_callbacks.link_up) {
+ g_xge_hal_driver->uld_callbacks.link_up(
+ hldev->upper_layer_info);
+ hldev->link_state = XGE_HAL_LINK_UP;
+ }
+ } break;
+ case XGE_HAL_EVENT_LINK_IS_DOWN: {
+ if (!queue_has_critical_event &&
+ g_xge_hal_driver->uld_callbacks.link_down) {
+ g_xge_hal_driver->uld_callbacks.link_down(
+ hldev->upper_layer_info);
+ hldev->link_state = XGE_HAL_LINK_DOWN;
+ }
+ } break;
+ case XGE_HAL_EVENT_SERR:
+ case XGE_HAL_EVENT_ECCERR:
+ case XGE_HAL_EVENT_PARITYERR:
+ case XGE_HAL_EVENT_TARGETABORT:
+ case XGE_HAL_EVENT_SLOT_FREEZE: {
+ void *item_data = xge_queue_item_data(item);
+ xge_hal_event_e event_type = item->event_type;
+ u64 val64 = *((u64*)item_data);
+
+ if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
+ if (xge_hal_device_is_slot_freeze(hldev))
+ event_type = XGE_HAL_EVENT_SLOT_FREEZE;
+ if (g_xge_hal_driver->uld_callbacks.crit_err) {
+ g_xge_hal_driver->uld_callbacks.crit_err(
+ hldev->upper_layer_info,
+ event_type,
+ val64);
+ /* handle one critical event per poll cycle */
+ hldev->in_poll = 0;
+ return;
+ }
+ } break;
+ default: {
+ xge_debug_queue(XGE_TRACE,
+ "got non-HAL event %d",
+ item->event_type);
+ } break;
+ }
+
+ /* broadcast this event */
+ if (g_xge_hal_driver->uld_callbacks.event)
+ g_xge_hal_driver->uld_callbacks.event(item);
+ }
+
+ if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
+ if (g_xge_hal_driver->uld_callbacks.before_device_poll(
+ hldev) != 0) {
+ hldev->in_poll = 0;
+ return;
+ }
+ }
+
+ hstatus = __hal_device_poll(hldev);
+ if (g_xge_hal_driver->uld_callbacks.after_device_poll)
+ g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
+
+ /*
+ * handle critical error right away:
+ * - walk the device queue again
+ * - drop non-critical events, if any
+ * - look for the 1st critical
+ */
+ if (hstatus == XGE_HAL_ERR_CRITICAL) {
+ queue_has_critical_event = 1;
+ goto _again;
+ }
+
+ hldev->in_poll = 0;
+}
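+
+/*
+ * Illustrative sketch (not part of the HAL sources): the simplest
+ * ULD-side polling arrangement. The timer callback name and the ~1ms
+ * period are assumptions; any periodic context in the 100us..10ms
+ * range discussed above will do.
+ *
+ *	static void
+ *	xge_uld_timer(void *arg)
+ *	{
+ *		xge_hal_device_h devh = arg;
+ *
+ *		xge_hal_device_poll(devh);
+ *		(re-arm the OS timer to fire again in ~1ms)
+ *	}
+ */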
+
+/**
+ * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
+ * @hldev: HAL device handle.
+ *
+ * This function is used to set the adapter to enhanced mode.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
+ */
+void
+xge_hal_rts_rth_init(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ /*
+ * Set the receive traffic steering mode from default(classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+ val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+}
+
+/**
+ * xge_hal_rts_rth_clr - Clear RTS hashing.
+ * @hldev: HAL device handle.
+ *
+ * This function is used to clear all RTS hashing related stuff.
+ * It brings the adapter out from enhanced mode to classic mode.
+ * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
+ *
+ * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
+ */
+void
+xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ /*
+ * Set the receive traffic steering mode from default(classic)
+ * to enhanced.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+ val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_ctrl);
+ val64 = 0;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+}
+
+/**
+ * xge_hal_rts_rth_set - Set/configure RTS hashing.
+ * @hldev: HAL device handle.
+ * @def_q: Default queue.
+ * @hash_type: Hash type, e.g. TcpIpV4, TcpIpV6, etc.
+ * @bucket_size: Number of least significant bits to be used for hashing.
+ *
+ * Used to set/configure all RTS hashing related parameters:
+ * - set the steering mode to enhanced;
+ * - set the hash function (i.e., algorithm selection);
+ * - set the default queue.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
+ */
+void
+xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
+ u16 bucket_size)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_default_q);
+
+ val64 = hash_type;
+ val64 |= XGE_HAL_RTS_RTH_EN;
+ val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
+ val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+}
+
+/**
+ * xge_hal_rts_rth_start - Start RTS hashing.
+ * @hldev: HAL device handle.
+ *
+ * Used to start RTS hashing.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_stop().
+ */
+void
+xge_hal_rts_rth_start(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_rth_cfg);
+ val64 |= XGE_HAL_RTS_RTH_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+}
+
+/**
+ * xge_hal_rts_rth_stop - Stop the RTS hashing.
+ * @hldev: HAL device handle.
+ *
+ * Used to stop RTS hashing.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start().
+ */
+void
+xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_rth_cfg);
+ val64 &= ~XGE_HAL_RTS_RTH_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_cfg);
+}
+
+/**
+ * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
+ * @hldev: HAL device handle.
+ * @itable: Pointer to the indirection table.
+ * @itable_size: Number of entries in the indirection table.
+ *
+ * Used to set/configure the indirection table.
+ * It enables the required number of entries in the IT
+ * and adds the entries to the IT.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
+ */
+xge_hal_status_e
+xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ u32 idx;
+
+ for (idx = 0; idx < itable_size; idx++) {
+ val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
+ XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_map_mem_data);
+
+ /* execute */
+ val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_map_mem_ctrl);
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_rth_map_mem_ctrl, 0,
+ XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ /* upper layer may require to repeat */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+ }
+
+ return XGE_HAL_OK;
+}
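+
+/*
+ * Illustrative sketch (not part of the HAL sources): a typical RTH
+ * bring-up sequence using the four APIs above. The hash-type constant
+ * and the table contents are assumptions.
+ *
+ *	u8 it[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };  (spread over 4 rings)
+ *	u64 hash_type = (TCP-over-IPv4 hash-type bits from xgehal-regs.h);
+ *
+ *	xge_hal_rts_rth_init(hldev);
+ *	xge_hal_rts_rth_set(hldev, 0, hash_type, 3);  (def_q 0, 3 LSBs =
+ *	    8 buckets, matching the 8-entry table)
+ *	while (xge_hal_rts_rth_itable_set(hldev, it, 8) ==
+ *	    XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING)
+ *		(repeat, per the return contract above);
+ *	xge_hal_rts_rth_start(hldev);
+ */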
+
+
+/**
+ * xge_hal_device_rts_rth_key_set - Configure 40-byte secret for hash calc.
+ *
+ * @hldev: HAL device handle.
+ * @KeySize: Number of 64-bit words in the key.
+ * @Key: Up to 40-byte array of 8-bit values.
+ * This function configures the 40-byte secret which is used for hash
+ * calculation.
+ *
+ * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
+ */
+void
+xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
+ u64 val64;
+ u32 entry, nreg, i;
+
+ entry = 0;
+ nreg = 0;
+
+ while( KeySize ) {
+ val64 = 0;
+ for ( i = 0; i < 8 ; i++) {
+ /* Prepare 64-bit word for 'nreg' containing 8 key bytes. */
+ if (i)
+ val64 <<= 8;
+ val64 |= Key[entry++];
+ }
+
+ KeySize--;
+
+ /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_hash_mask[nreg++]);
+ }
+
+ while( nreg < 5 ) {
+ /* Clear the rest if key is less than 40 bytes */
+ val64 = 0;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_rth_hash_mask[nreg++]);
+ }
+}
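+
+/*
+ * Illustrative sketch (not part of the HAL sources): programming the
+ * full 40-byte hash key. @KeySize counts 64-bit words, so 5 words cover
+ * all 40 bytes; for shorter keys the API zero-fills the remaining
+ * hash-mask registers itself.
+ *
+ *	u8 key[40];   (filled with the 40 secret bytes)
+ *
+ *	xge_hal_device_rts_rth_key_set(hldev, 5, key);
+ */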
+
+
+/**
+ * xge_hal_device_is_closed - Device is closed
+ *
+ * @devh: HAL device handle.
+ */
+int
+xge_hal_device_is_closed(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (xge_list_is_empty(&hldev->fifo_channels) &&
+ xge_list_is_empty(&hldev->ring_channels))
+ return 1;
+
+ return 0;
+}
+
+xge_hal_status_e
+xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
+{
+ u64 val64;
+ int section;
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+
+ /*
+ * Calculate the section value
+ */
+ section = index / 32;
+
+ xge_debug_device(XGE_TRACE, "the Section value is %d ", section);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_mac_cfg);
+ switch(section)
+ {
+ case 0:
+ val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
+ break;
+ case 1:
+ val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
+ break;
+ case 2:
+ val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
+ break;
+ case 3:
+ val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
+ break;
+ case 4:
+ val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
+ break;
+ case 5:
+ val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
+ break;
+ case 6:
+ val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
+ break;
+ case 7:
+ val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
+ break;
+ default:
+ xge_debug_device(XGE_ERR, "Invalid Section value %d "
+ , section);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_mac_cfg);
+ return XGE_HAL_OK;
+}
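+
+/*
+ * Worked example (editorial, not part of the HAL sources): sections
+ * group 32 MAC-address entries each, so enabling entry index 40
+ * selects section 40 / 32 = 1, i.e. sets XGE_HAL_RTS_MAC_SECT1_EN:
+ *
+ *	status = xge_hal_device_rts_section_enable(devh, 40);
+ */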
+
+
diff --git a/sys/dev/nxge/xgehal/xgehal-driver.c b/sys/dev/nxge/xgehal/xgehal-driver.c
new file mode 100644
index 0000000..c8d1989
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-driver.c
@@ -0,0 +1,300 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-driver.c
+ *
+ * Description: HAL driver object functionality
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-driver.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+static xge_hal_driver_t g_driver;
+xge_hal_driver_t *g_xge_hal_driver = NULL;
+char *g_xge_hal_log = NULL;
+
+#ifdef XGE_OS_MEMORY_CHECK
+xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX];
+int g_malloc_cnt = 0;
+#endif
+
+/*
+ * Runtime tracing support
+ */
+static unsigned long g_module_mask_default = 0;
+unsigned long *g_module_mask = &g_module_mask_default;
+static int g_level_default = 0;
+int *g_level = &g_level_default;
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+static xge_os_tracebuf_t g_tracebuf;
+char *dmesg, *dmesg_start;
+
+/**
+ * xge_hal_driver_tracebuf_dump - Dump the trace buffer.
+ *
+ * Dump the trace buffer contents.
+ */
+void
+xge_hal_driver_tracebuf_dump(void)
+{
+ int i;
+ int off = 0;
+
+ if (g_xge_os_tracebuf == NULL) {
+ return;
+ }
+
+ xge_os_printf("################ Trace dump Begin ###############");
+ if (g_xge_os_tracebuf->wrapped_once) {
+ for (i = 0; i < g_xge_os_tracebuf->size -
+ g_xge_os_tracebuf->offset; i += off) {
+ if (*(dmesg_start + i))
+ xge_os_printf(dmesg_start + i);
+ off = xge_os_strlen(dmesg_start + i) + 1;
+ }
+ }
+ for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
+ if (*(dmesg + i))
+ xge_os_printf(dmesg + i);
+ off = xge_os_strlen(dmesg + i) + 1;
+ }
+ xge_os_printf("################ Trace dump End ###############");
+}
+
+xge_hal_status_e
+xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize)
+{
+ int i;
+ int off = 0, retbuf_off = 0;
+
+ *retsize = 0;
+ *retbuf = 0;
+
+ if (g_xge_os_tracebuf == NULL) {
+ return XGE_HAL_FAIL;
+ }
+
+ if (g_xge_os_tracebuf->wrapped_once) {
+ for (i = 0; i < g_xge_os_tracebuf->size -
+ g_xge_os_tracebuf->offset; i += off) {
+ if (*(dmesg_start + i)) {
+ xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg_start + i);
+ retbuf_off += xge_os_strlen(dmesg_start + i) + 1;
+ if (retbuf_off > bufsize)
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ off = xge_os_strlen(dmesg_start + i) + 1;
+ }
+ }
+ for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
+ if (*(dmesg + i)) {
+ xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg + i);
+ retbuf_off += xge_os_strlen(dmesg + i) + 1;
+ if (retbuf_off > bufsize)
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ off = xge_os_strlen(dmesg + i) + 1;
+ }
+
+ *retsize = retbuf_off;
+ *(retbuf + retbuf_off) = 0;
+
+ return XGE_HAL_OK;
+}
+#endif
+xge_os_tracebuf_t *g_xge_os_tracebuf = NULL;
+
+#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
+void
+xge_hal_driver_bar0_offset_check(void)
+{
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, adapter_status) ==
+ 0x108);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_traffic_int) ==
+ 0x08E0);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, dtx_control) ==
+ 0x09E8);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_fifo_partition_0) ==
+ 0x1108);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_enable) ==
+ 0x1170);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, prc_rxd0_n[0]) ==
+ 0x1930);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rti_command_mem) ==
+ 0x19B8);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_cfg) ==
+ 0x2100);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rmac_addr_cmd_mem) ==
+ 0x2128);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_link_util) ==
+ 0x2170);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_pause_thresh_q0q3) ==
+ 0x2918);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_err_reg) ==
+ 0x1040);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rxdma_int_status) ==
+ 0x1800);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_tmac_err_reg) ==
+ 0x2010);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_err_reg) ==
+ 0x2810);
+ xge_assert(xge_offsetof(xge_hal_pci_bar0_t, xgxs_int_status) ==
+ 0x3000);
+}
+#endif
+
+/**
+ * xge_hal_driver_initialize - Initialize HAL.
+ * @config: HAL configuration, see xge_hal_driver_config_t{}.
+ * @uld_callbacks: Upper-layer driver callbacks, e.g. link-up.
+ *
+ * HAL initialization entry point. Not to be confused with device
+ * initialization (note that HAL "contains" zero or more Xframe devices).
+ *
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_ERR_BAD_DRIVER_CONFIG - Driver configuration params invalid.
+ *
+ * See also: xge_hal_device_initialize(), xge_hal_status_e{},
+ * xge_hal_uld_cbs_t{}.
+ */
+xge_hal_status_e
+xge_hal_driver_initialize(xge_hal_driver_config_t *config,
+ xge_hal_uld_cbs_t *uld_callbacks)
+{
+ xge_hal_status_e status;
+
+ g_xge_hal_driver = &g_driver;
+
+ xge_hal_driver_debug_module_mask_set(XGE_DEBUG_MODULE_MASK_DEF);
+ xge_hal_driver_debug_level_set(XGE_DEBUG_LEVEL_DEF);
+
+#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
+ xge_hal_driver_bar0_offset_check();
+#endif
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ if (config->tracebuf_size == 0)
+ /*
+ * The trace buffer implementation is not lock protected.
+ * The only harm to expect is memcpy() going beyond the
+ * allowed boundaries. To make it safe (driver-wise),
+ * we pre-allocate the needed number of extra bytes.
+ */
+ config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
+ XGE_OS_TRACE_MSGBUF_MAX;
+#endif
+
+ status = __hal_driver_config_check(config);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ xge_os_memzero(g_xge_hal_driver, sizeof(xge_hal_driver_t));
+
+ /* apply config */
+ xge_os_memcpy(&g_xge_hal_driver->config, config,
+ sizeof(xge_hal_driver_config_t));
+
+ /* apply ULD callbacks */
+ xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks,
+ sizeof(xge_hal_uld_cbs_t));
+
+ g_xge_hal_driver->is_initialized = 1;
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ g_tracebuf.size = config->tracebuf_size;
+ g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size);
+ if (g_tracebuf.data == NULL) {
+ xge_os_printf("cannot allocate trace buffer!");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ /* timestamps disabled by default */
+ g_tracebuf.timestamp = config->tracebuf_timestamp_en;
+ if (g_tracebuf.timestamp) {
+ xge_os_timestamp(g_tracebuf.msg);
+ g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
+ xge_os_strlen(g_tracebuf.msg);
+ } else
+ g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
+ g_tracebuf.offset = 0;
+ *g_tracebuf.msg = 0;
+ xge_os_memzero(g_tracebuf.data, g_tracebuf.size);
+ g_xge_os_tracebuf = &g_tracebuf;
+ dmesg = g_tracebuf.data;
+ *dmesg = 0;
+#endif
+ return XGE_HAL_OK;
+}
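+
+/*
+ * Illustrative sketch (not part of the HAL sources): minimal HAL
+ * bring-up from a ULD attach routine. The xge_uld_* handlers are
+ * assumed to be supplied by the ULD, and real code would fill the
+ * config fields that __hal_driver_config_check() expects.
+ *
+ *	xge_hal_driver_config_t drv_config;
+ *	xge_hal_uld_cbs_t uld_cbs;
+ *
+ *	xge_os_memzero(&drv_config, sizeof(drv_config));
+ *	xge_os_memzero(&uld_cbs, sizeof(uld_cbs));
+ *	uld_cbs.link_up = xge_uld_link_up;
+ *	uld_cbs.link_down = xge_uld_link_down;
+ *	uld_cbs.crit_err = xge_uld_crit_err;
+ *
+ *	if (xge_hal_driver_initialize(&drv_config, &uld_cbs) != XGE_HAL_OK)
+ *		(fail the attach);
+ */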
+
+/**
+ * xge_hal_driver_terminate - Terminate HAL.
+ *
+ * HAL termination entry point.
+ *
+ * See also: xge_hal_device_terminate().
+ */
+void
+xge_hal_driver_terminate(void)
+{
+ g_xge_hal_driver->is_initialized = 0;
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+ if (g_tracebuf.size) {
+ xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size);
+ }
+#endif
+
+ g_xge_hal_driver = NULL;
+
+#ifdef XGE_OS_MEMORY_CHECK
+ {
+ int i, leaks=0;
+ xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt);
+ for (i=0; i<g_malloc_cnt; i++) {
+ if (g_malloc_arr[i].ptr != NULL) {
+ xge_os_printf("OSPAL: memory leak detected at "
+ "%s:%d:"XGE_OS_LLXFMT":%d",
+ g_malloc_arr[i].file,
+ g_malloc_arr[i].line,
+ (unsigned long long)(ulong_t)
+ g_malloc_arr[i].ptr,
+ g_malloc_arr[i].size);
+ leaks++;
+ }
+ }
+ if (leaks) {
+ xge_os_printf("OSPAL: %d memory leaks detected", leaks);
+ } else {
+ xge_os_printf("OSPAL: no memory leaks detected");
+ }
+ }
+#endif
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-fifo-fp.c b/sys/dev/nxge/xgehal/xgehal-fifo-fp.c
new file mode 100644
index 0000000..4f59674
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-fifo-fp.c
@@ -0,0 +1,1175 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-fifo-fp.c
+ *
+ * Description: Tx fifo object functionality (fast path)
+ *
+ * Created: 10 June 2004
+ */
+
+#ifdef XGE_DEBUG_FP
+#include <dev/nxge/include/xgehal-fifo.h>
+#endif
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
+__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t*)dtrh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+
+ xge_assert(txdp);
+ txdl_priv = (xge_hal_fifo_txdl_priv_t *)
+ (ulong_t)txdp->host_control;
+
+ xge_assert(txdl_priv);
+ xge_assert(txdl_priv->dma_object);
+ xge_assert(txdl_priv->dma_addr);
+
+ xge_assert(txdl_priv->dma_object->handle == txdl_priv->dma_handle);
+
+ return txdl_priv;
+}
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ u64 ctrl_1)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_hw_pair_t *hw_pair = fifo->hw_pair;
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ u64 ctrl;
+
+ txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;
+
+#ifdef XGE_DEBUG_ASSERT
+ /* make sure Xena overwrites the (illegal) t_code value on completion */
+ XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
+#endif
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ /* sync the TxDL to device */
+ xge_os_dma_sync(fifo->channel.pdev,
+ txdl_priv->dma_handle,
+ txdl_priv->dma_addr,
+ txdl_priv->dma_offset,
+ txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+ /* write the pointer first */
+ xge_os_pio_mem_write64(fifo->channel.pdev,
+ fifo->channel.regh1,
+ txdl_priv->dma_addr,
+ &hw_pair->txdl_pointer);
+
+ /* spec: 0x00 = 1 TxD in the list */
+ ctrl = XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv->frags - 1);
+ ctrl |= ctrl_1;
+ ctrl |= fifo->no_snoop_bits;
+
+ if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
+ ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
+ }
+
+ /*
+ * according to the XENA spec:
+ *
+ * It is important to note that pointers and list control words are
+ * always written in pairs: in the first write, the host must write a
+ * pointer, and in the second write, it must write the list control
+ * word. Any other access will result in an error. Also, all 16 bytes
+ * of the pointer/control structure must be written, including any
+ * reserved bytes.
+ */
+ xge_os_wmb();
+
+ /*
+ * we want touch work_arr in order with ownership bit set to HW
+ */
+ __hal_channel_dtr_post(channelh, dtrh);
+
+ xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
+ ctrl, &hw_pair->list_control);
+
+ xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
+ "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
+ (unsigned long long)ctrl,
+ (unsigned long long)(ulong_t)&hw_pair->txdl_pointer);
+
+#ifdef XGE_HAL_FIFO_DUMP_TXD
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
+ XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
+ txdp->control_1, txdp->control_2, txdp->buffer_pointer,
+ txdp->host_control, txdl_priv->dma_addr);
+#endif
+
+ fifo->channel.stats.total_posts++;
+ fifo->channel.usage_cnt++;
+ if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
+ fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
+}
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
+ xge_hal_fifo_txd_t *txdp, int list_size, int frags)
+{
+ xge_hal_fifo_txdl_priv_t *current_txdl_priv;
+ xge_hal_fifo_txdl_priv_t *next_txdl_priv;
+ int invalid_frags = frags % list_size;
+ if (invalid_frags){
+ xge_debug_fifo(XGE_ERR,
+ "freeing corrupt dtrh %p, fragments %d list size %d",
+ txdp, frags, list_size);
+ xge_assert(invalid_frags == 0);
+ }
+ while(txdp){
+ xge_debug_fifo(XGE_TRACE,
+ "freeing linked dtrh %p, fragments %d list size %d",
+ txdp, frags, list_size);
+ current_txdl_priv = __hal_fifo_txdl_priv(txdp);
+#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
+ current_txdl_priv->allocated = 0;
+#endif
+ __hal_channel_dtr_free(channelh, txdp);
+ next_txdl_priv = current_txdl_priv->next_txdl_priv;
+ xge_assert(frags);
+ frags -= list_size;
+ if (next_txdl_priv) {
+ current_txdl_priv->next_txdl_priv = NULL;
+ txdp = next_txdl_priv->first_txdp;
+ }
+ else {
+ xge_debug_fifo(XGE_TRACE,
+ "freed linked dtrh fragments %d list size %d",
+ frags, list_size);
+ break;
+ }
+ }
+ xge_assert(frags == 0);
+}
+
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
+ xge_hal_fifo_txd_t *txdp, int txdl_count)
+{
+ xge_hal_fifo_txdl_priv_t *current_txdl_priv;
+ xge_hal_fifo_txdl_priv_t *next_txdl_priv;
+ int i = txdl_count;
+
+ xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
+ txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);
+
+ current_txdl_priv = __hal_fifo_txdl_priv(txdp);
+ do{
+ xge_assert(i);
+#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
+ current_txdl_priv->allocated = 0;
+#endif
+ next_txdl_priv = current_txdl_priv->next_txdl_priv;
+ txdp = current_txdl_priv->first_txdp;
+ current_txdl_priv->next_txdl_priv = NULL;
+ __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i);
+ xge_debug_fifo(XGE_TRACE,
+ "dtrh %p restored at offset %d", txdp, i);
+ current_txdl_priv = next_txdl_priv;
+ } while(current_txdl_priv);
+ __hal_channel_dtr_restore(channelh, NULL, txdl_count);
+}
+/**
+ * xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Retrieve per-descriptor private data.
+ * Note that ULD requests per-descriptor space via
+ * xge_hal_channel_open().
+ *
+ * Returns: private ULD data associated with the descriptor.
+ * Usage: See ex_xmit{} and ex_tx_compl{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
+xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+
+ return ((char *)(ulong_t)txdp->host_control) +
+ sizeof(xge_hal_fifo_txdl_priv_t);
+}
+
+/**
+ * xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
+ * descriptor.
+ * @dtrh: Descriptor handle.
+ *
+ * Returns: Number of buffers stored in the given descriptor. Can be used
+ * _after_ the descriptor is set up for posting (see
+ * xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
+ * xge_hal_fifo_dtr_free()).
+ *
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
+xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+
+ return txdl_priv->frags;
+}
+/**
+ * xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more
+ * than single txdl.
+ * @channelh: Channel handle.
+ * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
+ * with a valid handle.
+ * @frags: minimum number of fragments to be reserved.
+ *
+ * Reserve TxDL(s) (that is, fifo descriptors)
+ * for subsequent filling-in by the upper-layer driver (ULD)
+ * and posting on the corresponding channel (@channelh)
+ * via xge_hal_fifo_dtr_post().
+ *
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
+ *
+ * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
+ * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
+ xge_hal_dtr_h *dtrh, const int frags)
+{
+ xge_hal_status_e status = XGE_HAL_OK;
+ int alloc_frags = 0, dang_frags = 0;
+ xge_hal_fifo_txd_t *curr_txdp = NULL;
+ xge_hal_fifo_txd_t *next_txdp;
+ xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ int max_frags = fifo->config->max_frags;
+ xge_hal_dtr_h dang_dtrh = NULL;
+#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ unsigned long flags=0;
+#endif
+ xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
+ frags);
+ xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_lock(&fifo->channel.reserve_lock);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
+#endif
+ while(alloc_frags < frags) {
+ status = __hal_channel_dtr_alloc(channelh,
+ (xge_hal_dtr_h *)(void*)&next_txdp);
+ if (status != XGE_HAL_OK){
+ xge_debug_fifo(XGE_ERR,
+ "failed to allocate linked fragments rc %d",
+ status);
+ xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
+ if (*dtrh) {
+ xge_assert(alloc_frags/max_frags);
+ __hal_fifo_txdl_restore_many(channelh,
+ (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
+ }
+ if (dang_dtrh) {
+ xge_assert(dang_frags/max_frags);
+ __hal_fifo_txdl_restore_many(channelh,
+ (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
+ }
+ break;
+ }
+ xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
+ " for frags %d", next_txdp, frags);
+ next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
+ xge_assert(next_txdl_priv);
+ xge_assert(next_txdl_priv->first_txdp == next_txdp);
+ next_txdl_priv->dang_txdl = NULL;
+ next_txdl_priv->dang_frags = 0;
+ next_txdl_priv->next_txdl_priv = NULL;
+#if defined(XGE_OS_MEMORY_CHECK)
+ next_txdl_priv->allocated = 1;
+#endif
+ if (!curr_txdp || !curr_txdl_priv) {
+ curr_txdp = next_txdp;
+ curr_txdl_priv = next_txdl_priv;
+ *dtrh = (xge_hal_dtr_h)next_txdp;
+ alloc_frags = max_frags;
+ continue;
+ }
+ if (curr_txdl_priv->memblock ==
+ next_txdl_priv->memblock) {
+ xge_debug_fifo(XGE_TRACE,
+ "linking dtrh %p, with %p",
+ *dtrh, next_txdp);
+ xge_assert (next_txdp ==
+ curr_txdp + max_frags);
+ alloc_frags += max_frags;
+ curr_txdl_priv->next_txdl_priv = next_txdl_priv;
+ }
+ else {
+ xge_assert(*dtrh);
+ xge_assert(dang_dtrh == NULL);
+ dang_dtrh = *dtrh;
+ dang_frags = alloc_frags;
+ xge_debug_fifo(XGE_TRACE,
+ "dangling dtrh %p, linked with dtrh %p",
+ *dtrh, next_txdp);
+ next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
+ next_txdl_priv->dang_frags = alloc_frags;
+ alloc_frags = max_frags;
+ *dtrh = next_txdp;
+ }
+ curr_txdp = next_txdp;
+ curr_txdl_priv = next_txdl_priv;
+ }
+
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_unlock(&fifo->channel.reserve_lock);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
+#endif
+
+ if (status == XGE_HAL_OK) {
+ xge_hal_fifo_txdl_priv_t * txdl_priv;
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
+ xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
+ txdl_priv = __hal_fifo_txdl_priv(txdp);
+ /* reset the TxDL's private */
+ txdl_priv->align_dma_offset = 0;
+ txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
+ txdl_priv->align_used_frags = 0;
+ txdl_priv->frags = 0;
+ txdl_priv->bytes_sent = 0;
+ txdl_priv->alloc_frags = alloc_frags;
+ /* reset TxD0 */
+ txdp->control_1 = txdp->control_2 = 0;
+
+#if defined(XGE_OS_MEMORY_CHECK)
+ txdl_priv->allocated = 1;
+#endif
+ /* update statistics */
+ statsp->total_posts_dtrs_many++;
+ statsp->total_posts_frags_many += txdl_priv->alloc_frags;
+ if (txdl_priv->dang_frags){
+ statsp->total_posts_dang_dtrs++;
+ statsp->total_posts_dang_frags += txdl_priv->dang_frags;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve a single TxDL (that is, fifo descriptor)
+ * for subsequent filling-in by the upper-layer driver (ULD)
+ * and posting on the corresponding channel (@channelh)
+ * via xge_hal_fifo_dtr_post().
+ *
+ * Note: it is the responsibility of ULD to reserve multiple descriptors
+ * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
+ * carries up to configured number (fifo.max_frags) of contiguous buffers.
+ *
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
+ *
+ * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
+ * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+{
+ xge_hal_status_e status;
+#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ unsigned long flags=0;
+#endif
+
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
+ flags);
+#endif
+
+ status = __hal_channel_dtr_alloc(channelh, dtrh);
+
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
+ flags);
+#endif
+
+ if (status == XGE_HAL_OK) {
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+
+ txdl_priv = __hal_fifo_txdl_priv(txdp);
+
+ /* reset the TxDL's private */
+ txdl_priv->align_dma_offset = 0;
+ txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
+ txdl_priv->align_used_frags = 0;
+ txdl_priv->frags = 0;
+ txdl_priv->alloc_frags =
+ ((xge_hal_fifo_t *)channelh)->config->max_frags;
+ txdl_priv->dang_txdl = NULL;
+ txdl_priv->dang_frags = 0;
+ txdl_priv->next_txdl_priv = NULL;
+ txdl_priv->bytes_sent = 0;
+
+ /* reset TxD0 */
+ txdp->control_1 = txdp->control_2 = 0;
+
+#if defined(XGE_OS_MEMORY_CHECK)
+ txdl_priv->allocated = 1;
+#endif
+ }
+
+ return status;
+}
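+
+/*
+ * Illustrative sketch (not part of the HAL sources): the common
+ * reserve/fill/post transmit path, in the spirit of the ex_xmit{}
+ * example referenced above. dma_addr and len are assumed to come from
+ * the ULD's own DMA mapping of the outgoing frame.
+ *
+ *	xge_hal_dtr_h dtr;
+ *
+ *	if (xge_hal_fifo_dtr_reserve(channelh, &dtr) != XGE_HAL_OK)
+ *		(queue exhausted - back off and retry later);
+ *	xge_hal_fifo_dtr_buffer_set(channelh, dtr, 0, dma_addr, len);
+ *	xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN |
+ *	    XGE_HAL_TXD_TX_CKO_TCP_EN);
+ *	xge_hal_fifo_dtr_post(channelh, dtr);
+ */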
+
+/**
+ * xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
+ * the ULD-provided "scratch" memory.
+ * @channelh: Channel handle.
+ * @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
+ * @dtr_sp: "Scratch pad" supplied by upper-layer driver (ULD).
+ *
+ * Reserve TxDL and fill-in the ULD-supplied "scratch pad". The difference
+ * between this API and xge_hal_fifo_dtr_reserve() is (possibly)
+ * performance.
+ *
+ * If the upper layer uses ULP-defined commands, and if those commands
+ * have enough space for HAL/Xframe descriptors - then it is better
+ * (read: faster) to fit all the per-command information into one
+ * command, which is typically one contiguous block.
+ *
+ * Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
+ * allocate a single descriptor for transmit operation.
+ *
+ * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
+ * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size,
+ xge_hal_dtr_h dtr_sp)
+{
+ /* FIXME: implement */
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
+ * xge_hal_fifo_dtr_reserve_sp()
+ * @frags: Number of contiguous buffers that are part of a single
+ * transmit operation.
+ *
+ * Post descriptor on the 'fifo' type channel for transmission.
+ * Prior to posting the descriptor should be filled in accordance with
+ * Host/Xframe interface specification for a given service (LL, etc.).
+ *
+ * See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp_last;
+ xge_hal_fifo_txd_t *txdp_first;
+#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ unsigned long flags = 0;
+#endif
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+
+ txdp_first = (xge_hal_fifo_txd_t *)dtrh;
+ txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
+ txdp_first->control_2 |= fifo->interrupt_type;
+
+ txdp_last = (xge_hal_fifo_txd_t *)dtrh + (txdl_priv->frags - 1);
+ txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;
+
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_lock(fifo->post_lock_ptr);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_lock_irq(fifo->post_lock_ptr, flags);
+#endif
+
+ __hal_fifo_dtr_post_single(channelh, dtrh,
+ (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));
+
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_unlock(fifo->post_lock_ptr);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_unlock_irq(fifo->post_lock_ptr, flags);
+#endif
+}
+
+/**
+ * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
+ * channel.
+ * @channelh: Channel to post descriptor.
+ * @num: Number of descriptors (i.e., fifo TxDLs) in the %dtrs[].
+ * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
+ * @frags_arr: Number of fragments carried @dtrs descriptors.
+ * Note that frag_arr[i] corresponds to descriptor dtrs[i].
+ *
+ * Post multiple descriptors on the fifo channel. The operation is atomic:
+ * all descriptors are posted on the channel "back-to-back" without
+ * letting other posts (possibly driven by multiple transmitting threads)
+ * interleave.
+ *
+ * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
+ xge_hal_dtr_h dtrs[])
+{
+ int i;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txd_t *txdp_last;
+ xge_hal_fifo_txd_t *txdp_first;
+ xge_hal_fifo_txdl_priv_t *txdl_priv_last;
+#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ unsigned long flags = 0;
+#endif
+
+ xge_assert(num > 1);
+
+ txdp_first = (xge_hal_fifo_txd_t *)dtrs[0];
+ txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
+ txdp_first->control_2 |= fifo->interrupt_type;
+
+ txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
+ txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
+ (txdl_priv_last->frags - 1);
+ txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;
+
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
+ flags);
+#endif
+
+ for (i=0; i<num; i++) {
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ u64 val64;
+ xge_hal_dtr_h dtrh = dtrs[i];
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+ txdl_priv = txdl_priv; /* Cheat lint */
+
+ val64 = 0;
+ if (i == 0) {
+ val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
+ } else if (i == num -1) {
+ val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
+ }
+
+ val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
+ __hal_fifo_dtr_post_single(channelh, dtrh, val64);
+ }
+
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
+ flags);
+#endif
+
+ fifo->channel.stats.total_posts_many++;
+}
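+
+/*
+ * Illustrative sketch (not part of the HAL sources): posting two
+ * already-reserved and filled TxDLs back-to-back; HAL takes the post
+ * lock once for the whole batch.
+ *
+ *	xge_hal_dtr_h dtrs[2];
+ *
+ *	(reserve and fill dtrs[0] and dtrs[1] as in the single-post case)
+ *	xge_hal_fifo_dtr_post_many(channelh, 2, dtrs);
+ */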
+
+/**
+ * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle. Returned by HAL.
+ * @t_code: Transfer code, as per Xframe User Guide,
+ * Transmit Descriptor Format.
+ * Returned by HAL.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
+ * the upper-layer driver (ULD) of new completed descriptors. After that
+ * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
+ * of the completions (the very first completion is passed by HAL via
+ * xge_hal_channel_callback_f).
+ *
+ * Implementation-wise, the upper-layer driver is free to call
+ * xge_hal_fifo_dtr_next_completed either immediately from inside the
+ * channel callback, or in a deferred fashion and separate (from HAL)
+ * context.
+ *
+ * Non-zero @t_code means failure to process the descriptor.
+ * The failure could happen, for instance, when the link is
+ * down, in which case Xframe completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Xframe User Guide.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: xge_hal_channel_callback_f{},
+ * xge_hal_ring_dtr_next_completed().
+ * Usage: See ex_tx_compl{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
+ xge_hal_dtr_h *dtrh, u8 *t_code)
+{
+ xge_hal_fifo_txd_t *txdp;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+#endif
+
+ __hal_channel_dtr_try_complete(channelh, dtrh);
+ txdp = (xge_hal_fifo_txd_t *)*dtrh;
+ if (txdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ txdl_priv = __hal_fifo_txdl_priv(txdp);
+
+ /* sync TxDL to read the ownership
+ *
+ * Note: 16bytes means Control_1 & Control_2 */
+ xge_os_dma_sync(fifo->channel.pdev,
+ txdl_priv->dma_handle,
+ txdl_priv->dma_addr,
+ txdl_priv->dma_offset,
+ 16,
+ XGE_OS_DMA_DIR_FROMDEVICE);
+#endif
+
+ /* check whether host owns it */
+ if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
+
+ xge_assert(txdp->host_control!=0);
+
+ __hal_channel_dtr_complete(channelh);
+
+ *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);
+
+ /* see XGE_HAL_SET_TXD_T_CODE() above.. */
+ xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);
+
+ if (fifo->channel.usage_cnt > 0)
+ fifo->channel.usage_cnt--;
+
+ return XGE_HAL_OK;
+ }
+
+ /* no more completions */
+ *dtrh = 0;
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
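+
+/*
+ * Illustrative sketch (not part of the HAL sources): draining the
+ * completion list, in the spirit of the ex_tx_compl{} example
+ * referenced above.
+ *
+ *	xge_hal_dtr_h dtr;
+ *	u8 t_code;
+ *
+ *	while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
+ *	    XGE_HAL_OK) {
+ *		if (t_code != 0)
+ *			(count/log the transmit error);
+ *		(unmap and release the ULD buffers tied to dtr)
+ *		xge_hal_fifo_dtr_free(channelh, dtr);
+ *	}
+ */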
+
+/**
+ * xge_hal_fifo_dtr_free - Free descriptor.
+ * @channelh: Channel handle.
+ * @dtr: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
+ * The "free-ing" completes the descriptor's lifecycle.
+ *
+ * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (xge_hal_fifo_dtr_reserve);
+ *
+ * - posted (xge_hal_fifo_dtr_post);
+ *
+ * - completed (xge_hal_fifo_dtr_next_completed);
+ *
+ * - and recycled again (xge_hal_fifo_dtr_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
+ * Usage: See ex_tx_compl{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
+{
+#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ unsigned long flags = 0;
+#endif
+ xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
+ (xge_hal_fifo_txd_t *)dtr);
+ int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
+#if defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
+#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
+ flags);
+#endif
+
+ if (txdl_priv->alloc_frags > max_frags) {
+ xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
+ txdl_priv->dang_txdl;
+ int dang_frags = txdl_priv->dang_frags;
+ int alloc_frags = txdl_priv->alloc_frags;
+ txdl_priv->dang_txdl = NULL;
+ txdl_priv->dang_frags = 0;
+ txdl_priv->alloc_frags = 0;
+ /* dtrh must have a linked list of dtrh */
+ xge_assert(txdl_priv->next_txdl_priv);
+
+ /* free any dangling dtrh first */
+ if (dang_txdp) {
+ xge_debug_fifo(XGE_TRACE,
+ "freeing dangled dtrh %p for %d fragments",
+ dang_txdp, dang_frags);
+ __hal_fifo_txdl_free_many(channelh, dang_txdp,
+ max_frags, dang_frags);
+ }
+
+ /* now free the reserved dtrh list */
+ xge_debug_fifo(XGE_TRACE,
+ "freeing dtrh %p list of %d fragments", dtr,
+ alloc_frags);
+ __hal_fifo_txdl_free_many(channelh,
+ (xge_hal_fifo_txd_t *)dtr, max_frags,
+ alloc_frags);
+ }
+ else
+ __hal_channel_dtr_free(channelh, dtr);
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;
+
+#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
+ __hal_fifo_txdl_priv(dtr)->allocated = 0;
+#endif
+
+#if defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
+#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
+ flags);
+#endif
+}
+
+
+/**
+ * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
+ * in fifo descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
+ * (of buffers).
+ * @vaddr: Virtual address of the data buffer.
+ * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
+ * @size: Size of the data buffer (in bytes).
+ * @misaligned_size: Size (in bytes) of the misaligned portion of the
+ * data buffer. Calculated by the caller, based on the platform/OS/other
+ * specific criteria, which is outside of HAL's domain. See notes below.
+ *
+ * This API is part of the transmit descriptor preparation for posting
+ * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
+ * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
+ * All three APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Xframe specification.
+ * On the PCI-X based systems aligning transmit data typically provides better
+ * transmit performance. The typical alignment granularity: L2 cacheline size.
+ * However, HAL does not make assumptions in terms of the alignment granularity;
+ * this is specified via additional @misaligned_size parameter described above.
+ * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
+ * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
+ * provides a separate xge_hal_check_alignment() API sufficient to cover
+ * most (but not all) possible alignment criteria.
+ * If the buffer appears to be aligned, the ULD calls
+ * xge_hal_fifo_dtr_buffer_set().
+ * Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
+ *
+ * Note: This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
+ * addition to filling in the specified descriptor it aligns transmit data on
+ * the specified boundary.
+ * Note: Decision on whether to align or not to align a given contiguous
+ * transmit buffer is outside of HAL's domain. To this end ULD can use any
+ * programmable criteria, which can help to 1) boost transmit performance,
+ * and/or 2) provide a workaround for PCI bridge bugs, if any.
+ *
+ * See also: xge_hal_fifo_dtr_buffer_set(),
+ * xge_hal_check_alignment().
+ *
+ * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
+ * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
+ xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
+ dma_addr_t dma_pointer, int size, int misaligned_size)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp;
+ int remaining_size;
+ ptrdiff_t prev_boff;
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+ txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
+
+ if (frag_idx != 0) {
+ txdp->control_1 = txdp->control_2 = 0;
+ }
+
+ /* On some systems buffer size could be zero.
+ * It is the responsibility of ULD and *not HAL* to
+ * detect it and skip it. */
+ xge_assert(size > 0);
+ xge_assert(frag_idx < txdl_priv->alloc_frags);
+ xge_assert(misaligned_size != 0 &&
+ misaligned_size <= fifo->config->alignment_size);
+
+ remaining_size = size - misaligned_size;
+ xge_assert(remaining_size >= 0);
+
+ xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
+ vaddr, misaligned_size);
+
+ if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
+ return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
+ }
+
+ /* setup new buffer */
+ prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
+ txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
+ txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
+ txdl_priv->bytes_sent += misaligned_size;
+ fifo->channel.stats.total_buffers++;
+ txdl_priv->frags++;
+ txdl_priv->align_used_frags++;
+ txdl_priv->align_vaddr_start += fifo->config->alignment_size;
+ txdl_priv->align_dma_offset = 0;
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC)
+ /* sync new buffer */
+ xge_os_dma_sync(fifo->channel.pdev,
+ txdl_priv->align_dma_handle,
+ txdp->buffer_pointer,
+ 0,
+ misaligned_size,
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+
+ if (remaining_size) {
+ xge_assert(frag_idx < txdl_priv->alloc_frags);
+ txdp++;
+ txdp->buffer_pointer = (u64)dma_pointer +
+ misaligned_size;
+ txdp->control_1 =
+ XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
+ txdl_priv->bytes_sent += remaining_size;
+ txdp->control_2 = 0;
+ fifo->channel.stats.total_buffers++;
+ txdl_priv->frags++;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
+ * contiguous data buffer to a single physically contiguous buffer.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @vaddr: Virtual address of the data buffer.
+ * @size: Size of the data buffer (in bytes).
+ *
+ * This API is part of the transmit descriptor preparation for posting
+ * (via xge_hal_fifo_dtr_post()).
+ * The main difference between this API and
+ * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
+ * contents of virtually contiguous data buffers received from the
+ * upper layer into a single physically contiguous data buffer, and the
+ * device will do a DMA from this buffer.
+ *
+ * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
+ * xge_hal_fifo_dtr_buffer_set_aligned().
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ void *vaddr, int size)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ ptrdiff_t used;
+
+ xge_assert(size > 0);
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+
+ used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
+ used += txdl_priv->align_dma_offset;
+ if (used + (unsigned int)size > (unsigned int)fifo->align_size)
+ return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
+
+ xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
+ txdl_priv->align_dma_offset, vaddr, size);
+
+ fifo->channel.stats.copied_frags++;
+
+ txdl_priv->align_dma_offset += size;
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_fifo_dtr_buffer_finalize - Prepares a descriptor that contains the
+ * single physically contiguous buffer.
+ *
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the Txdl list.
+ *
+ * This API, in conjunction with xge_hal_fifo_dtr_buffer_append(), prepares
+ * a descriptor that consists of a single physically contiguous buffer
+ * which in turn contains the contents of one or more virtually contiguous
+ * buffers received from the upper layer.
+ *
+ * See Also: xge_hal_fifo_dtr_buffer_append().
+*/
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int frag_idx)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp;
+ ptrdiff_t prev_boff;
+
+ xge_assert(frag_idx < fifo->config->max_frags);
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+ txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
+
+ if (frag_idx != 0) {
+ txdp->control_1 = txdp->control_2 = 0;
+ }
+
+ prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
+ txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
+ txdp->control_1 |=
+ XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
+ txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
+ fifo->channel.stats.total_buffers++;
+ fifo->channel.stats.copied_buffers++;
+ txdl_priv->frags++;
+ txdl_priv->align_used_frags++;
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC)
+ /* sync pre-mapped buffer */
+ xge_os_dma_sync(fifo->channel.pdev,
+ txdl_priv->align_dma_handle,
+ txdp->buffer_pointer,
+ 0,
+ txdl_priv->align_dma_offset,
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+
+ /* increment vaddr_start for the next buffer_append() iteration */
+ txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
+ txdl_priv->align_dma_offset = 0;
+}
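+
+/*
+ * Illustrative sketch (not part of the HAL sources): coalescing two
+ * small virtually-contiguous buffers (e.g. a protocol header and a
+ * payload piece) into one DMA-able fragment. hdr/hdr_len, data/data_len
+ * and frag_idx are assumed ULD-side values.
+ *
+ *	if (xge_hal_fifo_dtr_buffer_append(channelh, dtr, hdr,
+ *	    hdr_len) != XGE_HAL_OK ||
+ *	    xge_hal_fifo_dtr_buffer_append(channelh, dtr, data,
+ *	    data_len) != XGE_HAL_OK)
+ *		(fall back to per-fragment xge_hal_fifo_dtr_buffer_set());
+ *	xge_hal_fifo_dtr_buffer_finalize(channelh, dtr, frag_idx);
+ */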
+
+/**
+ * xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
+ * descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
+ * (of buffers).
+ * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
+ * @size: Size of the data buffer (in bytes).
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
+ * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
+ * All three APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Xframe specification.
+ *
+ * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
+ * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits(),
+ * xge_hal_fifo_dtr_vlan_set(), xge_hal_check_alignment().
+ * Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
+ *
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ int frag_idx, dma_addr_t dma_pointer, int size)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp;
+
+ txdl_priv = __hal_fifo_txdl_priv(dtrh);
+ txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
+
+ if (frag_idx != 0) {
+ txdp->control_1 = txdp->control_2 = 0;
+ }
+
+ /* Note:
+ * it is the responsibility of the upper layers, and not HAL,
+ * to detect and skip zero-size fragments.
+ */
+ xge_assert(size > 0);
+ xge_assert(frag_idx < txdl_priv->alloc_frags);
+
+ txdp->buffer_pointer = (u64)dma_pointer;
+ txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(size);
+ txdl_priv->bytes_sent += size;
+ fifo->channel.stats.total_buffers++;
+ txdl_priv->frags++;
+}
+
+/**
+ * xge_hal_fifo_dtr_mss_set - Set MSS.
+ * @dtrh: Descriptor handle.
+ * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
+ * ULD, which in turn inserts the MSS into the @dtrh.
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
+ * xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
+ * and xge_hal_fifo_dtr_cksum_set_bits().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Xframe specification.
+ *
+ * See also: xge_hal_fifo_dtr_reserve(),
+ * xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss)
+{
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+
+ txdp->control_1 |= XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO);
+ txdp->control_1 |= XGE_HAL_TXD_TCP_LSO_MSS(mss);
+}
+
+/**
+ * xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
+ * @dtrh: Descriptor handle.
+ * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
+ * and/or TCP and/or UDP.
+ *
+ * Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
+ * descriptor.
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
+ * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
+ * and xge_hal_fifo_dtr_buffer_set().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Xframe specification.
+ *
+ * See also: xge_hal_fifo_dtr_reserve(),
+ * xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
+ * XGE_HAL_TXD_TX_CKO_TCP_EN.
+ * Usage: See ex_xmit{}.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits)
+{
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+
+ txdp->control_2 |= cksum_bits;
+}
+
+
+/**
+ * xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
+ * @dtrh: Descriptor handle.
+ * @vlan_tag: 16bit VLAN tag.
+ *
+ * Insert VLAN tag into specified transmit descriptor.
+ * The actual insertion of the tag into outgoing frame is done by the hardware.
+ * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
+xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag)
+{
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+
+ txdp->control_2 |= XGE_HAL_TXD_VLAN_ENABLE;
+ txdp->control_2 |= XGE_HAL_TXD_VLAN_TAG(vlan_tag);
+}
+
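+/*
+ * Usage sketch for the TxDL "preparation" calls above. A minimal,
+ * single-fragment illustration rather than HAL code: the reserve/post
+ * signatures follow the See-also references, and mbuf_dma_addr and
+ * mbuf_len are hypothetical ULD-side values.
+ *
+ *	xge_hal_dtr_h dtrh;
+ *
+ *	if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK)
+ *		return;
+ *
+ *	xge_hal_fifo_dtr_buffer_set(channelh, dtrh, 0, mbuf_dma_addr,
+ *	    mbuf_len);
+ *	xge_hal_fifo_dtr_cksum_set_bits(dtrh,
+ *	    XGE_HAL_TXD_TX_CKO_IPV4_EN | XGE_HAL_TXD_TX_CKO_TCP_EN);
+ *	xge_hal_fifo_dtr_mss_set(dtrh, 1460);
+ *	xge_hal_fifo_dtr_vlan_set(dtrh, 100);
+ *	xge_hal_fifo_dtr_post(channelh, dtrh);
+ */
+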
+/**
+ * xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
+ * @channelh: Channel handle.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
+{
+ xge_hal_fifo_txd_t *txdp;
+ xge_hal_dtr_h dtrh;
+
+ __hal_channel_dtr_try_complete(channelh, &dtrh);
+ txdp = (xge_hal_fifo_txd_t *)dtrh;
+ if (txdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+ /* check whether host owns it */
+ if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
+ xge_assert(txdp->host_control!=0);
+ return XGE_HAL_OK;
+ }
+
+ /* no more completions */
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
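+
+/*
+ * Usage sketch (illustrative only; the body of the if-statement stands
+ * for whatever completion processing the ULD performs):
+ *
+ *	if (xge_hal_fifo_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
+ *		at least one TxDL is back with the host: run the ULD
+ *		completion path to reclaim it and free its buffers
+ *	}
+ */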
diff --git a/sys/dev/nxge/xgehal/xgehal-fifo.c b/sys/dev/nxge/xgehal/xgehal-fifo.c
new file mode 100644
index 0000000..de6befd
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-fifo.c
@@ -0,0 +1,568 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-fifo.c
+ *
+ * Description: fifo object implementation
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-fifo.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+static xge_hal_status_e
+__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
+ void *memblock,
+ int memblock_index,
+ xge_hal_mempool_dma_t *dma_object,
+ void *item,
+ int index,
+ int is_last,
+ void *userdata)
+{
+ int memblock_item_idx;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
+
+ xge_assert(item);
+ txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index,
+ item,
+ &memblock_item_idx);
+ xge_assert(txdl_priv);
+
+ /* pre-format HAL's TxDL's private */
+ txdl_priv->dma_offset = (char*)item - (char*)memblock;
+ txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
+ txdl_priv->dma_handle = dma_object->handle;
+ txdl_priv->memblock = memblock;
+ txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item;
+ txdl_priv->next_txdl_priv = NULL;
+ txdl_priv->dang_txdl = NULL;
+ txdl_priv->dang_frags = 0;
+ txdl_priv->alloc_frags = 0;
+
+#ifdef XGE_DEBUG_ASSERT
+ txdl_priv->dma_object = dma_object;
+#endif
+ txdp->host_control = (u64)(ulong_t)txdl_priv;
+
+#ifdef XGE_HAL_ALIGN_XMIT
+ txdl_priv->align_vaddr = NULL;
+ txdl_priv->align_dma_addr = (dma_addr_t)0;
+
+#ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT
+ {
+ xge_hal_status_e status;
+ if (fifo->config->alignment_size) {
+			status = __hal_fifo_dtr_align_alloc_map(fifo, txdp);
+ if (status != XGE_HAL_OK) {
+ xge_debug_mm(XGE_ERR,
+ "align buffer[%d] %d bytes, status %d",
+ index,
+ fifo->align_size,
+ status);
+ return status;
+ }
+ }
+ }
+#endif
+#endif
+
+ if (fifo->channel.dtr_init) {
+ fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
+ fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
+ }
+
+ return XGE_HAL_OK;
+}
+
+
+static xge_hal_status_e
+__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
+ void *memblock,
+ int memblock_index,
+ xge_hal_mempool_dma_t *dma_object,
+ void *item,
+ int index,
+ int is_last,
+ void *userdata)
+{
+ int memblock_item_idx;
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+#ifdef XGE_HAL_ALIGN_XMIT
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
+#endif
+
+ xge_assert(item);
+
+ txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index,
+ item,
+ &memblock_item_idx);
+ xge_assert(txdl_priv);
+
+#ifdef XGE_HAL_ALIGN_XMIT
+ if (fifo->config->alignment_size) {
+ if (txdl_priv->align_dma_addr != 0) {
+ xge_os_dma_unmap(fifo->channel.pdev,
+ txdl_priv->align_dma_handle,
+ txdl_priv->align_dma_addr,
+ fifo->align_size,
+ XGE_OS_DMA_DIR_TODEVICE);
+
+ txdl_priv->align_dma_addr = 0;
+ }
+
+ if (txdl_priv->align_vaddr != NULL) {
+ xge_os_dma_free(fifo->channel.pdev,
+ txdl_priv->align_vaddr,
+ fifo->align_size,
+ &txdl_priv->align_dma_acch,
+ &txdl_priv->align_dma_handle);
+
+ txdl_priv->align_vaddr = NULL;
+ }
+ }
+#endif
+
+ return XGE_HAL_OK;
+}
+
+xge_hal_status_e
+__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
+{
+ xge_hal_device_t *hldev;
+ xge_hal_status_e status;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_fifo_queue_t *queue;
+ int i, txdl_size, max_arr_index, mid_point;
+ xge_hal_dtr_h dtrh;
+
+ hldev = (xge_hal_device_t *)fifo->channel.devh;
+ fifo->config = &hldev->config.fifo;
+ queue = &fifo->config->queue[attr->post_qid];
+
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
+#endif
+#if defined(XGE_HAL_TX_MULTI_POST)
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ fifo->post_lock_ptr = &hldev->xena_post_lock;
+ } else {
+ xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
+ fifo->post_lock_ptr = &fifo->channel.post_lock;
+ }
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
+ fifo->post_lock_ptr = &hldev->xena_post_lock;
+ } else {
+ xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
+ hldev->irqh);
+ fifo->post_lock_ptr = &fifo->channel.post_lock;
+ }
+#endif
+
+ fifo->align_size =
+ fifo->config->alignment_size * fifo->config->max_aligned_frags;
+
+ /* Initializing the BAR1 address as the start of
+ * the FIFO queue pointer and as a location of FIFO control
+ * word. */
+ fifo->hw_pair =
+ (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
+ (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
+
+ /* apply "interrupts per txdl" attribute */
+ fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
+ if (queue->intr) {
+ fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
+ }
+ fifo->no_snoop_bits =
+ (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));
+
+ /*
+ * FIFO memory management strategy:
+ *
+ * A TxDL is split into three independent parts:
+ * - the set of TxD's
+ * - the TxD HAL private part
+ * - the upper layer private part
+ *
+ * Adaptive memory allocation is used, i.e. memory is allocated on
+ * demand with a size that fits into one memory block. One memory
+ * block may contain more than one TxDL. In the simple case the
+ * memory block size can be equal to the CPU page size; on more
+ * sophisticated OS's a memory block can be contiguous across
+ * several pages.
+ *
+ * During "reserve" operations more memory can be allocated on demand,
+ * for example due to a FIFO-full condition.
+ *
+ * The pool of memory blocks never shrinks, except in the
+ * __hal_fifo_close() routine, which essentially stops the channel
+ * and frees the resources.
+ */
+
+ /* TxDL common private size == TxDL private + ULD private */
+ fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
+ attr->per_dtr_space;
+ fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
+ __xge_os_cacheline_size) *
+ __xge_os_cacheline_size;
+
+ /* recompute txdl size to be cacheline aligned */
+ fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
+ txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
+ __xge_os_cacheline_size) * __xge_os_cacheline_size;
+
+ if (fifo->txdl_size != txdl_size)
+ xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d",
+ fifo->config->max_frags, fifo->txdl_size, txdl_size,
+ __xge_os_cacheline_size);
+
+ fifo->txdl_size = txdl_size;
+
+	/* since the dtr_init() callback is called from item_alloc(),
+	 * the channel's userdata may likewise be used prior to
+	 * channel_initialize() */
+ fifo->channel.dtr_init = attr->dtr_init;
+ fifo->channel.userdata = attr->userdata;
+ fifo->txdl_per_memblock = fifo->config->memblock_size /
+ fifo->txdl_size;
+
+ fifo->mempool = __hal_mempool_create(hldev->pdev,
+ fifo->config->memblock_size,
+ fifo->txdl_size,
+ fifo->priv_size,
+ queue->initial,
+ queue->max,
+ __hal_fifo_mempool_item_alloc,
+ __hal_fifo_mempool_item_free,
+ fifo);
+ if (fifo->mempool == NULL) {
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ status = __hal_channel_initialize(channelh, attr,
+ (void **) __hal_mempool_items_arr(fifo->mempool),
+ queue->initial, queue->max,
+ fifo->config->reserve_threshold);
+ if (status != XGE_HAL_OK) {
+ __hal_fifo_close(channelh);
+ return status;
+ }
+ xge_debug_fifo(XGE_TRACE,
+ "DTR reserve_length:%d reserve_top:%d\n"
+ "max_frags:%d reserve_threshold:%d\n"
+ "memblock_size:%d alignment_size:%d max_aligned_frags:%d",
+ fifo->channel.reserve_length, fifo->channel.reserve_top,
+ fifo->config->max_frags, fifo->config->reserve_threshold,
+ fifo->config->memblock_size, fifo->config->alignment_size,
+ fifo->config->max_aligned_frags);
+
+#ifdef XGE_DEBUG_ASSERT
+ for ( i = 0; i < fifo->channel.reserve_length; i++) {
+ xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
+ " handle:%p", i, fifo->channel.reserve_arr[i]);
+ }
+#endif
+
+ xge_assert(fifo->channel.reserve_length);
+ /* reverse the FIFO dtr array */
+ max_arr_index = fifo->channel.reserve_length - 1;
+	max_arr_index -= fifo->channel.reserve_top;
+ xge_assert(max_arr_index);
+ mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
+ for (i = 0; i < mid_point; i++) {
+ dtrh = fifo->channel.reserve_arr[i];
+ fifo->channel.reserve_arr[i] =
+ fifo->channel.reserve_arr[max_arr_index - i];
+ fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
+ }
+
+#ifdef XGE_DEBUG_ASSERT
+ for ( i = 0; i < fifo->channel.reserve_length; i++) {
+ xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
+ " handle:%p", i, fifo->channel.reserve_arr[i]);
+ }
+#endif
+
+ return XGE_HAL_OK;
+}
+
+void
+__hal_fifo_close(xge_hal_channel_h channelh)
+{
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;
+
+ if (fifo->mempool) {
+ __hal_mempool_destroy(fifo->mempool);
+ }
+
+ __hal_channel_terminate(channelh);
+
+#if defined(XGE_HAL_TX_MULTI_RESERVE)
+ xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev);
+#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev);
+#endif
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+#if defined(XGE_HAL_TX_MULTI_POST)
+ xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
+#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
+ xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
+ hldev->pdev);
+#endif
+ }
+}
+
+void
+__hal_fifo_hw_initialize(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64* tx_fifo_partitions[4];
+ u64* tx_fifo_wrr[5];
+ u64 tx_fifo_wrr_value[5];
+ u64 val64, part0;
+ int i;
+
+ /* Tx DMA Initialization */
+
+ tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
+ tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
+ tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
+ tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;
+
+ tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
+ tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
+ tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
+ tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
+ tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;
+
+ tx_fifo_wrr_value[0] = XGE_HAL_FIFO_WRR_0;
+ tx_fifo_wrr_value[1] = XGE_HAL_FIFO_WRR_1;
+ tx_fifo_wrr_value[2] = XGE_HAL_FIFO_WRR_2;
+ tx_fifo_wrr_value[3] = XGE_HAL_FIFO_WRR_3;
+ tx_fifo_wrr_value[4] = XGE_HAL_FIFO_WRR_4;
+
+ /* Note: WRR calendar must be configured before the transmit
+ * FIFOs are enabled! page 6-77 user guide */
+
+ if (!hldev->config.rts_qos_en) {
+ /* all zeroes for Round-Robin */
+ for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
+ tx_fifo_wrr[i]);
+ }
+
+ /* reset all of them but '0' */
+ for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
+ tx_fifo_partitions[i]);
+ }
+ } else { /* Change the default settings */
+
+ for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
+ }
+ }
+
+ /* configure only configured FIFOs */
+ val64 = 0; part0 = 0;
+ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ int reg_half = i % 2;
+ int reg_num = i / 2;
+
+ if (hldev->config.fifo.queue[i].configured) {
+ int priority = hldev->config.fifo.queue[i].priority;
+ val64 |=
+ vBIT((hldev->config.fifo.queue[i].max-1),
+ (((reg_half) * 32) + 19),
+ 13) | vBIT(priority, (((reg_half)*32) + 5), 3);
+ }
+
+		/* NOTE: perform the write once per full 64-bit partition
+		 * register, i.e. on every second (odd) queue index;
+		 * partition '0' itself is written once at the end */
+ if (reg_half) {
+ if (reg_num == 0) {
+ /* skip partition '0', must write it once at
+ * the end */
+ part0 = val64;
+ } else {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, tx_fifo_partitions[reg_num]);
+ xge_debug_fifo(XGE_TRACE,
+ "fifo partition_%d at: "
+ "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
+ reg_num, (unsigned long long)(ulong_t)
+ tx_fifo_partitions[reg_num],
+ (unsigned long long)val64);
+ }
+ val64 = 0;
+ }
+ }
+
+ part0 |= BIT(0); /* to enable the FIFO partition. */
+ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
+ tx_fifo_partitions[0]);
+ xge_os_wmb();
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
+ tx_fifo_partitions[0]);
+ xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
+ "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
+ (unsigned long long)(ulong_t)
+ tx_fifo_partitions[0],
+ (unsigned long long) part0);
+
+ /*
+ * Initialization of Tx_PA_CONFIG register to ignore packet
+ * integrity checking.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->tx_pa_cfg);
+ val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
+ XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
+ XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
+ XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->tx_pa_cfg);
+
+ /*
+ * Assign MSI-X vectors
+ */
+ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ xge_list_t *item;
+ xge_hal_channel_t *channel = NULL;
+
+ if (!hldev->config.fifo.queue[i].configured ||
+ !hldev->config.fifo.queue[i].intr_vector ||
+		    hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
+ continue;
+
+ /* find channel */
+ xge_list_for_each(item, &hldev->free_channels) {
+ xge_hal_channel_t *tmp;
+ tmp = xge_container_of(item, xge_hal_channel_t,
+ item);
+ if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
+ tmp->post_qid == i) {
+ channel = tmp;
+ break;
+ }
+ }
+
+ if (channel) {
+ xge_hal_channel_msix_set(channel,
+ hldev->config.fifo.queue[i].intr_vector);
+ }
+ }
+
+ xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
+}
+
+#ifdef XGE_HAL_ALIGN_XMIT
+void
+__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+
+ txdl_priv = __hal_fifo_txdl_priv(txdp);
+
+ if (txdl_priv->align_dma_addr != 0) {
+ xge_os_dma_unmap(fifo->channel.pdev,
+ txdl_priv->align_dma_handle,
+ txdl_priv->align_dma_addr,
+ fifo->align_size,
+ XGE_OS_DMA_DIR_TODEVICE);
+
+ txdl_priv->align_dma_addr = 0;
+ }
+
+ if (txdl_priv->align_vaddr != NULL) {
+ xge_os_dma_free(fifo->channel.pdev,
+ txdl_priv->align_vaddr,
+ fifo->align_size,
+ &txdl_priv->align_dma_acch,
+ &txdl_priv->align_dma_handle);
+
+
+ txdl_priv->align_vaddr = NULL;
+ }
+}
+
+xge_hal_status_e
+__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_fifo_txdl_priv_t *txdl_priv;
+ xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
+ xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+
+ xge_assert(txdp);
+
+ txdl_priv = __hal_fifo_txdl_priv(txdp);
+
+ /* allocate alignment DMA-buffer */
+ txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev,
+ fifo->align_size,
+ XGE_OS_DMA_CACHELINE_ALIGNED |
+ XGE_OS_DMA_STREAMING,
+ &txdl_priv->align_dma_handle,
+ &txdl_priv->align_dma_acch);
+ if (txdl_priv->align_vaddr == NULL) {
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ /* map it */
+ txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
+ txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
+ fifo->align_size,
+ XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
+
+ if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
+ __hal_fifo_dtr_align_free_unmap(channelh, dtrh);
+ return XGE_HAL_ERR_OUT_OF_MAPPING;
+ }
+
+ return XGE_HAL_OK;
+}
+#endif
+
+
diff --git a/sys/dev/nxge/xgehal/xgehal-mgmt.c b/sys/dev/nxge/xgehal/xgehal-mgmt.c
new file mode 100644
index 0000000..3e30e25
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-mgmt.c
@@ -0,0 +1,1772 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mgmt.c
+ *
+ * Description: Xframe-family management facility implementation
+ *
+ * Created: 1 September 2004
+ */
+
+#include <dev/nxge/include/xgehal-mgmt.h>
+#include <dev/nxge/include/xgehal-driver.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+/**
+ * xge_hal_mgmt_about - Retrieve about info.
+ * @devh: HAL device handle.
+ * @about_info: Filled in by HAL. See xge_hal_mgmt_about_info_t{}.
+ * @size: Size of the @about_info buffer. HAL will return error if the
+ * size is smaller than sizeof(xge_hal_mgmt_about_info_t).
+ *
+ * Retrieve information such as PCI device and vendor IDs, board
+ * revision number, HAL version number, etc.
+ *
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ * XGE_HAL_FAIL - Failed to retrieve the information.
+ *
+ * See also: xge_hal_mgmt_about_info_t{}.
+ */
+xge_hal_status_e
+xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info,
+ int size)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_mgmt_about_info_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, vendor_id),
+ &about_info->vendor);
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, device_id),
+ &about_info->device);
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
+ &about_info->subsys_vendor);
+
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
+ &about_info->subsys_device);
+
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, revision),
+ &about_info->board_rev);
+
+ xge_os_strcpy(about_info->vendor_name, XGE_DRIVER_VENDOR);
+ xge_os_strcpy(about_info->chip_name, XGE_CHIP_FAMILY);
+ xge_os_strcpy(about_info->media, XGE_SUPPORTED_MEDIA_0);
+
+ xge_os_strcpy(about_info->hal_major, XGE_HAL_VERSION_MAJOR);
+ xge_os_strcpy(about_info->hal_minor, XGE_HAL_VERSION_MINOR);
+ xge_os_strcpy(about_info->hal_fix, XGE_HAL_VERSION_FIX);
+ xge_os_strcpy(about_info->hal_build, XGE_HAL_VERSION_BUILD);
+
+ xge_os_strcpy(about_info->ll_major, XGELL_VERSION_MAJOR);
+ xge_os_strcpy(about_info->ll_minor, XGELL_VERSION_MINOR);
+ xge_os_strcpy(about_info->ll_fix, XGELL_VERSION_FIX);
+ xge_os_strcpy(about_info->ll_build, XGELL_VERSION_BUILD);
+
+ about_info->transponder_temperature =
+ xge_hal_read_xfp_current_temp(devh);
+
+ return XGE_HAL_OK;
+}
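+
+/*
+ * Usage sketch (illustrative only). The @size argument must be exactly
+ * sizeof(xge_hal_mgmt_about_info_t), otherwise the call fails with
+ * XGE_HAL_ERR_VERSION_CONFLICT; the same size contract applies to the
+ * other xge_hal_mgmt_*() getters below.
+ *
+ *	xge_hal_mgmt_about_info_t info;
+ *
+ *	if (xge_hal_mgmt_about(devh, &info, sizeof(info)) == XGE_HAL_OK)
+ *		xge_os_printf("HAL %s.%s, chip %s", info.hal_major,
+ *		    info.hal_minor, info.chip_name);
+ */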
+
+/**
+ * xge_hal_mgmt_reg_read - Read Xframe register.
+ * @devh: HAL device handle.
+ * @bar_id: 0 - for BAR0, 1- for BAR1.
+ * @offset: Register offset in the Base Address Register (BAR) space.
+ * @value: Register value. Returned by HAL.
+ * Read Xframe register.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_BAR_ID - BAR id is not valid.
+ *
+ * See also: xge_hal_aux_bar0_read(), xge_hal_aux_bar1_read().
+ */
+xge_hal_status_e
+xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset,
+ u64 *value)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (bar_id == 0) {
+ if (offset > sizeof(xge_hal_pci_bar0_t)-8) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+ *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ (void *)(hldev->bar0 + offset));
+ } else if (bar_id == 1 &&
+ (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA ||
+ xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) {
+ int i;
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) {
+ if (offset == i*0x2000 || offset == i*0x2000+0x18) {
+ break;
+ }
+ }
+ if (i == XGE_HAL_MAX_FIFO_NUM_HERC) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+ *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh1,
+ (void *)(hldev->bar1 + offset));
+ } else if (bar_id == 1) {
+ /* FIXME: check TITAN BAR1 offsets */
+ } else {
+ return XGE_HAL_ERR_INVALID_BAR_ID;
+ }
+
+ return XGE_HAL_OK;
+}
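+
+/*
+ * Usage sketch (illustrative only; adapter_status is assumed here to be
+ * a member of xge_hal_pci_bar0_t{}, so the offset can be computed with
+ * xge_offsetof() as elsewhere in this file):
+ *
+ *	u64 val64;
+ *
+ *	if (xge_hal_mgmt_reg_read(devh, 0,
+ *	    xge_offsetof(xge_hal_pci_bar0_t, adapter_status),
+ *	    &val64) == XGE_HAL_OK)
+ *		examine val64
+ */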
+
+/**
+ * xge_hal_mgmt_reg_write - Write Xframe register.
+ * @devh: HAL device handle.
+ * @bar_id: 0 - for BAR0, 1- for BAR1.
+ * @offset: Register offset in the Base Address Register (BAR) space.
+ * @value: Register value.
+ *
+ * Write Xframe register.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_BAR_ID - BAR id is not valid.
+ *
+ * See also: xge_hal_aux_bar0_write().
+ */
+xge_hal_status_e
+xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset,
+ u64 value)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (bar_id == 0) {
+ if (offset > sizeof(xge_hal_pci_bar0_t)-8) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, value,
+ (void *)(hldev->bar0 + offset));
+ } else if (bar_id == 1 &&
+ (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA ||
+ xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) {
+ int i;
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) {
+ if (offset == i*0x2000 || offset == i*0x2000+0x18) {
+ break;
+ }
+ }
+ if (i == XGE_HAL_MAX_FIFO_NUM_HERC) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh1, value,
+ (void *)(hldev->bar1 + offset));
+ } else if (bar_id == 1) {
+ /* FIXME: check TITAN BAR1 offsets */
+ } else {
+ return XGE_HAL_ERR_INVALID_BAR_ID;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_hw_stats - Get Xframe hardware statistics.
+ * @devh: HAL device handle.
+ * @hw_stats: Hardware statistics. Returned by HAL.
+ * See xge_hal_stats_hw_info_t{}.
+ * @size: Size of the @hw_stats buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_stats_hw_info_t).
+ * Get Xframe hardware statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_mgmt_sw_stats().
+ */
+xge_hal_status_e
+xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats,
+ int size)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_stats_hw_info_t *hw_info;
+
+ xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN);
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_stats_hw_info_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ if ((status = xge_hal_stats_hw (devh, &hw_info)) != XGE_HAL_OK) {
+ return status;
+ }
+
+ xge_os_memcpy(hw_stats, hw_info, sizeof(xge_hal_stats_hw_info_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_hw_stats_off - Get one field of the hardware statistics.
+ * @devh: HAL device handle.
+ * @off: Offset (in bytes) of the requested field within
+ * xge_hal_stats_hw_info_t{}.
+ * @size: Size (in bytes) of the field to copy; 8 bytes at most.
+ * @out: Buffer the field value is copied into. Filled in by HAL.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_mgmt_sw_stats().
+ */
+xge_hal_status_e
+xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_stats_hw_info_t *hw_info;
+
+ xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN);
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (off > sizeof(xge_hal_stats_hw_info_t)-4 ||
+ size > 8) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+
+ if ((status = xge_hal_stats_hw (devh, &hw_info)) != XGE_HAL_OK) {
+ return status;
+ }
+
+ xge_os_memcpy(out, (char*)hw_info + off, size);
+
+ return XGE_HAL_OK;
+}
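+
+/*
+ * Usage sketch (illustrative only): copy the first 8 bytes of the
+ * hardware statistics block. A real caller would pass the byte offset
+ * of the counter of interest within xge_hal_stats_hw_info_t{}.
+ *
+ *	u64 first_word;
+ *
+ *	(void) xge_hal_mgmt_hw_stats_off(devh, 0, 8, (char *)&first_word);
+ */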
+
+/**
+ * xge_hal_mgmt_pcim_stats - Get Titan PCIM statistics.
+ * @devh: HAL device handle.
+ * @pcim_stats: PCIM statistics. Returned by HAL.
+ * See xge_hal_stats_pcim_info_t{}.
+ * @size: Size of the @pcim_stats buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_stats_pcim_info_t).
+ * Get Titan PCIM statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_mgmt_sw_stats().
+ */
+xge_hal_status_e
+xge_hal_mgmt_pcim_stats(xge_hal_device_h devh,
+ xge_hal_mgmt_pcim_stats_t *pcim_stats, int size)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_stats_pcim_info_t *pcim_info;
+
+ xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN);
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_stats_pcim_info_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ if ((status = xge_hal_stats_pcim (devh, &pcim_info)) != XGE_HAL_OK) {
+ return status;
+ }
+
+ xge_os_memcpy(pcim_stats, pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_pcim_stats_off - Get one field of the PCIM statistics.
+ * @devh: HAL device handle.
+ * @off: Offset (in bytes) of the requested field within
+ * xge_hal_stats_pcim_info_t{}.
+ * @size: Size (in bytes) of the field to copy; 8 bytes at most.
+ * @out: Buffer the field value is copied into. Filled in by HAL.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_mgmt_sw_stats().
+ */
+xge_hal_status_e
+xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size,
+ char *out)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_stats_pcim_info_t *pcim_info;
+
+ xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN);
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (off > sizeof(xge_hal_stats_pcim_info_t)-8 ||
+ size > 8) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+
+ if ((status = xge_hal_stats_pcim (devh, &pcim_info)) != XGE_HAL_OK) {
+ return status;
+ }
+
+ xge_os_memcpy(out, (char*)pcim_info + off, size);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_sw_stats - Get per-device software statistics.
+ * @devh: HAL device handle.
+ * @sw_stats: Hardware statistics. Returned by HAL.
+ * See xge_hal_stats_sw_err_t{}.
+ * @size: Size of the @sw_stats buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_stats_sw_err_t).
+ * Get device software statistics, including ECC and Parity error
+ * counters, etc.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_stats_sw_err_t{}, xge_hal_mgmt_hw_stats().
+ */
+xge_hal_status_e
+xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *sw_stats,
+ int size)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_stats_sw_err_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled) {
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+ /* Updating xpak stats value */
+ __hal_updt_stats_xpak(hldev);
+
+ xge_os_memcpy(sw_stats, &hldev->stats.sw_dev_err_stats,
+ sizeof(xge_hal_stats_sw_err_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_device_stats - Get HAL device statistics.
+ * @devh: HAL device handle.
+ * @device_stats: HAL device "soft" statistics. Maintained by HAL itself.
+ * (as opposed to xge_hal_mgmt_hw_stats() - those are
+ * maintained by the Xframe hardware).
+ * Returned by HAL.
+ * See xge_hal_stats_device_info_t{}.
+ * @size: Size of the @device_stats buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_stats_device_info_t).
+ *
+ * Get HAL (layer) statistic counters.
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ */
+xge_hal_status_e
+xge_hal_mgmt_device_stats(xge_hal_device_h devh,
+ xge_hal_mgmt_device_stats_t *device_stats, int size)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_stats_device_info_t *device_info;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_stats_device_info_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ if ((status = xge_hal_stats_device (devh, &device_info)) !=
+ XGE_HAL_OK) {
+ return status;
+ }
+
+ xge_os_memcpy(device_stats, device_info,
+ sizeof(xge_hal_stats_device_info_t));
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_update_ring_bump - Update the ring bump counter for the
+ * particular channel.
+ * @hldev: HAL device handle.
+ * @queue: the queue who's data is to be collected.
+ * @chinfo: pointer to the statistics structure of the given channel.
+ * Usage: See xge_hal_aux_stats_hal_read{}
+ */
+
+static void
+__hal_update_ring_bump(xge_hal_device_t *hldev, int queue,
+ xge_hal_stats_channel_info_t *chinfo)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 rbc = 0;
+ int reg = (queue / 4);
+ void * addr;
+
+ addr = (reg == 1)? (&bar0->ring_bump_counter2) :
+ (&bar0->ring_bump_counter1);
+ rbc = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, addr);
+ chinfo->ring_bump_cnt = XGE_HAL_RING_BUMP_CNT(queue, rbc);
+}
+
+/**
+ * xge_hal_mgmt_channel_stats - Get HAL channel statistics.
+ * @channelh: HAL channel handle.
+ * @channel_stats: HAL channel statistics. Maintained by HAL itself
+ * (as opposed to xge_hal_mgmt_hw_stats() - those are
+ * maintained by the Xframe hardware).
+ * Returned by HAL.
+ * See xge_hal_stats_channel_info_t{}.
+ * @size: Size of the @channel_stats buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_mgmt_channel_stats_t).
+ *
+ * Get HAL per-channel statistic counters.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ */
+xge_hal_status_e
+xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
+ xge_hal_mgmt_channel_stats_t *channel_stats, int size)
+{
+ xge_hal_status_e status;
+ xge_hal_stats_channel_info_t *channel_info;
+ xge_hal_channel_t *channel = (xge_hal_channel_t* ) channelh;
+
+ if (size != sizeof(xge_hal_stats_channel_info_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ if ((status = xge_hal_stats_channel (channelh, &channel_info)) !=
+ XGE_HAL_OK) {
+ return status;
+ }
+
+ if (xge_hal_device_check_id(channel->devh) == XGE_HAL_CARD_HERC) {
+ __hal_update_ring_bump( (xge_hal_device_t *) channel->devh, channel->post_qid, channel_info);
+ }
+
+ xge_os_memcpy(channel_stats, channel_info,
+ sizeof(xge_hal_stats_channel_info_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_pcireg_read - Read PCI configuration at a specified
+ * offset.
+ * @devh: HAL device handle.
+ * @offset: Offset in the 256 byte PCI configuration space.
+ * @value_bits: 8, 16, or 32 (bits) to read.
+ * @value: Value returned by HAL.
+ *
+ * Read PCI configuration, given device and offset in the PCI space.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE - Invalid bits size. Valid
+ * values(8/16/32).
+ *
+ */
+xge_hal_status_e
+xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset,
+ int value_bits, u32 *value)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (offset > sizeof(xge_hal_pci_config_t)-value_bits/8) {
+ return XGE_HAL_ERR_INVALID_OFFSET;
+ }
+
+ if (value_bits == 8) {
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh, offset, (u8*)value);
+ } else if (value_bits == 16) {
+ xge_os_pci_read16(hldev->pdev, hldev->cfgh, offset,
+ (u16*)value);
+ } else if (value_bits == 32) {
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh, offset, value);
+ } else {
+ return XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE;
+ }
+
+ return XGE_HAL_OK;
+}
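+
+/*
+ * Usage sketch (illustrative only): read the 16-bit PCI vendor ID at
+ * configuration-space offset 0.
+ *
+ *	u32 val = 0;
+ *
+ *	if (xge_hal_mgmt_pcireg_read(devh, 0, 16, &val) == XGE_HAL_OK)
+ *		the vendor ID is in the low 16 bits of val
+ */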
+
+/**
+ * xge_hal_mgmt_device_config - Retrieve device configuration.
+ * @devh: HAL device handle.
+ * @dev_config: Device configuration, see xge_hal_device_config_t{}.
+ * @size: Size of the @dev_config buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_mgmt_device_config_t).
+ *
+ * Get device configuration. Permits retrieving, at run-time, configuration
+ * values that were used to initialize and configure the device.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_device_config_t{}, xge_hal_mgmt_driver_config().
+ */
+xge_hal_status_e
+xge_hal_mgmt_device_config(xge_hal_device_h devh,
+ xge_hal_mgmt_device_config_t *dev_config, int size)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_mgmt_device_config_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ xge_os_memcpy(dev_config, &hldev->config,
+ sizeof(xge_hal_device_config_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_driver_config - Retrieve driver configuration.
+ * @drv_config: Device configuration, see xge_hal_driver_config_t{}.
+ * @size: Size of the @dev_config buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_mgmt_driver_config_t).
+ *
+ * Get driver configuration. Permits retrieving, at run-time, configuration
+ * values that were used to configure the driver at load-time.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - HAL is not initialized.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ * See also: xge_hal_driver_config_t{}, xge_hal_mgmt_device_config().
+ */
+xge_hal_status_e
+xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config, int size)
+{
+
+ if (g_xge_hal_driver == NULL) {
+ return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
+ }
+
+ if (size != sizeof(xge_hal_mgmt_driver_config_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ xge_os_memcpy(drv_config, &g_xge_hal_driver->config,
+ sizeof(xge_hal_mgmt_driver_config_t));
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_mgmt_pci_config - Retrieve PCI configuration.
+ * @devh: HAL device handle.
+ * @pci_config: 256 byte long buffer for PCI configuration space.
+ * @size: Size of the @pci_config buffer. HAL will return an error
+ * if the size is smaller than sizeof(xge_hal_mgmt_pci_config_t).
+ *
+ * Get PCI configuration. Permits retrieving, at run-time, the contents
+ * of the device's PCI configuration space.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version is not matching.
+ *
+ */
+xge_hal_status_e
+xge_hal_mgmt_pci_config(xge_hal_device_h devh,
+ xge_hal_mgmt_pci_config_t *pci_config, int size)
+{
+ int i;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ if (size != sizeof(xge_hal_mgmt_pci_config_t)) {
+ return XGE_HAL_ERR_VERSION_CONFLICT;
+ }
+
+ /* refresh PCI config space */
+ for (i = 0; i < 0x68/4+1; i++) {
+ xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
+ (u32*)&hldev->pci_config_space + i);
+ }
+
+ xge_os_memcpy(pci_config, &hldev->pci_config_space,
+ sizeof(xge_hal_mgmt_pci_config_t));
+
+ return XGE_HAL_OK;
+}
+
+#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
+/**
+ * xge_hal_mgmt_trace_read - Read trace buffer contents.
+ * @buffer: Buffer to store the trace buffer contents.
+ * @buf_size: Size of the buffer.
+ * @offset: Offset in the internal trace buffer to read data.
+ * @read_length: Size of the valid data in the buffer.
+ *
+ * Read HAL trace buffer contents starting from the offset
+ * up to the size of the buffer or until EOF is reached.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_EOF_TRACE_BUF - No more data in the trace buffer.
+ *
+ */
+xge_hal_status_e
+xge_hal_mgmt_trace_read (char *buffer,
+ unsigned buf_size,
+ unsigned *offset,
+ unsigned *read_length)
+{
+ int data_offset;
+ int start_offset;
+
+ if ((g_xge_os_tracebuf == NULL) ||
+ (g_xge_os_tracebuf->offset == g_xge_os_tracebuf->size - 2)) {
+ return XGE_HAL_EOF_TRACE_BUF;
+ }
+
+ data_offset = g_xge_os_tracebuf->offset + 1;
+
+ if (*offset >= (unsigned)xge_os_strlen(g_xge_os_tracebuf->data +
+ data_offset)) {
+
+ return XGE_HAL_EOF_TRACE_BUF;
+ }
+
+ xge_os_memzero(buffer, buf_size);
+
+ start_offset = data_offset + *offset;
+ *read_length = xge_os_strlen(g_xge_os_tracebuf->data +
+ start_offset);
+
+ if (*read_length >= buf_size) {
+ *read_length = buf_size - 1;
+ }
+
+ xge_os_memcpy(buffer, g_xge_os_tracebuf->data + start_offset,
+ *read_length);
+
+ *offset += *read_length;
+ (*read_length) ++;
+
+ return XGE_HAL_OK;
+}
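+
+/*
+ * Usage sketch (illustrative only): drain the trace buffer in fixed-size
+ * chunks. The caller keeps @offset across calls and HAL advances it; the
+ * loop ends when HAL reports XGE_HAL_EOF_TRACE_BUF.
+ *
+ *	char buf[256];
+ *	unsigned off = 0, len;
+ *
+ *	while (xge_hal_mgmt_trace_read(buf, sizeof(buf), &off, &len) ==
+ *	    XGE_HAL_OK)
+ *		emit len bytes from buf
+ */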
+
+#endif
+
+/**
+ * xge_hal_restore_link_led - Restore link LED to its original state.
+ * @devh: HAL device handle.
+ */
+void
+xge_hal_restore_link_led(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ /*
+ * If the current link state is UP, switch on LED else make it
+ * off.
+ */
+
+ /*
+ * For Xena 3 and lower revision cards, adapter control needs to be
+ * used for making LED ON/OFF.
+ */
+ if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) &&
+ (xge_hal_device_rev(hldev) <= 3)) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ if (hldev->link_state == XGE_HAL_LINK_UP) {
+ val64 |= XGE_HAL_ADAPTER_LED_ON;
+ } else {
+ val64 &= ~XGE_HAL_ADAPTER_LED_ON;
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+ return;
+ }
+
+ /*
+ * Use beacon control register to control the LED.
+ * LED link output corresponds to bit 8 of the beacon control
+ * register. Note that, in the case of Xena, beacon control register
+ * represents the gpio control register. In the case of Herc, LED
+ * handling is done by beacon control register as opposed to gpio
+ * control register in Xena. Beacon control is used only to toggle
+ * and the value written into it does not depend on the link state.
+ * It is up to the ULD to toggle the LED an even number of times,
+ * which brings the LED back to its original state.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->beacon_control);
+ val64 |= 0x0000800000000000ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->beacon_control);
+}
+
+/**
+ * xge_hal_flick_link_led - Flick (blink) link LED.
+ * @devh: HAL device handle.
+ *
+ * Depending on the card revision flicker the link LED by using the
+ * beacon control or the adapter_control register.
+ */
+void
+xge_hal_flick_link_led(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64 = 0;
+
+ /*
+ * For Xena 3 and lower revision cards, adapter control needs to be
+ * used for making LED ON/OFF.
+ */
+ if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) &&
+ (xge_hal_device_rev(hldev) <= 3)) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 ^= XGE_HAL_ADAPTER_LED_ON;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+ return;
+ }
+
+ /*
+ * Use beacon control register to control the Link LED.
+ * Note that, in the case of Xena, beacon control register represents
+ * the gpio control register. In the case of Herc, LED handling is
+ * done by beacon control register as opposed to gpio control register
+ * in Xena.
+ */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->beacon_control);
+ val64 ^= XGE_HAL_GPIO_CTRL_GPIO_0;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->beacon_control);
+}
+
+/**
+ * xge_hal_read_eeprom - Read 4 bytes of data from user given offset.
+ * @devh: HAL device handle.
+ * @off: Offset from which the data is to be read.
+ * @data: output parameter where the data is stored.
+ *
+ * Read 4 bytes of data from the user given offset and return the
+ * read data.
+ * Note: only the part of the EEPROM visible through the I2C bus can
+ * be read.
+ * Returns: XGE_HAL_OK on success, XGE_HAL_FAIL on failure.
+ */
+xge_hal_status_e
+xge_hal_read_eeprom(xge_hal_device_h devh, int off, u32* data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+ u32 exit_cnt = 0;
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+
+ val64 = XGE_HAL_I2C_CONTROL_DEV_ID(XGE_DEV_ID) |
+ XGE_HAL_I2C_CONTROL_ADDR(off) |
+ XGE_HAL_I2C_CONTROL_BYTE_CNT(0x3) |
+ XGE_HAL_I2C_CONTROL_READ | XGE_HAL_I2C_CONTROL_CNTL_START;
+
+ __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control);
+
+ while (exit_cnt < 5) {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control);
+ if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) {
+ *data = XGE_HAL_I2C_CONTROL_GET_DATA(val64);
+ ret = XGE_HAL_OK;
+ break;
+ }
+ exit_cnt++;
+ }
+
+ return ret;
+}
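+
+/*
+ * Usage sketch (illustrative only): fetch the first 32-bit word of the
+ * I2C-visible part of the EEPROM.
+ *
+ *	u32 word = 0;
+ *
+ *	if (xge_hal_read_eeprom(devh, 0, &word) == XGE_HAL_OK)
+ *		word now holds EEPROM bytes 0..3
+ */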
+
+/*
+ * xge_hal_write_eeprom - Write the relevant part of the data value.
+ * @devh: HAL device handle.
+ * @off: Offset at which the data must be written.
+ * @data: The data that is to be written.
+ * @cnt: Number of bytes of the data that are actually to be written into
+ * the EEPROM (max of 3).
+ *
+ * Writes the relevant part of the data value into the EEPROM
+ * through the I2C bus.
+ * Returns: XGE_HAL_OK on success, XGE_HAL_FAIL on failure.
+ */
+
+xge_hal_status_e
+xge_hal_write_eeprom(xge_hal_device_h devh, int off, u32 data, int cnt)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+ u32 exit_cnt = 0;
+ u64 val64;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+
+ val64 = XGE_HAL_I2C_CONTROL_DEV_ID(XGE_DEV_ID) |
+ XGE_HAL_I2C_CONTROL_ADDR(off) |
+ XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) |
+ XGE_HAL_I2C_CONTROL_SET_DATA(data) |
+ XGE_HAL_I2C_CONTROL_CNTL_START;
+ __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control);
+
+ while (exit_cnt < 5) {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control);
+ if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) {
+ if (!(val64 & XGE_HAL_I2C_CONTROL_NACK))
+ ret = XGE_HAL_OK;
+ break;
+ }
+ exit_cnt++;
+ }
+
+ return ret;
+}
+
+/*
+ * xge_hal_register_test - Read and write into all clock domains.
+ * @devh: HAL device handle.
+ * @data: Variable that returns the result of each of the tests
+ * conducted by the driver.
+ *
+ * Read and write into all clock domains. The NIC has 3 clock domains;
+ * verify that registers in all three regions are accessible.
+ * Returns: XGE_HAL_OK on success.
+ */
+xge_hal_status_e
+xge_hal_register_test(xge_hal_device_h devh, u64 *data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64 = 0;
+ int fail = 0;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pif_rd_swapper_fb);
+ if (val64 != 0x123456789abcdefULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_TRACE, "Read Test level 1 fails");
+ }
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rmac_pause_cfg);
+ if (val64 != 0xc000ffff00000000ULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_TRACE, "Read Test level 2 fails");
+ }
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rx_queue_cfg);
+ if (val64 != 0x0808080808080808ULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_TRACE, "Read Test level 3 fails");
+ }
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xgxs_efifo_cfg);
+ if (val64 != 0x000000001923141EULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_TRACE, "Read Test level 4 fails");
+ }
+
+ val64 = 0x5A5A5A5A5A5A5A5AULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->xmsi_data);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_data);
+ if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_ERR, "Write Test level 1 fails");
+ }
+
+ val64 = 0xA5A5A5A5A5A5A5A5ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->xmsi_data);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->xmsi_data);
+ if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
+ fail = 1;
+ xge_debug_osdep(XGE_ERR, "Write Test level 2 fails");
+ }
+
+ *data = fail;
+ return XGE_HAL_OK;
+}
+
+/*
+ * xge_hal_rldram_test - Offline test for access to the RldRam chip on
+ * the NIC.
+ * @devh: HAL device handle.
+ * @data: Variable that returns the result of each of the tests
+ * conducted by the driver.
+ *
+ * This is one of the offline tests that checks read and write
+ * access to the RldRam chip on the NIC.
+ * Returns: XGE_HAL_OK on success.
+ */
+xge_hal_status_e
+xge_hal_rldram_test(xge_hal_device_h devh, u64 *data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+ int cnt, iteration = 0, test_pass = 0;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_control);
+ val64 &= ~XGE_HAL_ADAPTER_ECC_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->adapter_control);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_rldram_test_ctrl);
+ val64 |= XGE_HAL_MC_RLDRAM_TEST_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_ctrl);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_rldram_mrs);
+ val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE;
+	__hal_serial_mem_write64(hldev, val64, &bar0->mc_rldram_mrs);
+
+ val64 |= XGE_HAL_MC_RLDRAM_MRS_ENABLE;
+	__hal_serial_mem_write64(hldev, val64, &bar0->mc_rldram_mrs);
+
+ while (iteration < 2) {
+ val64 = 0x55555555aaaa0000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_d0);
+
+ val64 = 0xaaaa5a5555550000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_d1);
+
+ val64 = 0x55aaaaaaaa5a0000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_d2);
+
+ val64 = (u64) (0x0000003fffff0000ULL);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_add);
+
+
+ val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_ctrl);
+
+ val64 |=
+ XGE_HAL_MC_RLDRAM_TEST_MODE | XGE_HAL_MC_RLDRAM_TEST_WRITE |
+ XGE_HAL_MC_RLDRAM_TEST_GO;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_ctrl);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->mc_rldram_test_ctrl);
+ if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE)
+ break;
+ xge_os_mdelay(200);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_ctrl);
+
+ val64 |= XGE_HAL_MC_RLDRAM_TEST_MODE |
+ XGE_HAL_MC_RLDRAM_TEST_GO;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_test_ctrl);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->mc_rldram_test_ctrl);
+ if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE)
+ break;
+ xge_os_mdelay(500);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_rldram_test_ctrl);
+ if (val64 & XGE_HAL_MC_RLDRAM_TEST_PASS)
+ test_pass = 1;
+
+ iteration++;
+ }
+
+ if (!test_pass)
+ *data = 1;
+ else
+ *data = 0;
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * xge_hal_pma_loopback - Enable or disable PMA loopback
+ * @devh: HAL device handle.
+ * @enable: Boolean set to 1 to enable and 0 to disable.
+ *
+ * Enable or disable PMA loopback.
+ * Returns: XGE_HAL_OK on success.
+ */
+xge_hal_status_e
+xge_hal_pma_loopback( xge_hal_device_h devh, int enable )
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+ u16 data;
+
+ /*
+	 * This code is for MAC loopback.
+	 * It should be enabled through another parameter.
+ */
+#if 0
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ if ( enable )
+ {
+ val64 |= ( XGE_HAL_MAC_CFG_TMAC_LOOPBACK | XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
+ }
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
+ xge_os_mdelay(1);
+#endif
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control);
+
+ data = (u16)XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(val64);
+
+#define _HAL_LOOPBK_PMA 1
+
+ if( enable )
+ data |= 1;
+ else
+ data &= 0xfe;
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DATA(data) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ return XGE_HAL_OK;
+}
+
+u16
+xge_hal_mdio_read( xge_hal_device_h devh, u32 mmd_type, u64 addr )
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64 = 0x0;
+ u16 rval16 = 0x0;
+ u8 i = 0;
+
+ /* address transaction */
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+ do
+ {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+	} while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+		XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ /* Data transaction */
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ i = 0;
+
+ do
+ {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+	} while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+		XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ rval16 = (u16)XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(val64);
+
+ return rval16;
+}
+
+xge_hal_status_e
+xge_hal_mdio_write( xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value )
+{
+ u64 val64 = 0x0;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u8 i = 0;
+ /* address transaction */
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ do
+ {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+ } while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ /* Data transaction */
+
+ val64 = 0x0;
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DATA(value) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control);
+
+ i = 0;
+
+ do
+ {
+ val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+	} while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+		XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ return XGE_HAL_OK;
+}
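+
+#if 0
+/*
+ * Illustrative sketch, not part of the HAL: a read-modify-write of a
+ * PMA/PMD register built from the two helpers above. The register
+ * offset (0x0000) and bit mask (0x0001) are arbitrary example values,
+ * and example_mdio_rmw() is a hypothetical caller.
+ */
+static void
+example_mdio_rmw(xge_hal_device_h devh)
+{
+	u16 v;
+
+	v = xge_hal_mdio_read(devh, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x0000);
+	(void) xge_hal_mdio_write(devh, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR,
+		0x0000, (u32)(v | 0x0001));
+}
+#endif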
+
+/*
+ * xge_hal_eeprom_test - Verify that the EEPROM in the Xena can be
+ * programmed.
+ * @devh: HAL device handle.
+ * @data: variable that returns the result of each of the test conducted by
+ * the driver.
+ *
+ * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
+ * register.
+ * Return value:
+ * 0 on success.
+ */
+xge_hal_status_e
+xge_hal_eeprom_test(xge_hal_device_h devh, u64 *data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ int fail = 0;
+ u32 ret_data = 0;
+
+ /* Test Write Error at offset 0 */
+ if (!xge_hal_write_eeprom(hldev, 0, 0, 3))
+ fail = 1;
+
+ /* Test Write at offset 4f0 */
+ if (xge_hal_write_eeprom(hldev, 0x4F0, 0x01234567, 3))
+ fail = 1;
+ if (xge_hal_read_eeprom(hldev, 0x4F0, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x01234567)
+ fail = 1;
+
+	/* Reset the EEPROM data to FFFF */
+ (void) xge_hal_write_eeprom(hldev, 0x4F0, 0xFFFFFFFF, 3);
+
+ /* Test Write Request Error at offset 0x7c */
+ if (!xge_hal_write_eeprom(hldev, 0x07C, 0, 3))
+ fail = 1;
+
+ /* Test Write Request at offset 0x7fc */
+ if (xge_hal_write_eeprom(hldev, 0x7FC, 0x01234567, 3))
+ fail = 1;
+ if (xge_hal_read_eeprom(hldev, 0x7FC, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x01234567)
+ fail = 1;
+
+	/* Reset the EEPROM data to FFFF */
+ (void) xge_hal_write_eeprom(hldev, 0x7FC, 0xFFFFFFFF, 3);
+
+ /* Test Write Error at offset 0x80 */
+ if (!xge_hal_write_eeprom(hldev, 0x080, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0xfc */
+ if (!xge_hal_write_eeprom(hldev, 0x0FC, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0x100 */
+ if (!xge_hal_write_eeprom(hldev, 0x100, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 4ec */
+ if (!xge_hal_write_eeprom(hldev, 0x4EC, 0, 3))
+ fail = 1;
+
+ *data = fail;
+ return XGE_HAL_OK;
+}
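+
+#if 0
+/*
+ * Illustrative sketch, not part of the HAL: run the EEPROM self-test
+ * and turn the verdict into a simple pass/fail. A zero in "result"
+ * means every sub-test above passed; example_eeprom_selftest() is a
+ * hypothetical caller.
+ */
+static int
+example_eeprom_selftest(xge_hal_device_h devh)
+{
+	u64 result;
+
+	if (xge_hal_eeprom_test(devh, &result) != XGE_HAL_OK)
+		return -1;
+	return (result == 0) ? 0 : -1;
+}
+#endif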
+
+/*
+ * xge_hal_bist_test - invokes the MemBist test of the card.
+ * @devh: HAL device handle.
+ * @data: variable that returns the result of each of the test conducted by
+ * the driver.
+ *
+ * This invokes the MemBist test of the card. We give around
+ * 2 seconds for the test to complete. If it is still not complete
+ * within this period, we consider the test to have failed.
+ * Return value:
+ * 0 on success and -1 on failure.
+ */
+xge_hal_status_e
+xge_hal_bist_test(xge_hal_device_h devh, u64 *data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ u8 bist = 0;
+ int cnt = 0;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+
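+	/*
+	 * Offset 0x0F in PCI configuration space is the standard BIST
+	 * register: writing bit 6 starts the built-in self-test, the
+	 * device clears that bit on completion, and bits 3:0 then hold
+	 * the completion code (0 means pass).
+	 */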
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh, 0x0f, &bist);
+ bist |= 0x40;
+ xge_os_pci_write8(hldev->pdev, hldev->cfgh, 0x0f, bist);
+
+ while (cnt < 20) {
+ xge_os_pci_read8(hldev->pdev, hldev->cfgh, 0x0f, &bist);
+ if (!(bist & 0x40)) {
+ *data = (bist & 0x0f);
+ ret = XGE_HAL_OK;
+ break;
+ }
+ xge_os_mdelay(100);
+ cnt++;
+ }
+
+ return ret;
+}
+
+/*
+ * xge_hal_link_test - verifies the link state of the NIC
+ * @devh: HAL device handle.
+ * @data: variable that returns the result of each of the test conducted by
+ * the driver.
+ *
+ * Verifies the link state of the NIC and updates the input
+ * argument 'data' appropriately.
+ * Return value:
+ * 0 on success.
+ */
+xge_hal_status_e
+xge_hal_link_test(xge_hal_device_h devh, u64 *data)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->adapter_status);
+ if (val64 & XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)
+ *data = 1;
+
+ return XGE_HAL_OK;
+}
+
+
+/**
+ * xge_hal_getpause_data - Pause frame generation and reception.
+ * @devh: HAL device handle.
+ * @tx: A field to return the pause generation capability of the NIC.
+ * @rx: A field to return the pause reception capability of the NIC.
+ *
+ * Returns the Pause frame generation and reception capability of the NIC.
+ * Return value:
+ * void
+ */
+void xge_hal_getpause_data(xge_hal_device_h devh, int *tx, int *rx)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rmac_pause_cfg);
+ if (val64 & XGE_HAL_RMAC_PAUSE_GEN_EN)
+ *tx = 1;
+ if (val64 & XGE_HAL_RMAC_PAUSE_RCV_EN)
+ *rx = 1;
+}
+
+/**
+ * xge_hal_setpause_data - set/reset pause frame generation.
+ * @devh: HAL device handle.
+ * @tx: A field that indicates the pause generation capability to be
+ * set on the NIC.
+ * @rx: A field that indicates the pause reception capability to be
+ * set on the NIC.
+ *
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ * Return value:
+ * int, returns 0 on Success
+ */
+
+int xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rmac_pause_cfg);
+ if (tx)
+ val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
+ else
+ val64 &= ~XGE_HAL_RMAC_PAUSE_GEN_EN;
+ if (rx)
+ val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
+ else
+ val64 &= ~XGE_HAL_RMAC_PAUSE_RCV_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rmac_pause_cfg);
+ return 0;
+}
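+
+#if 0
+/*
+ * Illustrative sketch, not part of the HAL: enable pause frame
+ * generation, disable reception, and read the settings back.
+ * example_pause_cfg() is a hypothetical caller.
+ */
+static void
+example_pause_cfg(xge_hal_device_h devh)
+{
+	int tx = 0, rx = 0;
+
+	(void) xge_hal_setpause_data(devh, 1, 0);
+	xge_hal_getpause_data(devh, &tx, &rx);
+}
+#endif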
+
+/**
+ * xge_hal_read_xfp_current_temp - Read the current XFP module temperature.
+ * @hldev: HAL device handle.
+ *
+ * This routine only gets the temperature for XFP modules. Also, updating of the
+ * NVRAM can sometimes fail, so the reading we get may not be up to date.
+ */
+u32 xge_hal_read_xfp_current_temp(xge_hal_device_h hldev)
+{
+ u16 val_1, val_2, i = 0;
+ u32 actual;
+
+ /* First update the NVRAM table of XFP. */
+
+ (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000, 0x3);
+
+
+ /* Now wait for the transfer to complete */
+ do
+ {
+		xge_os_mdelay(50); /* wait 50 milliseconds */
+
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000);
+
+ if ( i++ > 10 )
+ {
+			/* waited 500 ms, which should be plenty of time. */
+ break;
+ }
+ }while (( val_1 & 0x000C ) != 0x0004);
+
+ /* Now NVRAM table of XFP should be updated, so read the temp */
+ val_1 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8067);
+ val_2 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8068);
+
+ actual = ((val_1 << 8) | val_2);
+
+ if (actual >= 32768)
+		actual = actual - 65536;
+	actual = actual / 256;
+
+ return actual;
+}
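+
+/*
+ * Worked example of the conversion above: registers 0x8067/0x8068 hold
+ * the temperature in 1/256 degree Celsius units, so val_1 = 0x19 and
+ * val_2 = 0x80 combine to 0x1980 = 6528, and 6528 / 256 = 25 C (the
+ * fraction is truncated). Raw values of 32768 and above represent
+ * negative temperatures; note that "actual" is a u32, so the
+ * subtraction above wraps rather than producing a C-negative value.
+ */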
+
+/**
+ * __hal_chk_xpak_counter - check the Xpak error count and log the msg.
+ * @hldev: pointer to xge_hal_device_t structure
+ * @type: xpak stats error type
+ * @value: xpak stats value
+ *
+ * It is used to log the error message based on the xpak stats value
+ * Return value:
+ * None
+ */
+
+void __hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value)
+{
+ /*
+	 * If the value is high for three consecutive cycles,
+	 * log an error message
+ */
+ if(value == 3)
+ {
+ switch(type)
+ {
+ case 1:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_temp = 0;
+
+ /*
+ * Notify the ULD on Excess Xpak temperature alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_TEMP);
+ }
+ break;
+ case 2:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current = 0;
+
+ /*
+ * Notify the ULD on Excess xpak bias current alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT);
+ }
+ break;
+ case 3:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output = 0;
+
+ /*
+ * Notify the ULD on Excess Xpak Laser o/p power
+ * alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT);
+ }
+ break;
+ default:
+ xge_debug_osdep(XGE_TRACE, "Incorrect XPAK Alarm "
+ "type ");
+ }
+ }
+
+}
+
+/**
+ * __hal_updt_stats_xpak - update the Xpak error count.
+ * @hldev: pointer to xge_hal_device_t structure
+ *
+ * It is used to update the xpak stats value
+ * Return value:
+ * None
+ */
+void __hal_updt_stats_xpak(xge_hal_device_t *hldev)
+{
+ u16 val_1;
+ u64 addr;
+
+ /* Check the communication with the MDIO slave */
+ addr = 0x0000;
+ val_1 = 0x0;
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if((val_1 == 0xFFFF) || (val_1 == 0x0000))
+ {
+ xge_debug_osdep(XGE_TRACE, "ERR: MDIO slave access failed - "
+ "Returned %x", val_1);
+ return;
+ }
+
+	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
+ if(val_1 != 0x2040)
+ {
+ xge_debug_osdep(XGE_TRACE, "Incorrect value at PMA address 0x0000 - ");
+ xge_debug_osdep(XGE_TRACE, "Returned: %llx- Expected: 0x2040",
+ (unsigned long long)(unsigned long)val_1);
+ return;
+ }
+
+ /* Loading the DOM register to MDIO register */
+ addr = 0xA100;
+ (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr, 0x0);
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+
+ /*
+ * Reading the Alarm flags
+ */
+ addr = 0xA070;
+ val_1 = 0x0;
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if(CHECKBIT(val_1, 0x7))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_transceiver_temp_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp++;
+ __hal_chk_xpak_counter(hldev, 0x1,
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp = 0;
+ }
+ if(CHECKBIT(val_1, 0x6))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_transceiver_temp_low++;
+
+ if(CHECKBIT(val_1, 0x3))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_bias_current_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current++;
+ __hal_chk_xpak_counter(hldev, 0x2,
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current = 0;
+ }
+ if(CHECKBIT(val_1, 0x2))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_bias_current_low++;
+
+ if(CHECKBIT(val_1, 0x1))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_output_power_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output++;
+ __hal_chk_xpak_counter(hldev, 0x3,
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output = 0;
+ }
+ if(CHECKBIT(val_1, 0x0))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_output_power_low++;
+
+ /*
+ * Reading the warning flags
+ */
+ addr = 0xA074;
+ val_1 = 0x0;
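+		/*
+		 * "ring%d_" is a six-character prefix for single-digit
+		 * queue ids, so each stat name below is copied in at
+		 * key+6; the "fifo%d_" loop further down uses the same
+		 * trick. A post_qid above 9 would lengthen the prefix
+		 * and break this fixed offset.
+		 */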
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if(CHECKBIT(val_1, 0x7))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_transceiver_temp_high++;
+ if(CHECKBIT(val_1, 0x6))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_transceiver_temp_low++;
+ if(CHECKBIT(val_1, 0x3))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_bias_current_high++;
+ if(CHECKBIT(val_1, 0x2))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_bias_current_low++;
+ if(CHECKBIT(val_1, 0x1))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_output_power_high++;
+ if(CHECKBIT(val_1, 0x0))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_output_power_low++;
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-mgmtaux.c b/sys/dev/nxge/xgehal/xgehal-mgmtaux.c
new file mode 100644
index 0000000..e2f0046
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-mgmtaux.c
@@ -0,0 +1,1731 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mgmtaux.c
+ *
+ * Description: Xframe-family management auxiliary API implementation
+ *
+ * Created: 1 September 2004
+ */
+
+#include <dev/nxge/include/xgehal-mgmt.h>
+#include <dev/nxge/include/xgehal-driver.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+#ifdef XGE_OS_HAS_SNPRINTF
+#define __hal_aux_snprintf(retbuf, bufsize, fmt, key, value, retsize) \
+ if (bufsize <= 0) return XGE_HAL_ERR_OUT_OF_SPACE; \
+ retsize = xge_os_snprintf(retbuf, bufsize, fmt, key, \
+ XGE_HAL_AUX_SEPA, value); \
+ if (retsize < 0 || retsize >= bufsize) return XGE_HAL_ERR_OUT_OF_SPACE;
+#else
+#define __hal_aux_snprintf(retbuf, bufsize, fmt, key, value, retsize) \
+ if (bufsize <= 0) return XGE_HAL_ERR_OUT_OF_SPACE; \
+ retsize = xge_os_sprintf(retbuf, fmt, key, XGE_HAL_AUX_SEPA, value); \
+ xge_assert(retsize < bufsize); \
+ if (retsize < 0 || retsize >= bufsize) \
+ return XGE_HAL_ERR_OUT_OF_SPACE;
+#endif
+
+#define __HAL_AUX_ENTRY_DECLARE(size, buf) \
+ int entrysize = 0, leftsize = size; \
+ char *ptr = buf;
+
+#define __HAL_AUX_ENTRY(key, value, fmt) \
+ ptr += entrysize; leftsize -= entrysize; \
+ __hal_aux_snprintf(ptr, leftsize, "%s%c"fmt"\n", key, value, entrysize)
+
+#define __HAL_AUX_ENTRY_END(bufsize, retsize) \
+ leftsize -= entrysize; \
+ *retsize = bufsize - leftsize;
+
+#define __hal_aux_pci_link_info(name, index, var) { \
+ __HAL_AUX_ENTRY(name, \
+ (unsigned long long)pcim.link_info[index].var, "%llu") \
+ }
+
+#define __hal_aux_pci_aggr_info(name, index, var) { \
+ __HAL_AUX_ENTRY(name, \
+ (unsigned long long)pcim.aggr_info[index].var, "%llu") \
+ }
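+
+/*
+ * Usage sketch (illustrative, not part of the HAL): the entry macros
+ * above cooperate to fill a caller-supplied buffer with one
+ * "key<XGE_HAL_AUX_SEPA>value" line per statistic:
+ *
+ *	xge_hal_status_e my_dump(int bufsize, char *retbuf, int *retsize)
+ *	{
+ *		__HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+ *
+ *		__HAL_AUX_ENTRY("example_key", 42, "%u");
+ *		__HAL_AUX_ENTRY_END(bufsize, retsize);
+ *		return XGE_HAL_OK;
+ *	}
+ *
+ * my_dump() is hypothetical; every xge_hal_aux_*_read() routine below
+ * follows exactly this pattern.
+ */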
+
+/**
+ * xge_hal_aux_bar0_read - Read and format Xframe BAR0 register.
+ * @devh: HAL device handle.
+ * @offset: Register offset in the BAR0 space.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read Xframe register from BAR0 space. The result is formatted as an ASCII string.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_OUT_OF_SPACE - Buffer size is too small.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_BAR_ID - BAR id is not valid.
+ *
+ * See also: xge_hal_mgmt_reg_read().
+ */
+xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh,
+ unsigned int offset, int bufsize, char *retbuf,
+ int *retsize)
+{
+ xge_hal_status_e status;
+ u64 retval;
+
+ status = xge_hal_mgmt_reg_read(devh, 0, offset, &retval);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ if (bufsize < XGE_OS_SPRINTF_STRLEN) {
+ return XGE_HAL_ERR_OUT_OF_SPACE;
+ }
+
+ *retsize = xge_os_sprintf(retbuf, "0x%04X%c0x%08X%08X\n", offset,
+ XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval);
+
+ return XGE_HAL_OK;
+}
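+
+#if 0
+/*
+ * Illustrative sketch, not part of the HAL: format one BAR0 register
+ * as text and log it. The buffer size and the 0x0 offset are
+ * arbitrary example values; example_dump_bar0() is a hypothetical
+ * caller.
+ */
+static void
+example_dump_bar0(xge_hal_device_h devh)
+{
+	char buf[256];
+	int len = 0;
+
+	if (xge_hal_aux_bar0_read(devh, 0x0, (int)sizeof(buf), buf,
+		&len) == XGE_HAL_OK)
+		xge_debug_osdep(XGE_TRACE, "%s", buf);
+}
+#endif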
+
+/**
+ * xge_hal_aux_bar1_read - Read and format Xframe BAR1 register.
+ * @devh: HAL device handle.
+ * @offset: Register offset in the BAR1 space.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read Xframe register from BAR1 space. The result is formatted as an ASCII string.
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_OUT_OF_SPACE - Buffer size is too small.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_BAR_ID - BAR id is not valid.
+ *
+ * See also: xge_hal_mgmt_reg_read().
+ */
+xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh,
+ unsigned int offset, int bufsize, char *retbuf,
+ int *retsize)
+{
+ xge_hal_status_e status;
+ u64 retval;
+
+ status = xge_hal_mgmt_reg_read(devh, 1, offset, &retval);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ if (bufsize < XGE_OS_SPRINTF_STRLEN) {
+ return XGE_HAL_ERR_OUT_OF_SPACE;
+ }
+
+ *retsize = xge_os_sprintf(retbuf, "0x%04X%c0x%08X%08X\n",
+ offset,
+ XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_bar0_write - Write BAR0 register.
+ * @devh: HAL device handle.
+ * @offset: Register offset in the BAR0 space.
+ * @value: Register value (to write).
+ *
+ * Write BAR0 register.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_INVALID_OFFSET - Register offset in the BAR space is not
+ * valid.
+ * XGE_HAL_ERR_INVALID_BAR_ID - BAR id is not valid.
+ *
+ * See also: xge_hal_mgmt_reg_write().
+ */
+xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh,
+ unsigned int offset, u64 value)
+{
+ xge_hal_status_e status;
+
+ status = xge_hal_mgmt_reg_write(devh, 0, offset, value);
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_about_read - Retrieve and format about info.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Retrieve about info (using xge_hal_mgmt_about()) and sprintf it
+ * into the provided @retbuf.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ * XGE_HAL_FAIL - Failed to retrieve the information.
+ *
+ * See also: xge_hal_mgmt_about(), xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_mgmt_about_info_t about_info;
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ status = xge_hal_mgmt_about(devh, &about_info,
+ sizeof(xge_hal_mgmt_about_info_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("vendor", about_info.vendor, "0x%x");
+ __HAL_AUX_ENTRY("device", about_info.device, "0x%x");
+ __HAL_AUX_ENTRY("subsys_vendor", about_info.subsys_vendor, "0x%x");
+ __HAL_AUX_ENTRY("subsys_device", about_info.subsys_device, "0x%x");
+ __HAL_AUX_ENTRY("board_rev", about_info.board_rev, "0x%x");
+ __HAL_AUX_ENTRY("vendor_name", about_info.vendor_name, "%s");
+ __HAL_AUX_ENTRY("chip_name", about_info.chip_name, "%s");
+ __HAL_AUX_ENTRY("media", about_info.media, "%s");
+ __HAL_AUX_ENTRY("hal_major", about_info.hal_major, "%s");
+ __HAL_AUX_ENTRY("hal_minor", about_info.hal_minor, "%s");
+ __HAL_AUX_ENTRY("hal_fix", about_info.hal_fix, "%s");
+ __HAL_AUX_ENTRY("hal_build", about_info.hal_build, "%s");
+ __HAL_AUX_ENTRY("ll_major", about_info.ll_major, "%s");
+ __HAL_AUX_ENTRY("ll_minor", about_info.ll_minor, "%s");
+ __HAL_AUX_ENTRY("ll_fix", about_info.ll_fix, "%s");
+ __HAL_AUX_ENTRY("ll_build", about_info.ll_build, "%s");
+
+ __HAL_AUX_ENTRY("transponder_temperature",
+ about_info.transponder_temperature, "%d C");
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_stats_tmac_read - Read TMAC hardware statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read TMAC hardware statistics. This is a subset of stats counters
+ * from xge_hal_stats_hw_info_t{}.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_hw_stats{}, xge_hal_stats_hw_info_t{},
+ * xge_hal_aux_stats_pci_read(),
+ * xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ xge_hal_mgmt_hw_stats_t hw;
+
+ status = xge_hal_mgmt_hw_stats(devh, &hw,
+ sizeof(xge_hal_mgmt_hw_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("tmac_data_octets", hw.tmac_data_octets, "%u");
+ __HAL_AUX_ENTRY("tmac_frms", hw.tmac_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_drop_frms", (unsigned long long)
+ hw.tmac_drop_frms, "%llu");
+ __HAL_AUX_ENTRY("tmac_bcst_frms", hw.tmac_bcst_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_mcst_frms", hw.tmac_mcst_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_pause_ctrl_frms", (unsigned long long)
+ hw.tmac_pause_ctrl_frms, "%llu");
+ __HAL_AUX_ENTRY("tmac_ucst_frms", hw.tmac_ucst_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_ttl_octets", hw.tmac_ttl_octets, "%u");
+ __HAL_AUX_ENTRY("tmac_any_err_frms", hw.tmac_any_err_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_nucst_frms", hw.tmac_nucst_frms, "%u");
+ __HAL_AUX_ENTRY("tmac_ttl_less_fb_octets", (unsigned long long)
+ hw.tmac_ttl_less_fb_octets, "%llu");
+ __HAL_AUX_ENTRY("tmac_vld_ip_octets", (unsigned long long)
+ hw.tmac_vld_ip_octets, "%llu");
+ __HAL_AUX_ENTRY("tmac_drop_ip", hw.tmac_drop_ip, "%u");
+ __HAL_AUX_ENTRY("tmac_vld_ip", hw.tmac_vld_ip, "%u");
+ __HAL_AUX_ENTRY("tmac_rst_tcp", hw.tmac_rst_tcp, "%u");
+ __HAL_AUX_ENTRY("tmac_icmp", hw.tmac_icmp, "%u");
+ __HAL_AUX_ENTRY("tmac_tcp", (unsigned long long)
+ hw.tmac_tcp, "%llu");
+ __HAL_AUX_ENTRY("reserved_0", hw.reserved_0, "%u");
+ __HAL_AUX_ENTRY("tmac_udp", hw.tmac_udp, "%u");
+ } else {
+ int i;
+ xge_hal_mgmt_pcim_stats_t pcim;
+ status = xge_hal_mgmt_pcim_stats(devh, &pcim,
+ sizeof(xge_hal_mgmt_pcim_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ for (i = 0; i < XGE_HAL_MAC_LINKS; i++) {
+ __hal_aux_pci_link_info("tx_frms", i,
+ tx_frms);
+ __hal_aux_pci_link_info("tx_ttl_eth_octets",
+ i, tx_ttl_eth_octets );
+ __hal_aux_pci_link_info("tx_data_octets", i,
+ tx_data_octets);
+ __hal_aux_pci_link_info("tx_mcst_frms", i,
+ tx_mcst_frms);
+ __hal_aux_pci_link_info("tx_bcst_frms", i,
+ tx_bcst_frms);
+ __hal_aux_pci_link_info("tx_ucst_frms", i,
+ tx_ucst_frms);
+ __hal_aux_pci_link_info("tx_tagged_frms", i,
+ tx_tagged_frms);
+ __hal_aux_pci_link_info("tx_vld_ip", i,
+ tx_vld_ip);
+ __hal_aux_pci_link_info("tx_vld_ip_octets", i,
+ tx_vld_ip_octets);
+ __hal_aux_pci_link_info("tx_icmp", i,
+ tx_icmp);
+ __hal_aux_pci_link_info("tx_tcp", i,
+ tx_tcp);
+ __hal_aux_pci_link_info("tx_rst_tcp", i,
+ tx_rst_tcp);
+ __hal_aux_pci_link_info("tx_udp", i,
+ tx_udp);
+ __hal_aux_pci_link_info("tx_unknown_protocol", i,
+ tx_unknown_protocol);
+ __hal_aux_pci_link_info("tx_parse_error", i,
+ tx_parse_error);
+ __hal_aux_pci_link_info("tx_pause_ctrl_frms", i,
+ tx_pause_ctrl_frms);
+ __hal_aux_pci_link_info("tx_lacpdu_frms", i,
+ tx_lacpdu_frms);
+ __hal_aux_pci_link_info("tx_marker_pdu_frms", i,
+ tx_marker_pdu_frms);
+ __hal_aux_pci_link_info("tx_marker_resp_pdu_frms", i,
+ tx_marker_resp_pdu_frms);
+ __hal_aux_pci_link_info("tx_drop_ip", i,
+ tx_drop_ip);
+ __hal_aux_pci_link_info("tx_xgmii_char1_match", i,
+ tx_xgmii_char1_match);
+ __hal_aux_pci_link_info("tx_xgmii_char2_match", i,
+ tx_xgmii_char2_match);
+ __hal_aux_pci_link_info("tx_xgmii_column1_match", i,
+ tx_xgmii_column1_match);
+ __hal_aux_pci_link_info("tx_xgmii_column2_match", i,
+ tx_xgmii_column2_match);
+ __hal_aux_pci_link_info("tx_drop_frms", i,
+ tx_drop_frms);
+ __hal_aux_pci_link_info("tx_any_err_frms", i,
+ tx_any_err_frms);
+ }
+
+ for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) {
+ __hal_aux_pci_aggr_info("tx_frms", i, tx_frms);
+ __hal_aux_pci_aggr_info("tx_mcst_frms", i,
+ tx_mcst_frms);
+ __hal_aux_pci_aggr_info("tx_bcst_frms", i,
+ tx_bcst_frms);
+ __hal_aux_pci_aggr_info("tx_discarded_frms", i,
+ tx_discarded_frms);
+ __hal_aux_pci_aggr_info("tx_errored_frms", i,
+ tx_errored_frms);
+ }
+ }
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_stats_rmac_read - Read RMAC hardware statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read RMAC hardware statistics. This is a subset of stats counters
+ * from xge_hal_stats_hw_info_t{}.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_hw_stats{}, xge_hal_stats_hw_info_t{},
+ * xge_hal_aux_stats_pci_read(), xge_hal_aux_stats_tmac_read(),
+ * xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ xge_hal_mgmt_hw_stats_t hw;
+
+ status = xge_hal_mgmt_hw_stats(devh, &hw,
+ sizeof(xge_hal_mgmt_hw_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("rmac_data_octets", hw.rmac_data_octets, "%u");
+ __HAL_AUX_ENTRY("rmac_vld_frms", hw.rmac_vld_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_fcs_err_frms", (unsigned long long)
+ hw.rmac_fcs_err_frms, "%llu");
+ __HAL_AUX_ENTRY("mac_drop_frms", (unsigned long long)
+ hw.rmac_drop_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_vld_bcst_frms", hw.rmac_vld_bcst_frms,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_vld_mcst_frms", hw.rmac_vld_mcst_frms,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_out_rng_len_err_frms",
+ hw.rmac_out_rng_len_err_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_in_rng_len_err_frms",
+ hw.rmac_in_rng_len_err_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_long_frms", (unsigned long long)
+ hw.rmac_long_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_pause_ctrl_frms", (unsigned long long)
+ hw.rmac_pause_ctrl_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_unsup_ctrl_frms", (unsigned long long)
+ hw.rmac_unsup_ctrl_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_accepted_ucst_frms",
+ hw.rmac_accepted_ucst_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_ttl_octets", hw.rmac_ttl_octets, "%u");
+ __HAL_AUX_ENTRY("rmac_discarded_frms", hw.rmac_discarded_frms,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_accepted_nucst_frms",
+ hw.rmac_accepted_nucst_frms, "%u");
+ __HAL_AUX_ENTRY("reserved_1", hw.reserved_1, "%u");
+ __HAL_AUX_ENTRY("rmac_drop_events", hw.rmac_drop_events, "%u");
+ __HAL_AUX_ENTRY("rmac_ttl_less_fb_octets", (unsigned long long)
+ hw.rmac_ttl_less_fb_octets, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_frms", (unsigned long long)
+ hw.rmac_ttl_frms, "%llu");
+ __HAL_AUX_ENTRY("reserved_2", (unsigned long long)
+ hw.reserved_2, "%llu");
+ __HAL_AUX_ENTRY("rmac_usized_frms", hw.rmac_usized_frms, "%u");
+ __HAL_AUX_ENTRY("reserved_3", hw.reserved_3, "%u");
+ __HAL_AUX_ENTRY("rmac_frag_frms", hw.rmac_frag_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_osized_frms", hw.rmac_osized_frms, "%u");
+ __HAL_AUX_ENTRY("reserved_4", hw.reserved_4, "%u");
+ __HAL_AUX_ENTRY("rmac_jabber_frms", hw.rmac_jabber_frms, "%u");
+ __HAL_AUX_ENTRY("rmac_ttl_64_frms", (unsigned long long)
+ hw.rmac_ttl_64_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_65_127_frms", (unsigned long long)
+ hw.rmac_ttl_65_127_frms, "%llu");
+ __HAL_AUX_ENTRY("reserved_5", (unsigned long long)
+ hw.reserved_5, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_128_255_frms", (unsigned long long)
+ hw.rmac_ttl_128_255_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_256_511_frms", (unsigned long long)
+ hw.rmac_ttl_256_511_frms, "%llu");
+ __HAL_AUX_ENTRY("reserved_6", (unsigned long long)
+ hw.reserved_6, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_512_1023_frms", (unsigned long long)
+ hw.rmac_ttl_512_1023_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_1024_1518_frms", (unsigned long long)
+ hw.rmac_ttl_1024_1518_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ip", hw.rmac_ip, "%u");
+ __HAL_AUX_ENTRY("reserved_7", hw.reserved_7, "%u");
+ __HAL_AUX_ENTRY("rmac_ip_octets", (unsigned long long)
+ hw.rmac_ip_octets, "%llu");
+ __HAL_AUX_ENTRY("rmac_drop_ip", hw.rmac_drop_ip, "%u");
+ __HAL_AUX_ENTRY("rmac_hdr_err_ip", hw.rmac_hdr_err_ip, "%u");
+ __HAL_AUX_ENTRY("reserved_8", hw.reserved_8, "%u");
+ __HAL_AUX_ENTRY("rmac_icmp", hw.rmac_icmp, "%u");
+ __HAL_AUX_ENTRY("rmac_tcp", (unsigned long long)
+ hw.rmac_tcp, "%llu");
+ __HAL_AUX_ENTRY("rmac_err_drp_udp", hw.rmac_err_drp_udp, "%u");
+ __HAL_AUX_ENTRY("rmac_udp", hw.rmac_udp, "%u");
+ __HAL_AUX_ENTRY("rmac_xgmii_err_sym", (unsigned long long)
+ hw.rmac_xgmii_err_sym, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q0", (unsigned long long)
+ hw.rmac_frms_q0, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q1", (unsigned long long)
+ hw.rmac_frms_q1, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q2", (unsigned long long)
+ hw.rmac_frms_q2, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q3", (unsigned long long)
+ hw.rmac_frms_q3, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q4", (unsigned long long)
+ hw.rmac_frms_q4, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q5", (unsigned long long)
+ hw.rmac_frms_q5, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q6", (unsigned long long)
+ hw.rmac_frms_q6, "%llu");
+ __HAL_AUX_ENTRY("rmac_frms_q7", (unsigned long long)
+ hw.rmac_frms_q7, "%llu");
+ __HAL_AUX_ENTRY("rmac_full_q3", hw.rmac_full_q3, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q2", hw.rmac_full_q2, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q1", hw.rmac_full_q1, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q0", hw.rmac_full_q0, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q7", hw.rmac_full_q7, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q6", hw.rmac_full_q6, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q5", hw.rmac_full_q5, "%d");
+ __HAL_AUX_ENTRY("rmac_full_q4", hw.rmac_full_q4, "%d");
+ __HAL_AUX_ENTRY("reserved_9", hw.reserved_9, "%u");
+ __HAL_AUX_ENTRY("rmac_pause_cnt", hw.rmac_pause_cnt, "%u");
+ __HAL_AUX_ENTRY("rmac_xgmii_data_err_cnt", (unsigned long long)
+ hw.rmac_xgmii_data_err_cnt, "%llu");
+ __HAL_AUX_ENTRY("rmac_xgmii_ctrl_err_cnt", (unsigned long long)
+ hw.rmac_xgmii_ctrl_err_cnt, "%llu");
+ __HAL_AUX_ENTRY("rmac_err_tcp", hw.rmac_err_tcp, "%u");
+ __HAL_AUX_ENTRY("rmac_accepted_ip", hw.rmac_accepted_ip, "%u");
+ } else {
+ int i;
+ xge_hal_mgmt_pcim_stats_t pcim;
+ status = xge_hal_mgmt_pcim_stats(devh, &pcim,
+ sizeof(xge_hal_mgmt_pcim_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ for (i = 0; i < XGE_HAL_MAC_LINKS; i++) {
+ __hal_aux_pci_link_info("rx_ttl_frms", i,
+ rx_ttl_frms);
+ __hal_aux_pci_link_info("rx_vld_frms", i,
+ rx_vld_frms);
+ __hal_aux_pci_link_info("rx_offld_frms", i,
+ rx_offld_frms);
+ __hal_aux_pci_link_info("rx_ttl_eth_octets", i,
+ rx_ttl_eth_octets);
+ __hal_aux_pci_link_info("rx_data_octets", i,
+ rx_data_octets);
+ __hal_aux_pci_link_info("rx_offld_octets", i,
+ rx_offld_octets);
+ __hal_aux_pci_link_info("rx_vld_mcst_frms", i,
+ rx_vld_mcst_frms);
+ __hal_aux_pci_link_info("rx_vld_bcst_frms", i,
+ rx_vld_bcst_frms);
+ __hal_aux_pci_link_info("rx_accepted_ucst_frms", i,
+ rx_accepted_ucst_frms);
+ __hal_aux_pci_link_info("rx_accepted_nucst_frms", i,
+ rx_accepted_nucst_frms);
+ __hal_aux_pci_link_info("rx_tagged_frms", i,
+ rx_tagged_frms);
+ __hal_aux_pci_link_info("rx_long_frms", i,
+ rx_long_frms);
+ __hal_aux_pci_link_info("rx_usized_frms", i,
+ rx_usized_frms);
+ __hal_aux_pci_link_info("rx_osized_frms", i,
+ rx_osized_frms);
+ __hal_aux_pci_link_info("rx_frag_frms", i,
+ rx_frag_frms);
+ __hal_aux_pci_link_info("rx_jabber_frms", i,
+ rx_jabber_frms);
+ __hal_aux_pci_link_info("rx_ttl_64_frms", i,
+ rx_ttl_64_frms);
+ __hal_aux_pci_link_info("rx_ttl_65_127_frms", i,
+ rx_ttl_65_127_frms);
+ __hal_aux_pci_link_info("rx_ttl_128_255_frms", i,
+ rx_ttl_128_255_frms);
+ __hal_aux_pci_link_info("rx_ttl_256_511_frms", i,
+ rx_ttl_256_511_frms);
+ __hal_aux_pci_link_info("rx_ttl_512_1023_frms", i,
+ rx_ttl_512_1023_frms);
+ __hal_aux_pci_link_info("rx_ttl_1024_1518_frms", i,
+ rx_ttl_1024_1518_frms);
+ __hal_aux_pci_link_info("rx_ttl_1519_4095_frms", i,
+ rx_ttl_1519_4095_frms);
+			__hal_aux_pci_link_info("rx_ttl_4096_8191_frms", i,
+				rx_ttl_40956_8191_frms); /* field name typo as in header */
+ __hal_aux_pci_link_info("rx_ttl_8192_max_frms", i,
+ rx_ttl_8192_max_frms);
+ __hal_aux_pci_link_info("rx_ttl_gt_max_frms", i,
+ rx_ttl_gt_max_frms);
+ __hal_aux_pci_link_info("rx_ip", i,
+ rx_ip);
+ __hal_aux_pci_link_info("rx_ip_octets", i,
+ rx_ip_octets);
+
+ __hal_aux_pci_link_info("rx_hdr_err_ip", i,
+ rx_hdr_err_ip);
+
+ __hal_aux_pci_link_info("rx_icmp", i,
+ rx_icmp);
+ __hal_aux_pci_link_info("rx_tcp", i,
+ rx_tcp);
+ __hal_aux_pci_link_info("rx_udp", i,
+ rx_udp);
+ __hal_aux_pci_link_info("rx_err_tcp", i,
+ rx_err_tcp);
+ __hal_aux_pci_link_info("rx_pause_cnt", i,
+ rx_pause_cnt);
+ __hal_aux_pci_link_info("rx_pause_ctrl_frms", i,
+ rx_pause_ctrl_frms);
+			__hal_aux_pci_link_info("rx_unsup_ctrl_frms", i,
+				rx_unsup_ctrl_frms);
+ __hal_aux_pci_link_info("rx_in_rng_len_err_frms", i,
+ rx_in_rng_len_err_frms);
+ __hal_aux_pci_link_info("rx_out_rng_len_err_frms", i,
+ rx_out_rng_len_err_frms);
+ __hal_aux_pci_link_info("rx_drop_frms", i,
+ rx_drop_frms);
+ __hal_aux_pci_link_info("rx_discarded_frms", i,
+ rx_discarded_frms);
+ __hal_aux_pci_link_info("rx_drop_ip", i,
+ rx_drop_ip);
+ __hal_aux_pci_link_info("rx_err_drp_udp", i,
+ rx_err_drp_udp);
+ __hal_aux_pci_link_info("rx_lacpdu_frms", i,
+ rx_lacpdu_frms);
+ __hal_aux_pci_link_info("rx_marker_pdu_frms", i,
+ rx_marker_pdu_frms);
+ __hal_aux_pci_link_info("rx_marker_resp_pdu_frms", i,
+ rx_marker_resp_pdu_frms);
+ __hal_aux_pci_link_info("rx_unknown_pdu_frms", i,
+ rx_unknown_pdu_frms);
+ __hal_aux_pci_link_info("rx_illegal_pdu_frms", i,
+ rx_illegal_pdu_frms);
+ __hal_aux_pci_link_info("rx_fcs_discard", i,
+ rx_fcs_discard);
+ __hal_aux_pci_link_info("rx_len_discard", i,
+ rx_len_discard);
+ __hal_aux_pci_link_info("rx_pf_discard", i,
+ rx_pf_discard);
+ __hal_aux_pci_link_info("rx_trash_discard", i,
+ rx_trash_discard);
+			__hal_aux_pci_link_info("rx_rts_discard", i,
+				rx_rts_discard);
+ __hal_aux_pci_link_info("rx_wol_discard", i,
+ rx_wol_discard);
+ __hal_aux_pci_link_info("rx_red_discard", i,
+ rx_red_discard);
+ __hal_aux_pci_link_info("rx_ingm_full_discard", i,
+ rx_ingm_full_discard);
+ __hal_aux_pci_link_info("rx_xgmii_data_err_cnt", i,
+ rx_xgmii_data_err_cnt);
+ __hal_aux_pci_link_info("rx_xgmii_ctrl_err_cnt", i,
+ rx_xgmii_ctrl_err_cnt);
+ __hal_aux_pci_link_info("rx_xgmii_err_sym", i,
+ rx_xgmii_err_sym);
+ __hal_aux_pci_link_info("rx_xgmii_char1_match", i,
+ rx_xgmii_char1_match);
+ __hal_aux_pci_link_info("rx_xgmii_char2_match", i,
+ rx_xgmii_char2_match);
+ __hal_aux_pci_link_info("rx_xgmii_column1_match", i,
+ rx_xgmii_column1_match);
+ __hal_aux_pci_link_info("rx_xgmii_column2_match", i,
+ rx_xgmii_column2_match);
+ __hal_aux_pci_link_info("rx_local_fault", i,
+ rx_local_fault);
+ __hal_aux_pci_link_info("rx_remote_fault", i,
+ rx_remote_fault);
+ __hal_aux_pci_link_info("rx_queue_full", i,
+ rx_queue_full);
+ }
+ for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) {
+ __hal_aux_pci_aggr_info("rx_frms", i, rx_frms);
+			__hal_aux_pci_aggr_info("rx_data_octets", i,
+				rx_data_octets);
+ __hal_aux_pci_aggr_info("rx_mcst_frms", i,
+ rx_mcst_frms);
+ __hal_aux_pci_aggr_info("rx_bcst_frms", i,
+ rx_bcst_frms);
+ __hal_aux_pci_aggr_info("rx_discarded_frms", i,
+ rx_discarded_frms);
+ __hal_aux_pci_aggr_info("rx_errored_frms", i,
+ rx_errored_frms);
+ __hal_aux_pci_aggr_info("rx_unknown_protocol_frms", i,
+ rx_unknown_protocol_frms);
+ }
+
+ }
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_stats_herc_enchanced - Get Hercules hardware statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read Hercules device hardware statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_hw_stats{}, xge_hal_stats_hw_info_t{},
+ * xge_hal_aux_stats_tmac_read(), xge_hal_aux_stats_rmac_read(),
+ * xge_hal_aux_device_dump().
+*/
+xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_mgmt_hw_stats_t hw;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) {
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+ }
+
+
+ status = xge_hal_mgmt_hw_stats(devh, &hw,
+ sizeof(xge_hal_mgmt_hw_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+ __HAL_AUX_ENTRY("tmac_frms_oflow", hw.tmac_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_data_octets_oflow", hw.tmac_data_octets_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("tmac_mcst_frms_oflow", hw.tmac_mcst_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_bcst_frms_oflow", hw.tmac_bcst_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_ttl_octets_oflow", hw.tmac_ttl_octets_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("tmac_ucst_frms_oflow", hw.tmac_ucst_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_nucst_frms_oflow", hw.tmac_nucst_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("tmac_any_err_frms_oflow", hw.tmac_any_err_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("tmac_vlan_frms", (unsigned long long)hw.tmac_vlan_frms,
+ "%llu");
+ __HAL_AUX_ENTRY("tmac_vld_ip_oflow", hw.tmac_vld_ip_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_drop_ip_oflow", hw.tmac_drop_ip_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_icmp_oflow", hw.tmac_icmp_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_rst_tcp_oflow", hw.tmac_rst_tcp_oflow, "%u");
+ __HAL_AUX_ENTRY("tmac_udp_oflow", hw.tmac_udp_oflow, "%u");
+ __HAL_AUX_ENTRY("tpa_unknown_protocol", hw.tpa_unknown_protocol, "%u");
+ __HAL_AUX_ENTRY("tpa_parse_failure", hw.tpa_parse_failure, "%u");
+ __HAL_AUX_ENTRY("rmac_vld_frms_oflow", hw.rmac_vld_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_data_octets_oflow", hw.rmac_data_octets_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_vld_mcst_frms_oflow", hw.rmac_vld_mcst_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_vld_bcst_frms_oflow", hw.rmac_vld_bcst_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_ttl_octets_oflow", hw.rmac_ttl_octets_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_accepted_ucst_frms_oflow",
+ hw.rmac_accepted_ucst_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_accepted_nucst_frms_oflow",
+ hw.rmac_accepted_nucst_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_discarded_frms_oflow",
+ hw.rmac_discarded_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_drop_events_oflow", hw.rmac_drop_events_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_usized_frms_oflow", hw.rmac_usized_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_osized_frms_oflow", hw.rmac_osized_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_frag_frms_oflow", hw.rmac_frag_frms_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_jabber_frms_oflow", hw.rmac_jabber_frms_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_ip_oflow", hw.rmac_ip_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_drop_ip_oflow", hw.rmac_drop_ip_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_icmp_oflow", hw.rmac_icmp_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_udp_oflow", hw.rmac_udp_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_err_drp_udp_oflow", hw.rmac_err_drp_udp_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_pause_cnt_oflow", hw.rmac_pause_cnt_oflow, "%u");
+ __HAL_AUX_ENTRY("rmac_ttl_1519_4095_frms",
+ (unsigned long long)hw.rmac_ttl_1519_4095_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_4096_8191_frms",
+ (unsigned long long)hw.rmac_ttl_4096_8191_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_8192_max_frms",
+ (unsigned long long)hw.rmac_ttl_8192_max_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_ttl_gt_max_frms",
+ (unsigned long long)hw.rmac_ttl_gt_max_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_osized_alt_frms",
+ (unsigned long long)hw.rmac_osized_alt_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_jabber_alt_frms",
+ (unsigned long long)hw.rmac_jabber_alt_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_gt_max_alt_frms",
+ (unsigned long long)hw.rmac_gt_max_alt_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_vlan_frms",
+ (unsigned long long)hw.rmac_vlan_frms, "%llu");
+ __HAL_AUX_ENTRY("rmac_fcs_discard", hw.rmac_fcs_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_len_discard", hw.rmac_len_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_da_discard", hw.rmac_da_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_pf_discard", hw.rmac_pf_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_rts_discard", hw.rmac_rts_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_red_discard", hw.rmac_red_discard, "%u");
+ __HAL_AUX_ENTRY("rmac_ingm_full_discard", hw.rmac_ingm_full_discard,
+ "%u");
+ __HAL_AUX_ENTRY("rmac_accepted_ip_oflow", hw.rmac_accepted_ip_oflow,
+ "%u");
+ __HAL_AUX_ENTRY("link_fault_cnt", hw.link_fault_cnt, "%u");
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_stats_pci_read - Read PCI hardware statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read PCI statistics counters, including number of PCI read and
+ * write transactions, PCI retries, discards, etc.
+ * This is a subset of stats counters from xge_hal_stats_hw_info_t{}.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_hw_stats{}, xge_hal_stats_hw_info_t{},
+ * xge_hal_aux_stats_tmac_read(), xge_hal_aux_stats_rmac_read(),
+ * xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_mgmt_hw_stats_t hw;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) {
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+ }
+
+
+ status = xge_hal_mgmt_hw_stats(devh, &hw,
+ sizeof(xge_hal_mgmt_hw_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("new_rd_req_cnt", hw.new_rd_req_cnt, "%u");
+ __HAL_AUX_ENTRY("rd_req_cnt", hw.rd_req_cnt, "%u");
+ __HAL_AUX_ENTRY("rd_rtry_cnt", hw.rd_rtry_cnt, "%u");
+ __HAL_AUX_ENTRY("new_rd_req_rtry_cnt", hw.new_rd_req_rtry_cnt, "%u");
+ __HAL_AUX_ENTRY("wr_req_cnt", hw.wr_req_cnt, "%u");
+ __HAL_AUX_ENTRY("wr_rtry_rd_ack_cnt", hw.wr_rtry_rd_ack_cnt, "%u");
+ __HAL_AUX_ENTRY("new_wr_req_rtry_cnt", hw.new_wr_req_rtry_cnt, "%u");
+ __HAL_AUX_ENTRY("new_wr_req_cnt", hw.new_wr_req_cnt, "%u");
+ __HAL_AUX_ENTRY("wr_disc_cnt", hw.wr_disc_cnt, "%u");
+ __HAL_AUX_ENTRY("wr_rtry_cnt", hw.wr_rtry_cnt, "%u");
+ __HAL_AUX_ENTRY("txp_wr_cnt", hw.txp_wr_cnt, "%u");
+ __HAL_AUX_ENTRY("rd_rtry_wr_ack_cnt", hw.rd_rtry_wr_ack_cnt, "%u");
+ __HAL_AUX_ENTRY("txd_wr_cnt", hw.txd_wr_cnt, "%u");
+ __HAL_AUX_ENTRY("txd_rd_cnt", hw.txd_rd_cnt, "%u");
+ __HAL_AUX_ENTRY("rxd_wr_cnt", hw.rxd_wr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxd_rd_cnt", hw.rxd_rd_cnt, "%u");
+ __HAL_AUX_ENTRY("rxf_wr_cnt", hw.rxf_wr_cnt, "%u");
+ __HAL_AUX_ENTRY("txf_rd_cnt", hw.txf_rd_cnt, "%u");
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_stats_hal_read - Read HAL (layer) statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read HAL statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize)
+{
+ xge_list_t *item;
+ xge_hal_channel_t *channel;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_status_e status;
+ xge_hal_mgmt_device_stats_t devstat;
+ xge_hal_mgmt_channel_stats_t chstat;
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ status = xge_hal_mgmt_device_stats(hldev, &devstat,
+ sizeof(xge_hal_mgmt_device_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ if (!hldev->config.bimodal_interrupts) {
+ __HAL_AUX_ENTRY("rx_traffic_intr_cnt",
+ devstat.rx_traffic_intr_cnt, "%u");
+ }
+ __HAL_AUX_ENTRY("tx_traffic_intr_cnt", devstat.tx_traffic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txpic_intr_cnt", devstat.txpic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txdma_intr_cnt", devstat.txdma_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txmac_intr_cnt", devstat.txmac_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txxgxs_intr_cnt", devstat.txxgxs_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxpic_intr_cnt", devstat.rxpic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxdma_intr_cnt", devstat.rxdma_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxmac_intr_cnt", devstat.rxmac_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxxgxs_intr_cnt", devstat.rxxgxs_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("mc_intr_cnt", devstat.mc_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("not_xge_intr_cnt", devstat.not_xge_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("not_traffic_intr_cnt",
+ devstat.not_traffic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("traffic_intr_cnt", devstat.traffic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("total_intr_cnt", devstat.total_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("soft_reset_cnt", devstat.soft_reset_cnt, "%u");
+
+ if (hldev->config.rxufca_hi_lim != hldev->config.rxufca_lo_lim &&
+ hldev->config.rxufca_lo_lim != 0) {
+ __HAL_AUX_ENTRY("rxufca_lo_adjust_cnt",
+ devstat.rxufca_lo_adjust_cnt, "%u");
+ __HAL_AUX_ENTRY("rxufca_hi_adjust_cnt",
+ devstat.rxufca_hi_adjust_cnt, "%u");
+ }
+
+ if (hldev->config.bimodal_interrupts) {
+ __HAL_AUX_ENTRY("bimodal_lo_adjust_cnt",
+ devstat.bimodal_lo_adjust_cnt, "%u");
+ __HAL_AUX_ENTRY("bimodal_hi_adjust_cnt",
+ devstat.bimodal_hi_adjust_cnt, "%u");
+ }
+
+#if defined(XGE_HAL_CONFIG_LRO)
+ __HAL_AUX_ENTRY("tot_frms_lroised",
+ devstat.tot_frms_lroised, "%u");
+ __HAL_AUX_ENTRY("tot_lro_sessions",
+ devstat.tot_lro_sessions, "%u");
+ __HAL_AUX_ENTRY("lro_frm_len_exceed_cnt",
+ devstat.lro_frm_len_exceed_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_sg_exceed_cnt",
+ devstat.lro_sg_exceed_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_out_of_seq_pkt_cnt",
+ devstat.lro_out_of_seq_pkt_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_dup_pkt_cnt",
+ devstat.lro_dup_pkt_cnt, "%u");
+#endif
+
+ /* for each opened rx channel */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ char key[XGE_OS_SPRINTF_STRLEN];
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+
+ status = xge_hal_mgmt_channel_stats(channel, &chstat,
+ sizeof(xge_hal_mgmt_channel_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ (void) xge_os_sprintf(key, "ring%d_", channel->post_qid);
+
+ xge_os_strcpy(key+6, "full_cnt");
+ __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u");
+ xge_os_strcpy(key+6, "usage_max");
+ __HAL_AUX_ENTRY(key, chstat.usage_max, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
+ xge_os_strcpy(key+6, "reserve_free_swaps_cnt");
+ __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u");
+ if (!hldev->config.bimodal_interrupts) {
+ xge_os_strcpy(key+6, "avg_compl_per_intr_cnt");
+ __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u");
+ }
+ xge_os_strcpy(key+6, "total_compl_cnt");
+ __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u");
+ xge_os_strcpy(key+6, "bump_cnt");
+ __HAL_AUX_ENTRY(key, chstat.ring_bump_cnt, "%u");
+ }
+
+ /* for each opened tx channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ char key[XGE_OS_SPRINTF_STRLEN];
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+
+ status = xge_hal_mgmt_channel_stats(channel, &chstat,
+ sizeof(xge_hal_mgmt_channel_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid);
+
+ xge_os_strcpy(key+6, "full_cnt");
+ __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u");
+ xge_os_strcpy(key+6, "usage_max");
+ __HAL_AUX_ENTRY(key, chstat.usage_max, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
+ xge_os_strcpy(key+6, "reserve_free_swaps_cnt");
+ __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u");
+ xge_os_strcpy(key+6, "avg_compl_per_intr_cnt");
+ __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u");
+ xge_os_strcpy(key+6, "total_compl_cnt");
+ __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u");
+ xge_os_strcpy(key+6, "total_posts");
+ __HAL_AUX_ENTRY(key, chstat.total_posts, "%u");
+ xge_os_strcpy(key+6, "total_posts_many");
+ __HAL_AUX_ENTRY(key, chstat.total_posts_many, "%u");
+ xge_os_strcpy(key+6, "copied_frags");
+ __HAL_AUX_ENTRY(key, chstat.copied_frags, "%u");
+ xge_os_strcpy(key+6, "copied_buffers");
+ __HAL_AUX_ENTRY(key, chstat.copied_buffers, "%u");
+ xge_os_strcpy(key+6, "total_buffers");
+ __HAL_AUX_ENTRY(key, chstat.total_buffers, "%u");
+ xge_os_strcpy(key+6, "avg_buffers_per_post");
+ __HAL_AUX_ENTRY(key, chstat.avg_buffers_per_post, "%u");
+ xge_os_strcpy(key+6, "avg_buffer_size");
+ __HAL_AUX_ENTRY(key, chstat.avg_buffer_size, "%u");
+ xge_os_strcpy(key+6, "avg_post_size");
+ __HAL_AUX_ENTRY(key, chstat.avg_post_size, "%u");
+ xge_os_strcpy(key+6, "total_posts_dtrs_many");
+ __HAL_AUX_ENTRY(key, chstat.total_posts_dtrs_many, "%u");
+ xge_os_strcpy(key+6, "total_posts_frags_many");
+ __HAL_AUX_ENTRY(key, chstat.total_posts_frags_many, "%u");
+ xge_os_strcpy(key+6, "total_posts_dang_dtrs");
+ __HAL_AUX_ENTRY(key, chstat.total_posts_dang_dtrs, "%u");
+ xge_os_strcpy(key+6, "total_posts_dang_frags");
+ __HAL_AUX_ENTRY(key, chstat.total_posts_dang_frags, "%u");
+ }
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+
+
+/**
+ * xge_hal_aux_stats_sw_dev_read - Read software device statistics.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read software-maintained device statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_status_e status;
+ xge_hal_mgmt_sw_stats_t sw_dev_err_stats;
+ int t_code;
+ char buf[XGE_OS_SPRINTF_STRLEN];
+
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ status = xge_hal_mgmt_sw_stats(hldev, &sw_dev_err_stats,
+ sizeof(xge_hal_mgmt_sw_stats_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("sm_err_cnt",sw_dev_err_stats.sm_err_cnt, "%u");
+ __HAL_AUX_ENTRY("single_ecc_err_cnt",sw_dev_err_stats.single_ecc_err_cnt, "%u");
+ __HAL_AUX_ENTRY("double_ecc_err_cnt",sw_dev_err_stats.double_ecc_err_cnt, "%u");
+ __HAL_AUX_ENTRY("ecc_err_cnt", sw_dev_err_stats.ecc_err_cnt, "%u");
+ __HAL_AUX_ENTRY("parity_err_cnt",sw_dev_err_stats.parity_err_cnt, "%u");
+ __HAL_AUX_ENTRY("serr_cnt",sw_dev_err_stats.serr_cnt, "%u");
+
+ for (t_code = 1; t_code < 16; t_code++) {
+ int t_code_cnt = sw_dev_err_stats.rxd_t_code_err_cnt[t_code];
+ if (t_code_cnt) {
+ (void) xge_os_sprintf(buf, "rxd_t_code_%d", t_code);
+ __HAL_AUX_ENTRY(buf, t_code_cnt, "%u");
+ }
+ t_code_cnt = sw_dev_err_stats.txd_t_code_err_cnt[t_code];
+ if (t_code_cnt) {
+ (void) xge_os_sprintf(buf, "txd_t_code_%d", t_code);
+ __HAL_AUX_ENTRY(buf, t_code_cnt, "%u");
+ }
+ }
+ __HAL_AUX_ENTRY("alarm_transceiver_temp_high",sw_dev_err_stats.
+ stats_xpak.alarm_transceiver_temp_high, "%u");
+ __HAL_AUX_ENTRY("alarm_transceiver_temp_low",sw_dev_err_stats.
+ stats_xpak.alarm_transceiver_temp_low, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_bias_current_high",sw_dev_err_stats.
+ stats_xpak.alarm_laser_bias_current_high, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_bias_current_low",sw_dev_err_stats.
+ stats_xpak.alarm_laser_bias_current_low, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_output_power_high",sw_dev_err_stats.
+ stats_xpak.alarm_laser_output_power_high, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_output_power_low",sw_dev_err_stats.
+ stats_xpak.alarm_laser_output_power_low, "%u");
+ __HAL_AUX_ENTRY("warn_transceiver_temp_high",sw_dev_err_stats.
+ stats_xpak.warn_transceiver_temp_high, "%u");
+ __HAL_AUX_ENTRY("warn_transceiver_temp_low",sw_dev_err_stats.
+ stats_xpak.warn_transceiver_temp_low, "%u");
+ __HAL_AUX_ENTRY("warn_laser_bias_current_high",sw_dev_err_stats.
+ stats_xpak.warn_laser_bias_current_high, "%u");
+ __HAL_AUX_ENTRY("warn_laser_bias_current_low",sw_dev_err_stats.
+ stats_xpak.warn_laser_bias_current_low, "%u");
+ __HAL_AUX_ENTRY("warn_laser_output_power_high",sw_dev_err_stats.
+ stats_xpak.warn_laser_output_power_high, "%u");
+ __HAL_AUX_ENTRY("warn_laser_output_power_low",sw_dev_err_stats.
+ stats_xpak.warn_laser_output_power_low, "%u");
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_pci_config_read - Retrieve and format PCI Configuration
+ * info.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Retrieve PCI configuration information (using xge_hal_mgmt_pci_config())
+ * and sprintf it into the provided @retbuf.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_pci_config(), xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize,
+ char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_mgmt_pci_config_t pci_config;
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ status = xge_hal_mgmt_pci_config(devh, &pci_config,
+ sizeof(xge_hal_mgmt_pci_config_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("vendor_id", pci_config.vendor_id, "0x%04X");
+ __HAL_AUX_ENTRY("device_id", pci_config.device_id, "0x%04X");
+ __HAL_AUX_ENTRY("command", pci_config.command, "0x%04X");
+ __HAL_AUX_ENTRY("status", pci_config.status, "0x%04X");
+ __HAL_AUX_ENTRY("revision", pci_config.revision, "0x%02X");
+ __HAL_AUX_ENTRY("pciClass1", pci_config.pciClass[0], "0x%02X");
+ __HAL_AUX_ENTRY("pciClass2", pci_config.pciClass[1], "0x%02X");
+ __HAL_AUX_ENTRY("pciClass3", pci_config.pciClass[2], "0x%02X");
+ __HAL_AUX_ENTRY("cache_line_size",
+ pci_config.cache_line_size, "0x%02X");
+ __HAL_AUX_ENTRY("latency_timer", pci_config.latency_timer, "0x%02X");
+ __HAL_AUX_ENTRY("header_type", pci_config.header_type, "0x%02X");
+ __HAL_AUX_ENTRY("bist", pci_config.bist, "0x%02X");
+ __HAL_AUX_ENTRY("base_addr0_lo", pci_config.base_addr0_lo, "0x%08X");
+ __HAL_AUX_ENTRY("base_addr0_hi", pci_config.base_addr0_hi, "0x%08X");
+ __HAL_AUX_ENTRY("base_addr1_lo", pci_config.base_addr1_lo, "0x%08X");
+ __HAL_AUX_ENTRY("base_addr1_hi", pci_config.base_addr1_hi, "0x%08X");
+ __HAL_AUX_ENTRY("not_Implemented1",
+ pci_config.not_Implemented1, "0x%08X");
+ __HAL_AUX_ENTRY("not_Implemented2", pci_config.not_Implemented2,
+ "0x%08X");
+ __HAL_AUX_ENTRY("cardbus_cis_pointer", pci_config.cardbus_cis_pointer,
+ "0x%08X");
+ __HAL_AUX_ENTRY("subsystem_vendor_id", pci_config.subsystem_vendor_id,
+ "0x%04X");
+ __HAL_AUX_ENTRY("subsystem_id", pci_config.subsystem_id, "0x%04X");
+ __HAL_AUX_ENTRY("rom_base", pci_config.rom_base, "0x%08X");
+ __HAL_AUX_ENTRY("capabilities_pointer",
+ pci_config.capabilities_pointer, "0x%02X");
+ __HAL_AUX_ENTRY("interrupt_line", pci_config.interrupt_line, "0x%02X");
+ __HAL_AUX_ENTRY("interrupt_pin", pci_config.interrupt_pin, "0x%02X");
+ __HAL_AUX_ENTRY("min_grant", pci_config.min_grant, "0x%02X");
+ __HAL_AUX_ENTRY("max_latency", pci_config.max_latency, "0x%02X");
+ __HAL_AUX_ENTRY("msi_cap_id", pci_config.msi_cap_id, "0x%02X");
+ __HAL_AUX_ENTRY("msi_next_ptr", pci_config.msi_next_ptr, "0x%02X");
+ __HAL_AUX_ENTRY("msi_control", pci_config.msi_control, "0x%04X");
+ __HAL_AUX_ENTRY("msi_lower_address", pci_config.msi_lower_address,
+ "0x%08X");
+ __HAL_AUX_ENTRY("msi_higher_address", pci_config.msi_higher_address,
+ "0x%08X");
+ __HAL_AUX_ENTRY("msi_data", pci_config.msi_data, "0x%04X");
+ __HAL_AUX_ENTRY("msi_unused", pci_config.msi_unused, "0x%04X");
+ __HAL_AUX_ENTRY("vpd_cap_id", pci_config.vpd_cap_id, "0x%02X");
+ __HAL_AUX_ENTRY("vpd_next_cap", pci_config.vpd_next_cap, "0x%02X");
+ __HAL_AUX_ENTRY("vpd_addr", pci_config.vpd_addr, "0x%04X");
+ __HAL_AUX_ENTRY("vpd_data", pci_config.vpd_data, "0x%08X");
+ __HAL_AUX_ENTRY("pcix_cap", pci_config.pcix_cap, "0x%02X");
+ __HAL_AUX_ENTRY("pcix_next_cap", pci_config.pcix_next_cap, "0x%02X");
+ __HAL_AUX_ENTRY("pcix_command", pci_config.pcix_command, "0x%04X");
+ __HAL_AUX_ENTRY("pcix_status", pci_config.pcix_status, "0x%08X");
+
+ if (xge_hal_device_check_id(devh) == XGE_HAL_CARD_HERC) {
+ char key[XGE_OS_SPRINTF_STRLEN];
+ int i;
+
+ for (i = 0;
+ i < (XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE - 0x68)/4;
+ i++) {
+ (void) xge_os_sprintf(key, "%03x:", 4*i + 0x68);
+ __HAL_AUX_ENTRY(key, *((int *)pci_config.rsvd_b1 + i),
+ "0x%08X");
+ }
+ }
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+
+/**
+ * xge_hal_aux_channel_read - Read channels information.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read information about the opened ring and fifo channels.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_OUT_OF_SPACE - Buffer size is too small.
+ * See also: xge_hal_aux_device_dump().
+ */
+xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize)
+{
+ xge_list_t *item;
+ xge_hal_channel_t *channel;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ if (hldev->magic != XGE_HAL_MAGIC) {
+ return XGE_HAL_ERR_INVALID_DEVICE;
+ }
+
+ /* for each opened rx channel */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ char key[XGE_OS_SPRINTF_STRLEN];
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+
+ if (channel->is_open != 1)
+ continue;
+
+ (void) xge_os_sprintf(key, "ring%d_", channel->post_qid);
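+ /* the "ring%d_" prefix occupies the first 6 bytes of key; each
+ * stat name below is copied in place at key+6, reusing the prefix */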
+ xge_os_strcpy(key+6, "type");
+ __HAL_AUX_ENTRY(key, channel->type, "%u");
+ xge_os_strcpy(key+6, "length");
+ __HAL_AUX_ENTRY(key, channel->length, "%u");
+ xge_os_strcpy(key+6, "is_open");
+ __HAL_AUX_ENTRY(key, channel->is_open, "%u");
+ xge_os_strcpy(key+6, "reserve_initial");
+ __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u");
+ xge_os_strcpy(key+6, "reserve_max");
+ __HAL_AUX_ENTRY(key, channel->reserve_max, "%u");
+ xge_os_strcpy(key+6, "reserve_length");
+ __HAL_AUX_ENTRY(key, channel->reserve_length, "%u");
+ xge_os_strcpy(key+6, "reserve_top");
+ __HAL_AUX_ENTRY(key, channel->reserve_top, "%u");
+ xge_os_strcpy(key+6, "reserve_threshold");
+ __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u");
+ xge_os_strcpy(key+6, "free_length");
+ __HAL_AUX_ENTRY(key, channel->free_length, "%u");
+ xge_os_strcpy(key+6, "post_index");
+ __HAL_AUX_ENTRY(key, channel->post_index, "%u");
+ xge_os_strcpy(key+6, "compl_index");
+ __HAL_AUX_ENTRY(key, channel->compl_index, "%u");
+ xge_os_strcpy(key+6, "per_dtr_space");
+ __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
+ }
+
+ /* for each opened tx channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ char key[XGE_OS_SPRINTF_STRLEN];
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+
+ if (channel->is_open != 1)
+ continue;
+
+ (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid);
+ xge_os_strcpy(key+6, "type");
+ __HAL_AUX_ENTRY(key, channel->type, "%u");
+ xge_os_strcpy(key+6, "length");
+ __HAL_AUX_ENTRY(key, channel->length, "%u");
+ xge_os_strcpy(key+6, "is_open");
+ __HAL_AUX_ENTRY(key, channel->is_open, "%u");
+ xge_os_strcpy(key+6, "reserve_initial");
+ __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u");
+ xge_os_strcpy(key+6, "reserve_max");
+ __HAL_AUX_ENTRY(key, channel->reserve_max, "%u");
+ xge_os_strcpy(key+6, "reserve_length");
+ __HAL_AUX_ENTRY(key, channel->reserve_length, "%u");
+ xge_os_strcpy(key+6, "reserve_top");
+ __HAL_AUX_ENTRY(key, channel->reserve_top, "%u");
+ xge_os_strcpy(key+6, "reserve_threshold");
+ __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u");
+ xge_os_strcpy(key+6, "free_length");
+ __HAL_AUX_ENTRY(key, channel->free_length, "%u");
+ xge_os_strcpy(key+6, "post_index");
+ __HAL_AUX_ENTRY(key, channel->post_index, "%u");
+ xge_os_strcpy(key+6, "compl_index");
+ __HAL_AUX_ENTRY(key, channel->compl_index, "%u");
+ xge_os_strcpy(key+6, "per_dtr_space");
+ __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
+ }
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_aux_device_dump - Dump driver "about" info and device state.
+ * @devh: HAL device handle.
+ *
+ * Dump driver & device "about" info and device state,
+ * including all BAR0 registers, hardware and software statistics, PCI
+ * configuration space.
+ * See also: xge_hal_aux_about_read(), xge_hal_mgmt_reg_read(),
+ * xge_hal_aux_pci_config_read(), xge_hal_aux_stats_sw_dev_read(),
+ * xge_hal_aux_stats_tmac_read(), xge_hal_aux_stats_rmac_read(),
+ * xge_hal_aux_channel_read(), xge_hal_aux_stats_hal_read().
+ * Returns:
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_OUT_OF_SPACE - Buffer size is too small.
+ */
+xge_hal_status_e
+xge_hal_aux_device_dump(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_status_e status;
+ int retsize;
+ int offset;
+ u64 retval;
+
+ xge_assert(hldev->dump_buf != NULL);
+
+ xge_os_println("********* xge DEVICE DUMP BEGIN **********");
+
+ status = xge_hal_aux_about_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+
+ for (offset = 0; offset < 1574; offset++) {
+
+ status = xge_hal_mgmt_reg_read(hldev, 0, offset*8, &retval);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+
+ if (!retval) continue;
+
+ xge_os_printf("0x%04x 0x%08x%08x", offset*8,
+ (u32)(retval>>32), (u32)retval);
+ }
+ xge_os_println("\n");
+
+ status = xge_hal_aux_pci_config_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ status = xge_hal_aux_stats_tmac_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ status = xge_hal_aux_stats_rmac_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ status = xge_hal_aux_stats_pci_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ status = xge_hal_aux_stats_herc_enchanced(hldev,
+ XGE_HAL_DUMP_BUF_SIZE, hldev->dump_buf, &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+ }
+
+ status = xge_hal_aux_stats_sw_dev_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf, &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ status = xge_hal_aux_channel_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ status = xge_hal_aux_stats_hal_read(hldev, XGE_HAL_DUMP_BUF_SIZE,
+ hldev->dump_buf,
+ &retsize);
+ if (status != XGE_HAL_OK) {
+ goto error;
+ }
+ xge_os_println(hldev->dump_buf);
+
+ xge_os_println("********* XFRAME DEVICE DUMP END **********");
+
+error:
+ return status;
+}
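+
+/*
+ * Usage sketch (illustrative; the my_dump_pci_config() wrapper below is
+ * hypothetical): a ULD can also dump a single statistics group into its
+ * own buffer instead of calling xge_hal_aux_device_dump():
+ *
+ *    static void
+ *    my_dump_pci_config(xge_hal_device_h devh)
+ *    {
+ *        char buf[XGE_HAL_DUMP_BUF_SIZE];
+ *        int retsize;
+ *
+ *        if (xge_hal_aux_pci_config_read(devh, sizeof(buf), buf,
+ *            &retsize) == XGE_HAL_OK)
+ *                xge_os_println(buf);
+ *    }
+ */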
+
+
+/**
+ * xge_hal_aux_driver_config_read - Read Driver configuration.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read driver configuration.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_aux_device_config_read().
+ */
+xge_hal_status_e
+xge_hal_aux_driver_config_read(int bufsize, char *retbuf, int *retsize)
+{
+ xge_hal_status_e status;
+ xge_hal_driver_config_t drv_config;
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ status = xge_hal_mgmt_driver_config(&drv_config,
+ sizeof(xge_hal_driver_config_t));
+ if (status != XGE_HAL_OK) {
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("queue size initial",
+ drv_config.queue_size_initial, "%u");
+ __HAL_AUX_ENTRY("queue size max", drv_config.queue_size_max, "%u");
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ return XGE_HAL_OK;
+}
+
+
+/**
+ * xge_hal_aux_device_config_read - Read device configuration.
+ * @devh: HAL device handle.
+ * @bufsize: Buffer size.
+ * @retbuf: Buffer pointer.
+ * @retsize: Size of the result. Cannot be greater than @bufsize.
+ *
+ * Read device configuration.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_aux_driver_config_read().
+ */
+xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
+ int bufsize, char *retbuf, int *retsize)
+{
+ int i;
+ xge_hal_status_e status;
+ xge_hal_device_config_t *dev_config;
+ xge_hal_device_t *hldev = (xge_hal_device_t *) devh;
+ char key[XGE_OS_SPRINTF_STRLEN];
+ __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf);
+
+ dev_config = (xge_hal_device_config_t *) xge_os_malloc(hldev->pdev,
+ sizeof(xge_hal_device_config_t));
+ if (dev_config == NULL) {
+ return XGE_HAL_FAIL;
+ }
+
+ status = xge_hal_mgmt_device_config(devh, dev_config,
+ sizeof(xge_hal_device_config_t));
+ if (status != XGE_HAL_OK) {
+ xge_os_free(hldev->pdev, dev_config,
+ sizeof(xge_hal_device_config_t));
+ return status;
+ }
+
+ __HAL_AUX_ENTRY("mtu", dev_config->mtu, "%u");
+ __HAL_AUX_ENTRY("isr_polling_count", dev_config->isr_polling_cnt, "%u");
+ __HAL_AUX_ENTRY("latency_timer", dev_config->latency_timer, "%u");
+ __HAL_AUX_ENTRY("max_splits_trans",
+ dev_config->max_splits_trans, "%u");
+ __HAL_AUX_ENTRY("mmrb_count", dev_config->mmrb_count, "%d");
+ __HAL_AUX_ENTRY("shared_splits", dev_config->shared_splits, "%u");
+ __HAL_AUX_ENTRY("stats_refresh_time_sec",
+ dev_config->stats_refresh_time_sec, "%u");
+ __HAL_AUX_ENTRY("pci_freq_mherz", dev_config->pci_freq_mherz, "%u");
+ __HAL_AUX_ENTRY("intr_mode", dev_config->intr_mode, "%u");
+ __HAL_AUX_ENTRY("ring_memblock_size",
+ dev_config->ring.memblock_size, "%u");
+
+ __HAL_AUX_ENTRY("sched_timer_us", dev_config->sched_timer_us, "%u");
+ __HAL_AUX_ENTRY("sched_timer_one_shot",
+ dev_config->sched_timer_one_shot, "%u");
+ __HAL_AUX_ENTRY("rxufca_intr_thres", dev_config->rxufca_intr_thres, "%u");
+ __HAL_AUX_ENTRY("rxufca_lo_lim", dev_config->rxufca_lo_lim, "%u");
+ __HAL_AUX_ENTRY("rxufca_hi_lim", dev_config->rxufca_hi_lim, "%u");
+ __HAL_AUX_ENTRY("rxufca_lbolt_period", dev_config->rxufca_lbolt_period, "%u");
+
+ for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
+ {
+ xge_hal_ring_queue_t *ring = &dev_config->ring.queue[i];
+ xge_hal_rti_config_t *rti = &ring->rti;
+
+ if (!ring->configured)
+ continue;
+
+ (void) xge_os_sprintf(key, "ring%d_", i);
+ xge_os_strcpy(key+6, "inital");
+ __HAL_AUX_ENTRY(key, ring->initial, "%u");
+ xge_os_strcpy(key+6, "max");
+ __HAL_AUX_ENTRY(key, ring->max, "%u");
+ xge_os_strcpy(key+6, "buffer_mode");
+ __HAL_AUX_ENTRY(key, ring->buffer_mode, "%u");
+ xge_os_strcpy(key+6, "dram_size_mb");
+ __HAL_AUX_ENTRY(key, ring->dram_size_mb, "%u");
+ xge_os_strcpy(key+6, "backoff_interval_us");
+ __HAL_AUX_ENTRY(key, ring->backoff_interval_us, "%u");
+ xge_os_strcpy(key+6, "max_frame_len");
+ __HAL_AUX_ENTRY(key, ring->max_frm_len, "%d");
+ xge_os_strcpy(key+6, "priority");
+ __HAL_AUX_ENTRY(key, ring->priority, "%u");
+ xge_os_strcpy(key+6, "rth_en");
+ __HAL_AUX_ENTRY(key, ring->rth_en, "%u");
+ xge_os_strcpy(key+6, "no_snoop_bits");
+ __HAL_AUX_ENTRY(key, ring->no_snoop_bits, "%u");
+ xge_os_strcpy(key+6, "indicate_max_pkts");
+ __HAL_AUX_ENTRY(key, ring->indicate_max_pkts, "%u");
+
+ xge_os_strcpy(key+6, "urange_a");
+ __HAL_AUX_ENTRY(key, rti->urange_a, "%u");
+ xge_os_strcpy(key+6, "ufc_a");
+ __HAL_AUX_ENTRY(key, rti->ufc_a, "%u");
+ xge_os_strcpy(key+6, "urange_b");
+ __HAL_AUX_ENTRY(key, rti->urange_b, "%u");
+ xge_os_strcpy(key+6, "ufc_b");
+ __HAL_AUX_ENTRY(key, rti->ufc_b, "%u");
+ xge_os_strcpy(key+6, "urange_c");
+ __HAL_AUX_ENTRY(key, rti->urange_c, "%u");
+ xge_os_strcpy(key+6, "ufc_c");
+ __HAL_AUX_ENTRY(key, rti->ufc_c, "%u");
+ xge_os_strcpy(key+6, "ufc_d");
+ __HAL_AUX_ENTRY(key, rti->ufc_d, "%u");
+ xge_os_strcpy(key+6, "timer_val_us");
+ __HAL_AUX_ENTRY(key, rti->timer_val_us, "%u");
+ }
+
+
+ {
+ xge_hal_mac_config_t *mac= &dev_config->mac;
+
+ __HAL_AUX_ENTRY("tmac_util_period",
+ mac->tmac_util_period, "%u");
+ __HAL_AUX_ENTRY("rmac_util_period",
+ mac->rmac_util_period, "%u");
+ __HAL_AUX_ENTRY("rmac_bcast_en",
+ mac->rmac_bcast_en, "%u");
+ __HAL_AUX_ENTRY("rmac_pause_gen_en",
+ mac->rmac_pause_gen_en, "%d");
+ __HAL_AUX_ENTRY("rmac_pause_rcv_en",
+ mac->rmac_pause_rcv_en, "%d");
+ __HAL_AUX_ENTRY("rmac_pause_time",
+ mac->rmac_pause_time, "%u");
+ __HAL_AUX_ENTRY("mc_pause_threshold_q0q3",
+ mac->mc_pause_threshold_q0q3, "%u");
+ __HAL_AUX_ENTRY("mc_pause_threshold_q4q7",
+ mac->mc_pause_threshold_q4q7, "%u");
+ }
+
+
+ __HAL_AUX_ENTRY("fifo_max_frags", dev_config->fifo.max_frags, "%u");
+ __HAL_AUX_ENTRY("fifo_reserve_threshold",
+ dev_config->fifo.reserve_threshold, "%u");
+ __HAL_AUX_ENTRY("fifo_memblock_size",
+ dev_config->fifo.memblock_size, "%u");
+#ifdef XGE_HAL_ALIGN_XMIT
+ __HAL_AUX_ENTRY("fifo_alignment_size",
+ dev_config->fifo.alignment_size, "%u");
+#endif
+
+ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
+ xge_hal_fifo_queue_t *fifo = &dev_config->fifo.queue[i];
+
+ if (!fifo->configured)
+ continue;
+
+ (void) xge_os_sprintf(key, "fifo%d_", i);
+ xge_os_strcpy(key+6, "initial");
+ __HAL_AUX_ENTRY(key, fifo->initial, "%u");
+ xge_os_strcpy(key+6, "max");
+ __HAL_AUX_ENTRY(key, fifo->max, "%u");
+ xge_os_strcpy(key+6, "intr");
+ __HAL_AUX_ENTRY(key, fifo->intr, "%u");
+ xge_os_strcpy(key+6, "no_snoop_bits");
+ __HAL_AUX_ENTRY(key, fifo->no_snoop_bits, "%u");
+
+ for (j = 0; j < XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ xge_hal_tti_config_t *tti =
+ &dev_config->fifo.queue[i].tti[j];
+
+ if (!tti->enabled)
+ continue;
+
+ (void) xge_os_sprintf(key, "fifo%d_tti%02d_", i,
+ i * XGE_HAL_MAX_FIFO_TTI_NUM + j);
+ xge_os_strcpy(key+12, "urange_a");
+ __HAL_AUX_ENTRY(key, tti->urange_a, "%u");
+ xge_os_strcpy(key+12, "ufc_a");
+ __HAL_AUX_ENTRY(key, tti->ufc_a, "%u");
+ xge_os_strcpy(key+12, "urange_b");
+ __HAL_AUX_ENTRY(key, tti->urange_b, "%u");
+ xge_os_strcpy(key+12, "ufc_b");
+ __HAL_AUX_ENTRY(key, tti->ufc_b, "%u");
+ xge_os_strcpy(key+12, "urange_c");
+ __HAL_AUX_ENTRY(key, tti->urange_c, "%u");
+ xge_os_strcpy(key+12, "ufc_c");
+ __HAL_AUX_ENTRY(key, tti->ufc_c, "%u");
+ xge_os_strcpy(key+12, "ufc_d");
+ __HAL_AUX_ENTRY(key, tti->ufc_d, "%u");
+ xge_os_strcpy(key+12, "timer_val_us");
+ __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u");
+ xge_os_strcpy(key+12, "timer_ci_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u");
+ }
+ }
+
+ /* and bimodal TTIs */
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_tti_config_t *tti = &hldev->bimodal_tti[i];
+
+ if (!tti->enabled)
+ continue;
+
+ (void) xge_os_sprintf(key, "tti%02d_",
+ XGE_HAL_MAX_FIFO_TTI_RING_0 + i);
+
+ xge_os_strcpy(key+6, "urange_a");
+ __HAL_AUX_ENTRY(key, tti->urange_a, "%u");
+ xge_os_strcpy(key+6, "ufc_a");
+ __HAL_AUX_ENTRY(key, tti->ufc_a, "%u");
+ xge_os_strcpy(key+6, "urange_b");
+ __HAL_AUX_ENTRY(key, tti->urange_b, "%u");
+ xge_os_strcpy(key+6, "ufc_b");
+ __HAL_AUX_ENTRY(key, tti->ufc_b, "%u");
+ xge_os_strcpy(key+6, "urange_c");
+ __HAL_AUX_ENTRY(key, tti->urange_c, "%u");
+ xge_os_strcpy(key+6, "ufc_c");
+ __HAL_AUX_ENTRY(key, tti->ufc_c, "%u");
+ xge_os_strcpy(key+6, "ufc_d");
+ __HAL_AUX_ENTRY(key, tti->ufc_d, "%u");
+ xge_os_strcpy(key+6, "timer_val_us");
+ __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u");
+ xge_os_strcpy(key+6, "timer_ac_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ac_en, "%u");
+ xge_os_strcpy(key+6, "timer_ci_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u");
+ }
+ __HAL_AUX_ENTRY("dump_on_serr", dev_config->dump_on_serr, "%u");
+ __HAL_AUX_ENTRY("dump_on_eccerr",
+ dev_config->dump_on_eccerr, "%u");
+ __HAL_AUX_ENTRY("dump_on_parityerr",
+ dev_config->dump_on_parityerr, "%u");
+ __HAL_AUX_ENTRY("rth_en", dev_config->rth_en, "%u");
+ __HAL_AUX_ENTRY("rth_bucket_size", dev_config->rth_bucket_size, "%u");
+
+ __HAL_AUX_ENTRY_END(bufsize, retsize);
+
+ xge_os_free(hldev->pdev, dev_config,
+ sizeof(xge_hal_device_config_t));
+
+ return XGE_HAL_OK;
+}
+
diff --git a/sys/dev/nxge/xgehal/xgehal-mm.c b/sys/dev/nxge/xgehal/xgehal-mm.c
new file mode 100644
index 0000000..d23f88a
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-mm.c
@@ -0,0 +1,436 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-mm.c
+ *
+ * Description: chipset memory pool object implementation
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xge-os-pal.h>
+#include <dev/nxge/include/xgehal-mm.h>
+#include <dev/nxge/include/xge-debug.h>
+
+/*
+ * __hal_mempool_grow
+ *
+ * Grow the mempool by up to %num_allocate memblocks; the number of
+ * memblocks actually added is returned via %num_allocated.
+ */
+xge_hal_status_e
+__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
+ int *num_allocated)
+{
+ int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
+ int n_items = mempool->items_per_memblock;
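+ /* on the very first grow only the first items_initial items are
+ * exposed via items_arr; subsequent grows fill whole memblocks */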
+
+ *num_allocated = 0;
+
+ if ((mempool->memblocks_allocated + num_allocate) >
+ mempool->memblocks_max) {
+ xge_debug_mm(XGE_ERR, "%s",
+ "__hal_mempool_grow: can grow anymore");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ for (i = mempool->memblocks_allocated;
+ i < mempool->memblocks_allocated + num_allocate; i++) {
+ int j;
+ int is_last =
+ ((mempool->memblocks_allocated+num_allocate-1) == i);
+ xge_hal_mempool_dma_t *dma_object =
+ mempool->memblocks_dma_arr + i;
+ void *the_memblock;
+ int dma_flags;
+
+ dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
+#ifdef XGE_HAL_DMA_DTR_CONSISTENT
+ dma_flags |= XGE_OS_DMA_CONSISTENT;
+#else
+ dma_flags |= XGE_OS_DMA_STREAMING;
+#endif
+
+ /* allocate DMA-capable memblock */
+ mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
+ mempool->memblock_size,
+ dma_flags,
+ &dma_object->handle,
+ &dma_object->acc_handle);
+ if (mempool->memblocks_arr[i] == NULL) {
+ xge_debug_mm(XGE_ERR,
+ "memblock[%d]: out of DMA memory", i);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ xge_os_memzero(mempool->memblocks_arr[i],
+ mempool->memblock_size);
+ the_memblock = mempool->memblocks_arr[i];
+
+ /* allocate the memblock's private part. Each DMA memblock
+ * carries space for per-item private usage, if the mempool
+ * user requested it. Each time the mempool grows, the new
+ * memblock and its private part are allocated at once,
+ * which helps to minimize memory usage. */
+ mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
+ mempool->items_priv_size * n_items);
+ if (mempool->memblocks_priv_arr[i] == NULL) {
+ xge_os_dma_free(mempool->pdev,
+ the_memblock,
+ mempool->memblock_size,
+ &dma_object->acc_handle,
+ &dma_object->handle);
+ xge_debug_mm(XGE_ERR,
+ "memblock_priv[%d]: out of virtual memory, "
+ "requested %d(%d:%d) bytes", i,
+ mempool->items_priv_size * n_items,
+ mempool->items_priv_size, n_items);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ xge_os_memzero(mempool->memblocks_priv_arr[i],
+ mempool->items_priv_size * n_items);
+
+ /* map memblock to physical memory */
+ dma_object->addr = xge_os_dma_map(mempool->pdev,
+ dma_object->handle,
+ the_memblock,
+ mempool->memblock_size,
+ XGE_OS_DMA_DIR_BIDIRECTIONAL,
+#ifdef XGE_HAL_DMA_DTR_CONSISTENT
+ XGE_OS_DMA_CONSISTENT
+#else
+ XGE_OS_DMA_STREAMING
+#endif
+ );
+ if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
+ xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
+ mempool->items_priv_size *
+ n_items);
+ xge_os_dma_free(mempool->pdev,
+ the_memblock,
+ mempool->memblock_size,
+ &dma_object->acc_handle,
+ &dma_object->handle);
+ return XGE_HAL_ERR_OUT_OF_MAPPING;
+ }
+
+ /* fill the items hash array */
+ for (j=0; j<n_items; j++) {
+ int index = i*n_items + j;
+
+ if (first_time && index >= mempool->items_initial) {
+ break;
+ }
+
+ mempool->items_arr[index] =
+ ((char *)the_memblock + j*mempool->item_size);
+
+ /* let the caller do extra per-item initialization */
+ if (mempool->item_func_alloc != NULL) {
+ xge_hal_status_e status;
+
+ if ((status = mempool->item_func_alloc(
+ mempool,
+ the_memblock,
+ i,
+ dma_object,
+ mempool->items_arr[index],
+ index,
+ is_last,
+ mempool->userdata)) != XGE_HAL_OK) {
+
+ if (mempool->item_func_free != NULL) {
+ int k;
+
+ for (k=0; k<j; k++) {
+
+ index =i*n_items + k;
+
+ (void)mempool->item_func_free(
+ mempool, the_memblock,
+ i, dma_object,
+ mempool->items_arr[index],
+ index, is_last,
+ mempool->userdata);
+ }
+ }
+
+ xge_os_free(mempool->pdev,
+ mempool->memblocks_priv_arr[i],
+ mempool->items_priv_size *
+ n_items);
+ xge_os_dma_unmap(mempool->pdev,
+ dma_object->handle,
+ dma_object->addr,
+ mempool->memblock_size,
+ XGE_OS_DMA_DIR_BIDIRECTIONAL);
+ xge_os_dma_free(mempool->pdev,
+ the_memblock,
+ mempool->memblock_size,
+ &dma_object->acc_handle,
+ &dma_object->handle);
+ return status;
+ }
+ }
+
+ mempool->items_current = index + 1;
+ }
+
+ xge_debug_mm(XGE_TRACE,
+ "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
+ "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
+ (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
+ (unsigned long long)dma_object->addr);
+
+ (*num_allocated)++;
+
+ if (first_time && mempool->items_current ==
+ mempool->items_initial) {
+ break;
+ }
+ }
+
+ /* increment actual number of allocated memblocks */
+ mempool->memblocks_allocated += *num_allocated;
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_mempool_create
+ * @memblock_size: Size (in bytes) of a single DMA-able memory block.
+ * @items_initial: Number of items to pre-allocate at creation time.
+ * @items_max: Maximum number of items the pool can ever hold.
+ * @item_size: Size (in bytes) of a single item.
+ * @item_func_alloc: Optional per-item initialization callback.
+ *
+ * This function creates a memory pool object. The pool may grow but will
+ * never shrink; it consists of a number of dynamically allocated blocks,
+ * together large enough to hold %items_initial items initially. Memory is
+ * DMA-able, but the client must map/unmap it before interoperating with
+ * the device.
+ * See also: xge_os_dma_map(), xge_os_dma_unmap(), xge_hal_status_e{}.
+ */
+xge_hal_mempool_t*
+__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
+ int items_priv_size, int items_initial, int items_max,
+ xge_hal_mempool_item_f item_func_alloc,
+ xge_hal_mempool_item_f item_func_free, void *userdata)
+{
+ xge_hal_status_e status;
+ int memblocks_to_allocate;
+ xge_hal_mempool_t *mempool;
+ int allocated;
+
+ if (memblock_size < item_size) {
+ xge_debug_mm(XGE_ERR,
+ "memblock_size %d < item_size %d: misconfiguration",
+ memblock_size, item_size);
+ return NULL;
+ }
+
+ mempool = (xge_hal_mempool_t *) \
+ xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
+ if (mempool == NULL) {
+ xge_debug_mm(XGE_ERR, "mempool allocation failure");
+ return NULL;
+ }
+ xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));
+
+ mempool->pdev = pdev;
+ mempool->memblock_size = memblock_size;
+ mempool->items_max = items_max;
+ mempool->items_initial = items_initial;
+ mempool->item_size = item_size;
+ mempool->items_priv_size = items_priv_size;
+ mempool->item_func_alloc = item_func_alloc;
+ mempool->item_func_free = item_func_free;
+ mempool->userdata = userdata;
+
+ mempool->memblocks_allocated = 0;
+
+ mempool->items_per_memblock = memblock_size / item_size;
+
+ mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
+ mempool->items_per_memblock;
+
+ /* allocate array of memblocks */
+ mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
+ sizeof(void*) * mempool->memblocks_max);
+ if (mempool->memblocks_arr == NULL) {
+ xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+ xge_os_memzero(mempool->memblocks_arr,
+ sizeof(void*) * mempool->memblocks_max);
+
+ /* allocate array of private parts of items per memblocks */
+ mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
+ sizeof(void*) * mempool->memblocks_max);
+ if (mempool->memblocks_priv_arr == NULL) {
+ xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+ xge_os_memzero(mempool->memblocks_priv_arr,
+ sizeof(void*) * mempool->memblocks_max);
+
+ /* allocate array of memblocks DMA objects */
+ mempool->memblocks_dma_arr =
+ (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
+ sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
+
+ if (mempool->memblocks_dma_arr == NULL) {
+ xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+ xge_os_memzero(mempool->memblocks_dma_arr,
+ sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
+
+ /* allocate hash array of items */
+ mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
+ sizeof(void*) * mempool->items_max);
+ if (mempool->items_arr == NULL) {
+ xge_debug_mm(XGE_ERR, "items_arr allocation failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+ xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);
+
+ mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
+ sizeof(void*) * mempool->items_max);
+ if (mempool->shadow_items_arr == NULL) {
+ xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+ xge_os_memzero(mempool->shadow_items_arr,
+ sizeof(void *) * mempool->items_max);
+
+ /* calculate initial number of memblocks */
+ memblocks_to_allocate = (mempool->items_initial +
+ mempool->items_per_memblock - 1) /
+ mempool->items_per_memblock;
+
+ xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
+ "%d items per memblock", memblocks_to_allocate,
+ mempool->items_per_memblock);
+
+ /* pre-allocate the mempool */
+ status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
+ xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
+ sizeof(void*) * mempool->items_max);
+ if (status != XGE_HAL_OK) {
+ xge_debug_mm(XGE_ERR, "mempool_grow failure");
+ __hal_mempool_destroy(mempool);
+ return NULL;
+ }
+
+ xge_debug_mm(XGE_TRACE,
+ "total: allocated %dk of DMA-capable memory",
+ mempool->memblock_size * allocated / 1024);
+
+ return mempool;
+}
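+
+/*
+ * Usage sketch (illustrative; values and error handling are hypothetical).
+ * Argument order: pdev, memblock_size, item_size, items_priv_size,
+ * items_initial, items_max, item_func_alloc, item_func_free, userdata.
+ *
+ *    xge_hal_mempool_t *pool;
+ *
+ *    pool = __hal_mempool_create(pdev, 8192, 128, 0, 256, 2048,
+ *        NULL, NULL, NULL);
+ *    if (pool == NULL)
+ *        return XGE_HAL_ERR_OUT_OF_MEMORY;
+ */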
+
+/*
+ * xge_hal_mempool_destroy
+ */
+void
+__hal_mempool_destroy(xge_hal_mempool_t *mempool)
+{
+ int i, j;
+
+ for (i=0; i<mempool->memblocks_allocated; i++) {
+ xge_hal_mempool_dma_t *dma_object;
+
+ xge_assert(mempool->memblocks_arr[i]);
+ xge_assert(mempool->memblocks_dma_arr + i);
+
+ dma_object = mempool->memblocks_dma_arr + i;
+
+ for (j=0; j<mempool->items_per_memblock; j++) {
+ int index = i*mempool->items_per_memblock + j;
+
+ /* skip the last, partially filled (if any) memblock */
+ if (index >= mempool->items_current) {
+ break;
+ }
+
+ /* let the caller do extra per-item cleanup */
+ if (mempool->item_func_free != NULL) {
+
+ mempool->item_func_free(mempool,
+ mempool->memblocks_arr[i],
+ i, dma_object,
+ mempool->shadow_items_arr[index],
+ index, /* unused */ -1,
+ mempool->userdata);
+ }
+ }
+
+ xge_os_dma_unmap(mempool->pdev,
+ dma_object->handle, dma_object->addr,
+ mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);
+
+ xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
+ mempool->items_priv_size * mempool->items_per_memblock);
+
+ xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
+ mempool->memblock_size, &dma_object->acc_handle,
+ &dma_object->handle);
+ }
+
+ if (mempool->items_arr) {
+ xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
+ mempool->items_max);
+ }
+
+ if (mempool->shadow_items_arr) {
+ xge_os_free(mempool->pdev, mempool->shadow_items_arr,
+ sizeof(void*) * mempool->items_max);
+ }
+
+ if (mempool->memblocks_dma_arr) {
+ xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
+ sizeof(xge_hal_mempool_dma_t) *
+ mempool->memblocks_max);
+ }
+
+ if (mempool->memblocks_priv_arr) {
+ xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
+ sizeof(void*) * mempool->memblocks_max);
+ }
+
+ if (mempool->memblocks_arr) {
+ xge_os_free(mempool->pdev, mempool->memblocks_arr,
+ sizeof(void*) * mempool->memblocks_max);
+ }
+
+ xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-ring-fp.c b/sys/dev/nxge/xgehal/xgehal-ring-fp.c
new file mode 100644
index 0000000..9d5a09e
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-ring-fp.c
@@ -0,0 +1,852 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-ring-fp.c
+ *
+ * Description: HAL Rx ring object functionality (fast path)
+ *
+ * Created: 10 June 2004
+ */
+
+#ifdef XGE_DEBUG_FP
+#include <dev/nxge/include/xgehal-ring.h>
+#endif
+
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
+__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
+{
+
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ xge_hal_ring_rxd_priv_t *rxd_priv;
+
+ xge_assert(rxdp);
+
+#if defined(XGE_HAL_USE_5B_MODE)
+ xge_assert(ring);
+ if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
+#if defined (XGE_OS_PLATFORM_64BIT)
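+ /* 64-bit case: host_control packs the memblock index in the
+ * upper bits and the RxD index within that block in the low
+ * 16 bits */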
+ int memblock_idx = rxdp_5->host_control >> 16;
+ int i = rxdp_5->host_control & 0xFFFF;
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)
+ ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
+#else
+ /* 32-bit case */
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
+#endif
+ } else
+#endif
+ {
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)
+ (ulong_t)rxdp->host_control;
+ }
+
+ xge_assert(rxd_priv);
+ xge_assert(rxd_priv->dma_object);
+
+ xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);
+
+ xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
+ rxd_priv->dma_addr);
+
+ return rxd_priv;
+}
+
+__HAL_STATIC_RING __HAL_INLINE_RING int
+__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
+{
+ return (int)*((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
+}
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t*block, int memblock_idx)
+{
+ *((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
+ memblock_idx;
+}
+
+
+__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
+__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
+{
+ return (dma_addr_t)*((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
+}
+
+__HAL_STATIC_RING __HAL_INLINE_RING void
+__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
+ dma_addr_t dma_next)
+{
+ *((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
+}
+
+/**
+ * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Returns: private ULD info associated with the descriptor.
+ * ULD requests per-descriptor space via xge_hal_channel_open().
+ *
+ * See also: xge_hal_fifo_dtr_private().
+ * Usage: See ex_rx_compl{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void*
+xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) +
+ sizeof(xge_hal_ring_rxd_priv_t);
+}
+
+/**
+ * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve Rx descriptor for the subsequent filling-in (by upper layer
+ * driver (ULD)) and posting on the corresponding channel (@channelh)
+ * via xge_hal_ring_dtr_post().
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
+ *
+ * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
+ * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
+ * Usage: See ex_post_all_rx{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+{
+ xge_hal_status_e status;
+#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+ unsigned long flags;
+#endif
+
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
+#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
+ flags);
+#endif
+
+ status = __hal_channel_dtr_alloc(channelh, dtrh);
+
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
+#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
+ flags);
+#endif
+
+ if (status == XGE_HAL_OK) {
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
+
+ /* instead of memset: reset this RxD */
+ rxdp->control_1 = rxdp->control_2 = 0;
+
+#if defined(XGE_OS_MEMORY_CHECK)
+ __hal_ring_rxd_priv((xge_hal_ring_t *) channelh, rxdp)->allocated = 1;
+#endif
+ }
+
+ return status;
+}
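+
+/*
+ * Usage sketch (illustrative, along the lines of the ex_post_all_rx{}
+ * example referenced below; my_get_rx_buffer() and MY_BUF_SIZE are
+ * hypothetical, and the buffer is assumed to be already mapped to the
+ * device):
+ *
+ *    xge_hal_dtr_h dtr;
+ *
+ *    while (xge_hal_ring_dtr_reserve(channelh, &dtr) == XGE_HAL_OK) {
+ *        dma_addr_t dma = my_get_rx_buffer(MY_BUF_SIZE);
+ *
+ *        xge_hal_ring_dtr_1b_set(dtr, dma, MY_BUF_SIZE);
+ *        xge_hal_ring_dtr_post(channelh, dtr);
+ *    }
+ */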
+
+/**
+ * xge_hal_ring_dtr_info_get - Get extended information associated with
+ * a completed receive descriptor for 1b mode.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
+ *
+ * Retrieve extended information associated with a completed receive descriptor.
+ *
+ * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
+ * xge_hal_ring_dtr_5b_get().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ xge_hal_dtr_info_t *ext_info)
+{
+ /* cast to 1-buffer mode RxD: the code below relies on the fact
+ * that control_1 and control_2 are formatted the same way in all
+ * buffer modes */
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+
+ ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
+ ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
+ ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
+ ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
+ ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
+
+ /* Herc only; a few extra cycles are wasted on Xena and/or
+ * when RTH is not enabled.
+ * Alternatively, one could check
+ * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
+ ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
+ ext_info->rth_spdm_hit =
+ XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
+ ext_info->rth_hash_type =
+ XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
+ ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
+}
+
+/**
+ * xge_hal_ring_dtr_info_nb_get - Get extended information associated
+ * with a completed receive descriptor for 3b or 5b
+ * modes.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
+ *
+ * Retrieve extended information associated with a completed receive descriptor.
+ *
+ * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
+ * xge_hal_ring_dtr_5b_get().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ xge_hal_dtr_info_t *ext_info)
+{
+ /* cast to 1-buffer mode RxD: the code below relies on the fact
+ * that control_1 and control_2 are formatted the same way in all
+ * buffer modes */
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+
+ ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
+ ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
+ ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
+ ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
+ ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
+ /* Herc only; a few extra cycles are wasted on Xena and/or
+ * when RTH is not enabled. Same comment as above. */
+ ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
+ ext_info->rth_spdm_hit =
+ XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
+ ext_info->rth_hash_type =
+ XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
+ ext_info->rth_value = (u32)rxdp->buffer0_ptr;
+}
+
+/**
+ * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
+ * @dtrh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * should carry. Note that by the time
+ * xge_hal_ring_dtr_1b_set
+ * is called, the receive buffer should be already mapped
+ * to the corresponding Xframe device.
+ * @size: Size of the receive @dma_pointer buffer.
+ *
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via xge_hal_ring_dtr_post()).
+ *
+ * This inline helper-function does not return any parameters and always
+ * succeeds.
+ *
+ * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
+ * Usage: See ex_post_all_rx{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
+{
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ rxdp->buffer0_ptr = dma_pointer;
+ rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
+
+ xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p",
+ (xge_hal_ring_rxd_1_t *)dtrh,
+ rxdp->control_2,
+ rxdp->buffer0_ptr);
+}
+
+/**
+ * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
+ * descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
+ * carries. Returned by HAL.
+ * @pkt_length: Length (in bytes) of the data in the buffer pointed by
+ * @dma_pointer. Returned by HAL.
+ *
+ * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
+ * This inline helper-function uses completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
+ *
+ * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
+ * Usage: See ex_rx_compl{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t *dma_pointer, int *pkt_length)
+{
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+
+ *pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
+ *dma_pointer = rxdp->buffer0_ptr;
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
+}
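+
+/*
+ * Usage sketch (illustrative, along the lines of the ex_rx_compl{}
+ * example referenced above; my_deliver() is a hypothetical ULD routine,
+ * and xge_hal_ring_dtr_free() is assumed to return the descriptor to the
+ * reserve pool):
+ *
+ *    xge_hal_dtr_h dtr;
+ *    u8 t_code;
+ *
+ *    while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
+ *        == XGE_HAL_OK) {
+ *        dma_addr_t dma;
+ *        int len;
+ *
+ *        xge_hal_ring_dtr_1b_get(channelh, dtr, &dma, &len);
+ *        if (t_code == 0)
+ *            my_deliver(dma, len);
+ *        xge_hal_ring_dtr_free(channelh, dtr);
+ *    }
+ */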
+
+/**
+ * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
+ * @dtrh: Descriptor handle.
+ * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
+ * _this_ descriptor should carry.
+ * Note that by the time xge_hal_ring_dtr_3b_set
+ * is called, the receive buffers should be mapped
+ * to the corresponding Xframe device.
+ * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
+ * buffer from @dma_pointers.
+ *
+ * Prepare 3-buffer-mode Rx descriptor for posting (via
+ * xge_hal_ring_dtr_post()).
+ * This inline helper-function does not return any parameters and always
+ * succeeds.
+ *
+ * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[])
+{
+ xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
+ rxdp->buffer0_ptr = dma_pointers[0];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
+ rxdp->buffer1_ptr = dma_pointers[1];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
+ rxdp->buffer2_ptr = dma_pointers[2];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
+}
+
+/**
+ * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
+ * descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
+ * carries. The first two buffers contain ethernet and
+ * (IP + transport) headers. The 3rd buffer contains packet
+ * data.
+ * Returned by HAL.
+ * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
+ * buffer from @dma_pointers. Returned by HAL.
+ *
+ * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
+ * This inline helper-function uses completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
+ *
+ * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t dma_pointers[], int sizes[])
+{
+ xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
+
+ dma_pointers[0] = rxdp->buffer0_ptr;
+ sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);
+
+ dma_pointers[1] = rxdp->buffer1_ptr;
+ sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);
+
+ dma_pointers[2] = rxdp->buffer2_ptr;
+ sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
+ sizes[2];
+}
+
+/**
+ * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
+ * @dtrh: Descriptor handle.
+ * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
+ * _this_ descriptor should carry.
+ * Note that by the time xge_hal_ring_dtr_5b_set
+ * is called, the receive buffers should be mapped
+ * to the corresponding Xframe device.
+ * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
+ * buffer from @dma_pointers.
+ *
+ * Prepare 5-buffer-mode Rx descriptor for posting (via
+ * xge_hal_ring_dtr_post()).
+ * This inline helper-function does not return any parameters and always
+ * succeeds.
+ *
+ * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[])
+{
+ xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
+ rxdp->buffer0_ptr = dma_pointers[0];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
+ rxdp->buffer1_ptr = dma_pointers[1];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
+ rxdp->buffer2_ptr = dma_pointers[2];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
+ rxdp->buffer3_ptr = dma_pointers[3];
+ rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
+ rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
+ rxdp->buffer4_ptr = dma_pointers[4];
+ rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
+ rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
+}
+
+/**
+ * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
+ * descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
+ * carries. The first 4 buffers contain the L2 (Ethernet) through
+ * L5 headers. The 5th buffer contains the received (application)
+ * data. Returned by HAL.
+ * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
+ * buffer from @dma_pointers. Returned by HAL.
+ *
+ * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
+ * This inline helper-function uses completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
+ *
+ * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ dma_addr_t dma_pointers[], int sizes[])
+{
+ xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
+
+ dma_pointers[0] = rxdp->buffer0_ptr;
+ sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);
+
+ dma_pointers[1] = rxdp->buffer1_ptr;
+ sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);
+
+ dma_pointers[2] = rxdp->buffer2_ptr;
+ sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);
+
+ dma_pointers[3] = rxdp->buffer3_ptr;
+ sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);
+
+ dma_pointers[4] = rxdp->buffer4_ptr;
+ sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
+ sizes[2] + sizes[3] + sizes[4];
+}
+
+
+/**
+ * xge_hal_ring_dtr_pre_post - Prepare descriptor for posting.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Mark the descriptor as not yet completed and queue it on the channel.
+ * Ownership is handed to the device later, by
+ * xge_hal_ring_dtr_post_post() or xge_hal_ring_dtr_post_post_wmb().
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+#endif
+#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ unsigned long flags;
+#endif
+
+ rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;
+
+#ifdef XGE_DEBUG_ASSERT
+ /* make sure Xena overwrites the (illegal) t_code on completion */
+ XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
+#endif
+
+ xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d",
+ (unsigned long long)(ulong_t)dtrh,
+ ((xge_hal_ring_t *)channelh)->channel.post_index,
+ ((xge_hal_ring_t *)channelh)->channel.post_qid);
+
+#if defined(XGE_HAL_RX_MULTI_POST)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
+#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
+ flags);
+#endif
+
+#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
+ {
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ if (channel->post_index != 0) {
+ xge_hal_dtr_h prev_dtrh;
+ xge_hal_ring_rxd_priv_t *rxdp_priv;
+
+ rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
+ prev_dtrh = channel->work_arr[channel->post_index - 1];
+
+ if (prev_dtrh != NULL &&
+ (rxdp_priv->dma_offset & (~0xFFF)) !=
+ rxdp_priv->dma_offset) {
+ xge_assert((char *)prev_dtrh +
+ ((xge_hal_ring_t*)channel)->rxd_size == dtrh);
+ }
+ }
+ }
+#endif
+
+ __hal_channel_dtr_post(channelh, dtrh);
+
+#if defined(XGE_HAL_RX_MULTI_POST)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
+#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
+ flags);
+#endif
+}
+
+
+/**
+ * xge_hal_ring_dtr_post_post - Complete posting of a descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Transfer ownership of a previously prepared descriptor (see
+ * xge_hal_ring_dtr_pre_post()) to the Xframe device.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
+#endif
+ /* do POST */
+ rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ priv = __hal_ring_rxd_priv(ring, rxdp);
+ xge_os_dma_sync(ring->channel.pdev,
+ priv->dma_handle, priv->dma_addr,
+ priv->dma_offset, ring->rxd_size,
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+
+ xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post: rxdp %p control_1 %p",
+ (xge_hal_ring_rxd_1_t *)dtrh,
+ rxdp->control_1);
+
+ if (ring->channel.usage_cnt > 0)
+ ring->channel.usage_cnt--;
+}
+
+/**
+ * xge_hal_ring_dtr_post_post_wmb - Complete posting with a write barrier.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Same as xge_hal_ring_dtr_post_post(), but issues a write memory barrier
+ * before transferring ownership of the descriptor to the device.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
+#endif
+ /* Do memory barrier before changing the ownership */
+ xge_os_wmb();
+
+ /* do POST */
+ rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ priv = __hal_ring_rxd_priv(ring, rxdp);
+ xge_os_dma_sync(ring->channel.pdev,
+ priv->dma_handle, priv->dma_addr,
+ priv->dma_offset, ring->rxd_size,
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+
+ if (ring->channel.usage_cnt > 0)
+ ring->channel.usage_cnt--;
+
+ xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d",
+ (xge_hal_ring_rxd_1_t *)dtrh,
+ rxdp->control_1, ring->channel.usage_cnt);
+
+}
+
+/**
+ * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
+ *
+ * Post descriptor on the 'ring' type channel.
+ * Prior to posting the descriptor should be filled in accordance with
+ * Host/Xframe interface specification for a given service (LL, etc.).
+ *
+ * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
+ * Usage: See ex_post_all_rx{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+ xge_hal_ring_dtr_pre_post(channelh, dtrh);
+ xge_hal_ring_dtr_post_post(channelh, dtrh);
+}
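+
+/*
+ * A minimal ULD-side sketch of the post path (hypothetical code, not part
+ * of the driver; the 1-buffer setter and the dma_addr/buf_size values are
+ * assumptions for illustration):
+ *
+ *	xge_hal_dtr_h dtrh;
+ *
+ *	if (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
+ *		xge_hal_ring_dtr_1b_set(dtrh, dma_addr, buf_size);
+ *		xge_hal_ring_dtr_post(channelh, dtrh);
+ *	}
+ */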
+
+/**
+ * xge_hal_ring_dtr_next_completed - Get the _next_ completed
+ * descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle. Returned by HAL.
+ * @t_code: Transfer code, as per Xframe User Guide,
+ * Receive Descriptor Format. Returned by HAL.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
+ * the upper-layer driver (ULD) of new completed descriptors. After that
+ * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the
+ * remaining completions (the very first completion is passed by HAL via
+ * xge_hal_channel_callback_f).
+ *
+ * Implementation-wise, the upper-layer driver is free to call
+ * xge_hal_ring_dtr_next_completed either immediately from inside the
+ * channel callback, or in a deferred fashion and separate (from HAL)
+ * context.
+ *
+ * Non-zero @t_code means failure to fill-in receive buffer(s)
+ * of the descriptor.
+ * For instance, parity error detected during the data transfer.
+ * In this case Xframe will complete the descriptor and indicate
+ * for the host that the received data is not to be used.
+ * For details please refer to Xframe User Guide.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: xge_hal_channel_callback_f{},
+ * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
+ * Usage: See ex_rx_compl{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
+ u8 *t_code)
+{
+ xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
+#endif
+
+ __hal_channel_dtr_try_complete(ring, dtrh);
+ rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
+ if (rxdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+	/* Note: 24 bytes at most covers:
+	 * - Control_3 in case of 5-buffer mode
+	 * - Control_1 and Control_2
+	 *
+	 * This is the only length that needs to be invalidated
+	 * for this type of channel. */
+ priv = __hal_ring_rxd_priv(ring, rxdp);
+ xge_os_dma_sync(ring->channel.pdev,
+ priv->dma_handle, priv->dma_addr,
+ priv->dma_offset, 24,
+ XGE_OS_DMA_DIR_FROMDEVICE);
+#endif
+
+ /* check whether it is not the end */
+ if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
+ !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
+#ifndef XGE_HAL_IRQ_POLLING
+ if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
+ /* reset it. since we don't want to return
+ * garbage to the ULD */
+ *dtrh = 0;
+ return XGE_HAL_COMPLETIONS_REMAIN;
+ }
+#endif
+
+#ifdef XGE_DEBUG_ASSERT
+#if defined(XGE_HAL_USE_5B_MODE)
+#if !defined(XGE_OS_PLATFORM_64BIT)
+ if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_assert(((xge_hal_ring_rxd_5_t *)
+ rxdp)->host_control!=0);
+ }
+#endif
+
+#else
+ xge_assert(rxdp->host_control!=0);
+#endif
+#endif
+
+ __hal_channel_dtr_complete(ring);
+
+ *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);
+
+ /* see XGE_HAL_SET_RXD_T_CODE() above.. */
+ xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);
+
+ xge_debug_ring(XGE_TRACE,
+ "compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT,
+ ((xge_hal_channel_t*)ring)->compl_index,
+ ((xge_hal_channel_t*)ring)->post_qid, *t_code,
+ (unsigned long long)(ulong_t)rxdp);
+
+ ring->channel.usage_cnt++;
+ if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
+ ring->channel.stats.usage_max = ring->channel.usage_cnt;
+
+ return XGE_HAL_OK;
+ }
+
+ /* reset it. since we don't want to return
+ * garbage to the ULD */
+ *dtrh = 0;
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
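+
+/*
+ * A minimal completion-loop sketch in the spirit of ex_rx_compl{}
+ * (hypothetical ULD code; process_rx_pkt() is a placeholder):
+ *
+ *	xge_hal_dtr_h dtrh;
+ *	u8 t_code;
+ *
+ *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
+ *	    &t_code) == XGE_HAL_OK) {
+ *		if (t_code == 0)
+ *			process_rx_pkt(dtrh);
+ *		xge_hal_ring_dtr_free(channelh, dtrh);
+ *	}
+ */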
+
+/**
+ * xge_hal_ring_dtr_free - Free descriptor.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (xge_hal_ring_dtr_reserve);
+ *
+ * - posted (xge_hal_ring_dtr_post);
+ *
+ * - completed (xge_hal_ring_dtr_next_completed);
+ *
+ * - and recycled again (xge_hal_ring_dtr_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
+ * Usage: See ex_rx_compl{}.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+{
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
+ unsigned long flags;
+#endif
+
+#if defined(XGE_HAL_RX_MULTI_FREE)
+ xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
+#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
+ xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
+ flags);
+#endif
+
+ __hal_channel_dtr_free(channelh, dtrh);
+#if defined(XGE_OS_MEMORY_CHECK)
+ __hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtrh)->allocated = 0;
+#endif
+
+#if defined(XGE_HAL_RX_MULTI_FREE)
+ xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
+#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
+ flags);
+#endif
+}
+
+/**
+ * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed
+ * @channelh: Channel handle.
+ *
+ * Checks if the _next_ completed descriptor is in host memory.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
+{
+ xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_dtr_h dtrh;
+
+ __hal_channel_dtr_try_complete(ring, &dtrh);
+ rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ if (rxdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+ /* check whether it is not the end */
+ if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
+ !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
+
+#ifdef XGE_DEBUG_ASSERT
+#if defined(XGE_HAL_USE_5B_MODE)
+#if !defined(XGE_OS_PLATFORM_64BIT)
+ if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_assert(((xge_hal_ring_rxd_5_t *)
+ rxdp)->host_control!=0);
+ }
+#endif
+
+#else
+ xge_assert(rxdp->host_control!=0);
+#endif
+#endif
+ return XGE_HAL_OK;
+ }
+
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-ring.c b/sys/dev/nxge/xgehal/xgehal-ring.c
new file mode 100644
index 0000000..84e8f9b
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-ring.c
@@ -0,0 +1,669 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-ring.c
+ *
+ * Description: Rx ring object implementation
+ *
+ * Created: 10 May 2004
+ */
+
+#include <dev/nxge/include/xgehal-ring.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+static ptrdiff_t
+__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
+ void *item)
+{
+ int memblock_idx;
+ void *memblock;
+
+ /* get owner memblock index */
+ memblock_idx = __hal_ring_block_memblock_idx(item);
+
+ /* get owner memblock by memblock index */
+ memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
+
+ return (char*)item - (char*)memblock;
+}
+#endif
+
+static dma_addr_t
+__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
+ pci_dma_h *dma_handle)
+{
+ int memblock_idx;
+ void *memblock;
+ xge_hal_mempool_dma_t *memblock_dma_object;
+ ptrdiff_t dma_item_offset;
+
+ /* get owner memblock index */
+ memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);
+
+ /* get owner memblock by memblock index */
+ memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
+ memblock_idx);
+
+ /* get memblock DMA object by memblock index */
+ memblock_dma_object =
+ __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
+ memblock_idx);
+
+ /* calculate offset in the memblock of this item */
+ dma_item_offset = (char*)item - (char*)memblock;
+
+ *dma_handle = memblock_dma_object->handle;
+
+ return memblock_dma_object->addr + dma_item_offset;
+}
+
+static void
+__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
+ xge_hal_ring_t *ring, int from, int to)
+{
+ xge_hal_ring_block_t *to_item, *from_item;
+ dma_addr_t to_dma, from_dma;
+ pci_dma_h to_dma_handle, from_dma_handle;
+
+ /* get "from" RxD block */
+ from_item = (xge_hal_ring_block_t *)
+ __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
+ xge_assert(from_item);
+
+ /* get "to" RxD block */
+ to_item = (xge_hal_ring_block_t *)
+ __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
+ xge_assert(to_item);
+
+ /* return address of the beginning of previous RxD block */
+ to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);
+
+ /* set next pointer for this RxD block to point on
+ * previous item's DMA start address */
+ __hal_ring_block_next_pointer_set(from_item, to_dma);
+
+ /* return "from" RxD block's DMA start address */
+ from_dma =
+ __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ /* we must sync "from" RxD block, so hardware will see it */
+ xge_os_dma_sync(ring->channel.pdev,
+ from_dma_handle,
+ from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
+ __hal_ring_item_dma_offset(mempoolh, from_item) +
+ XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
+ sizeof(u64),
+ XGE_OS_DMA_DIR_TODEVICE);
+#endif
+
+ xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
+ from, (unsigned long long)from_dma, to,
+ (unsigned long long)to_dma);
+}
+
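+/*
+ * Layout note: __hal_ring_mempool_item_alloc() below links each RxD
+ * block to the previous one (index -> index-1) and links block 0 back
+ * to the last block, so the blocks form a circular chain.  That is why
+ * __hal_ring_prc_enable() programs the PRC with the address of the
+ * *last* block.
+ */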
+static xge_hal_status_e
+__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
+ void *memblock,
+ int memblock_index,
+ xge_hal_mempool_dma_t *dma_object,
+ void *item,
+ int index,
+ int is_last,
+ void *userdata)
+{
+ int i;
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;
+
+ xge_assert(item);
+ xge_assert(ring);
+
+
+ /* format rxds array */
+ for (i=ring->rxds_per_block-1; i>=0; i--) {
+ void *rxdblock_priv;
+ xge_hal_ring_rxd_priv_t *rxd_priv;
+ xge_hal_ring_rxd_1_t *rxdp;
+ int reserve_index = index * ring->rxds_per_block + i;
+ int memblock_item_idx;
+
+ ring->reserved_rxds_arr[reserve_index] = (char *)item +
+ (ring->rxds_per_block - 1 - i) * ring->rxd_size;
+
+ /* Note: memblock_item_idx is index of the item within
+ * the memblock. For instance, in case of three RxD-blocks
+ * per memblock this value can be 0,1 or 2. */
+ rxdblock_priv =
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index, item,
+ &memblock_item_idx);
+ rxdp = (xge_hal_ring_rxd_1_t *)
+ ring->reserved_rxds_arr[reserve_index];
+ rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
+ ((char*)rxdblock_priv + ring->rxd_priv_size * i);
+
+ /* pre-format per-RxD Ring's private */
+ rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
+ rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
+ rxd_priv->dma_handle = dma_object->handle;
+#ifdef XGE_DEBUG_ASSERT
+ rxd_priv->dma_object = dma_object;
+#endif
+
+ /* pre-format Host_Control */
+#if defined(XGE_HAL_USE_5B_MODE)
+ if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
+#if defined(XGE_OS_PLATFORM_64BIT)
+ xge_assert(memblock_index <= 0xFFFF);
+ xge_assert(i <= 0xFFFF);
+ /* store memblock's index */
+ rxdp_5->host_control = (u32)memblock_index << 16;
+ /* store index of memblock's private */
+ rxdp_5->host_control |= (u32)(memblock_item_idx *
+ ring->rxds_per_block + i);
+#else
+ /* 32-bit case */
+ rxdp_5->host_control = (u32)rxd_priv;
+#endif
+ } else {
+ /* 1b and 3b modes */
+ rxdp->host_control = (u64)(ulong_t)rxd_priv;
+ }
+#else
+ /* 1b and 3b modes */
+ rxdp->host_control = (u64)(ulong_t)rxd_priv;
+#endif
+ }
+
+ __hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);
+
+ if (is_last) {
+ /* link last one with first one */
+ __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
+ }
+
+	if (index > 0) {
+ /* link this RxD block with previous one */
+ __hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
+ }
+
+ return XGE_HAL_OK;
+}
+
+xge_hal_status_e
+__hal_ring_initial_replenish(xge_hal_channel_t *channel,
+ xge_hal_channel_reopen_e reopen)
+{
+ xge_hal_dtr_h dtr;
+
+ while (xge_hal_channel_dtr_count(channel) > 0) {
+ xge_hal_status_e status;
+
+ status = xge_hal_ring_dtr_reserve(channel, &dtr);
+ xge_assert(status == XGE_HAL_OK);
+
+ if (channel->dtr_init) {
+ status = channel->dtr_init(channel,
+ dtr, channel->reserve_length,
+ channel->userdata,
+ reopen);
+ if (status != XGE_HAL_OK) {
+ xge_hal_ring_dtr_free(channel, dtr);
+ xge_hal_channel_abort(channel,
+ XGE_HAL_CHANNEL_OC_NORMAL);
+ return status;
+ }
+ }
+
+ xge_hal_ring_dtr_post(channel, dtr);
+ }
+
+ return XGE_HAL_OK;
+}
+
+xge_hal_status_e
+__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
+{
+ xge_hal_status_e status;
+ xge_hal_device_t *hldev;
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_ring_queue_t *queue;
+
+
+ /* Note: at this point we have channel.devh and channel.pdev
+ * pre-set only! */
+
+ hldev = (xge_hal_device_t *)ring->channel.devh;
+ ring->config = &hldev->config.ring;
+ queue = &ring->config->queue[attr->post_qid];
+ ring->indicate_max_pkts = queue->indicate_max_pkts;
+ ring->buffer_mode = queue->buffer_mode;
+
+ xge_assert(queue->configured);
+
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
+ xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
+#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
+#endif
+#if defined(XGE_HAL_RX_MULTI_POST)
+ xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
+#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
+#endif
+
+ ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
+ ring->rxd_priv_size =
+ sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;
+
+ /* how many RxDs can fit into one block. Depends on configured
+ * buffer_mode. */
+ ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);
+
+ /* calculate actual RxD block private size */
+ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+
+ ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
+ sizeof(void*) * queue->max * ring->rxds_per_block);
+
+ if (ring->reserved_rxds_arr == NULL) {
+ __hal_ring_close(channelh);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ ring->mempool = __hal_mempool_create(
+ hldev->pdev,
+ ring->config->memblock_size,
+ XGE_HAL_RING_RXDBLOCK_SIZE,
+ ring->rxdblock_priv_size,
+ queue->initial, queue->max,
+ __hal_ring_mempool_item_alloc,
+ NULL, /* nothing to free */
+ ring);
+ if (ring->mempool == NULL) {
+ __hal_ring_close(channelh);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ status = __hal_channel_initialize(channelh,
+ attr,
+ ring->reserved_rxds_arr,
+ queue->initial * ring->rxds_per_block,
+ queue->max * ring->rxds_per_block,
+ 0 /* no threshold for ring! */);
+ if (status != XGE_HAL_OK) {
+ __hal_ring_close(channelh);
+ return status;
+ }
+
+ /* sanity check that everything formatted ok */
+ xge_assert(ring->reserved_rxds_arr[0] ==
+ (char *)ring->mempool->items_arr[0] +
+ (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));
+
+ /* Note:
+ * Specifying dtr_init callback means two things:
+ * 1) dtrs need to be initialized by ULD at channel-open time;
+ * 2) dtrs need to be posted at channel-open time
+ * (that's what the initial_replenish() below does)
+ * Currently we don't have a case when the 1) is done without the 2).
+ */
+ if (ring->channel.dtr_init) {
+		status = __hal_ring_initial_replenish(
+			(xge_hal_channel_t *) channelh,
+			XGE_HAL_CHANNEL_OC_NORMAL);
+		if (status != XGE_HAL_OK) {
+ __hal_ring_close(channelh);
+ return status;
+ }
+ }
+
+ /* initial replenish will increment the counter in its post() routine,
+ * we have to reset it */
+ ring->channel.usage_cnt = 0;
+
+ return XGE_HAL_OK;
+}
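+
+/*
+ * A minimal sketch of a ULD dtr_init callback, as invoked by
+ * __hal_ring_initial_replenish() during __hal_ring_open() (hypothetical
+ * code; the buffer allocation and the 1-buffer setter are assumptions
+ * for illustration):
+ *
+ *	static xge_hal_status_e
+ *	uld_rx_dtr_init(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+ *	    int index, void *userdata, xge_hal_channel_reopen_e reopen)
+ *	{
+ *		... allocate and DMA-map a receive buffer ...
+ *		xge_hal_ring_dtr_1b_set(dtrh, dma_addr, buf_size);
+ *		return XGE_HAL_OK;
+ *	}
+ *
+ * Returning a status other than XGE_HAL_OK makes the replenish loop
+ * free the descriptor and abort the channel open.
+ */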
+
+void
+__hal_ring_close(xge_hal_channel_h channelh)
+{
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_ring_queue_t *queue;
+#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
+ defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
+#endif
+
+ xge_assert(ring->channel.pdev);
+
+ queue = &ring->config->queue[ring->channel.post_qid];
+
+ if (ring->mempool) {
+ __hal_mempool_destroy(ring->mempool);
+ }
+
+ if (ring->reserved_rxds_arr) {
+ xge_os_free(ring->channel.pdev,
+ ring->reserved_rxds_arr,
+ sizeof(void*) * queue->max * ring->rxds_per_block);
+ }
+
+ __hal_channel_terminate(channelh);
+
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
+ xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
+#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+ xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
+#endif
+#if defined(XGE_HAL_RX_MULTI_POST)
+ xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
+#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
+ xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
+#endif
+}
+
+void
+__hal_ring_prc_enable(xge_hal_channel_h channelh)
+{
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
+ xge_hal_pci_bar0_t *bar0;
+ u64 val64;
+ void *first_block;
+ int block_num;
+ xge_hal_ring_queue_t *queue;
+ pci_dma_h dma_handle;
+
+ xge_assert(ring);
+ xge_assert(ring->channel.pdev);
+ bar0 = (xge_hal_pci_bar0_t *) (void *)
+ ((xge_hal_device_t *)ring->channel.devh)->bar0;
+
+ queue = &ring->config->queue[ring->channel.post_qid];
+ xge_assert(queue->buffer_mode == 1 ||
+ queue->buffer_mode == 3 ||
+ queue->buffer_mode == 5);
+
+ /* last block in fact becomes first. This is just the way it
+ * is filled up and linked by item_alloc() */
+
+ block_num = queue->initial;
+ first_block = __hal_mempool_item(ring->mempool, block_num - 1);
+ val64 = __hal_ring_item_dma_addr(ring->mempool,
+ first_block, &dma_handle);
+ xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
+ val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);
+
+ xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
+ ring->channel.post_qid, (unsigned long long)val64);
+
+ val64 = xge_os_pio_mem_read64(ring->channel.pdev,
+ ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
+ !queue->rth_en) {
+ val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
+ }
+ val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;
+
+ val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
+ val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
+ val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
+ (hldev->config.pci_freq_mherz * queue->backoff_interval_us));
+
+ /* Beware: no snoop by the bridge if (no_snoop_bits) */
+ val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);
+
+ /* Herc: always use group_reads */
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;
+
+ if (hldev->config.bimodal_interrupts)
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;
+
+ xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
+ val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
+
+ /* Configure Receive Protocol Assist */
+ val64 = xge_os_pio_mem_read64(ring->channel.pdev,
+ ring->channel.regh0, &bar0->rx_pa_cfg);
+ val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
+ val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
+ /* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
+ val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
+ val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);
+
+ xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
+ val64, &bar0->rx_pa_cfg);
+
+ xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
+ ring->channel.post_qid, queue->buffer_mode);
+}
+
+void
+__hal_ring_prc_disable(xge_hal_channel_h channelh)
+{
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_pci_bar0_t *bar0;
+ u64 val64;
+
+ xge_assert(ring);
+ xge_assert(ring->channel.pdev);
+ bar0 = (xge_hal_pci_bar0_t *) (void *)
+ ((xge_hal_device_t *)ring->channel.devh)->bar0;
+
+ val64 = xge_os_pio_mem_read64(ring->channel.pdev,
+ ring->channel.regh0,
+ &bar0->prc_ctrl_n[ring->channel.post_qid]);
+ val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
+ xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
+ val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
+}
+
+void
+__hal_ring_hw_initialize(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int i, j;
+
+	/* Rx DMA initialization. */
+
+ val64 = 0;
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ val64 |= vBIT(hldev->config.ring.queue[i].priority,
+ (5 + (i * 8)), 3);
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_queue_priority);
+ xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ /* Configuring ring queues according to per-ring configuration */
+ val64 = 0;
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_queue_cfg);
+ xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ if (!hldev->config.rts_qos_en &&
+ !hldev->config.rts_port_en &&
+ !hldev->config.rts_mac_en) {
+
+ /*
+ * Activate default (QoS-based) Rx steering
+ */
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_qos_steering);
+	for (j = 0; j < 8 /* QoS max */; j++) {
+		for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (!hldev->config.ring.queue[i].rth_en)
+ val64 |= (BIT(i) >> (j*8));
+ }
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_qos_steering);
+ xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ }
+
+ /* Note: If a queue does not exist, it should be assigned a maximum
+ * length of zero. Otherwise, packet loss could occur.
+ * P. 4-4 User guide.
+ *
+ * All configured rings will be properly set at device open time
+ * by utilizing device_mtu_set() API call. */
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (hldev->config.ring.queue[i].configured)
+ continue;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
+ &bar0->rts_frm_len_n[i]);
+ }
+
+#ifdef XGE_HAL_HERC_EMULATION
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
+ val64 |= 0x0000000000010000;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ ((u8 *)bar0 + 0x2e60));
+
+ val64 |= 0x003a000000000000;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
+ xge_os_mdelay(2000);
+#endif
+
+ /* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_rldram_mrs);
+ val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
+ XGE_HAL_MC_RLDRAM_MRS_ENABLE;
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
+ &bar0->mc_rldram_mrs);
+ xge_os_wmb();
+ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
+ &bar0->mc_rldram_mrs);
+
+	/* The RLDRAM initialization procedure requires 500us to complete */
+ xge_os_mdelay(1);
+
+ /* Temporary fixes for Herc RLDRAM */
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_ref_per_herc);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mc_rldram_mrs_herc);
+ xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ val64 = 0x0003570003010300ULL;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mc_rldram_mrs_herc);
+
+ xge_os_mdelay(1);
+ }
+
+ /*
+ * Assign MSI-X vectors
+ */
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ xge_list_t *item;
+ xge_hal_channel_t *channel = NULL;
+
+ if (!hldev->config.ring.queue[i].configured ||
+ !hldev->config.ring.queue[i].intr_vector ||
+		    hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
+ continue;
+
+ /* find channel */
+ xge_list_for_each(item, &hldev->free_channels) {
+ xge_hal_channel_t *tmp;
+ tmp = xge_container_of(item, xge_hal_channel_t,
+ item);
+ if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
+ tmp->post_qid == i) {
+ channel = tmp;
+ break;
+ }
+ }
+
+ if (channel) {
+ xge_hal_channel_msix_set(channel,
+ hldev->config.ring.queue[i].intr_vector);
+ }
+ }
+
+ xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
+}
+
+void
+__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
+{
+ int i;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (hldev->config.ring.queue[i].max_frm_len !=
+ XGE_HAL_RING_USE_MTU) {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_MAC_RTS_FRM_LEN_SET(
+ hldev->config.ring.queue[i].max_frm_len),
+ &bar0->rts_frm_len_n[i]);
+ } else {
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
+ &bar0->rts_frm_len_n[i]);
+ }
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
+ &bar0->rmac_max_pyld_len);
+}
diff --git a/sys/dev/nxge/xgehal/xgehal-stats.c b/sys/dev/nxge/xgehal/xgehal-stats.c
new file mode 100644
index 0000000..2755ebb
--- /dev/null
+++ b/sys/dev/nxge/xgehal/xgehal-stats.c
@@ -0,0 +1,1019 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgehal-stats.c
+ *
+ * Description: statistics object implementation
+ *
+ * Created: 2 June 2004
+ */
+
+#include <dev/nxge/include/xgehal-stats.h>
+#include <dev/nxge/include/xgehal-device.h>
+
+/*
+ * __hal_stats_initialize
+ * @stats: xge_hal_stats_t structure that contains, in particular,
+ * Xframe hw stat counters.
+ * @devh: HAL device handle.
+ *
+ * Initialize per-device statistics object.
+ * See also: xge_hal_stats_getinfo(), xge_hal_status_e{}.
+ */
+xge_hal_status_e
+__hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
+{
+ int dma_flags;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+
+ xge_assert(!stats->is_initialized);
+
+ dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
+#ifdef XGE_HAL_DMA_STATS_CONSISTENT
+ dma_flags |= XGE_OS_DMA_CONSISTENT;
+#else
+ dma_flags |= XGE_OS_DMA_STREAMING;
+#endif
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ stats->hw_info =
+ (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
+ hldev->pdev,
+ sizeof(xge_hal_stats_hw_info_t),
+ dma_flags,
+ &stats->hw_info_dmah,
+ &stats->hw_info_dma_acch);
+
+ if (stats->hw_info == NULL) {
+ xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ xge_os_memzero(stats->hw_info,
+ sizeof(xge_hal_stats_hw_info_t));
+ xge_os_memzero(&stats->hw_info_saved,
+ sizeof(xge_hal_stats_hw_info_t));
+ xge_os_memzero(&stats->hw_info_latest,
+ sizeof(xge_hal_stats_hw_info_t));
+
+ stats->dma_addr = xge_os_dma_map(hldev->pdev,
+ stats->hw_info_dmah,
+ stats->hw_info,
+ sizeof(xge_hal_stats_hw_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE,
+ XGE_OS_DMA_CACHELINE_ALIGNED |
+#ifdef XGE_HAL_DMA_STATS_CONSISTENT
+ XGE_OS_DMA_CONSISTENT
+#else
+ XGE_OS_DMA_STREAMING
+#endif
+ );
+ if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
+ xge_debug_stats(XGE_ERR,
+ "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
+ (unsigned long long)(ulong_t)stats->hw_info);
+ xge_os_dma_free(hldev->pdev,
+ stats->hw_info,
+ sizeof(xge_hal_stats_hw_info_t),
+ &stats->hw_info_dma_acch,
+ &stats->hw_info_dmah);
+ return XGE_HAL_ERR_OUT_OF_MAPPING;
+ }
+ }
+ else {
+ stats->pcim_info_saved =
+ (xge_hal_stats_pcim_info_t *)xge_os_malloc(
+ hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
+ if (stats->pcim_info_saved == NULL) {
+ xge_debug_stats(XGE_ERR, "%s", "can not alloc");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ stats->pcim_info_latest =
+ (xge_hal_stats_pcim_info_t *)xge_os_malloc(
+ hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
+ if (stats->pcim_info_latest == NULL) {
+ xge_os_free(hldev->pdev, stats->pcim_info_saved,
+ sizeof(xge_hal_stats_pcim_info_t));
+ xge_debug_stats(XGE_ERR, "%s", "can not alloc");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ stats->pcim_info =
+ (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
+ hldev->pdev,
+ sizeof(xge_hal_stats_pcim_info_t),
+ dma_flags,
+ &stats->hw_info_dmah,
+ &stats->hw_info_dma_acch);
+
+ if (stats->pcim_info == NULL) {
+ xge_os_free(hldev->pdev, stats->pcim_info_saved,
+ sizeof(xge_hal_stats_pcim_info_t));
+ xge_os_free(hldev->pdev, stats->pcim_info_latest,
+ sizeof(xge_hal_stats_pcim_info_t));
+ xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+
+ xge_os_memzero(stats->pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t));
+ xge_os_memzero(stats->pcim_info_saved,
+ sizeof(xge_hal_stats_pcim_info_t));
+ xge_os_memzero(stats->pcim_info_latest,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ stats->dma_addr = xge_os_dma_map(hldev->pdev,
+ stats->hw_info_dmah,
+ stats->pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE,
+ XGE_OS_DMA_CACHELINE_ALIGNED |
+#ifdef XGE_HAL_DMA_STATS_CONSISTENT
+ XGE_OS_DMA_CONSISTENT
+#else
+ XGE_OS_DMA_STREAMING
+#endif
+ );
+ if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
+ xge_debug_stats(XGE_ERR,
+ "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
+ (unsigned long long)(ulong_t)stats->hw_info);
+
+ xge_os_dma_free(hldev->pdev,
+ stats->pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t),
+ &stats->hw_info_dma_acch,
+ &stats->hw_info_dmah);
+
+ xge_os_free(hldev->pdev, stats->pcim_info_saved,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ xge_os_free(hldev->pdev, stats->pcim_info_latest,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ return XGE_HAL_ERR_OUT_OF_MAPPING;
+ }
+ }
+ stats->devh = devh;
+ xge_os_memzero(&stats->sw_dev_info_stats,
+ sizeof(xge_hal_stats_device_info_t));
+
+ stats->is_initialized = 1;
+
+ return XGE_HAL_OK;
+}
+
+static void
+__hal_stats_save (xge_hal_stats_t *stats)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)stats->devh;
+
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ xge_hal_stats_hw_info_t *latest;
+
+ (void) xge_hal_stats_hw(stats->devh, &latest);
+
+ xge_os_memcpy(&stats->hw_info_saved, stats->hw_info,
+ sizeof(xge_hal_stats_hw_info_t));
+ } else {
+ xge_hal_stats_pcim_info_t *latest;
+
+ (void) xge_hal_stats_pcim(stats->devh, &latest);
+
+ xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t));
+ }
+}
+
+/*
+ * __hal_stats_disable
+ * @stats: xge_hal_stats_t structure that contains, in particular,
+ * Xframe hw stat counters.
+ *
+ * Ask device to stop collecting stats.
+ * See also: xge_hal_stats_getinfo().
+ */
+void
+__hal_stats_disable (xge_hal_stats_t *stats)
+{
+ xge_hal_device_t *hldev;
+ xge_hal_pci_bar0_t *bar0;
+ u64 val64;
+
+ xge_assert(stats->hw_info);
+
+ hldev = (xge_hal_device_t*)stats->devh;
+ xge_assert(hldev);
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->stat_cfg);
+ val64 &= ~XGE_HAL_STAT_CFG_STAT_EN;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->stat_cfg);
+ /* flush the write */
+ (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->stat_cfg);
+
+ xge_debug_stats(XGE_TRACE, "stats disabled at 0x"XGE_OS_LLXFMT,
+ (unsigned long long)stats->dma_addr);
+
+ stats->is_enabled = 0;
+}
+
+/*
+ * __hal_stats_terminate
+ * @stats: xge_hal_stats_t structure that contains, in particular,
+ * Xframe hw stat counters.
+ * Terminate per-device statistics object.
+ */
+void
+__hal_stats_terminate (xge_hal_stats_t *stats)
+{
+ xge_hal_device_t *hldev;
+
+ xge_assert(stats->hw_info);
+
+ hldev = (xge_hal_device_t*)stats->devh;
+ xge_assert(hldev);
+ xge_assert(stats->is_initialized);
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ xge_os_dma_unmap(hldev->pdev,
+ stats->hw_info_dmah,
+ stats->dma_addr,
+ sizeof(xge_hal_stats_hw_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE);
+
+ xge_os_dma_free(hldev->pdev,
+ stats->hw_info,
+ sizeof(xge_hal_stats_hw_info_t),
+ &stats->hw_info_dma_acch,
+ &stats->hw_info_dmah);
+ } else {
+ xge_os_dma_unmap(hldev->pdev,
+ stats->hw_info_dmah,
+ stats->dma_addr,
+ sizeof(xge_hal_stats_pcim_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE);
+
+ xge_os_dma_free(hldev->pdev,
+ stats->pcim_info,
+ sizeof(xge_hal_stats_pcim_info_t),
+ &stats->hw_info_dma_acch,
+ &stats->hw_info_dmah);
+
+ xge_os_free(hldev->pdev, stats->pcim_info_saved,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ xge_os_free(hldev->pdev, stats->pcim_info_latest,
+ sizeof(xge_hal_stats_pcim_info_t));
+
+ }
+
+ stats->is_initialized = 0;
+ stats->is_enabled = 0;
+}
+
+
+
+/*
+ * __hal_stats_enable
+ * @stats: xge_hal_stats_t structure that contains, in particular,
+ * Xframe hw stat counters.
+ *
+ * Ask device to start collecting stats.
+ * See also: xge_hal_stats_getinfo().
+ */
+void
+__hal_stats_enable (xge_hal_stats_t *stats)
+{
+ xge_hal_device_t *hldev;
+ xge_hal_pci_bar0_t *bar0;
+ u64 val64;
+ unsigned int refresh_time_pci_clocks;
+
+ xge_assert(stats->hw_info);
+
+ hldev = (xge_hal_device_t*)stats->devh;
+ xge_assert(hldev);
+
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ /* enable statistics
+ * For Titan stat_addr offset == 0x09d8, and stat_cfg offset == 0x09d0
+ */
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ stats->dma_addr, &bar0->stat_addr);
+
+ refresh_time_pci_clocks = XGE_HAL_XENA_PER_SEC *
+ hldev->config.stats_refresh_time_sec;
+ refresh_time_pci_clocks =
+ __hal_fix_time_ival_herc(hldev,
+ refresh_time_pci_clocks);
+
+#ifdef XGE_HAL_HERC_EMULATION
+ /*
+ * The clocks in the emulator are running ~1000 times slower
+ * than real world, so the stats transfer will occur ~1000
+ * times less frequent. STAT_CFG.STAT_TRSF_PERIOD should be
+ * set to 0x20C for Hercules emulation (stats transferred
+ * every 0.5 sec).
+ */
+
+ val64 = (0x20C | XGE_HAL_STAT_CFG_STAT_RO |
+ XGE_HAL_STAT_CFG_STAT_EN);
+#else
+ val64 = XGE_HAL_SET_UPDT_PERIOD(refresh_time_pci_clocks) |
+ XGE_HAL_STAT_CFG_STAT_RO |
+ XGE_HAL_STAT_CFG_STAT_EN;
+#endif
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->stat_cfg);
+
+ xge_debug_stats(XGE_TRACE, "stats enabled at 0x"XGE_OS_LLXFMT,
+ (unsigned long long)stats->dma_addr);
+
+ stats->is_enabled = 1;
+}
+
+/*
+ * __hal_stats_pcim_update_latest - Update hw PCIM stats counters, based on the
+ * real hardware maintained counters and the stored "reset" values.
+ */
+static void
+__hal_stats_pcim_update_latest(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ int i;
+
+#define set_latest_stat_link_cnt(_link, _p) \
+ hldev->stats.pcim_info_latest->link_info[_link]._p = \
+ ((hldev->stats.pcim_info->link_info[_link]._p >= \
+ hldev->stats.pcim_info_saved->link_info[_link]._p) ? \
+ hldev->stats.pcim_info->link_info[_link]._p - \
+ hldev->stats.pcim_info_saved->link_info[_link]._p : \
+ ((-1) - hldev->stats.pcim_info_saved->link_info[_link]._p) + \
+ hldev->stats.pcim_info->link_info[_link]._p)
+
+
+#define set_latest_stat_aggr_cnt(_aggr, _p) \
+ hldev->stats.pcim_info_latest->aggr_info[_aggr]._p = \
+ ((hldev->stats.pcim_info->aggr_info[_aggr]._p >= \
+ hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) ? \
+ hldev->stats.pcim_info->aggr_info[_aggr]._p - \
+ hldev->stats.pcim_info_saved->aggr_info[_aggr]._p : \
+ ((-1) - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) + \
+ hldev->stats.pcim_info->aggr_info[_aggr]._p)
+
+
+ for (i = 0; i < XGE_HAL_MAC_LINKS; i++) {
+ set_latest_stat_link_cnt(i, tx_frms);
+ set_latest_stat_link_cnt(i, tx_ttl_eth_octets);
+ set_latest_stat_link_cnt(i, tx_data_octets);
+ set_latest_stat_link_cnt(i, tx_mcst_frms);
+ set_latest_stat_link_cnt(i, tx_bcst_frms);
+ set_latest_stat_link_cnt(i, tx_ucst_frms);
+ set_latest_stat_link_cnt(i, tx_tagged_frms);
+ set_latest_stat_link_cnt(i, tx_vld_ip);
+ set_latest_stat_link_cnt(i, tx_vld_ip_octets);
+ set_latest_stat_link_cnt(i, tx_icmp);
+ set_latest_stat_link_cnt(i, tx_tcp);
+ set_latest_stat_link_cnt(i, tx_rst_tcp);
+ set_latest_stat_link_cnt(i, tx_udp);
+ set_latest_stat_link_cnt(i, tx_unknown_protocol);
+ set_latest_stat_link_cnt(i, tx_parse_error);
+ set_latest_stat_link_cnt(i, tx_pause_ctrl_frms);
+ set_latest_stat_link_cnt(i, tx_lacpdu_frms);
+ set_latest_stat_link_cnt(i, tx_marker_pdu_frms);
+ set_latest_stat_link_cnt(i, tx_marker_resp_pdu_frms);
+ set_latest_stat_link_cnt(i, tx_drop_ip);
+ set_latest_stat_link_cnt(i, tx_xgmii_char1_match);
+ set_latest_stat_link_cnt(i, tx_xgmii_char2_match);
+ set_latest_stat_link_cnt(i, tx_xgmii_column1_match);
+ set_latest_stat_link_cnt(i, tx_xgmii_column2_match);
+ set_latest_stat_link_cnt(i, tx_drop_frms);
+ set_latest_stat_link_cnt(i, tx_any_err_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_frms);
+ set_latest_stat_link_cnt(i, rx_vld_frms);
+ set_latest_stat_link_cnt(i, rx_offld_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_eth_octets);
+ set_latest_stat_link_cnt(i, rx_data_octets);
+ set_latest_stat_link_cnt(i, rx_offld_octets);
+ set_latest_stat_link_cnt(i, rx_vld_mcst_frms);
+ set_latest_stat_link_cnt(i, rx_vld_bcst_frms);
+ set_latest_stat_link_cnt(i, rx_accepted_ucst_frms);
+ set_latest_stat_link_cnt(i, rx_accepted_nucst_frms);
+ set_latest_stat_link_cnt(i, rx_tagged_frms);
+ set_latest_stat_link_cnt(i, rx_long_frms);
+ set_latest_stat_link_cnt(i, rx_usized_frms);
+ set_latest_stat_link_cnt(i, rx_osized_frms);
+ set_latest_stat_link_cnt(i, rx_frag_frms);
+ set_latest_stat_link_cnt(i, rx_jabber_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_64_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_65_127_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_128_255_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_256_511_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_512_1023_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_1024_1518_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_1519_4095_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_40956_8191_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_8192_max_frms);
+ set_latest_stat_link_cnt(i, rx_ttl_gt_max_frms);
+ set_latest_stat_link_cnt(i, rx_ip);
+ set_latest_stat_link_cnt(i, rx_ip_octets);
+ set_latest_stat_link_cnt(i, rx_hdr_err_ip);
+ set_latest_stat_link_cnt(i, rx_icmp);
+ set_latest_stat_link_cnt(i, rx_tcp);
+ set_latest_stat_link_cnt(i, rx_udp);
+ set_latest_stat_link_cnt(i, rx_err_tcp);
+ set_latest_stat_link_cnt(i, rx_pause_cnt);
+ set_latest_stat_link_cnt(i, rx_pause_ctrl_frms);
+ set_latest_stat_link_cnt(i, rx_unsup_ctrl_frms);
+ set_latest_stat_link_cnt(i, rx_in_rng_len_err_frms);
+ set_latest_stat_link_cnt(i, rx_out_rng_len_err_frms);
+ set_latest_stat_link_cnt(i, rx_drop_frms);
+ set_latest_stat_link_cnt(i, rx_discarded_frms);
+ set_latest_stat_link_cnt(i, rx_drop_ip);
+ set_latest_stat_link_cnt(i, rx_err_drp_udp);
+ set_latest_stat_link_cnt(i, rx_lacpdu_frms);
+ set_latest_stat_link_cnt(i, rx_marker_pdu_frms);
+ set_latest_stat_link_cnt(i, rx_marker_resp_pdu_frms);
+ set_latest_stat_link_cnt(i, rx_unknown_pdu_frms);
+ set_latest_stat_link_cnt(i, rx_illegal_pdu_frms);
+ set_latest_stat_link_cnt(i, rx_fcs_discard);
+ set_latest_stat_link_cnt(i, rx_len_discard);
+ set_latest_stat_link_cnt(i, rx_pf_discard);
+ set_latest_stat_link_cnt(i, rx_trash_discard);
+ set_latest_stat_link_cnt(i, rx_rts_discard);
+ set_latest_stat_link_cnt(i, rx_wol_discard);
+ set_latest_stat_link_cnt(i, rx_red_discard);
+ set_latest_stat_link_cnt(i, rx_ingm_full_discard);
+ set_latest_stat_link_cnt(i, rx_xgmii_data_err_cnt);
+ set_latest_stat_link_cnt(i, rx_xgmii_ctrl_err_cnt);
+ set_latest_stat_link_cnt(i, rx_xgmii_err_sym);
+ set_latest_stat_link_cnt(i, rx_xgmii_char1_match);
+ set_latest_stat_link_cnt(i, rx_xgmii_char2_match);
+ set_latest_stat_link_cnt(i, rx_xgmii_column1_match);
+ set_latest_stat_link_cnt(i, rx_xgmii_column2_match);
+ set_latest_stat_link_cnt(i, rx_local_fault);
+ set_latest_stat_link_cnt(i, rx_remote_fault);
+ set_latest_stat_link_cnt(i, rx_queue_full);
+ }
+
+ for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) {
+ set_latest_stat_aggr_cnt(i, tx_frms);
+ set_latest_stat_aggr_cnt(i, tx_mcst_frms);
+ set_latest_stat_aggr_cnt(i, tx_bcst_frms);
+ set_latest_stat_aggr_cnt(i, tx_discarded_frms);
+ set_latest_stat_aggr_cnt(i, tx_errored_frms);
+ set_latest_stat_aggr_cnt(i, rx_frms);
+ set_latest_stat_aggr_cnt(i, rx_data_octets);
+ set_latest_stat_aggr_cnt(i, rx_mcst_frms);
+ set_latest_stat_aggr_cnt(i, rx_bcst_frms);
+ set_latest_stat_aggr_cnt(i, rx_discarded_frms);
+ set_latest_stat_aggr_cnt(i, rx_errored_frms);
+ set_latest_stat_aggr_cnt(i, rx_unknown_protocol_frms);
+ }
+ return;
+}
+
+/*
+ * __hal_stats_update_latest - Update hw stats counters, based on the real
+ * hardware maintained counters and the stored "reset" values.
+ */
+static void
+__hal_stats_update_latest(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+#define set_latest_stat_cnt(_dev, _p) \
+ hldev->stats.hw_info_latest._p = \
+ ((hldev->stats.hw_info->_p >= hldev->stats.hw_info_saved._p) ? \
+ hldev->stats.hw_info->_p - hldev->stats.hw_info_saved._p : \
+ ((-1) - hldev->stats.hw_info_saved._p) + hldev->stats.hw_info->_p)
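+
+/*
+ * The macro above handles counter wraparound: when the live counter is
+ * below the saved snapshot the counter has rolled over, and (-1)
+ * converts to the all-ones maximum of the counter's unsigned type.
+ * For example, for an 8-bit counter with saved == 250 and live == 4
+ * the macro yields (0xFF - 250) + 4 == 9.
+ */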
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) {
+ __hal_stats_pcim_update_latest(devh);
+ return;
+ }
+
+ /* Tx MAC statistics counters. */
+ set_latest_stat_cnt(hldev, tmac_frms);
+ set_latest_stat_cnt(hldev, tmac_data_octets);
+ set_latest_stat_cnt(hldev, tmac_drop_frms);
+ set_latest_stat_cnt(hldev, tmac_mcst_frms);
+ set_latest_stat_cnt(hldev, tmac_bcst_frms);
+ set_latest_stat_cnt(hldev, tmac_pause_ctrl_frms);
+ set_latest_stat_cnt(hldev, tmac_ttl_octets);
+ set_latest_stat_cnt(hldev, tmac_ucst_frms);
+ set_latest_stat_cnt(hldev, tmac_nucst_frms);
+ set_latest_stat_cnt(hldev, tmac_any_err_frms);
+ set_latest_stat_cnt(hldev, tmac_ttl_less_fb_octets);
+ set_latest_stat_cnt(hldev, tmac_vld_ip_octets);
+ set_latest_stat_cnt(hldev, tmac_vld_ip);
+ set_latest_stat_cnt(hldev, tmac_drop_ip);
+ set_latest_stat_cnt(hldev, tmac_icmp);
+ set_latest_stat_cnt(hldev, tmac_rst_tcp);
+ set_latest_stat_cnt(hldev, tmac_tcp);
+ set_latest_stat_cnt(hldev, tmac_udp);
+ set_latest_stat_cnt(hldev, reserved_0);
+
+ /* Rx MAC Statistics counters. */
+ set_latest_stat_cnt(hldev, rmac_vld_frms);
+ set_latest_stat_cnt(hldev, rmac_data_octets);
+ set_latest_stat_cnt(hldev, rmac_fcs_err_frms);
+ set_latest_stat_cnt(hldev, rmac_drop_frms);
+ set_latest_stat_cnt(hldev, rmac_vld_mcst_frms);
+ set_latest_stat_cnt(hldev, rmac_vld_bcst_frms);
+ set_latest_stat_cnt(hldev, rmac_in_rng_len_err_frms);
+ set_latest_stat_cnt(hldev, rmac_out_rng_len_err_frms);
+ set_latest_stat_cnt(hldev, rmac_long_frms);
+ set_latest_stat_cnt(hldev, rmac_pause_ctrl_frms);
+ set_latest_stat_cnt(hldev, rmac_unsup_ctrl_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_octets);
+ set_latest_stat_cnt(hldev, rmac_accepted_ucst_frms);
+ set_latest_stat_cnt(hldev, rmac_accepted_nucst_frms);
+ set_latest_stat_cnt(hldev, rmac_discarded_frms);
+ set_latest_stat_cnt(hldev, rmac_drop_events);
+ set_latest_stat_cnt(hldev, reserved_1);
+ set_latest_stat_cnt(hldev, rmac_ttl_less_fb_octets);
+ set_latest_stat_cnt(hldev, rmac_ttl_frms);
+ set_latest_stat_cnt(hldev, reserved_2);
+ set_latest_stat_cnt(hldev, reserved_3);
+ set_latest_stat_cnt(hldev, rmac_usized_frms);
+ set_latest_stat_cnt(hldev, rmac_osized_frms);
+ set_latest_stat_cnt(hldev, rmac_frag_frms);
+ set_latest_stat_cnt(hldev, rmac_jabber_frms);
+ set_latest_stat_cnt(hldev, reserved_4);
+ set_latest_stat_cnt(hldev, rmac_ttl_64_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_65_127_frms);
+ set_latest_stat_cnt(hldev, reserved_5);
+ set_latest_stat_cnt(hldev, rmac_ttl_128_255_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_256_511_frms);
+ set_latest_stat_cnt(hldev, reserved_6);
+ set_latest_stat_cnt(hldev, rmac_ttl_512_1023_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_1024_1518_frms);
+ set_latest_stat_cnt(hldev, reserved_7);
+ set_latest_stat_cnt(hldev, rmac_ip);
+ set_latest_stat_cnt(hldev, rmac_ip_octets);
+ set_latest_stat_cnt(hldev, rmac_hdr_err_ip);
+ set_latest_stat_cnt(hldev, rmac_drop_ip);
+ set_latest_stat_cnt(hldev, rmac_icmp);
+ set_latest_stat_cnt(hldev, reserved_8);
+ set_latest_stat_cnt(hldev, rmac_tcp);
+ set_latest_stat_cnt(hldev, rmac_udp);
+ set_latest_stat_cnt(hldev, rmac_err_drp_udp);
+ set_latest_stat_cnt(hldev, rmac_xgmii_err_sym);
+ set_latest_stat_cnt(hldev, rmac_frms_q0);
+ set_latest_stat_cnt(hldev, rmac_frms_q1);
+ set_latest_stat_cnt(hldev, rmac_frms_q2);
+ set_latest_stat_cnt(hldev, rmac_frms_q3);
+ set_latest_stat_cnt(hldev, rmac_frms_q4);
+ set_latest_stat_cnt(hldev, rmac_frms_q5);
+ set_latest_stat_cnt(hldev, rmac_frms_q6);
+ set_latest_stat_cnt(hldev, rmac_frms_q7);
+ set_latest_stat_cnt(hldev, rmac_full_q0);
+ set_latest_stat_cnt(hldev, rmac_full_q1);
+ set_latest_stat_cnt(hldev, rmac_full_q2);
+ set_latest_stat_cnt(hldev, rmac_full_q3);
+ set_latest_stat_cnt(hldev, rmac_full_q4);
+ set_latest_stat_cnt(hldev, rmac_full_q5);
+ set_latest_stat_cnt(hldev, rmac_full_q6);
+ set_latest_stat_cnt(hldev, rmac_full_q7);
+ set_latest_stat_cnt(hldev, rmac_pause_cnt);
+ set_latest_stat_cnt(hldev, reserved_9);
+ set_latest_stat_cnt(hldev, rmac_xgmii_data_err_cnt);
+ set_latest_stat_cnt(hldev, rmac_xgmii_ctrl_err_cnt);
+ set_latest_stat_cnt(hldev, rmac_accepted_ip);
+ set_latest_stat_cnt(hldev, rmac_err_tcp);
+
+ /* PCI/PCI-X Read transaction statistics. */
+ set_latest_stat_cnt(hldev, rd_req_cnt);
+ set_latest_stat_cnt(hldev, new_rd_req_cnt);
+ set_latest_stat_cnt(hldev, new_rd_req_rtry_cnt);
+ set_latest_stat_cnt(hldev, rd_rtry_cnt);
+ set_latest_stat_cnt(hldev, wr_rtry_rd_ack_cnt);
+
+ /* PCI/PCI-X write transaction statistics. */
+ set_latest_stat_cnt(hldev, wr_req_cnt);
+ set_latest_stat_cnt(hldev, new_wr_req_cnt);
+ set_latest_stat_cnt(hldev, new_wr_req_rtry_cnt);
+ set_latest_stat_cnt(hldev, wr_rtry_cnt);
+ set_latest_stat_cnt(hldev, wr_disc_cnt);
+ set_latest_stat_cnt(hldev, rd_rtry_wr_ack_cnt);
+
+ /* DMA Transaction statistics. */
+ set_latest_stat_cnt(hldev, txp_wr_cnt);
+ set_latest_stat_cnt(hldev, txd_rd_cnt);
+ set_latest_stat_cnt(hldev, txd_wr_cnt);
+ set_latest_stat_cnt(hldev, rxd_rd_cnt);
+ set_latest_stat_cnt(hldev, rxd_wr_cnt);
+ set_latest_stat_cnt(hldev, txf_rd_cnt);
+ set_latest_stat_cnt(hldev, rxf_wr_cnt);
+
+ /* Enhanced Herc statistics */
+ set_latest_stat_cnt(hldev, tmac_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_data_octets_oflow);
+ set_latest_stat_cnt(hldev, tmac_mcst_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_bcst_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_ttl_octets_oflow);
+ set_latest_stat_cnt(hldev, tmac_ucst_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_nucst_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_any_err_frms_oflow);
+ set_latest_stat_cnt(hldev, tmac_vlan_frms);
+ set_latest_stat_cnt(hldev, tmac_vld_ip_oflow);
+ set_latest_stat_cnt(hldev, tmac_drop_ip_oflow);
+ set_latest_stat_cnt(hldev, tmac_icmp_oflow);
+ set_latest_stat_cnt(hldev, tmac_rst_tcp_oflow);
+ set_latest_stat_cnt(hldev, tmac_udp_oflow);
+ set_latest_stat_cnt(hldev, tpa_unknown_protocol);
+ set_latest_stat_cnt(hldev, tpa_parse_failure);
+ set_latest_stat_cnt(hldev, rmac_vld_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_data_octets_oflow);
+ set_latest_stat_cnt(hldev, rmac_vld_mcst_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_vld_bcst_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_ttl_octets_oflow);
+ set_latest_stat_cnt(hldev, rmac_accepted_ucst_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_accepted_nucst_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_discarded_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_drop_events_oflow);
+ set_latest_stat_cnt(hldev, rmac_usized_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_osized_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_frag_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_jabber_frms_oflow);
+ set_latest_stat_cnt(hldev, rmac_ip_oflow);
+ set_latest_stat_cnt(hldev, rmac_drop_ip_oflow);
+ set_latest_stat_cnt(hldev, rmac_icmp_oflow);
+ set_latest_stat_cnt(hldev, rmac_udp_oflow);
+ set_latest_stat_cnt(hldev, rmac_err_drp_udp_oflow);
+ set_latest_stat_cnt(hldev, rmac_pause_cnt_oflow);
+ set_latest_stat_cnt(hldev, rmac_ttl_1519_4095_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_4096_8191_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_8192_max_frms);
+ set_latest_stat_cnt(hldev, rmac_ttl_gt_max_frms);
+ set_latest_stat_cnt(hldev, rmac_osized_alt_frms);
+ set_latest_stat_cnt(hldev, rmac_jabber_alt_frms);
+ set_latest_stat_cnt(hldev, rmac_gt_max_alt_frms);
+ set_latest_stat_cnt(hldev, rmac_vlan_frms);
+ set_latest_stat_cnt(hldev, rmac_fcs_discard);
+ set_latest_stat_cnt(hldev, rmac_len_discard);
+ set_latest_stat_cnt(hldev, rmac_da_discard);
+ set_latest_stat_cnt(hldev, rmac_pf_discard);
+ set_latest_stat_cnt(hldev, rmac_rts_discard);
+ set_latest_stat_cnt(hldev, rmac_red_discard);
+ set_latest_stat_cnt(hldev, rmac_ingm_full_discard);
+ set_latest_stat_cnt(hldev, rmac_accepted_ip_oflow);
+ set_latest_stat_cnt(hldev, link_fault_cnt);
+}
+
+/**
+ * xge_hal_stats_hw - Get HW device statistics.
+ * @devh: HAL device handle.
+ * @hw_info: Xframe statistic counters. See xge_hal_stats_hw_info_t.
+ * Returned by HAL.
+ *
+ * Get device and HAL statistics. The latter is part of the in-host statistics
+ * that HAL maintains for _that_ device.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+	xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN);
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled) {
+ *hw_info = NULL;
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING)
+ xge_os_dma_sync(hldev->pdev,
+ hldev->stats.hw_info_dmah,
+ hldev->stats.dma_addr,
+ 0,
+ sizeof(xge_hal_stats_hw_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE);
+#endif
+
+	/*
+	 * Update hw counters, taking into account the
+	 * "reset" ("saved") values.
+	 */
+ __hal_stats_update_latest(devh);
+
+ /*
+ * statistics HW bug fixups for Xena and Herc
+ */
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA ||
+ xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
+ u64 mcst, bcst;
+ xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest;
+
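+		/*
+		 * Rebuild the 64-bit multicast and broadcast counts from
+		 * the 32-bit counters and their overflow words, subtract
+		 * broadcast from multicast (per the fixup, the hardware
+		 * multicast counter also accumulates broadcast frames),
+		 * and store the corrected multicast count back.
+		 */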
+ mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) |
+ hwsta->rmac_vld_mcst_frms;
+
+ bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) |
+ hwsta->rmac_vld_bcst_frms;
+
+ mcst -= bcst;
+
+ hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32);
+ hwsta->rmac_vld_mcst_frms = (u32)mcst;
+ }
+
+ *hw_info = &hldev->stats.hw_info_latest;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_stats_pcim - Get HW device statistics (Titan).
+ * @devh: HAL device handle.
+ * @hw_info: Xframe statistic counters. See xge_hal_stats_pcim_info_t.
+ * Returned by HAL.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_stats_pcim(xge_hal_device_h devh, xge_hal_stats_pcim_info_t **hw_info)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+	xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN);
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled) {
+ *hw_info = NULL;
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING)
+ xge_os_dma_sync(hldev->pdev,
+ hldev->stats.hw_info_dmah,
+ hldev->stats.dma_addr,
+ 0,
+ sizeof(xge_hal_stats_pcim_info_t),
+ XGE_OS_DMA_DIR_FROMDEVICE);
+#endif
+
+	/*
+	 * Update hw counters, taking into account the
+	 * "reset" ("saved") values.
+	 */
+ __hal_stats_pcim_update_latest(devh);
+
+ *hw_info = hldev->stats.pcim_info_latest;
+
+ return XGE_HAL_OK;
+}
+
+/**
+ * xge_hal_stats_device - Get HAL statistics.
+ * @devh: HAL device handle.
+ * @device_info: HAL statistics. See xge_hal_stats_device_info_t.
+ * Returned by HAL.
+ *
+ * Get the in-host software statistics that HAL maintains for _that_
+ * device.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_stats_device(xge_hal_device_h devh,
+ xge_hal_stats_device_info_t **device_info)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled) {
+ *device_info = NULL;
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+ hldev->stats.sw_dev_info_stats.traffic_intr_cnt =
+ hldev->stats.sw_dev_info_stats.total_intr_cnt -
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
+
+ *device_info = &hldev->stats.sw_dev_info_stats;
+
+ return XGE_HAL_OK;
+}
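+
+/*
+ * Editorial usage sketch -- not part of this commit.  Logs the interrupt
+ * breakdown that HAL derives above (traffic = total - non-traffic);
+ * example_log_intr_stats is a hypothetical name and xge_os_printf is
+ * assumed from the OS-pal layer.
+ */
+static void
+example_log_intr_stats(xge_hal_device_h devh)
+{
+	xge_hal_stats_device_info_t *info;
+
+	if (xge_hal_stats_device(devh, &info) != XGE_HAL_OK)
+		return;
+	xge_os_printf("intr: total %u, traffic %u, other %u",
+	    (unsigned int)info->total_intr_cnt,
+	    (unsigned int)info->traffic_intr_cnt,
+	    (unsigned int)info->not_traffic_intr_cnt);
+}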
+
+/**
+ * xge_hal_stats_channel - Get channel statistics.
+ * @channelh: Channel handle.
+ * @channel_info: HAL channel statistic counters.
+ * See xge_hal_stats_channel_info_t{}. Returned by HAL.
+ *
+ * Retrieve statistics for a particular HAL channel, including, for
+ * instance, the average number of completions per interrupt and the
+ * average number of buffers per descriptor post.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_status_e{}.
+ */
+xge_hal_status_e
+xge_hal_stats_channel(xge_hal_channel_h channelh,
+ xge_hal_stats_channel_info_t **channel_info)
+{
+ xge_hal_stats_hw_info_t *latest;
+ xge_hal_channel_t *channel;
+ xge_hal_device_t *hldev;
+
+	channel = (xge_hal_channel_t *)channelh;
+	/* validate the channel before dereferencing it for its device */
+	if ((channel == NULL) || (channel->magic != XGE_HAL_MAGIC)) {
+		return XGE_HAL_ERR_INVALID_DEVICE;
+	}
+	hldev = (xge_hal_device_t *)channel->devh;
+	if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
+		return XGE_HAL_ERR_INVALID_DEVICE;
+	}
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled ||
+ !channel->is_open) {
+ *channel_info = NULL;
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+ hldev->stats.sw_dev_info_stats.traffic_intr_cnt =
+ hldev->stats.sw_dev_info_stats.total_intr_cnt -
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
+
+ if (hldev->stats.sw_dev_info_stats.traffic_intr_cnt) {
+ int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
+ int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ if (!txcnt)
+ txcnt = 1;
+ channel->stats.avg_compl_per_intr_cnt =
+ channel->stats.total_compl_cnt / txcnt;
+ } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING &&
+ !hldev->config.bimodal_interrupts) {
+ if (!rxcnt)
+ rxcnt = 1;
+ channel->stats.avg_compl_per_intr_cnt =
+ channel->stats.total_compl_cnt / rxcnt;
+ }
+ if (channel->stats.avg_compl_per_intr_cnt == 0) {
+			/* report at least one so the average never reads as zero */
+ channel->stats.avg_compl_per_intr_cnt = 1;
+ }
+ }
+
+ (void) xge_hal_stats_hw(hldev, &latest);
+
+ if (channel->stats.total_posts) {
+ channel->stats.avg_buffers_per_post =
+ channel->stats.total_buffers /
+ channel->stats.total_posts;
+#ifdef XGE_OS_PLATFORM_64BIT
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ channel->stats.avg_post_size =
+ (u32)(latest->tmac_ttl_less_fb_octets /
+ channel->stats.total_posts);
+ }
+#endif
+ }
+
+#ifdef XGE_OS_PLATFORM_64BIT
+ if (channel->stats.total_buffers &&
+ channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ channel->stats.avg_buffer_size =
+ (u32)(latest->tmac_ttl_less_fb_octets /
+ channel->stats.total_buffers);
+ }
+#endif
+
+ *channel_info = &channel->stats;
+ return XGE_HAL_OK;
+}
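+
+/*
+ * Editorial usage sketch -- not part of this commit.  Queries per-channel
+ * completion statistics for an open channel handle (e.g. one returned by
+ * xge_hal_channel_open()); example_log_channel_stats is a hypothetical
+ * name and xge_os_printf is assumed from the OS-pal layer.
+ */
+static void
+example_log_channel_stats(xge_hal_channel_h channelh)
+{
+	xge_hal_stats_channel_info_t *info;
+
+	/* fails with a not-ready status if the channel is not open */
+	if (xge_hal_stats_channel(channelh, &info) != XGE_HAL_OK)
+		return;
+	xge_os_printf("completions/intr %u, buffers/post %u",
+	    (unsigned int)info->avg_compl_per_intr_cnt,
+	    (unsigned int)info->avg_buffers_per_post);
+}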
+
+/**
+ * xge_hal_stats_reset - Reset (zero-out) device statistics
+ * @devh: HAL device handle.
+ *
+ * Reset all device statistics.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
+ * currently available.
+ *
+ * See also: xge_hal_status_e{}, xge_hal_stats_channel_info_t{},
+ * xge_hal_stats_sw_err_t{}, xge_hal_stats_device_info_t{}.
+ */
+xge_hal_status_e
+xge_hal_stats_reset(xge_hal_device_h devh)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (!hldev->stats.is_initialized ||
+ !hldev->stats.is_enabled) {
+ return XGE_HAL_INF_STATS_IS_NOT_READY;
+ }
+
+ /* save hw stats to calculate the after-reset values */
+ __hal_stats_save(&hldev->stats);
+
+	/* zero out the driver-maintained stats; keep the saved hw values */
+ __hal_stats_soft_reset(hldev, 0);
+
+ return XGE_HAL_OK;
+}
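+
+/*
+ * Editorial usage sketch -- not part of this commit.  A sample-then-reset
+ * pattern: read the counters, then zero the software-maintained view so
+ * the next read covers a fresh interval (the hardware counters keep
+ * running; __hal_stats_save() above keeps the baseline).
+ * example_sample_and_reset is a hypothetical name and xge_os_printf is
+ * assumed from the OS-pal layer.
+ */
+static void
+example_sample_and_reset(xge_hal_device_h devh)
+{
+	xge_hal_stats_hw_info_t *hw;
+
+	if (xge_hal_stats_hw(devh, &hw) == XGE_HAL_OK)
+		xge_os_printf("mcst frames %u", hw->rmac_vld_mcst_frms);
+	(void) xge_hal_stats_reset(devh);
+}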
+
+/*
+ * __hal_stats_soft_reset - Reset software-maintained statistics.
+ */
+void
+__hal_stats_soft_reset (xge_hal_device_h devh, int reset_all)
+{
+ xge_list_t *item;
+ xge_hal_channel_t *channel;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+
+ if (reset_all) {
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
+ xge_os_memzero(&hldev->stats.hw_info_saved,
+ sizeof(xge_hal_stats_hw_info_t));
+ xge_os_memzero(&hldev->stats.hw_info_latest,
+ sizeof(xge_hal_stats_hw_info_t));
+ } else {
+			/*
+			 * The pcim images are referenced through pointers
+			 * (see xge_hal_stats_pcim() above); zero the
+			 * pointed-to structures, not the pointer fields.
+			 */
+			xge_os_memzero(hldev->stats.pcim_info_saved,
+				sizeof(xge_hal_stats_pcim_info_t));
+			xge_os_memzero(hldev->stats.pcim_info_latest,
+				sizeof(xge_hal_stats_pcim_info_t));
+ }
+ }
+
+ /* Reset the "soft" error and informational statistics */
+ xge_os_memzero(&hldev->stats.sw_dev_err_stats,
+ sizeof(xge_hal_stats_sw_err_t));
+ xge_os_memzero(&hldev->stats.sw_dev_info_stats,
+ sizeof(xge_hal_stats_device_info_t));
+
+ /* for each Rx channel */
+ xge_list_for_each(item, &hldev->ring_channels) {
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ xge_os_memzero(&channel->stats,
+ sizeof(xge_hal_stats_channel_info_t));
+ }
+
+ /* for each Tx channel */
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ xge_os_memzero(&channel->stats,
+ sizeof(xge_hal_stats_channel_info_t));
+ }
+}
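+
+/*
+ * Editorial note -- not part of this commit: xge_hal_stats_reset() above
+ * passes reset_all=0 so the saved hardware snapshot survives and keeps
+ * later reads relative to the reset point; reset_all=1 additionally drops
+ * the saved/latest hardware (or Titan pcim) images, presumably for use
+ * across a full device reset elsewhere in the HAL.
+ */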
+
diff --git a/sys/dev/nxge/xgell-version.h b/sys/dev/nxge/xgell-version.h
new file mode 100644
index 0000000..f694833
--- /dev/null
+++ b/sys/dev/nxge/xgell-version.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2002-2007 Neterion, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FileName : xgell-version.h
+ *
+ * Description: versioning file
+ *
+ * Created: 3 September 2004
+ */
+
+#ifndef XGELL_VERSION_H
+#define XGELL_VERSION_H
+
+#define XGELL_VERSION_MAJOR "2"
+#define XGELL_VERSION_MINOR "0"
+#define XGELL_VERSION_FIX "7"
+#define XGELL_VERSION_BUILD GENERATED_BUILD_VERSION
+#define XGELL_VERSION XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \
+ GENERATED_BUILD_VERSION
+#define XGELL_DESC XGE_DRIVER_NAME" v."XGELL_VERSION
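+
+/*
+ * Editorial note -- not part of this commit: adjacent string literals
+ * paste at compile time, so with GENERATED_BUILD_VERSION defined as,
+ * say, "12345" (a hypothetical value; see include/build-version.h):
+ *
+ *	XGELL_VERSION expands to "2" "." "0" "." "12345" == "2.0.12345"
+ *	XGELL_DESC    expands to XGE_DRIVER_NAME " v.2.0.12345"
+ *
+ * XGELL_VERSION_FIX is defined but not folded into XGELL_VERSION.
+ */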
+
+#endif /* XGELL_VERSION_H */