48 files changed, 15564 insertions, 16400 deletions
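The if_nxge.c hunks below replace the file-local GET_PARAM* macros with XGE_GET_PARAM* equivalents that read hw.xge.* loader tunables from the kernel environment and fall back to compiled-in defaults. A minimal sketch of that kenv-backed pattern, using a hypothetical helper name (xge_kenv_get_int) rather than the driver's actual macros:

/*
 * Illustrative sketch only -- not the driver's actual if_nxge.h macro.
 * It mirrors the behaviour of the old GET_PARAM()/new XGE_GET_PARAM()
 * pattern: if the tunable exists in the kernel environment, use it;
 * otherwise keep the hardcoded default.
 */
#include <sys/param.h>
#include <sys/systm.h>		/* testenv(), getenv_int() */

static int
xge_kenv_get_int(const char *name, int defval)
{
	int value = defval;

	/*
	 * getenv_int() returns non-zero on success and leaves 'value'
	 * untouched on failure, so the default survives a missing or
	 * malformed tunable.
	 */
	if (testenv(name))
		(void) getenv_int(name, &value);
	return (value);
}

/*
 * Example use, mirroring xge_init_params():
 *	lldev->enabled_lro =
 *	    xge_kenv_get_int("hw.xge.enable_lro", XGE_DEFAULT_ENABLED_LRO);
 */

Such tunables would normally be set in /boot/loader.conf (for example hw.xge.enable_lro="1") before the module loads.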
diff --git a/share/man/man4/nxge.4 b/share/man/man4/nxge.4 index 21bb5e1..65a4826 100644 --- a/share/man/man4/nxge.4 +++ b/share/man/man4/nxge.4 @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd June 28, 2007 +.Dd October 16, 2007 .Dt NXGE 4 .Os .Sh NAME @@ -48,9 +48,9 @@ if_nxge_load="YES" The .Nm driver provides support for Neterion Xframe-I and Xframe-II adapters. -The driver supports TCP Segmentation Offload (TSO/LSO), Jumbo Frames -(5 buffer mode), Header Separation (2 and 3 Receive buffer modes), -VLAN, and Promiscuous mode. +The driver supports TCP Segmentation Offload (TSO/LSO), +Large Receive Offload (LRO), Jumbo Frames (5 buffer mode), +Header Separation (Rx 2 buffer mode), VLAN, and Promiscuous mode. .Pp For general information and support, please visit the Neterion support page .Pa http://www.neterion.com/support/support.html . @@ -61,6 +61,11 @@ Selecting an MTU larger than 1500 bytes with the utility configures the adapter to transmit and receive Jumbo Frames. Xframe adapters support Jumbo Frames up to 9600 bytes. .Pp +.Pp +For Jumbo Frames, the driver tries to allocate physically contiguous buffers. Failure to do so may degrade performance. To resolve such problems, please visit +.Pa http://www.neterion.com +where additional information and a kernel patch can be found. +.Pp For more information on configuring this device, see .Xr ifconfig 8 . .Sh HARDWARE diff --git a/sys/dev/nxge/if_nxge.c b/sys/dev/nxge/if_nxge.c index 0b8b508..95f97a8 100644 --- a/sys/dev/nxge/if_nxge.c +++ b/sys/dev/nxge/if_nxge.c @@ -26,12 +26,6 @@ * $FreeBSD$ */ -/* - * if_nxge.c - * - * FreeBSD specific initialization & routines - */ - #include <dev/nxge/if_nxge.h> #include <dev/nxge/xge-osdep.h> #include <net/if_arp.h> @@ -43,12 +37,18 @@ int copyright_print = 0; int hal_driver_init_count = 0; size_t size = sizeof(int); -/****************************************** +static void inline xge_flush_txds(xge_hal_channel_h); + +/** * xge_probe - * Parameters: Device structure - * Return: BUS_PROBE_DEFAULT/ENXIO/ENOMEM - * Description: Probes for Xframe device - ******************************************/ + * Probes for Xframe devices + * + * @dev Device handle + * + * Returns + * BUS_PROBE_DEFAULT if device is supported + * ENXIO if device is not supported + */ int xge_probe(device_t dev) { @@ -56,13 +56,11 @@ xge_probe(device_t dev) int vendorid = pci_get_vendor(dev); int retValue = ENXIO; - ENTER_FUNCTION - if(vendorid == XGE_PCI_VENDOR_ID) { if((devid == XGE_PCI_DEVICE_ID_XENA_2) || (devid == XGE_PCI_DEVICE_ID_HERC_2)) { if(!copyright_print) { - PRINT_COPYRIGHT; + xge_os_printf(XGE_COPYRIGHT); copyright_print = 1; } device_set_desc_copy(dev, @@ -71,61 +69,22 @@ xge_probe(device_t dev) } } - LEAVE_FUNCTION return retValue; } -/****************************************** +/** * xge_init_params - * Parameters: HAL device configuration - * structure, device pointer - * Return: None - * Description: Sets parameter values in - * xge_hal_device_config_t structure - ******************************************/ + * Sets HAL parameter values (from kenv).
+ * + * @dconfig Device Configuration + * @dev Device Handle + */ void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev) { - int index, revision; + int qindex, tindex, revision; device_t checkdev; - - ENTER_FUNCTION - -#define SAVE_PARAM(to, what, value) to.what = value; - -#define GET_PARAM(str_kenv, to, param, hardcode) { \ - static int param##__LINE__; \ - if(testenv(str_kenv) == 1) { \ - getenv_int(str_kenv, ¶m##__LINE__); \ - } \ - else { \ - param##__LINE__ = hardcode; \ - } \ - SAVE_PARAM(to, param, param##__LINE__); \ -} - -#define GET_PARAM_MAC(str_kenv, param, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).mac), param, hardcode); - -#define GET_PARAM_FIFO(str_kenv, param, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).fifo), param, hardcode); - -#define GET_PARAM_FIFO_QUEUE(str_kenv, param, qindex, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex]), param, hardcode); - -#define GET_PARAM_FIFO_QUEUE_TTI(str_kenv, param, qindex, tindex, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex].tti[tindex]), \ - param, hardcode); - -#define GET_PARAM_RING(str_kenv, param, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).ring), param, hardcode); - -#define GET_PARAM_RING_QUEUE(str_kenv, param, qindex, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex]), param, hardcode); - -#define GET_PARAM_RING_QUEUE_RTI(str_kenv, param, qindex, hardcode) \ - GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex].rti), param, \ - hardcode); + xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); dconfig->mtu = XGE_DEFAULT_INITIAL_MTU; dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED; @@ -134,133 +93,156 @@ xge_init_params(xge_hal_device_config_t *dconfig, device_t dev) dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN; dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE; - GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer, + XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso, + XGE_DEFAULT_ENABLED_TSO); + XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro, + XGE_DEFAULT_ENABLED_LRO); + XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi, + XGE_DEFAULT_ENABLED_MSI); + + XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer, XGE_DEFAULT_LATENCY_TIMER); - GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans, + XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans, XGE_DEFAULT_MAX_SPLITS_TRANS); - GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count, + XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count, XGE_DEFAULT_MMRB_COUNT); - GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits, + XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits, XGE_DEFAULT_SHARED_SPLITS); - GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt, + XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt, XGE_DEFAULT_ISR_POLLING_CNT); - GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig), + XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig), stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC); - GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period, + XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period, XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD); - GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period, + XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period, XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD); - GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en, + 
XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en, XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN); - GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en, + XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en, XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN); - GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time, + XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time, XGE_DEFAULT_MAC_RMAC_PAUSE_TIME); - GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3", + XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3", mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3); - GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7", + XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7", mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7); - GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size, + XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size, XGE_DEFAULT_FIFO_MEMBLOCK_SIZE); - GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold, + XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold, XGE_DEFAULT_FIFO_RESERVE_THRESHOLD); - GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags, + XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags, XGE_DEFAULT_FIFO_MAX_FRAGS); - GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, 0, - XGE_DEFAULT_FIFO_QUEUE_INTR); - GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, 0, - XGE_DEFAULT_FIFO_QUEUE_MAX); - GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial, 0, - XGE_DEFAULT_FIFO_QUEUE_INITIAL); - - for (index = 0; index < XGE_HAL_MAX_FIFO_TTI_NUM; index++) { - dconfig->fifo.queue[0].tti[index].enabled = 1; - dconfig->fifo.queue[0].configured = 1; - - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a", - urange_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b", - urange_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c", - urange_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a", - ufc_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b", - ufc_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c", - ufc_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d", - ufc_d, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ci_en", - timer_ci_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ac_en", - timer_ac_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN); - GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_val_us", - timer_val_us, 0, index, - XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US); - } - - GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size, + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { + XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex, + XGE_DEFAULT_FIFO_QUEUE_INTR); + XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex, + XGE_DEFAULT_FIFO_QUEUE_MAX); + XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial, + qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL); + + for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) { + dconfig->fifo.queue[qindex].tti[tindex].enabled = 1; + dconfig->fifo.queue[qindex].configured = 1; + + 
XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a", + urange_a, qindex, tindex, + XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b", + urange_b, qindex, tindex, + XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c", + urange_c, qindex, tindex, + XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a", + ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b", + ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c", + ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C); + XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d", + ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D); + XGE_GET_PARAM_FIFO_QUEUE_TTI( + "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex, + tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN); + XGE_GET_PARAM_FIFO_QUEUE_TTI( + "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex, + tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN); + XGE_GET_PARAM_FIFO_QUEUE_TTI( + "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex, + tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US); + } + } + + XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size, XGE_DEFAULT_RING_MEMBLOCK_SIZE); - - GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag, + + XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag, XGE_DEFAULT_RING_STRIP_VLAN_TAG); - - for (index = 0; index < XGE_HAL_MIN_RING_NUM; index++) { - dconfig->ring.queue[index].max_frm_len = XGE_HAL_RING_USE_MTU; - dconfig->ring.queue[index].priority = 0; - dconfig->ring.queue[index].configured = 1; - dconfig->ring.queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_1; - - GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, index, + + XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode, + XGE_DEFAULT_BUFFER_MODE); + if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) || + (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) { + xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2"); + lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1; + } + + for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) { + dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU; + dconfig->ring.queue[qindex].priority = 0; + dconfig->ring.queue[qindex].configured = 1; + dconfig->ring.queue[qindex].buffer_mode = + (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ? 
+ XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode; + + XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex, XGE_DEFAULT_RING_QUEUE_MAX); - GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial, index, - XGE_DEFAULT_RING_QUEUE_INITIAL); - GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb", dram_size_mb, - index, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB); - GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts", - indicate_max_pkts, index, + XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial, + qindex, XGE_DEFAULT_RING_QUEUE_INITIAL); + XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb", + dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB); + XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts", + indicate_max_pkts, qindex, XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS); - GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us", - backoff_interval_us, index, + XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us", + backoff_interval_us, qindex, XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a, - index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b, - index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c, - index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d, - index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en", - timer_ac_en, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us", - timer_val_us, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a", urange_a, - index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b", urange_b, - index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B); - GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c", urange_c, - index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a, + qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b, + qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c, + qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d, + qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en", + timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us", + timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a", + urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b", + urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B); + XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c", + urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C); } if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) { - xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags); + xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags) xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d", - (int)(PAGE_SIZE / 32)); - xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32)); + 
(int)(PAGE_SIZE / 32)) + xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32)) dconfig->fifo.max_frags = (PAGE_SIZE / 32); } checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE); if(checkdev != NULL) { /* Check Revision for 0x12 */ - revision = pci_read_config(checkdev, + revision = pci_read_config(checkdev, xge_offsetof(xge_hal_pci_config_t, revision), 1); if(revision <= 0x12) { /* Set mmrb_count to 1k and max splits = 2 */ @@ -268,23 +250,96 @@ xge_init_params(xge_hal_device_config_t *dconfig, device_t dev) dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION; } } +} -#ifdef XGE_FEATURE_LRO - /* updating the LRO frame's sg size and frame len size. */ - dconfig->lro_sg_size = 20; - dconfig->lro_frm_len = 65536; -#endif +/** + * xge_buffer_sizes_set + * Set buffer sizes based on Rx buffer mode + * + * @lldev Per-adapter Data + * @buffer_mode Rx Buffer Mode + */ +void +xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu) +{ + int index = 0; + int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE; + int buffer_size = mtu + frame_header; + + xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len)); + + if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) + lldev->rxd_mbuf_len[buffer_mode - 1] = mtu; + + lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header; + + if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) + lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; + + if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { + index = 2; + buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE; + while(buffer_size > MJUMPAGESIZE) { + lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE; + buffer_size -= MJUMPAGESIZE; + } + XGE_ALIGN_TO(buffer_size, 128); + lldev->rxd_mbuf_len[index] = buffer_size; + lldev->rxd_mbuf_cnt = index + 1; + } - LEAVE_FUNCTION + for(index = 0; index < buffer_mode; index++) + xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index, + lldev->rxd_mbuf_len[index]); } -/****************************************** +/** + * xge_buffer_mode_init + * Init Rx buffer mode + * + * @lldev Per-adapter Data + * @mtu Interface MTU + */ +void +xge_buffer_mode_init(xge_lldev_t *lldev, int mtu) +{ + int index = 0, buffer_size = 0; + xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring); + + buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE; + + if(lldev->enabled_lro) + (lldev->ifnetp)->if_capenable |= IFCAP_LRO; + else + (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO; + + lldev->rxd_mbuf_cnt = lldev->buffer_mode; + if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) { + XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3); + ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B; + } + else { + XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode); + ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A; + } + xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu); + + xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device), + ((lldev->enabled_tso) ? "Enabled":"Disabled")); + xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device), + ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? 
"Enabled":"Disabled"); + xge_os_printf("%s: Rx %d Buffer Mode Enabled", + device_get_nameunit(lldev->device), lldev->buffer_mode); +} + +/** * xge_driver_initialize - * Parameters: None - * Return: 0/1 - * Description: Defines HAL-ULD callbacks - * and initializes the HAL driver - ******************************************/ + * Initializes HAL driver (common for all devices) + * + * Returns + * XGE_HAL_OK if success + * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid + */ int xge_driver_initialize(void) { @@ -292,29 +347,27 @@ xge_driver_initialize(void) xge_hal_driver_config_t driver_config; xge_hal_status_e status = XGE_HAL_OK; - ENTER_FUNCTION - /* Initialize HAL driver */ if(!hal_driver_init_count) { xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t)); + xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t)); /* * Initial and maximum size of the queue used to store the events * like Link up/down (xge_hal_event_e) */ - driver_config.queue_size_initial = 1; - driver_config.queue_size_max = 4; + driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL; + driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX; - uld_callbacks.link_up = xgell_callback_link_up; - uld_callbacks.link_down = xgell_callback_link_down; - uld_callbacks.crit_err = xgell_callback_crit_err; - uld_callbacks.event = xgell_callback_event; + uld_callbacks.link_up = xge_callback_link_up; + uld_callbacks.link_down = xge_callback_link_down; + uld_callbacks.crit_err = xge_callback_crit_err; + uld_callbacks.event = xge_callback_event; status = xge_hal_driver_initialize(&driver_config, &uld_callbacks); if(status != XGE_HAL_OK) { - xge_os_printf("xgeX: Initialization failed (Status: %d)", - status); - goto xdi_out; + XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed", + xdi_out, status); } } hal_driver_init_count = hal_driver_init_count + 1; @@ -323,156 +376,451 @@ xge_driver_initialize(void) xge_hal_driver_debug_level_set(XGE_TRACE); xdi_out: - LEAVE_FUNCTION return status; } -/****************************************** - * Function: xge_media_init - * Parameters: Device pointer - * Return: None - * Description: Initializes, adds and sets - * media - ******************************************/ +/** + * xge_media_init + * Initializes, adds and sets media + * + * @devc Device Handle + */ void xge_media_init(device_t devc) { - xgelldev_t *lldev = (xgelldev_t *)device_get_softc(devc); - - ENTER_FUNCTION + xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc); /* Initialize Media */ - ifmedia_init(&lldev->xge_media, IFM_IMASK, xge_ifmedia_change, + ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change, xge_ifmedia_status); /* Add supported media */ - ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX | IFM_FDX, - 0, NULL); - ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX, 0, NULL); - ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_SR, 0, NULL); - ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_LR, 0, NULL); + ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); + ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL); + ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL); /* Set media */ - ifmedia_set(&lldev->xge_media, IFM_ETHER | IFM_AUTO); - - LEAVE_FUNCTION + ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO); } -/* +/** 
* xge_pci_space_save * Save PCI configuration space - * @dev Device structure + * + * @dev Device Handle */ void xge_pci_space_save(device_t dev) { - ENTER_FUNCTION - struct pci_devinfo *dinfo = NULL; dinfo = device_get_ivars(dev); xge_trace(XGE_TRACE, "Saving PCI configuration space"); pci_cfg_save(dev, dinfo, 0); - - LEAVE_FUNCTION } -/* +/** * xge_pci_space_restore * Restore saved PCI configuration space - * @dev Device structure + * + * @dev Device Handle */ void xge_pci_space_restore(device_t dev) { - ENTER_FUNCTION - struct pci_devinfo *dinfo = NULL; dinfo = device_get_ivars(dev); xge_trace(XGE_TRACE, "Restoring PCI configuration space"); pci_cfg_restore(dev, dinfo); +} + +/** + * xge_msi_info_save + * Save MSI info + * + * @lldev Per-adapter Data + */ +void +xge_msi_info_save(xge_lldev_t * lldev) +{ + xge_os_pci_read16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_control), + &lldev->msi_info.msi_control); + xge_os_pci_read32(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address), + &lldev->msi_info.msi_lower_address); + xge_os_pci_read32(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address), + &lldev->msi_info.msi_higher_address); + xge_os_pci_read16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_data), + &lldev->msi_info.msi_data); +} + +/** + * xge_msi_info_restore + * Restore saved MSI info + * + * @dev Device Handle + */ +void +xge_msi_info_restore(xge_lldev_t *lldev) +{ + /* + * If interface is made down and up, traffic fails. It was observed that + * MSI information were getting reset on down. Restoring them. + */ + xge_os_pci_write16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_control), + lldev->msi_info.msi_control); + + xge_os_pci_write32(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address), + lldev->msi_info.msi_lower_address); + + xge_os_pci_write32(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address), + lldev->msi_info.msi_higher_address); + + xge_os_pci_write16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_data), + lldev->msi_info.msi_data); +} + +/** + * xge_init_mutex + * Initializes mutexes used in driver + * + * @lldev Per-adapter Data + */ +void +xge_mutex_init(xge_lldev_t *lldev) +{ + int qindex; + + sprintf(lldev->mtx_name_drv, "%s_drv", + device_get_nameunit(lldev->device)); + mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK, + MTX_DEF); + + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { + sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d", + device_get_nameunit(lldev->device), qindex); + mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL, + MTX_DEF); + } +} + +/** + * xge_mutex_destroy + * Destroys mutexes used in driver + * + * @lldev Per-adapter Data + */ +void +xge_mutex_destroy(xge_lldev_t *lldev) +{ + int qindex; + + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) + mtx_destroy(&lldev->mtx_tx[qindex]); + mtx_destroy(&lldev->mtx_drv); +} + +/** + * xge_print_info + * Print device and driver information + * + * @lldev Per-adapter Data + */ +void +xge_print_info(xge_lldev_t *lldev) +{ + device_t dev = lldev->device; + xge_hal_device_t *hldev = lldev->devh; + xge_hal_status_e status = XGE_HAL_OK; + u64 val64 = 0; + const char *xge_pci_bus_speeds[17] = { + "PCI 33MHz Bus", + "PCI 66MHz Bus", + "PCIX(M1) 66MHz Bus", + "PCIX(M1) 100MHz Bus", + "PCIX(M1) 133MHz Bus", + "PCIX(M2) 133MHz Bus", + "PCIX(M2) 200MHz Bus", + "PCIX(M2) 266MHz Bus", + "PCIX(M1) Reserved", 
+ "PCIX(M1) 66MHz Bus (Not Supported)", + "PCIX(M1) 100MHz Bus (Not Supported)", + "PCIX(M1) 133MHz Bus (Not Supported)", + "PCIX(M2) Reserved", + "PCIX 533 Reserved", + "PCI Basic Mode", + "PCIX Basic Mode", + "PCI Invalid Mode" + }; + + xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s", + device_get_nameunit(dev), + ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"), + hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION); + xge_os_printf("%s: Serial Number %s", + device_get_nameunit(dev), hldev->vpd_data.serial_num); + + if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) { + status = xge_hal_mgmt_reg_read(hldev, 0, + xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64); + if(status != XGE_HAL_OK) + xge_trace(XGE_ERR, "Error for getting bus speed"); + + xge_os_printf("%s: Adapter is on %s bit %s", + device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"), + (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)])); + } - LEAVE_FUNCTION + xge_os_printf("%s: Using %s Interrupts", + device_get_nameunit(dev), + (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line"); } -/****************************************** +/** + * xge_create_dma_tags + * Creates DMA tags for both Tx and Rx + * + * @dev Device Handle + * + * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors) + */ +xge_hal_status_e +xge_create_dma_tags(device_t dev) +{ + xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); + xge_hal_status_e status = XGE_HAL_FAIL; + int mtu = (lldev->ifnetp)->if_mtu, maxsize; + + /* DMA tag for Tx */ + status = bus_dma_tag_create( + bus_get_dma_tag(dev), /* Parent */ + PAGE_SIZE, /* Alignment */ + 0, /* Bounds */ + BUS_SPACE_MAXADDR, /* Low Address */ + BUS_SPACE_MAXADDR, /* High Address */ + NULL, /* Filter Function */ + NULL, /* Filter Function Arguments */ + MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */ + XGE_MAX_SEGS, /* Number of Segments */ + MCLBYTES, /* Maximum Segment Size */ + BUS_DMA_ALLOCNOW, /* Flags */ + NULL, /* Lock Function */ + NULL, /* Lock Function Arguments */ + (&lldev->dma_tag_tx)); /* DMA Tag */ + if(status != 0) + goto _exit; + + maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE; + if(maxsize <= MCLBYTES) { + maxsize = MCLBYTES; + } + else { + if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) + maxsize = MJUMPAGESIZE; + else + maxsize = (maxsize <= MJUMPAGESIZE) ? 
MJUMPAGESIZE : MJUM9BYTES; + } + + /* DMA tag for Rx */ + status = bus_dma_tag_create( + bus_get_dma_tag(dev), /* Parent */ + PAGE_SIZE, /* Alignment */ + 0, /* Bounds */ + BUS_SPACE_MAXADDR, /* Low Address */ + BUS_SPACE_MAXADDR, /* High Address */ + NULL, /* Filter Function */ + NULL, /* Filter Function Arguments */ + maxsize, /* Maximum Size */ + 1, /* Number of Segments */ + maxsize, /* Maximum Segment Size */ + BUS_DMA_ALLOCNOW, /* Flags */ + NULL, /* Lock Function */ + NULL, /* Lock Function Arguments */ + (&lldev->dma_tag_rx)); /* DMA Tag */ + if(status != 0) + goto _exit1; + + status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT, + &lldev->extra_dma_map); + if(status != 0) + goto _exit2; + + status = XGE_HAL_OK; + goto _exit; + +_exit2: + status = bus_dma_tag_destroy(lldev->dma_tag_rx); + if(status != 0) + xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); +_exit1: + status = bus_dma_tag_destroy(lldev->dma_tag_tx); + if(status != 0) + xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); + status = XGE_HAL_FAIL; +_exit: + return status; +} + +/** + * xge_confirm_changes + * Disables and Enables interface to apply requested change + * + * @lldev Per-adapter Data + * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0) + * + * Returns 0 or Error Number + */ +void +xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option) +{ + if(lldev->initialized == 0) goto _exit1; + + mtx_lock(&lldev->mtx_drv); + if_down(lldev->ifnetp); + xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + + if(option == XGE_SET_MTU) + (lldev->ifnetp)->if_mtu = lldev->mtu; + else + xge_buffer_mode_init(lldev, lldev->mtu); + + xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + if_up(lldev->ifnetp); + mtx_unlock(&lldev->mtx_drv); + goto _exit; + +_exit1: + /* Request was to change MTU and device not initialized */ + if(option == XGE_SET_MTU) { + (lldev->ifnetp)->if_mtu = lldev->mtu; + xge_buffer_mode_init(lldev, lldev->mtu); + } +_exit: + return; +} + +/** + * xge_change_lro_status + * Enable/Disable LRO feature + * + * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments + * + * Returns 0 or error number. + */ +static int +xge_change_lro_status(SYSCTL_HANDLER_ARGS) +{ + xge_lldev_t *lldev = (xge_lldev_t *)arg1; + int request = lldev->enabled_lro, status = XGE_HAL_OK; + + status = sysctl_handle_int(oidp, &request, arg2, req); + if((status != XGE_HAL_OK) || (!req->newptr)) + goto _exit; + + if((request < 0) || (request > 1)) { + status = EINVAL; + goto _exit; + } + + /* Return if current and requested states are same */ + if(request == lldev->enabled_lro){ + xge_trace(XGE_ERR, "LRO is already %s", + ((request) ? 
"enabled" : "disabled")); + goto _exit; + } + + lldev->enabled_lro = request; + xge_confirm_changes(lldev, XGE_CHANGE_LRO); + arg2 = lldev->enabled_lro; + +_exit: + return status; +} + +/** + * xge_add_sysctl_handlers + * Registers sysctl parameter value update handlers + * + * @lldev Per-adapter data + */ +void +xge_add_sysctl_handlers(xge_lldev_t *lldev) +{ + struct sysctl_ctx_list *context_list = + device_get_sysctl_ctx(lldev->device); + struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device); + + SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO, + "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0, + xge_change_lro_status, "I", "Enable or disable LRO feature"); +} + +/** * xge_attach - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Connects the driver to the - * system if the probe routine returned success - ******************************************/ + * Connects driver to the system if probe was success + * + * @dev Device Handle + */ int xge_attach(device_t dev) { xge_hal_device_config_t *device_config; - xge_hal_ring_config_t *pRingConfig; xge_hal_device_attr_t attr; - xgelldev_t *lldev; + xge_lldev_t *lldev; xge_hal_device_t *hldev; - pci_info_t *pci_info; + xge_pci_info_t *pci_info; struct ifnet *ifnetp; - char *mesg; - char *desc; - int rid; - int rid0; - int rid1; - int error; - u64 val64 = 0; - int retValue = 0; - int mode = 0; - int buffer_index, buffer_length, index; - - ENTER_FUNCTION - - device_config = xge_malloc(sizeof(xge_hal_device_config_t)); + int rid, rid0, rid1, error; + int msi_count = 0, status = XGE_HAL_OK; + int enable_msi = XGE_HAL_INTR_MODE_IRQLINE; + + device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t)); if(!device_config) { - xge_ctrace(XGE_ERR, "Malloc of device config failed"); - retValue = ENOMEM; - goto attach_out_config; + XGE_EXIT_ON_ERR("Memory allocation for device configuration failed", + attach_out_config, ENOMEM); } - lldev = (xgelldev_t *) device_get_softc(dev); + lldev = (xge_lldev_t *) device_get_softc(dev); if(!lldev) { - xge_ctrace(XGE_ERR, "Adapter softc structure allocation failed"); - retValue = ENOMEM; - goto attach_out; + XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM); } lldev->device = dev; - /* Initialize mutex */ - if(mtx_initialized(&lldev->xge_lock) == 0) { - mtx_init((&lldev->xge_lock), "xge", MTX_NETWORK_LOCK, MTX_DEF); - } + xge_mutex_init(lldev); error = xge_driver_initialize(); if(error != XGE_HAL_OK) { - xge_ctrace(XGE_ERR, "Initializing driver failed"); - freeResources(dev, 1); - retValue = ENXIO; - goto attach_out; + xge_resources_free(dev, xge_free_mutex); + XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO); } /* HAL device */ - hldev = (xge_hal_device_t *)xge_malloc(sizeof(xge_hal_device_t)); + hldev = + (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t)); if(!hldev) { - xge_trace(XGE_ERR, "Allocating memory for xge_hal_device_t failed"); - freeResources(dev, 2); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_terminate_hal_driver); + XGE_EXIT_ON_ERR("Memory allocation for HAL device failed", + attach_out, ENOMEM); } lldev->devh = hldev; /* Our private structure */ - pci_info = (pci_info_t*) xge_malloc(sizeof(pci_info_t)); + pci_info = + (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t)); if(!pci_info) { - xge_trace(XGE_ERR, "Allocating memory for pci_info_t failed"); - freeResources(dev, 3); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, 
xge_free_hal_device); + XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed", + attach_out, ENOMEM); } lldev->pdev = pci_info; pci_info->device = dev; @@ -485,26 +833,24 @@ xge_attach(device_t dev) pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(pci_info->regmap0 == NULL) { - xge_trace(XGE_ERR, "NULL handler for BAR0"); - freeResources(dev, 4); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_pci_info); + XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed", + attach_out, ENOMEM); } attr.bar0 = (char *)pci_info->regmap0; - pci_info->bar0resource = - (busresource_t*) xge_malloc(sizeof(busresource_t)); + pci_info->bar0resource = (xge_bus_resource_t*) + xge_os_malloc(NULL, sizeof(xge_bus_resource_t)); if(pci_info->bar0resource == NULL) { - xge_trace(XGE_ERR, "Allocating memory for bar0resources failed"); - freeResources(dev, 5); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_bar0); + XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed", + attach_out, ENOMEM); } - ((struct busresources *)(pci_info->bar0resource))->bus_tag = + ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag = rman_get_bustag(pci_info->regmap0); - ((struct busresources *)(pci_info->bar0resource))->bus_handle = + ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle = rman_get_bushandle(pci_info->regmap0); - ((struct busresources *)(pci_info->bar0resource))->bar_start_addr = + ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr = pci_info->regmap0; /* Get virtual address for BAR1 */ @@ -512,33 +858,31 @@ xge_attach(device_t dev) pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1, RF_ACTIVE); if(pci_info->regmap1 == NULL) { - xge_trace(XGE_ERR, "NULL handler for BAR1"); - freeResources(dev, 6); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_bar0_resource); + XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed", + attach_out, ENOMEM); } attr.bar1 = (char *)pci_info->regmap1; - pci_info->bar1resource = - (busresource_t*) xge_malloc(sizeof(busresource_t)); + pci_info->bar1resource = (xge_bus_resource_t*) + xge_os_malloc(NULL, sizeof(xge_bus_resource_t)); if(pci_info->bar1resource == NULL) { - xge_trace(XGE_ERR, "Allocating memory for bar0resources failed"); - freeResources(dev, 7); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_bar1); + XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed", + attach_out, ENOMEM); } - ((struct busresources *)(pci_info->bar1resource))->bus_tag = + ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag = rman_get_bustag(pci_info->regmap1); - ((struct busresources *)(pci_info->bar1resource))->bus_handle = + ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle = rman_get_bushandle(pci_info->regmap1); - ((struct busresources *)(pci_info->bar1resource))->bar_start_addr = + ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr = pci_info->regmap1; /* Save PCI config space */ xge_pci_space_save(dev); - attr.regh0 = (busresource_t *) pci_info->bar0resource; - attr.regh1 = (busresource_t *) pci_info->bar1resource; + attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource; + attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource; attr.irqh = lldev->irqhandle; attr.cfgh = pci_info; attr.pdev = pci_info; @@ -546,95 +890,78 @@ xge_attach(device_t dev) /* Initialize device configuration parameters */ xge_init_params(device_config, dev); - /* Initialize HAL device 
*/ - error = xge_hal_device_initialize(hldev, &attr, device_config); - if(error != XGE_HAL_OK) { - switch(error) { - case XGE_HAL_ERR_DRIVER_NOT_INITIALIZED: - xge_trace(XGE_ERR, "XGE_HAL_ERR_DRIVER_NOT_INITIALIZED"); - break; - - case XGE_HAL_ERR_OUT_OF_MEMORY: - xge_trace(XGE_ERR, "XGE_HAL_ERR_OUT_OF_MEMORY"); - break; - - case XGE_HAL_ERR_BAD_SUBSYSTEM_ID: - xge_trace(XGE_ERR, "XGE_HAL_ERR_BAD_SUBSYSTEM_ID"); - break; - - case XGE_HAL_ERR_INVALID_MAC_ADDRESS: - xge_trace(XGE_ERR, "XGE_HAL_ERR_INVALID_MAC_ADDRESS"); - break; - - case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING: - xge_trace(XGE_ERR, "XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING"); - break; - - case XGE_HAL_ERR_SWAPPER_CTRL: - xge_trace(XGE_ERR, "XGE_HAL_ERR_SWAPPER_CTRL"); - break; - - case XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT: - xge_trace(XGE_ERR, "XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT"); - break; + rid = 0; + if(lldev->enabled_msi) { + /* Number of MSI messages supported by device */ + msi_count = pci_msi_count(dev); + if(msi_count > 1) { + /* Device supports MSI */ + if(bootverbose) { + xge_trace(XGE_ERR, "MSI count: %d", msi_count); + xge_trace(XGE_ERR, "Now, driver supporting 1 message"); + } + msi_count = 1; + error = pci_alloc_msi(dev, &msi_count); + if(error == 0) { + if(bootverbose) + xge_trace(XGE_ERR, "Allocated messages: %d", msi_count); + enable_msi = XGE_HAL_INTR_MODE_MSI; + rid = 1; + } + else { + if(bootverbose) + xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error); + } } - xge_trace(XGE_ERR, "Initializing HAL device failed (error: %d)\n", - error); - freeResources(dev, 8); - retValue = ENXIO; - goto attach_out; - } - - desc = (char *) malloc(100, M_DEVBUF, M_NOWAIT); - if(desc == NULL) { - retValue = ENOMEM; } - else { - sprintf(desc, "%s (Rev %d) Driver v%s \n%s: Serial Number: %s ", - hldev->vpd_data.product_name, hldev->revision, DRIVER_VERSION, - device_get_nameunit(dev), hldev->vpd_data.serial_num); - printf("%s: Xframe%s %s\n", device_get_nameunit(dev), - ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I": "II"), - desc); - free(desc, M_DEVBUF); + lldev->enabled_msi = enable_msi; - } - - if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) { - error = xge_hal_mgmt_reg_read(hldev, 0, - xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64); - if(error != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Error for getting bus speed"); + /* Allocate resource for irq */ + lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + (RF_SHAREABLE | RF_ACTIVE)); + if(lldev->irq == NULL) { + xge_trace(XGE_ERR, "Allocating irq resource for %s failed", + ((rid == 0) ? "line interrupt" : "MSI")); + if(rid == 1) { + error = pci_release_msi(dev); + if(error != 0) { + xge_trace(XGE_ERR, "Releasing MSI resources failed %d", + error); + xge_trace(XGE_ERR, "Requires reboot to use MSI again"); + } + xge_trace(XGE_ERR, "Trying line interrupts"); + rid = 0; + lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE; + lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + (RF_SHAREABLE | RF_ACTIVE)); } - mesg = (char *) xge_malloc(20); - if(mesg == NULL) { - freeResources(dev, 8); - retValue = ENOMEM; + if(lldev->irq == NULL) { + xge_trace(XGE_ERR, "Allocating irq resource failed"); + xge_resources_free(dev, xge_free_bar1_resource); + status = ENOMEM; goto attach_out; } + } - sprintf(mesg, "%s: Device is on %s bit", device_get_nameunit(dev), - (val64 & BIT(8)) ? 
"32":"64"); - - mode = (u8)((val64 & vBIT(0xF, 0, 4)) >> 60); - switch(mode) { - case 0x00: xge_os_printf("%s PCI 33MHz bus", mesg); break; - case 0x01: xge_os_printf("%s PCI 66MHz bus", mesg); break; - case 0x02: xge_os_printf("%s PCIX(M1) 66MHz bus", mesg); break; - case 0x03: xge_os_printf("%s PCIX(M1) 100MHz bus", mesg); break; - case 0x04: xge_os_printf("%s PCIX(M1) 133MHz bus", mesg); break; - case 0x05: xge_os_printf("%s PCIX(M2) 133MHz bus", mesg); break; - case 0x06: xge_os_printf("%s PCIX(M2) 200MHz bus", mesg); break; - case 0x07: xge_os_printf("%s PCIX(M2) 266MHz bus", mesg); break; - } - free(mesg, M_DEVBUF); + device_config->intr_mode = lldev->enabled_msi; + if(bootverbose) { + xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid, + lldev->enabled_msi, msi_count); + } + + /* Initialize HAL device */ + error = xge_hal_device_initialize(hldev, &attr, device_config); + if(error != XGE_HAL_OK) { + xge_resources_free(dev, xge_free_irq_resource); + XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out, + ENXIO); } xge_hal_device_private_set(hldev, lldev); error = xge_interface_setup(dev); if(error != 0) { - retValue = error; + status = error; goto attach_out; } @@ -643,205 +970,72 @@ xge_attach(device_t dev) xge_media_init(dev); - /* Interrupt */ - rid = 0; - lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if(lldev->irq == NULL) { - xge_trace(XGE_ERR, "NULL handler for IRQ"); - freeResources(dev, 10); - retValue = ENOMEM; - goto attach_out; - } - /* Associate interrupt handler with the device */ - error = bus_setup_intr(dev, lldev->irq, INTR_TYPE_NET | INTR_MPSAFE, + if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { + error = bus_setup_intr(dev, lldev->irq, + (INTR_TYPE_NET | INTR_MPSAFE), #if __FreeBSD_version > 700030 - xge_intr_filter, + NULL, #endif - (void *)xge_intr, lldev, &lldev->irqhandle); - if(error != 0) { - xge_trace(XGE_ERR, - "Associating interrupt handler with device failed"); - freeResources(dev, 11); - retValue = ENXIO; - goto attach_out; + xge_isr_msi, lldev, &lldev->irqhandle); + xge_msi_info_save(lldev); } - - /* Create DMA tags */ - error = bus_dma_tag_create( - bus_get_dma_tag(dev), /* Parent */ - PAGE_SIZE, /* Alignment */ - 0, /* Bounds */ - BUS_SPACE_MAXADDR, /* Low Address */ - BUS_SPACE_MAXADDR, /* High Address */ - NULL, /* Filter Function */ - NULL, /* Filter Function Arguments */ - MCLBYTES * MAX_SEGS, /* Maximum Size */ - MAX_SEGS, /* Number of Segments */ - MCLBYTES, /* Maximum Segment Size */ - BUS_DMA_ALLOCNOW, /* Flags */ - NULL, /* Lock Function */ - NULL, /* Lock Function Arguments */ - (&lldev->dma_tag_tx)); /* DMA Tag */ - if(error != 0) { - xge_trace(XGE_ERR, "Tx DMA tag creation failed"); - freeResources(dev, 12); - retValue = ENOMEM; - goto attach_out; + else { + error = bus_setup_intr(dev, lldev->irq, + (INTR_TYPE_NET | INTR_MPSAFE), +#if __FreeBSD_version > 700030 + xge_isr_filter, +#endif + xge_isr_line, lldev, &lldev->irqhandle); } - - error = bus_dma_tag_create( - bus_get_dma_tag(dev), /* Parent */ - PAGE_SIZE, /* Alignment */ - 0, /* Bounds */ - BUS_SPACE_MAXADDR, /* Low Address */ - BUS_SPACE_MAXADDR, /* High Address */ - NULL, /* Filter Function */ - NULL, /* Filter Function Arguments */ - MJUMPAGESIZE, /* Maximum Size */ - 1, /* Number of Segments */ - MJUMPAGESIZE, /* Maximum Segment Size */ - BUS_DMA_ALLOCNOW, /* Flags */ - NULL, /* Lock Function */ - NULL, /* Lock Function Arguments */ - (&lldev->dma_tag_rx)); /* DMA Tag */ - if(error != 0) { - xge_trace(XGE_ERR, "Rx DMA 
tag creation failed"); - freeResources(dev, 13); - retValue = ENOMEM; - goto attach_out; + xge_resources_free(dev, xge_free_media_interface); + XGE_EXIT_ON_ERR("Associating interrupt handler with device failed", + attach_out, ENXIO); } - /*Updating lldev->buffer_mode parameter*/ - pRingConfig = &(hldev->config.ring); - - if((device_config->mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= PAGE_SIZE) { -#if defined(XGE_FEATURE_BUFFER_MODE_3) - xge_os_printf("%s: 3 Buffer Mode Enabled", - device_get_nameunit(dev)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - } - pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A; - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[2] = device_config->mtu; - lldev->rxd_mbuf_cnt = 3; -#else -#if defined(XGE_FEATURE_BUFFER_MODE_2) - xge_os_printf("%s: 2 Buffer Mode Enabled", - device_get_nameunit(dev)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - } - pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B; - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = device_config->mtu; - lldev->rxd_mbuf_cnt = 2; -#else - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1; - lldev->rxd_mbuf_len[0] = device_config->mtu; - lldev->rxd_mbuf_cnt = 1; -#endif -#endif - } - else { - xge_os_printf("%s: 5 Buffer Mode Enabled", - device_get_nameunit(dev)); - xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_5; - } - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5; - buffer_length = device_config->mtu; - buffer_index = 2; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; - - while(buffer_length > PAGE_SIZE) { - buffer_length -= PAGE_SIZE; - lldev->rxd_mbuf_len[buffer_index] = PAGE_SIZE; - buffer_index++; - } - - BUFALIGN(buffer_length); - - lldev->rxd_mbuf_len[buffer_index] = buffer_length; - lldev->rxd_mbuf_cnt = buffer_index; - } + xge_print_info(lldev); -#ifdef XGE_FEATURE_LRO - xge_os_printf("%s: LRO (Large Receive Offload) Enabled", - device_get_nameunit(dev)); -#endif + xge_add_sysctl_handlers(lldev); -#ifdef XGE_FEATURE_TSO - xge_os_printf("%s: TSO (TCP Segmentation Offload) enabled", - device_get_nameunit(dev)); -#endif + xge_buffer_mode_init(lldev, device_config->mtu); attach_out: - free(device_config, M_DEVBUF); + xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t)); attach_out_config: - LEAVE_FUNCTION - return retValue; + return status; } -/****************************************** - * freeResources - * Parameters: Device structure, error (used - * to branch freeing) - * Return: None - * Description: Frees allocated resources - ******************************************/ +/** + * xge_resources_free + * Undo what-all we did during load/attach + * + * @dev Device Handle + * @error Identifies what-all to undo + */ void -freeResources(device_t dev, int error) +xge_resources_free(device_t dev, xge_lables_e error) { - xgelldev_t *lldev; - pci_info_t *pci_info; + xge_lldev_t *lldev; + xge_pci_info_t *pci_info; xge_hal_device_t *hldev; int rid, 
status; - ENTER_FUNCTION - /* LL Device */ - lldev = (xgelldev_t *) device_get_softc(dev); + lldev = (xge_lldev_t *) device_get_softc(dev); pci_info = lldev->pdev; /* HAL Device */ hldev = lldev->devh; switch(error) { - case 0: - status = bus_dma_tag_destroy(lldev->dma_tag_rx); - if(status) { - xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); - } - - case 13: - status = bus_dma_tag_destroy(lldev->dma_tag_tx); - if(status) { - xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); - } - - case 12: + case xge_free_all: /* Teardown interrupt handler - device association */ bus_teardown_intr(dev, lldev->irq, lldev->irqhandle); - case 11: - /* Release IRQ */ - bus_release_resource(dev, SYS_RES_IRQ, 0, lldev->irq); - - case 10: + case xge_free_media_interface: /* Media */ - ifmedia_removeall(&lldev->xge_media); + ifmedia_removeall(&lldev->media); /* Detach Ether */ ether_ifdetach(lldev->ifnetp); @@ -850,167 +1044,145 @@ freeResources(device_t dev, int error) xge_hal_device_private_set(hldev, NULL); xge_hal_device_disable(hldev); - case 9: + case xge_free_terminate_hal_device: /* HAL Device */ xge_hal_device_terminate(hldev); - case 8: + case xge_free_irq_resource: + /* Release IRQ resource */ + bus_release_resource(dev, SYS_RES_IRQ, + ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1), + lldev->irq); + + if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { + status = pci_release_msi(dev); + if(status != 0) { + if(bootverbose) { + xge_trace(XGE_ERR, + "pci_release_msi returned %d", status); + } + } + } + + case xge_free_bar1_resource: /* Restore PCI configuration space */ xge_pci_space_restore(dev); /* Free bar1resource */ - free(pci_info->bar1resource, M_DEVBUF); + xge_os_free(NULL, pci_info->bar1resource, + sizeof(xge_bus_resource_t)); - case 7: + case xge_free_bar1: /* Release BAR1 */ rid = PCIR_BAR(2); bus_release_resource(dev, SYS_RES_MEMORY, rid, pci_info->regmap1); - case 6: + case xge_free_bar0_resource: /* Free bar0resource */ - free(pci_info->bar0resource, M_DEVBUF); + xge_os_free(NULL, pci_info->bar0resource, + sizeof(xge_bus_resource_t)); - case 5: + case xge_free_bar0: /* Release BAR0 */ rid = PCIR_BAR(0); bus_release_resource(dev, SYS_RES_MEMORY, rid, pci_info->regmap0); - case 4: + case xge_free_pci_info: /* Disable Bus Master */ pci_disable_busmaster(dev); /* Free pci_info_t */ lldev->pdev = NULL; - free(pci_info, M_DEVBUF); + xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t)); - case 3: + case xge_free_hal_device: /* Free device configuration struct and HAL device */ - free(hldev, M_DEVBUF); + xge_os_free(NULL, hldev, sizeof(xge_hal_device_t)); - case 2: + case xge_free_terminate_hal_driver: /* Terminate HAL driver */ hal_driver_init_count = hal_driver_init_count - 1; if(!hal_driver_init_count) { xge_hal_driver_terminate(); } - case 1: - if(mtx_initialized(&lldev->xge_lock) != 0) { - mtx_destroy(&lldev->xge_lock); - } + case xge_free_mutex: + xge_mutex_destroy(lldev); } - - LEAVE_FUNCTION } -/****************************************** +/** * xge_detach - * Parameters: Device structure - * Return: 0 - * Description: Detaches the driver from the - * kernel subsystem. 
- ******************************************/ + * Detaches driver from the Kernel subsystem + * + * @dev Device Handle + */ int xge_detach(device_t dev) { - xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev); - - ENTER_FUNCTION + xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); - mtx_lock(&lldev->xge_lock); - lldev->in_detach = 1; - xge_stop(lldev); - mtx_unlock(&lldev->xge_lock); - - freeResources(dev, 0); - - LEAVE_FUNCTION + if(lldev->in_detach == 0) { + lldev->in_detach = 1; + xge_stop(lldev); + xge_resources_free(dev, xge_free_all); + } return 0; } -/****************************************** +/** * xge_shutdown - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Gets called when the system - * is about to be shutdown. - ******************************************/ + * To shutdown device before system shutdown + * + * @dev Device Handle + */ int xge_shutdown(device_t dev) { - xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev); - - ENTER_FUNCTION - mtx_lock(&lldev->xge_lock); + xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev); xge_stop(lldev); - mtx_unlock(&lldev->xge_lock); - LEAVE_FUNCTION + return 0; } -/****************************************** - * Function: xge_interface_setup - * Parameters: Device pointer - * Return: 0/ENXIO/ENOMEM - * Description: Sets up the interface - * through ifnet pointer - ******************************************/ +/** + * xge_interface_setup + * Setup interface + * + * @dev Device Handle + * + * Returns 0 on success, ENXIO/ENOMEM on failure + */ int xge_interface_setup(device_t dev) { u8 mcaddr[ETHER_ADDR_LEN]; - xge_hal_status_e status_code; - xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev); + xge_hal_status_e status; + xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); struct ifnet *ifnetp; xge_hal_device_t *hldev = lldev->devh; - int retValue = 0; - - ENTER_FUNCTION /* Get the MAC address of the device */ - status_code = xge_hal_device_macaddr_get(hldev, 0, &mcaddr); - if(status_code != XGE_HAL_OK) { - switch(status_code) { - case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING: - xge_trace(XGE_ERR, - "Failed to retrieve MAC address (timeout)"); - break; - - case XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES: - xge_trace(XGE_ERR, "Invalid MAC address index"); - break; - - default: - xge_trace(XGE_TRACE, "Default Case"); - break; - } - freeResources(dev, 9); - retValue = ENXIO; - goto ifsetup_out; + status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr); + if(status != XGE_HAL_OK) { + xge_resources_free(dev, xge_free_terminate_hal_device); + XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO); } /* Get interface ifnet structure for this Ether device */ ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER); if(ifnetp == NULL) { - xge_trace(XGE_ERR, "Allocating/getting ifnet structure failed"); - freeResources(dev, 9); - retValue = ENOMEM; - goto ifsetup_out; + xge_resources_free(dev, xge_free_terminate_hal_device); + XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM); } /* Initialize interface ifnet structure */ if_initname(ifnetp, device_get_name(dev), device_get_unit(dev)); - ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU; - - /* - * TODO: Can't set more than 2Gbps. -- Higher value results in overflow. 
- * But there is no effect in performance even if you set this to 10 Mbps - */ - ifnetp->if_baudrate = IF_Gbps(2); + ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU; + ifnetp->if_baudrate = XGE_BAUDRATE; ifnetp->if_init = xge_init; ifnetp->if_softc = lldev; ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; @@ -1020,175 +1192,149 @@ xge_interface_setup(device_t dev) /* TODO: Check and assign optimal value */ ifnetp->if_snd.ifq_maxlen = IFQ_MAXLEN; - ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | + ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM; + if(lldev->enabled_tso) + ifnetp->if_capabilities |= IFCAP_TSO4; + if(lldev->enabled_lro) + ifnetp->if_capabilities |= IFCAP_LRO; ifnetp->if_capenable = ifnetp->if_capabilities; -#ifdef XGE_FEATURE_TSO - ifnetp->if_capabilities |= IFCAP_TSO4; - ifnetp->if_capenable |= IFCAP_TSO4; -#endif - /* Attach the interface */ ether_ifattach(ifnetp, mcaddr); ifsetup_out: - LEAVE_FUNCTION - - return retValue; + return status; } -/****************************************** - * xgell_callback_link_up - * Parameters: Per adapter xgelldev_t - * structure pointer as void * - * Return: None - * Description: Called by HAL to notify - * hardware link up state change - ******************************************/ +/** + * xge_callback_link_up + * Callback for Link-up indication from HAL + * + * @userdata Per-adapter data + */ void -xgell_callback_link_up(void *userdata) +xge_callback_link_up(void *userdata) { - xgelldev_t *lldev = (xgelldev_t *)userdata; + xge_lldev_t *lldev = (xge_lldev_t *)userdata; struct ifnet *ifnetp = lldev->ifnetp; - ENTER_FUNCTION - ifnetp->if_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifnetp, LINK_STATE_UP); - - LEAVE_FUNCTION } -/****************************************** - * xgell_callback_link_down - * Parameters: Per adapter xgelldev_t - * structure pointer as void * - * Return: None - * Description: Called by HAL to notify - * hardware link up state change - ******************************************/ +/** + * xge_callback_link_down + * Callback for Link-down indication from HAL + * + * @userdata Per-adapter data + */ void -xgell_callback_link_down(void *userdata) +xge_callback_link_down(void *userdata) { - xgelldev_t *lldev = (xgelldev_t *)userdata; + xge_lldev_t *lldev = (xge_lldev_t *)userdata; struct ifnet *ifnetp = lldev->ifnetp; - ENTER_FUNCTION - ifnetp->if_flags |= IFF_DRV_OACTIVE; if_link_state_change(ifnetp, LINK_STATE_DOWN); - - LEAVE_FUNCTION } -/****************************************** - * xgell_callback_crit_err - * Parameters: Per adapter xgelldev_t - * structure pointer as void *, event, - * serr_data -> - * Return: None - * Description: Called by HAL on serious - * error event - ******************************************/ +/** + * xge_callback_crit_err + * Callback for Critical error indication from HAL + * + * @userdata Per-adapter data + * @type Event type (Enumerated hardware error) + * @serr_data Hardware status + */ void -xgell_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data) +xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data) { - ENTER_FUNCTION - xge_trace(XGE_ERR, "Critical Error"); - xgell_reset(userdata); - - LEAVE_FUNCTION + xge_reset(userdata); } -/****************************************** - * xgell_callback_event - * Parameters: Queue item - * Return: None - * Description: Called by HAL in case of - * some unknown to HAL events. 
- ******************************************/ +/** + * xge_callback_event + * Callback from HAL indicating that some event has been queued + * + * @item Queued event item + */ void -xgell_callback_event(xge_queue_item_t *item) +xge_callback_event(xge_queue_item_t *item) { - xgelldev_t *lldev = NULL; + xge_lldev_t *lldev = NULL; xge_hal_device_t *hldev = NULL; struct ifnet *ifnetp = NULL; - ENTER_FUNCTION - hldev = item->context; lldev = xge_hal_device_private(hldev); ifnetp = lldev->ifnetp; - if(item->event_type == XGE_LL_EVENT_TRY_XMIT_AGAIN) { - if(lldev->initialized) { - if(xge_hal_channel_dtr_count(lldev->fifo_channel_0) > 0) { - ifnetp->if_flags &= ~IFF_DRV_OACTIVE; - } - else { - /* try next time */ - xge_queue_produce_context( - xge_hal_device_queue(lldev->devh), - XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh); + switch(item->event_type) { + case XGE_LL_EVENT_TRY_XMIT_AGAIN: + if(lldev->initialized) { + if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) { + ifnetp->if_flags &= ~IFF_DRV_OACTIVE; + } + else { + xge_queue_produce_context( + xge_hal_device_queue(lldev->devh), + XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh); + } } - } - } - else if(item->event_type == XGE_LL_EVENT_DEVICE_RESETTING) { - xgell_reset(item->context); - } + break; + + case XGE_LL_EVENT_DEVICE_RESETTING: + xge_reset(item->context); + break; - LEAVE_FUNCTION + default: + break; + } } -/****************************************** - * Function: xge_ifmedia_change - * Parameters: Pointer to ifnet structure - * Return: 0 for success, EINVAL if media - * type is not IFM_ETHER. - * Description: Media change driver callback - ******************************************/ +/** + * xge_ifmedia_change + * Media change driver callback + * + * @ifnetp Interface Handle + * + * Returns 0 if media is Ether else EINVAL + */ int xge_ifmedia_change(struct ifnet *ifnetp) { - xgelldev_t *lldev = ifnetp->if_softc; - struct ifmedia *ifmediap = &lldev->xge_media; - - ENTER_FUNCTION - LEAVE_FUNCTION + xge_lldev_t *lldev = ifnetp->if_softc; + struct ifmedia *ifmediap = &lldev->media; return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0; } -/****************************************** - * Function: xge_ifmedia_status - * Parameters: Pointer to ifnet structure - * ifmediareq structure pointer - * through which status of media - * will be returned. 
- * Return: None - * Description: Media status driver callback - ******************************************/ +/** + * xge_ifmedia_status + * Media status driver callback + * + * @ifnetp Interface Handle + * @ifmr Interface Media Settings + */ void xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr) { xge_hal_status_e status; u64 regvalue; - xgelldev_t *lldev = ifnetp->if_softc; + xge_lldev_t *lldev = ifnetp->if_softc; xge_hal_device_t *hldev = lldev->devh; - ENTER_FUNCTION - ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; status = xge_hal_mgmt_reg_read(hldev, 0, xge_offsetof(xge_hal_pci_bar0_t, adapter_status), ®value); if(status != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Getting adapter status failed"); - return; + xge_trace(XGE_TRACE, "Getting adapter status failed"); + goto _exit; } if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | @@ -1200,43 +1346,261 @@ xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr) else { if_link_state_change(ifnetp, LINK_STATE_DOWN); } +_exit: + return; +} - LEAVE_FUNCTION +/** + * xge_ioctl_stats + * IOCTL to get statistics + * + * @lldev Per-adapter data + * @ifreqp Interface request + */ +int +xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp) +{ + xge_hal_status_e status = XGE_HAL_OK; + char *data = (char *)ifreqp->ifr_data; + void *info = NULL; + int retValue = EINVAL; + + switch(*data) { + case XGE_QUERY_STATS: + mtx_lock(&lldev->mtx_drv); + status = xge_hal_stats_hw(lldev->devh, + (xge_hal_stats_hw_info_t **)&info); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(info, ifreqp->ifr_data, + sizeof(xge_hal_stats_hw_info_t)) == 0) + retValue = 0; + } + else { + xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)", + status); + } + break; + + case XGE_QUERY_PCICONF: + info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t)); + if(info != NULL) { + mtx_lock(&lldev->mtx_drv); + status = xge_hal_mgmt_pci_config(lldev->devh, info, + sizeof(xge_hal_pci_config_t)); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(info, ifreqp->ifr_data, + sizeof(xge_hal_pci_config_t)) == 0) + retValue = 0; + } + else { + xge_trace(XGE_ERR, + "Getting PCI configuration failed (%d)", status); + } + xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t)); + } + break; + + case XGE_QUERY_DEVSTATS: + info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t)); + if(info != NULL) { + mtx_lock(&lldev->mtx_drv); + status =xge_hal_mgmt_device_stats(lldev->devh, info, + sizeof(xge_hal_stats_device_info_t)); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(info, ifreqp->ifr_data, + sizeof(xge_hal_stats_device_info_t)) == 0) + retValue = 0; + } + else { + xge_trace(XGE_ERR, "Getting device info failed (%d)", + status); + } + xge_os_free(NULL, info, + sizeof(xge_hal_stats_device_info_t)); + } + break; + + case XGE_QUERY_SWSTATS: + info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t)); + if(info != NULL) { + mtx_lock(&lldev->mtx_drv); + status =xge_hal_mgmt_sw_stats(lldev->devh, info, + sizeof(xge_hal_stats_sw_err_t)); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(info, ifreqp->ifr_data, + sizeof(xge_hal_stats_sw_err_t)) == 0) + retValue = 0; + } + else { + xge_trace(XGE_ERR, + "Getting tcode statistics failed (%d)", status); + } + xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t)); + } + break; + + case XGE_QUERY_DRIVERSTATS: + if(copyout(&lldev->driver_stats, ifreqp->ifr_data, + sizeof(xge_driver_stats_t)) == 0) { + 
retValue = 0; + } + else { + xge_trace(XGE_ERR, + "Copyout of driver statistics failed (%d)", status); + } + break; + + case XGE_READ_VERSION: + info = xge_os_malloc(NULL, XGE_BUFFER_SIZE); + if(version != NULL) { + strcpy(info, XGE_DRIVER_VERSION); + if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0) + retValue = 0; + xge_os_free(NULL, info, XGE_BUFFER_SIZE); + } + break; + + case XGE_QUERY_DEVCONF: + info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t)); + if(info != NULL) { + mtx_lock(&lldev->mtx_drv); + status = xge_hal_mgmt_device_config(lldev->devh, info, + sizeof(xge_hal_device_config_t)); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(info, ifreqp->ifr_data, + sizeof(xge_hal_device_config_t)) == 0) + retValue = 0; + } + else { + xge_trace(XGE_ERR, "Getting devconfig failed (%d)", + status); + } + xge_os_free(NULL, info, sizeof(xge_hal_device_config_t)); + } + break; + + case XGE_QUERY_BUFFER_MODE: + if(copyout(&lldev->buffer_mode, ifreqp->ifr_data, + sizeof(int)) == 0) + retValue = 0; + break; + + case XGE_SET_BUFFER_MODE_1: + case XGE_SET_BUFFER_MODE_2: + case XGE_SET_BUFFER_MODE_5: + *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N'; + if(copyout(data, ifreqp->ifr_data, sizeof(data)) == 0) + retValue = 0; + break; + default: + xge_trace(XGE_TRACE, "Nothing is matching"); + retValue = ENOTTY; + break; + } + return retValue; } -/****************************************** - * Function: xge_ioctl - * Parameters: Pointer to ifnet structure, - * command -> indicates requests, - * data -> passed values (if any) - * Return: - * Description: IOCTL entry point. Called - * when the user wants to - * configure the interface - ******************************************/ +/** + * xge_ioctl_registers + * IOCTL to get registers + * + * @lldev Per-adapter data + * @ifreqp Interface request + */ +int +xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp) +{ + xge_register_t *data = (xge_register_t *)ifreqp->ifr_data; + xge_hal_status_e status = XGE_HAL_OK; + int retValue = EINVAL, offset = 0, index = 0; + u64 val64 = 0; + + /* Reading a register */ + if(strcmp(data->option, "-r") == 0) { + data->value = 0x0000; + mtx_lock(&lldev->mtx_drv); + status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset, + &data->value); + mtx_unlock(&lldev->mtx_drv); + if(status == XGE_HAL_OK) { + if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0) + retValue = 0; + } + } + /* Writing to a register */ + else if(strcmp(data->option, "-w") == 0) { + mtx_lock(&lldev->mtx_drv); + status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset, + data->value); + if(status == XGE_HAL_OK) { + val64 = 0x0000; + status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset, + &val64); + if(status != XGE_HAL_OK) { + xge_trace(XGE_ERR, "Reading back updated register failed"); + } + else { + if(val64 != data->value) { + xge_trace(XGE_ERR, + "Read and written register values mismatched"); + } + else retValue = 0; + } + } + else { + xge_trace(XGE_ERR, "Getting register value failed"); + } + mtx_unlock(&lldev->mtx_drv); + } + else { + mtx_lock(&lldev->mtx_drv); + for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG; + index++, offset += 0x0008) { + val64 = 0; + status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64); + if(status != XGE_HAL_OK) { + xge_trace(XGE_ERR, "Getting register value failed"); + break; + } + *((u64 *)((u64 *)data + index)) = val64; + retValue = 0; + } + mtx_unlock(&lldev->mtx_drv); + + if(retValue == 0) { + if(copyout(data, 
ifreqp->ifr_data, + sizeof(xge_hal_pci_bar0_t)) != 0) { + xge_trace(XGE_ERR, "Copyout of register values failed"); + retValue = EINVAL; + } + } + else { + xge_trace(XGE_ERR, "Getting register values failed"); + } + } + return retValue; +} + +/** + * xge_ioctl + * Callback to control the device - Interface configuration + * + * @ifnetp Interface Handle + * @command Device control command + * @data Parameters associated with command (if any) + */ int xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) { - struct ifmedia *ifmediap; - xge_hal_stats_hw_info_t *hw_stats; - xge_hal_pci_config_t *pci_conf; - xge_hal_device_config_t *device_conf; - xge_hal_stats_sw_err_t *tcode; - xge_hal_stats_device_info_t *intr; - bar0reg_t *reg; - xge_hal_status_e status_code; - xge_hal_device_t *hldev; - void *regInfo; - u64 value; - u64 offset; - char *pAccess; - char *version; - int retValue = 0, index = 0, buffer_mode = 0; - struct ifreq *ifreqp = (struct ifreq *) data; - xgelldev_t *lldev = ifnetp->if_softc; - - ifmediap = &lldev->xge_media; - hldev = lldev->devh; + struct ifreq *ifreqp = (struct ifreq *)data; + xge_lldev_t *lldev = ifnetp->if_softc; + struct ifmedia *ifmediap = &lldev->media; + int retValue = 0, mask = 0; if(lldev->in_detach) { return retValue; @@ -1251,16 +1615,15 @@ xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) /* Set ifnet MTU */ case SIOCSIFMTU: - retValue = changeMtu(lldev, ifreqp->ifr_mtu); + retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu); break; /* Set ifnet flags */ case SIOCSIFFLAGS: - mtx_lock(&lldev->xge_lock); if(ifnetp->if_flags & IFF_UP) { /* Link status is UP */ if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) { - xge_init_locked(lldev); + xge_init(lldev); } xge_disable_promisc(lldev); xge_enable_promisc(lldev); @@ -1272,7 +1635,6 @@ xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) xge_stop(lldev); } } - mtx_unlock(&lldev->xge_lock); break; /* Add/delete multicast address */ @@ -1291,244 +1653,47 @@ xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) /* Set capabilities */ case SIOCSIFCAP: - mtx_lock(&lldev->xge_lock); - int mask = 0; + mtx_lock(&lldev->mtx_drv); mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable; -#if defined(__FreeBSD_version) && (__FreeBSD_version >= 700026) + if(mask & IFCAP_TXCSUM) { + if(ifnetp->if_capenable & IFCAP_TXCSUM) { + ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); + ifnetp->if_hwassist &= + ~(CSUM_TCP | CSUM_UDP | CSUM_TSO); + } + else { + ifnetp->if_capenable |= IFCAP_TXCSUM; + ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP); + } + } if(mask & IFCAP_TSO4) { if(ifnetp->if_capenable & IFCAP_TSO4) { ifnetp->if_capenable &= ~IFCAP_TSO4; ifnetp->if_hwassist &= ~CSUM_TSO; - } - /*enable tso only if txcsum is enabled*/ - if(ifnetp->if_capenable & IFCAP_TXCSUM) { + xge_os_printf("%s: TSO Disabled", + device_get_nameunit(lldev->device)); + } + else if(ifnetp->if_capenable & IFCAP_TXCSUM) { ifnetp->if_capenable |= IFCAP_TSO4; ifnetp->if_hwassist |= CSUM_TSO; + + xge_os_printf("%s: TSO Enabled", + device_get_nameunit(lldev->device)); } } -#endif - mtx_unlock(&lldev->xge_lock); + + mtx_unlock(&lldev->mtx_drv); break; - /* Custom IOCTL 0 : - * Used to get Statistics & PCI configuration through application */ + /* Custom IOCTL 0 */ case SIOCGPRIVATE_0: - pAccess = (char*) ifreqp->ifr_data; - if(*pAccess == XGE_QUERY_STATS) { - mtx_lock(&lldev->xge_lock); - status_code = xge_hal_stats_hw(hldev, &hw_stats); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Getting 
statistics failed (Status: %d)", - status_code); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - } - copyout(hw_stats, ifreqp->ifr_data, - sizeof(xge_hal_stats_hw_info_t)); - mtx_unlock(&lldev->xge_lock); - } - else if(*pAccess == XGE_QUERY_PCICONF) { - pci_conf = xge_malloc(sizeof(xge_hal_pci_config_t)); - if(pci_conf == NULL) { - return(ENOMEM); - } - mtx_lock(&lldev->xge_lock); - status_code = xge_hal_mgmt_pci_config(hldev, pci_conf, - sizeof(xge_hal_pci_config_t)); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Getting PCIconfiguration failed (Status: %d)", - status_code); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - } - copyout(pci_conf, ifreqp->ifr_data, - sizeof(xge_hal_pci_config_t)); - mtx_unlock(&lldev->xge_lock); - free(pci_conf, M_DEVBUF); - } - else if(*pAccess ==XGE_QUERY_INTRSTATS) { - intr = xge_malloc(sizeof(xge_hal_stats_device_info_t)); - if(intr == NULL) { - return(ENOMEM); - } - mtx_lock(&lldev->xge_lock); - status_code =xge_hal_mgmt_device_stats(hldev, intr, - sizeof(xge_hal_stats_device_info_t)); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Getting intr statistics failed (Status: %d)", - status_code); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - } - copyout(intr, ifreqp->ifr_data, - sizeof(xge_hal_stats_device_info_t)); - mtx_unlock(&lldev->xge_lock); - free(intr, M_DEVBUF); - } - else if(*pAccess == XGE_QUERY_TCODE) { - tcode = xge_malloc(sizeof(xge_hal_stats_sw_err_t)); - if(tcode == NULL) { - return(ENOMEM); - } - mtx_lock(&lldev->xge_lock); - status_code =xge_hal_mgmt_sw_stats(hldev, tcode, - sizeof(xge_hal_stats_sw_err_t)); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Getting tcode statistics failed (Status: %d)", - status_code); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - } - copyout(tcode, ifreqp->ifr_data, - sizeof(xge_hal_stats_sw_err_t)); - mtx_unlock(&lldev->xge_lock); - free(tcode, M_DEVBUF); - } - else if(*pAccess ==XGE_READ_VERSION) { - version = xge_malloc(BUFFER_SIZE); - if(version == NULL) { - return(ENOMEM); - } - mtx_lock(&lldev->xge_lock); - strcpy(version,DRIVER_VERSION); - copyout(version, ifreqp->ifr_data, BUFFER_SIZE); - mtx_unlock(&lldev->xge_lock); - free(version, M_DEVBUF); - } - else if(*pAccess == XGE_QUERY_DEVCONF) { - device_conf = xge_malloc(sizeof(xge_hal_device_config_t)); - if(device_conf == NULL) { - return(ENOMEM); - } - mtx_lock(&lldev->xge_lock); - status_code = xge_hal_mgmt_device_config(hldev, device_conf, - sizeof(xge_hal_device_config_t)); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Getting devconfig failed (Status: %d)", - status_code); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - } - if(copyout(device_conf, ifreqp->ifr_data, - sizeof(xge_hal_device_config_t)) != 0) { - xge_trace(XGE_ERR, "Device configuration copyout erro"); - } - mtx_unlock(&lldev->xge_lock); - free(device_conf, M_DEVBUF); - } - else if(*pAccess == XGE_QUERY_BUFFER_MODE) { - buffer_mode = lldev->buffer_mode; - if(copyout(&buffer_mode, ifreqp->ifr_data, - sizeof(int)) != 0) { - xge_trace(XGE_ERR, "Error with copyout of buffermode"); - retValue = EINVAL; - } - } - else if((*pAccess == XGE_SET_BUFFER_MODE_1) || - (*pAccess == XGE_SET_BUFFER_MODE_2) || - (*pAccess == XGE_SET_BUFFER_MODE_3) || - (*pAccess == XGE_SET_BUFFER_MODE_5)) { - switch(*pAccess) { - case XGE_SET_BUFFER_MODE_1: *pAccess = 'Y'; break; - case XGE_SET_BUFFER_MODE_2: - case XGE_SET_BUFFER_MODE_3: - case XGE_SET_BUFFER_MODE_5: *pAccess = 'N'; break; - } - if(copyout(pAccess, ifreqp->ifr_data, - 
sizeof(pAccess)) != 0) { - xge_trace(XGE_ERR, - "Copyout of chgbufmode result failed"); - } - } - else { - xge_trace(XGE_TRACE, "Nothing is matching"); - } + retValue = xge_ioctl_stats(lldev, ifreqp); break; - /* - * Custom IOCTL 1 : - * Used to get BAR0 register values through application program - */ + /* Custom IOCTL 1 */ case SIOCGPRIVATE_1: - reg = (bar0reg_t *) ifreqp->ifr_data; - if(strcmp(reg->option,"-r") == 0) { - offset = reg->offset; - value = 0x0000; - mtx_lock(&lldev->xge_lock); - status_code = xge_hal_mgmt_reg_read(hldev, 0, offset, - &value ); - if(status_code == XGE_HAL_OK) { - reg->value = value; - } - else { - xge_trace(XGE_ERR, "Getting register value failed"); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - break; - } - copyout(reg, ifreqp->ifr_data, sizeof(bar0reg_t)); - mtx_unlock(&lldev->xge_lock); - } - else if(strcmp(reg->option,"-w") == 0) { - offset = reg->offset; - value = reg->value; - mtx_lock(&lldev->xge_lock); - status_code = xge_hal_mgmt_reg_write(hldev, 0, offset, - value ); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Getting register value failed"); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - break; - } - value = 0x0000; - status_code = xge_hal_mgmt_reg_read(hldev, 0, offset, - &value); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Getting register value failed"); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - break; - } - if(reg->value != value) { - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - break; - } - mtx_unlock(&lldev->xge_lock); - } - else - { - offset = 0x0000; - value = 0x0000; - regInfo = (void *)ifreqp->ifr_data; - - mtx_lock(&lldev->xge_lock); - for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG; - index++, offset += 0x0008) { - status_code = xge_hal_mgmt_reg_read(hldev, 0, offset, - &value); - if(status_code == XGE_HAL_OK) { - *( ( u64 *)( ( u64 * )regInfo + index ) ) = value; - } - else { - xge_trace(XGE_ERR, "Getting register value failed"); - mtx_unlock(&lldev->xge_lock); - retValue = EINVAL; - break; - } - } - - copyout(regInfo, ifreqp->ifr_data, - sizeof(xge_hal_pci_bar0_t)); - mtx_unlock(&lldev->xge_lock); - } + retValue = xge_ioctl_registers(lldev, ifreqp); break; default: @@ -1538,62 +1703,117 @@ xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) return retValue; } -/****************************************** - * Function: xge_init - * Parameters: Pointer to per-device - * xgelldev_t structure as void*. - * Return: None - * Description: Init entry point. 
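[Note] With the ioctl path now split into xge_ioctl_stats() and xge_ioctl_registers(), the user interface stays the same: the first byte of the buffer passed through ifr_data selects the query and the driver copies the requested structure back into that buffer. A hedged userland sketch of the statistics query; the numeric value of XGE_QUERY_STATS and the buffer size are assumptions, the real values come from the driver's headers:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/sockio.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define XGE_QUERY_STATS 1   /* assumed value; defined in the driver headers */

    int
    main(void)
    {
        struct ifreq ifr;
        char buffer[8192];      /* assumed >= sizeof(xge_hal_stats_hw_info_t) */
        int s;

        if((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
            perror("socket");
            return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        memset(buffer, 0, sizeof(buffer));
        strlcpy(ifr.ifr_name, "nxge0", sizeof(ifr.ifr_name));
        buffer[0] = XGE_QUERY_STATS;        /* query selector read by the driver */
        ifr.ifr_data = (caddr_t)buffer;     /* driver copies the stats back here */

        if(ioctl(s, SIOCGPRIVATE_0, &ifr) < 0)
            perror("SIOCGPRIVATE_0");
        else
            printf("hardware statistics copied back into the buffer\n");

        close(s);
        return 0;
    }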
- ******************************************/ +/** + * xge_init + * Initialize the interface + * + * @plldev Per-adapter Data + */ void xge_init(void *plldev) { - ENTER_FUNCTION - - xgelldev_t *lldev = (xgelldev_t *)plldev; + xge_lldev_t *lldev = (xge_lldev_t *)plldev; - mtx_lock(&lldev->xge_lock); - xge_init_locked(lldev); - mtx_unlock(&lldev->xge_lock); - - LEAVE_FUNCTION + mtx_lock(&lldev->mtx_drv); + xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t)); + xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + mtx_unlock(&lldev->mtx_drv); } +/** + * xge_device_init + * Initialize the interface (called by holding lock) + * + * @pdevin Per-adapter Data + */ void -xge_init_locked(void *pdevin) +xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { - ENTER_FUNCTION - - xgelldev_t *lldev = (xgelldev_t *)pdevin; - struct ifnet *ifnetp = lldev->ifnetp; - device_t dev = lldev->device; + struct ifnet *ifnetp = lldev->ifnetp; + xge_hal_device_t *hldev = lldev->devh; + struct ifaddr *ifaddrp; + unsigned char *macaddr; + struct sockaddr_dl *sockaddrp; + int status = XGE_HAL_OK; - mtx_assert((&lldev->xge_lock), MA_OWNED); + mtx_assert((&lldev->mtx_drv), MA_OWNED); /* If device is in running state, initializing is not required */ - if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) { + if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) return; - } /* Initializing timer */ callout_init(&lldev->timer, CALLOUT_MPSAFE); - xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL); + xge_trace(XGE_TRACE, "Set MTU size"); + status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu); + if(status != XGE_HAL_OK) { + xge_trace(XGE_ERR, "Setting MTU in HAL device failed"); + goto _exit; + } + + /* Enable HAL device */ + xge_hal_device_enable(hldev); + + /* Get MAC address and update in HAL */ + ifaddrp = ifnetp->if_addr; + sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr; + sockaddrp->sdl_type = IFT_ETHER; + sockaddrp->sdl_alen = ifnetp->if_addrlen; + macaddr = LLADDR(sockaddrp); + xge_trace(XGE_TRACE, + "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", + *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3), + *(macaddr + 4), *(macaddr + 5)); + status = xge_hal_device_macaddr_set(hldev, 0, macaddr); + if(status != XGE_HAL_OK) + xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status); + + /* Opening channels */ + mtx_unlock(&lldev->mtx_drv); + status = xge_channel_open(lldev, option); + mtx_lock(&lldev->mtx_drv); + if(status != XGE_HAL_OK) + goto _exit; + + /* Set appropriate flags */ + ifnetp->if_drv_flags |= IFF_DRV_RUNNING; + ifnetp->if_flags &= ~IFF_DRV_OACTIVE; - LEAVE_FUNCTION + /* Checksum capability */ + ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ? + (CSUM_TCP | CSUM_UDP) : 0; + + if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4)) + ifnetp->if_hwassist |= CSUM_TSO; + + /* Enable interrupts */ + xge_hal_device_intr_enable(hldev); + + callout_reset(&lldev->timer, 10*hz, xge_timer, lldev); + + /* Disable promiscuous mode */ + xge_trace(XGE_TRACE, "If opted, enable promiscuous mode"); + xge_enable_promisc(lldev); + + /* Device is initialized */ + lldev->initialized = 1; + xge_os_mdelay(1000); + +_exit: + return; } -/****************************************** - * Function: xge_timer - * Parameters: Pointer to per-device - * xgelldev_t structure as void*. - * Return: None - * Description: Polls the changes. 
- ******************************************/ +/** + * xge_timer + * Timer timeout function to handle link status + * + * @devp Per-adapter Data + */ void xge_timer(void *devp) { - xgelldev_t *lldev = (xgelldev_t *)devp; + xge_lldev_t *lldev = (xge_lldev_t *)devp; xge_hal_device_t *hldev = lldev->devh; /* Poll for changes */ @@ -1605,285 +1825,355 @@ xge_timer(void *devp) return; } -/****************************************** - * Function: xge_stop - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Deactivates the interface - * (Called on "ifconfig down" - ******************************************/ +/** + * xge_stop + * De-activate the interface + * + * @lldev Per-adater Data + */ void -xge_stop(xgelldev_t *lldev) +xge_stop(xge_lldev_t *lldev) { - struct ifnet *ifnetp = lldev->ifnetp; - device_t dev = lldev->device; - - ENTER_FUNCTION - - mtx_assert((&lldev->xge_lock), MA_OWNED); - - /* If device is not in "Running" state, return */ - if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) { - goto xfstop_out; - } - - xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL); - -xfstop_out: - LEAVE_FUNCTION - return; + mtx_lock(&lldev->mtx_drv); + xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + mtx_unlock(&lldev->mtx_drv); } -/* - * xge_intr_filter - * - * ISR filter function - * @handle softc/lldev per device structure +/** + * xge_isr_filter + * ISR filter function - to filter interrupts from other devices (shared) + * + * @handle Per-adapter Data + * + * Returns + * FILTER_STRAY if interrupt is from other device + * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device */ int -xge_intr_filter(void *handle) +xge_isr_filter(void *handle) { - xgelldev_t *lldev = NULL; - xge_hal_device_t *hldev = NULL; - xge_hal_pci_bar0_t *bar0 = NULL; - device_t dev = NULL; + xge_lldev_t *lldev = (xge_lldev_t *)handle; + xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0); u16 retValue = FILTER_STRAY; u64 val64 = 0; - lldev = (xgelldev_t *)handle; - hldev = lldev->devh; - dev = lldev->device; - bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; + XGE_DRV_STATS(isr_filter); - val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0, + val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0, &bar0->general_int_status); retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD; return retValue; } -/****************************************** - * xge_intr - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Interrupt service routine - ******************************************/ +/** + * xge_isr_line + * Interrupt service routine for Line interrupts + * + * @plldev Per-adapter Data + */ void -xge_intr(void *plldev) +xge_isr_line(void *plldev) { xge_hal_status_e status; - xgelldev_t *lldev = (xgelldev_t *)plldev; + xge_lldev_t *lldev = (xge_lldev_t *)plldev; xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; struct ifnet *ifnetp = lldev->ifnetp; - mtx_lock(&lldev->xge_lock); + XGE_DRV_STATS(isr_line); + if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) { status = xge_hal_device_handle_irq(hldev); - - if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd))) { - xge_send_locked(ifnetp); - } + if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd))) + xge_send(ifnetp); } - mtx_unlock(&lldev->xge_lock); - return; } -/******************************************** - * Function : xgell_rx_open - * Parameters: Queue index, channel - * open/close/reopen flag - * Return: 0 or ENODEV - * Description: Initialize and open all Rx - * channels. 
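[Note] xge_isr_filter() above is the fast half of the interrupt handling on a shared line: it only reads general_int_status and tells the kernel whether the threaded handler (xge_isr_line) should run at all. The classification, restated as a standalone fragment with local stand-ins for the FILTER_* values from sys/bus.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins; the driver uses the FILTER_* constants from sys/bus.h. */
    #define FILTER_STRAY            0x01
    #define FILTER_SCHEDULE_THREAD  0x04

    static int
    classify_interrupt(uint64_t general_int_status)
    {
        /* A zero status means another device sharing the line interrupted. */
        return (general_int_status == 0) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
    }

    int
    main(void)
    {
        printf("0x%x 0x%x\n", classify_interrupt(0), classify_interrupt(0x8));
        return 0;
    }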
- ******************************************/ +/* + * xge_isr_msi + * ISR for Message signaled interrupts + */ +void +xge_isr_msi(void *plldev) +{ + xge_lldev_t *lldev = (xge_lldev_t *)plldev; + XGE_DRV_STATS(isr_msi); + xge_hal_device_continue_irq(lldev->devh); +} + +/** + * xge_rx_open + * Initiate and open all Rx channels + * + * @qid Ring Index + * @lldev Per-adapter Data + * @rflag Channel open/close/reopen flag + * + * Returns 0 or Error Number + */ int -xgell_rx_open(int qid, xgelldev_t *lldev, xge_hal_channel_reopen_e rflag) +xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag) { u64 adapter_status = 0x0; - int retValue = 0; - xge_hal_status_e status_code; - - ENTER_FUNCTION + xge_hal_status_e status = XGE_HAL_FAIL; xge_hal_channel_attr_t attr = { .post_qid = qid, .compl_qid = 0, - .callback = xgell_rx_compl, - .per_dtr_space = sizeof(xgell_rx_priv_t), + .callback = xge_rx_compl, + .per_dtr_space = sizeof(xge_rx_priv_t), .flags = 0, .type = XGE_HAL_CHANNEL_TYPE_RING, .userdata = lldev, - .dtr_init = xgell_rx_initial_replenish, - .dtr_term = xgell_rx_term + .dtr_init = xge_rx_initial_replenish, + .dtr_term = xge_rx_term }; /* If device is not ready, return */ - if(xge_hal_device_status(lldev->devh, &adapter_status)) { - xge_trace(XGE_ERR, "Device is not ready. Adapter status: 0x%llx", - (unsigned long long) adapter_status); - retValue = -ENODEV; - goto rxopen_out; + status = xge_hal_device_status(lldev->devh, &adapter_status); + if(status != XGE_HAL_OK) { + xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status); + XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL); } - - /* Open ring channel */ - status_code = xge_hal_channel_open(lldev->devh, &attr, - &lldev->ring_channel[qid], rflag); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Can not open Rx RING channel, Status: %d\n", - status_code); - retValue = -ENODEV; - goto rxopen_out; + else { + status = xge_hal_channel_open(lldev->devh, &attr, + &lldev->ring_channel[qid], rflag); } -rxopen_out: - LEAVE_FUNCTION - - return retValue; +_exit: + return status; } -/****************************************** - * Function: xgell_tx_open - * Parameters: Channel - * open/close/reopen flag - * Return: 0 or ENODEV - * Description: Initialize and open all Tx - * channels. - ******************************************/ +/** + * xge_tx_open + * Initialize and open all Tx channels + * + * @lldev Per-adapter Data + * @tflag Channel open/close/reopen flag + * + * Returns 0 or Error Number + */ int -xgell_tx_open(xgelldev_t *lldev, xge_hal_channel_reopen_e tflag) +xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag) { - xge_hal_status_e status_code; + xge_hal_status_e status = XGE_HAL_FAIL; u64 adapter_status = 0x0; - int retValue = 0; - - ENTER_FUNCTION + int qindex, index; xge_hal_channel_attr_t attr = { - .post_qid = 0, .compl_qid = 0, - .callback = xgell_tx_compl, - .per_dtr_space = sizeof(xgell_tx_priv_t), + .callback = xge_tx_compl, + .per_dtr_space = sizeof(xge_tx_priv_t), .flags = 0, .type = XGE_HAL_CHANNEL_TYPE_FIFO, .userdata = lldev, - .dtr_init = xgell_tx_initial_replenish, - .dtr_term = xgell_tx_term + .dtr_init = xge_tx_initial_replenish, + .dtr_term = xge_tx_term }; /* If device is not ready, return */ - if(xge_hal_device_status(lldev->devh, &adapter_status)) { - xge_trace(XGE_ERR, "Device is not ready. 
Adapter status: 0x%llx\n", - (unsigned long long) adapter_status); - retValue = -ENODEV; - goto txopen_out; + status = xge_hal_device_status(lldev->devh, &adapter_status); + if(status != XGE_HAL_OK) { + xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status); + XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL); } - /* Open FIFO channel */ - status_code = xge_hal_channel_open(lldev->devh, &attr, - &lldev->fifo_channel_0, tflag); - if(status_code != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Can not open Tx FIFO channel, Status: %d\n", - status_code); - retValue = -ENODEV; - goto txopen_out; + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { + attr.post_qid = qindex, + status = xge_hal_channel_open(lldev->devh, &attr, + &lldev->fifo_channel[qindex], tflag); + if(status != XGE_HAL_OK) { + for(index = 0; index < qindex; index++) + xge_hal_channel_close(lldev->fifo_channel[index], tflag); + } } -txopen_out: - LEAVE_FUNCTION +_exit: + return status; +} - return retValue; +/** + * xge_enable_msi + * Enables MSI + * + * @lldev Per-adapter Data + */ +void +xge_enable_msi(xge_lldev_t *lldev) +{ + xge_list_t *item = NULL; + xge_hal_device_t *hldev = lldev->devh; + xge_hal_channel_t *channel = NULL; + u16 offset = 0, val16 = 0; + + xge_os_pci_read16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16); + + /* Update msi_data */ + offset = (val16 & 0x80) ? 0x4c : 0x48; + xge_os_pci_read16(lldev->pdev, NULL, offset, &val16); + if(val16 & 0x1) + val16 &= 0xfffe; + else + val16 |= 0x1; + xge_os_pci_write16(lldev->pdev, NULL, offset, val16); + + /* Update msi_control */ + xge_os_pci_read16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16); + val16 |= 0x10; + xge_os_pci_write16(lldev->pdev, NULL, + xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16); + + /* Set TxMAT and RxMAT registers with MSI */ + xge_list_for_each(item, &hldev->free_channels) { + channel = xge_container_of(item, xge_hal_channel_t, item); + xge_hal_channel_msi_set(channel, 1, (u32)val16); + } } -/****************************************** - * Function: xgell_channel_open - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Opens both Rx and Tx channels. - ******************************************/ +/** + * xge_channel_open + * Open both Tx and Rx channels + * + * @lldev Per-adapter Data + * @option Channel reopen option + */ int -xgell_channel_open(xgelldev_t *lldev, xge_hal_channel_reopen_e option) +xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { - int status = XGE_HAL_OK; - int index = 0; - int index2 = 0; + xge_lro_entry_t *lro_session = NULL; + xge_hal_status_e status = XGE_HAL_OK; + int index = 0, index2 = 0; - ENTER_FUNCTION + if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { + xge_msi_info_restore(lldev); + xge_enable_msi(lldev); + } + +_exit2: + status = xge_create_dma_tags(lldev->device); + if(status != XGE_HAL_OK) + XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status); /* Open ring (Rx) channel */ for(index = 0; index < XGE_RING_COUNT; index++) { - if((status = xgell_rx_open(index, lldev, option))) { - xge_trace(XGE_ERR, "Opening Rx channel failed (Status: %d)\n", - status); - for(index2 = 0; index2 < index; index2++) { - xge_hal_channel_close(lldev->ring_channel[index2], option); + status = xge_rx_open(index, lldev, option); + if(status != XGE_HAL_OK) { + /* + * DMA mapping fails in the unpatched Kernel which can't + * allocate contiguous memory for Jumbo frames. 
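[Note] xge_enable_msi() above pokes the adapter's PCI configuration space directly: it picks the message-data offset from bit 0x80 of the msi_control word, toggles the low bit of the data value, sets bit 0x10 in msi_control and then programs every channel's TxMAT/RxMAT mapping. The config-space arithmetic, restated as a standalone fragment (the 0x48/0x4c offsets are the ones hard-coded for this adapter, not generic PCI constants):

    #include <stdint.h>
    #include <stdio.h>

    /* Offset of the message-data word, as selected in xge_enable_msi(). */
    static uint16_t
    msi_data_offset(uint16_t msi_control)
    {
        return (msi_control & 0x80) ? 0x4c : 0x48;
    }

    /* The driver flips the low bit of the current message-data value. */
    static uint16_t
    msi_data_toggle(uint16_t msi_data)
    {
        return (msi_data & 0x1) ? (msi_data & 0xfffe) : (msi_data | 0x1);
    }

    int
    main(void)
    {
        uint16_t control = 0x0080, data = 0x4021;

        printf("data offset 0x%x, data 0x%x -> 0x%x, control -> 0x%x\n",
            msi_data_offset(control), data, msi_data_toggle(data),
            control | 0x10);
        return 0;
    }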
+ * Try using 5 buffer mode. + */ + if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) && + (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) > + MJUMPAGESIZE)) { + /* Close so far opened channels */ + for(index2 = 0; index2 < index; index2++) { + xge_hal_channel_close(lldev->ring_channel[index2], + option); + } + + /* Destroy DMA tags intended to use for 1 buffer mode */ + if(bus_dmamap_destroy(lldev->dma_tag_rx, + lldev->extra_dma_map)) { + xge_trace(XGE_ERR, "Rx extra DMA map destroy failed"); + } + if(bus_dma_tag_destroy(lldev->dma_tag_rx)) + xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); + if(bus_dma_tag_destroy(lldev->dma_tag_tx)) + xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); + + /* Switch to 5 buffer mode */ + lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5; + xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu); + + /* Restart init */ + goto _exit2; + } + else { + XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1, + status); } - return status; } } -#ifdef XGE_FEATURE_LRO - status = xge_hal_lro_init(1, lldev->devh); - if (status != XGE_HAL_OK) { - xge_trace(XGE_ERR, "cannot init Rx LRO got status code %d", status); - return -ENODEV; - } -#endif - /* Open FIFO (Tx) channel */ - if((status = xgell_tx_open(lldev, option))) { - xge_trace(XGE_ERR, "Opening Tx channel failed (Status: %d)\n", - status); - for(index = 0; index < XGE_RING_COUNT; index++) { - xge_hal_channel_close(lldev->ring_channel[index], option); + if(lldev->enabled_lro) { + SLIST_INIT(&lldev->lro_free); + SLIST_INIT(&lldev->lro_active); + lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES; + + for(index = 0; index < lldev->lro_num; index++) { + lro_session = (xge_lro_entry_t *) + xge_os_malloc(NULL, sizeof(xge_lro_entry_t)); + if(lro_session == NULL) { + lldev->lro_num = index; + break; + } + SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next); } } - LEAVE_FUNCTION + /* Open FIFO (Tx) channel */ + status = xge_tx_open(lldev, option); + if(status != XGE_HAL_OK) + XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status); + + goto _exit; + +_exit1: + /* + * Opening Rx channel(s) failed (index is <last ring index - 1>) or + * Initialization of LRO failed (index is XGE_RING_COUNT) + * Opening Tx channel failed (index is XGE_RING_COUNT) + */ + for(index2 = 0; index2 < index; index2++) + xge_hal_channel_close(lldev->ring_channel[index2], option); + +_exit: return status; } -/****************************************** - * Function: xgell_channel_close - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: 0 for success, non-zero for - * failure - * Description: Closes both Tx and Rx channels - ******************************************/ -int -xgell_channel_close(xgelldev_t *lldev, xge_hal_channel_reopen_e option) +/** + * xge_channel_close + * Close both Tx and Rx channels + * + * @lldev Per-adapter Data + * @option Channel reopen option + * + */ +void +xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { - int index; - - ENTER_FUNCTION + int qindex = 0; DELAY(1000 * 1000); /* Close FIFO (Tx) channel */ - xge_hal_channel_close(lldev->fifo_channel_0, option); - - /* Close Ring (Rx) channel */ - for(index = 0; index < XGE_RING_COUNT; index++) { - xge_hal_channel_close(lldev->ring_channel[index], option); - } - - LEAVE_FUNCTION - - return 0; + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) + xge_hal_channel_close(lldev->fifo_channel[qindex], option); + + /* Close Ring (Rx) channels */ + for(qindex = 0; qindex < XGE_RING_COUNT; qindex++) + 
xge_hal_channel_close(lldev->ring_channel[qindex], option); + + if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map)) + xge_trace(XGE_ERR, "Rx extra map destroy failed"); + if(bus_dma_tag_destroy(lldev->dma_tag_rx)) + xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); + if(bus_dma_tag_destroy(lldev->dma_tag_tx)) + xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); } - -/****************************************** - * Function: dmamap_cb - * Parameters: Parameter passed from dmamap - * function, Segment, Number of - * segments, error (if any) - * Return: None - * Description: Callback function used for - * DMA mapping - ******************************************/ +/** + * dmamap_cb + * DMA map callback + * + * @arg Parameter passed from dmamap + * @segs Segments + * @nseg Number of segments + * @error Error + */ void dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { @@ -1892,52 +2182,37 @@ dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) } } -/****************************************** - * Function: xgell_reset - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: HAL status code/EPERM - * Description: Resets the device - ******************************************/ +/** + * xge_reset + * Device Reset + * + * @lldev Per-adapter Data + */ void -xgell_reset(xgelldev_t *lldev) +xge_reset(xge_lldev_t *lldev) { - device_t dev = lldev->device; - - ENTER_FUNCTION - xge_trace(XGE_TRACE, "Reseting the chip"); - mtx_lock(&lldev->xge_lock); - /* If the device is not initialized, return */ - if(!lldev->initialized) { - goto xreset_out; + if(lldev->initialized) { + mtx_lock(&lldev->mtx_drv); + xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); + mtx_unlock(&lldev->mtx_drv); } - xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL); - - xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL); - -xreset_out: - LEAVE_FUNCTION - mtx_unlock(&lldev->xge_lock); - return; } -/****************************************** - * Function: xge_setmulti - * Parameters: Per adapter xgelldev_t - * structure pointer - * Return: None - * Description: Set an address as a multicast - * address - ******************************************/ +/** + * xge_setmulti + * Set an address as a multicast address + * + * @lldev Per-adapter Data + */ void -xge_setmulti(xgelldev_t *lldev) +xge_setmulti(xge_lldev_t *lldev) { - ENTER_FUNCTION struct ifmultiaddr *ifma; u8 *lladdr; xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; @@ -1958,9 +2233,8 @@ xge_setmulti(xgelldev_t *lldev) } if(status != XGE_HAL_OK) { - printf("Failed to %s multicast (status: %d)\n", - (ifnetp->if_flags & IFF_ALLMULTI ? 
"enable" : "disable"), - status); + xge_trace(XGE_ERR, "Enabling/disabling multicast failed"); + goto _exit; } /* Updating address list */ @@ -1978,7 +2252,7 @@ xge_setmulti(xgelldev_t *lldev) if((!lldev->all_multicast) && (index)) { lldev->macaddr_count = (index + 1); if(lldev->macaddr_count > table_size) { - return; + goto _exit; } /* Clear old addresses */ @@ -2001,30 +2275,29 @@ xge_setmulti(xgelldev_t *lldev) } IF_ADDR_UNLOCK(ifnetp); - LEAVE_FUNCTION +_exit: + return; } -/****************************************** - * Function: xge_enable_promisc - * Parameters: Adapter structure - * Return: None - * Description: Enables promiscuous mode - ******************************************/ +/** + * xge_enable_promisc + * Enable Promiscuous Mode + * + * @lldev Per-adapter Data + */ void -xge_enable_promisc(xgelldev_t *lldev) +xge_enable_promisc(xge_lldev_t *lldev) { struct ifnet *ifnetp = lldev->ifnetp; xge_hal_device_t *hldev = lldev->devh; xge_hal_pci_bar0_t *bar0 = NULL; u64 val64 = 0; - ENTER_FUNCTION - bar0 = (xge_hal_pci_bar0_t *) hldev->bar0; if(ifnetp->if_flags & IFF_PROMISC) { xge_hal_device_promisc_enable(lldev->devh); - + /* * When operating in promiscuous mode, don't strip the VLAN tag */ @@ -2037,25 +2310,21 @@ xge_enable_promisc(xgelldev_t *lldev) xge_trace(XGE_TRACE, "Promiscuous mode ON"); } - - LEAVE_FUNCTION } -/****************************************** - * Function: xge_disable_promisc - * Parameters: Adapter structure - * Return: None - * Description: Disables promiscuous mode - ******************************************/ +/** + * xge_disable_promisc + * Disable Promiscuous Mode + * + * @lldev Per-adapter Data + */ void -xge_disable_promisc(xgelldev_t *lldev) +xge_disable_promisc(xge_lldev_t *lldev) { xge_hal_device_t *hldev = lldev->devh; xge_hal_pci_bar0_t *bar0 = NULL; u64 val64 = 0; - ENTER_FUNCTION - bar0 = (xge_hal_pci_bar0_t *) hldev->bar0; xge_hal_device_promisc_disable(lldev->devh); @@ -2071,251 +2340,55 @@ xge_disable_promisc(xgelldev_t *lldev) &bar0->rx_pa_cfg); xge_trace(XGE_TRACE, "Promiscuous mode OFF"); - - LEAVE_FUNCTION -} - -/****************************************** - * Function: changeMtu - * Parameters: Pointer to per-device - * xgelldev_t structure, New - * MTU size. - * Return: None - * Description: Changes MTU size to requested - ******************************************/ -int -changeMtu(xgelldev_t *lldev, int NewMtu) -{ - struct ifnet *ifnetp = lldev->ifnetp; - xge_hal_device_t *hldev = lldev->devh; - int retValue = 0; - - ENTER_FUNCTION - - do { - /* Check requested MTU size for boundary */ - if(xge_hal_device_mtu_check(hldev, NewMtu) != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Invalid MTU"); - retValue = EINVAL; - break; - } - - if(lldev->initialized != 0) { - mtx_lock(&lldev->xge_lock); - if_down(ifnetp); - xge_stop(lldev); - ifnetp->if_mtu = NewMtu; - changeBufmode(lldev, NewMtu); - xge_init_locked((void *)lldev); - if_up(ifnetp); - mtx_unlock(&lldev->xge_lock); - } - else { - ifnetp->if_mtu = NewMtu; - changeBufmode(lldev, NewMtu); - } - } while(FALSE); - - LEAVE_FUNCTION - return retValue; } -/****************************************** - * Function: changeBufmode - * Parameters: Pointer to per-device - * xgelldev_t structure, New - * MTU size. - * Return: None - * Description: Updates RingConfiguration structure - * depending the NewMtu size. 
- ******************************************/ -int -changeBufmode (xgelldev_t *lldev, int NewMtu) -{ - xge_hal_ring_config_t * pRingConfig; - xge_hal_device_t *hldev = lldev->devh; - device_t dev = lldev->device; - int buffer_length = 0, buffer_index = 0, index; - - pRingConfig = &(hldev->config.ring); - xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len)); - - if((NewMtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= MJUMPAGESIZE) { -#if defined(XGE_FEATURE_BUFFER_MODE_3) - xge_os_printf("%s: 3 Buffer Mode Enabled", - device_get_nameunit(dev)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - } - pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A; - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[2] = NewMtu; - lldev->rxd_mbuf_cnt = 3; -#else -#if defined(XGE_FEATURE_BUFFER_MODE_2) - xge_os_printf("%s: 2 Buffer Mode Enabled", - device_get_nameunit(dev)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_3; - } - pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B; - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = NewMtu; - lldev->rxd_mbuf_cnt = 2; -#else - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_1; - } - pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A; - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1; - lldev->rxd_mbuf_len[0] = NewMtu; - lldev->rxd_mbuf_cnt = 1; -#endif -#endif - } - else { -#if defined(XGE_FEATURE_BUFFER_MODE_3) || defined (XGE_FEATURE_BUFFER_MODE_2) - xge_os_printf("2 or 3 Buffer mode is not supported for given MTU"); - xge_os_printf("So changing buffer mode to 5 buffer mode\n"); -#endif - xge_os_printf("%s: 5 Buffer Mode Enabled", - device_get_nameunit(dev)); - for(index = 0; index < XGE_RING_COUNT; index++) { - pRingConfig->queue[index].buffer_mode = - XGE_HAL_RING_QUEUE_BUFFER_MODE_5; - } - lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5; - buffer_length = NewMtu; - buffer_index = 2; - lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE; - lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; - - while(buffer_length > MJUMPAGESIZE) { - buffer_length -= MJUMPAGESIZE; - lldev->rxd_mbuf_len[buffer_index] = MJUMPAGESIZE; - buffer_index++; - } - - BUFALIGN(buffer_length); - - lldev->rxd_mbuf_len[buffer_index] = buffer_length; - lldev->rxd_mbuf_cnt = buffer_index+1; - } - - return XGE_HAL_OK; -} - -/************************************************************* - * xge_initialize - * - * @dev: Device structure - * @option: Normal/Reset option for channels +/** + * xge_change_mtu + * Change interface MTU to a requested valid size * - * Called by both init and reset functions to enable device, interrupts, and to - * open channels. 
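[Note] The removed changeBufmode() above shows how the per-descriptor buffer lengths are derived from the MTU in 5 buffer mode, and presumably the new xge_buffer_mode_init() keeps the same split: two small buffers for the MAC and TCP/IP headers, then the payload cut into page-sized chunks. A standalone model of that computation; the header-size constants are placeholders, not the driver's XGE_HAL_* values:

    #include <stdio.h>

    #define MAC_HEADER_MAX_SIZE    64    /* placeholder for XGE_HAL_MAC_HEADER_MAX_SIZE */
    #define TCPIP_HEADER_MAX_SIZE  128   /* placeholder for XGE_HAL_TCPIP_HEADER_MAX_SIZE */
    #define MJUMPAGESIZE           4096  /* PAGE_SIZE-backed jumbo cluster */

    static int
    five_buffer_lengths(int mtu, int lengths[5])
    {
        int count = 2, remaining = mtu;

        lengths[0] = MAC_HEADER_MAX_SIZE;       /* buffer 0: MAC header     */
        lengths[1] = TCPIP_HEADER_MAX_SIZE;     /* buffer 1: TCP/IP headers */
        while(remaining > MJUMPAGESIZE && count < 4) {  /* bounded to 5 buffers */
            lengths[count++] = MJUMPAGESIZE;
            remaining -= MJUMPAGESIZE;
        }
        lengths[count++] = remaining;   /* the driver also cache-aligns this */
        return count;                   /* buffers used per descriptor       */
    }

    int
    main(void)
    {
        int lengths[5], count, i;

        count = five_buffer_lengths(9600, lengths);
        for(i = 0; i < count; i++)
            printf("buffer %d: %d bytes\n", i, lengths[i]);
        return 0;
    }

With the placeholder header sizes and a 9600-byte MTU this yields five buffers (64, 128, 4096, 4096 and 1408 bytes), which is why MTUs that no longer fit a single page force the switch to 5 buffer mode seen earlier in xge_channel_open().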
+ * @lldev Per-adapter Data + * @NewMtu Requested MTU * - **************************************************************/ -void xge_initialize(device_t dev, xge_hal_channel_reopen_e option) + * Returns 0 or Error Number + */ +int +xge_change_mtu(xge_lldev_t *lldev, int new_mtu) { - ENTER_FUNCTION - - struct ifaddr *ifaddrp; - struct sockaddr_dl *sockaddrp; - unsigned char *macaddr; - xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev); - xge_hal_device_t *hldev = lldev->devh; - struct ifnet *ifnetp = lldev->ifnetp; - int status = XGE_HAL_OK; + int status = XGE_HAL_OK; - xge_trace(XGE_TRACE, "Set MTU size"); - status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu); - if(status != XGE_HAL_OK) { - xge_trace(XGE_ERR, "Setting HAL device MTU failed (Status: %d)", - status); - goto init_sub_out; + /* Check requested MTU size for boundary */ + if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) { + XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL); } + lldev->mtu = new_mtu; + xge_confirm_changes(lldev, XGE_SET_MTU); - /* Enable HAL device */ - xge_hal_device_enable(hldev); - - /* Get MAC address and update in HAL */ - ifaddrp = ifnetp->if_addr; - sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr; - sockaddrp->sdl_type = IFT_ETHER; - sockaddrp->sdl_alen = ifnetp->if_addrlen; - macaddr = LLADDR(sockaddrp); - xge_trace(XGE_TRACE, - "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", - *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3), - *(macaddr + 4), *(macaddr + 5)); - status = xge_hal_device_macaddr_set(hldev, 0, macaddr); - if(status != XGE_HAL_OK) { - xge_trace(XGE_ERR, - "Setting MAC address failed (Status: %d)\n", status); - } - - /* Opening channels */ - mtx_unlock(&lldev->xge_lock); - status = xgell_channel_open(lldev, option); - mtx_lock(&lldev->xge_lock); - if(status != 0) { - goto init_sub_out; - } - - /* Set appropriate flags */ - ifnetp->if_drv_flags |= IFF_DRV_RUNNING; - ifnetp->if_flags &= ~IFF_DRV_OACTIVE; - - /* Checksum capability */ - ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ? - (CSUM_TCP | CSUM_UDP) : 0; - -#ifdef XGE_FEATURE_TSO - if(ifnetp->if_capenable & IFCAP_TSO4) - ifnetp->if_hwassist |= CSUM_TSO; -#endif - - /* Enable interrupts */ - xge_hal_device_intr_enable(hldev); - - callout_reset(&lldev->timer, 10*hz, xge_timer, lldev); - - /* Disable promiscuous mode */ - xge_trace(XGE_TRACE, "If opted, enable promiscuous mode"); - xge_enable_promisc(lldev); - - /* Device is initialized */ - lldev->initialized = 1; - xge_os_mdelay(1000); - -init_sub_out: - LEAVE_FUNCTION - return; +_exit: + return status; } -/******************************************************* - * xge_terminate +/** + * xge_device_stop * - * @dev: Device structure - * @option: Normal/Reset option for channels + * Common code for both stop and part of reset. Disables device, interrupts and + * closes channels * - * Called by both stop and reset functions to disable device, interrupts, and to - * close channels. 
- ******************************************************/ -void xge_terminate(device_t dev, xge_hal_channel_reopen_e option) + * @dev Device Handle + * @option Channel normal/reset option + */ +void +xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { - ENTER_FUNCTION - - xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev); xge_hal_device_t *hldev = lldev->devh; struct ifnet *ifnetp = lldev->ifnetp; + u64 val64 = 0; + + mtx_assert((&lldev->mtx_drv), MA_OWNED); + + /* If device is not in "Running" state, return */ + if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) + goto _exit; /* Set appropriate flags */ ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); @@ -2326,17 +2399,19 @@ void xge_terminate(device_t dev, xge_hal_channel_reopen_e option) /* Disable interrupts */ xge_hal_device_intr_disable(hldev); - mtx_unlock(&lldev->xge_lock); + mtx_unlock(&lldev->mtx_drv); xge_queue_flush(xge_hal_device_queue(lldev->devh)); - mtx_lock(&lldev->xge_lock); + mtx_lock(&lldev->mtx_drv); /* Disable HAL device */ if(xge_hal_device_disable(hldev) != XGE_HAL_OK) { xge_trace(XGE_ERR, "Disabling HAL device failed"); + xge_hal_device_status(hldev, &val64); + xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64); } /* Close Tx and Rx channels */ - xgell_channel_close(lldev, option); + xge_channel_close(lldev, option); /* Reset HAL device */ xge_hal_device_reset(hldev); @@ -2346,16 +2421,18 @@ void xge_terminate(device_t dev, xge_hal_channel_reopen_e option) if_link_state_change(ifnetp, LINK_STATE_DOWN); - LEAVE_FUNCTION +_exit: + return; } -/****************************************** - * Function: xgell_set_mbuf_cflags - * Parameters: mbuf structure pointer - * Return: None - * Description: This fuction will set the csum_flag of the mbuf - ******************************************/ -void xgell_set_mbuf_cflags(mbuf_t pkt) +/** + * xge_set_mbuf_cflags + * set checksum flag for the mbuf + * + * @pkt Packet + */ +void +xge_set_mbuf_cflags(mbuf_t pkt) { pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED; pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID; @@ -2363,244 +2440,348 @@ void xgell_set_mbuf_cflags(mbuf_t pkt) pkt->m_pkthdr.csum_data = htons(0xffff); } -#ifdef XGE_FEATURE_LRO -/****************************************** - * Function: xgell_lro_flush_sessions - * Parameters: Per adapter xgelldev_t - * Return: None - * Description: This function will flush the LRO session and send the - * accumulated LRO packet to Upper layer. - ******************************************/ -void xgell_lro_flush_sessions(xgelldev_t *lldev) +/** + * xge_lro_flush_sessions + * Flush LRO session and send accumulated LRO packet to upper layer + * + * @lldev Per-adapter Data + */ +void +xge_lro_flush_sessions(xge_lldev_t *lldev) { - lro_t *lro; - struct ifnet *ifnetp = lldev->ifnetp; - xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; + xge_lro_entry_t *lro_session = NULL; - while (NULL != (lro = xge_hal_lro_get_next_session(hldev))) { - xgell_set_mbuf_cflags(lro->os_buf); - - /* Send it up */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, lro->os_buf); - mtx_lock(&lldev->xge_lock); + while(!SLIST_EMPTY(&lldev->lro_active)) { + lro_session = SLIST_FIRST(&lldev->lro_active); + SLIST_REMOVE_HEAD(&lldev->lro_active, next); + xge_lro_flush(lldev, lro_session); + } +} - xge_hal_lro_close_session(lro); +/** + * xge_lro_flush + * Flush LRO session. 
Send accumulated LRO packet to upper layer + * + * @lldev Per-adapter Data + * @lro LRO session to be flushed + */ +static void +xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session) +{ + struct ip *header_ip; + struct tcphdr *header_tcp; + u32 *ptr; + + if(lro_session->append_cnt) { + header_ip = lro_session->lro_header_ip; + header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN); + lro_session->m_head->m_pkthdr.len = lro_session->len; + header_tcp = (struct tcphdr *)(header_ip + 1); + header_tcp->th_ack = lro_session->ack_seq; + header_tcp->th_win = lro_session->window; + if(lro_session->timestamp) { + ptr = (u32 *)(header_tcp + 1); + ptr[1] = htonl(lro_session->tsval); + ptr[2] = lro_session->tsecr; + } } + + (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head); + lro_session->m_head = NULL; + lro_session->timestamp = 0; + lro_session->append_cnt = 0; + SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next); } -/****************************************** - * Function: xgell_accumulate_large_rx - * Parameters: Descriptor info structure, current mbuf structure, - * packet length, Per adapter structure, Rx Desc private structure - * Return: None - * Description: This function will accumulate packets to form the LRO - * packets based on various condition. - ******************************************/ -void xgell_accumulate_large_rx(xge_hal_dtr_info_t *ext_info,mbuf_t pkt, - int pkt_length, xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv) +/** + * xge_lro_accumulate + * Accumulate packets to form a large LRO packet based on various conditions + * + * @lldev Per-adapter Data + * @m_head Current Packet + * + * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure) + */ +static int +xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head) { - tcplro_t *tcp; - lro_t *lro, *lro_end3; - xge_hal_status_e status; - unsigned char * temp; - struct ifnet *ifnetp = lldev->ifnetp; + struct ether_header *header_ethernet; + struct ip *header_ip; + struct tcphdr *header_tcp; + u32 seq, *ptr; + struct mbuf *buffer_next, *buffer_tail; + xge_lro_entry_t *lro_session; + xge_hal_status_e status = XGE_HAL_FAIL; + int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options; + int trim; + + /* Get Ethernet header */ + header_ethernet = mtod(m_head, struct ether_header *); + + /* Return if it is not IP packet */ + if(header_ethernet->ether_type != htons(ETHERTYPE_IP)) + goto _exit; + + /* Get IP header */ + header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ? 
+ (struct ip *)(header_ethernet + 1) : + mtod(m_head->m_next, struct ip *); + + /* Return if it is not TCP packet */ + if(header_ip->ip_p != IPPROTO_TCP) + goto _exit; + + /* Return if packet has options */ + if((header_ip->ip_hl << 2) != sizeof(*header_ip)) + goto _exit; + + /* Return if packet is fragmented */ + if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK)) + goto _exit; + + /* Get TCP header */ + header_tcp = (struct tcphdr *)(header_ip + 1); + + /* Return if not ACK or PUSH */ + if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0) + goto _exit; + + /* Only timestamp option is handled */ + tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp); + tcp_hdr_len = sizeof(*header_tcp) + tcp_options; + ptr = (u32 *)(header_tcp + 1); + if(tcp_options != 0) { + if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) || + (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 | + TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) { + goto _exit; + } + } + + /* Total length of packet (IP) */ + ip_len = ntohs(header_ip->ip_len); - status = xge_hal_accumulate_large_rx(pkt->m_data, &tcp, &pkt_length, - &lro, ext_info, lldev->devh, &lro_end3); - pkt->m_next = NULL; - temp = (unsigned char *)tcp; + /* TCP data size */ + tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip); - if(status == XGE_HAL_INF_LRO_BEGIN) { - pkt->m_flags |= M_PKTHDR; - pkt->m_pkthdr.rcvif = ifnetp; - lro->os_buf = lro->os_buf_end = pkt; + /* If the frame is padded, trim it */ + tot_len = m_head->m_pkthdr.len; + trim = tot_len - (ip_len + ETHER_HDR_LEN); + if(trim != 0) { + if(trim < 0) + goto _exit; + m_adj(m_head, -trim); + tot_len = m_head->m_pkthdr.len; } - else if(status == XGE_HAL_INF_LRO_CONT) { - /* - * Current mbuf will be combine to form LRO frame, - * So mask the pkthdr of the flag variable for current mbuf - */ - pkt->m_flags = pkt->m_flags & 0xFFFD; //Mask pkthdr - pkt->m_data = (u8 *)tcp; - pkt->m_len = pkt_length; - /* - * Combine the current mbuf to the LRO frame and update - * the LRO's pkthdr len accordingly - */ - lro->os_buf_end->m_next = pkt; - lro->os_buf_end = pkt; - lro->os_buf->m_pkthdr.len += pkt_length; + buffer_next = m_head; + buffer_tail = NULL; + while(buffer_next != NULL) { + buffer_tail = buffer_next; + buffer_next = buffer_tail->m_next; } - else if(status == XGE_HAL_INF_LRO_END_2) { - lro->os_buf->m_flags |= M_EOR; - /* Update the Checksum flags of the LRO frames */ - xgell_set_mbuf_cflags(lro->os_buf); + /* Total size of only headers */ + hlen = ip_len + ETHER_HDR_LEN - tcp_data_len; - /* Post-Read sync */ - bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, - BUS_DMASYNC_POSTREAD); + /* Get sequence number */ + seq = ntohl(header_tcp->th_seq); - /* - * Current packet can not be combined with LRO frame. 
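[Note] The checks above make a segment eligible for software LRO only in the simple case: plain TCP over IPv4, no IP options, not fragmented, nothing beyond ACK/PUSH in the TCP flags, and at most the timestamp option. The same tests, condensed into a standalone predicate over pre-parsed fields (no mbuf or header parsing here):

    #include <stdbool.h>
    #include <stdio.h>

    struct seg_summary {
        bool ipv4;              /* ether_type == ETHERTYPE_IP              */
        bool tcp;               /* ip_p == IPPROTO_TCP                     */
        bool ip_options;        /* (ip_hl << 2) != sizeof(struct ip)       */
        bool fragmented;        /* ip_off has IP_MF or a fragment offset   */
        bool other_tcp_flags;   /* flags beyond TH_ACK | TH_PUSH           */
        bool odd_tcp_options;   /* anything other than the timestamp opt.  */
    };

    static bool
    lro_candidate(const struct seg_summary *s)
    {
        return s->ipv4 && s->tcp && !s->ip_options && !s->fragmented &&
            !s->other_tcp_flags && !s->odd_tcp_options;
    }

    int
    main(void)
    {
        struct seg_summary plain_ack = { .ipv4 = true, .tcp = true };

        printf("candidate: %d\n", lro_candidate(&plain_ack));
        return 0;
    }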
- * Flush the previous LRO frames and send the current packet - * seperately - */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, lro->os_buf); - (*ifnetp->if_input)(ifnetp, pkt); - mtx_lock(&lldev->xge_lock); - xge_hal_lro_close_session(lro); - } - else if(status == XGE_HAL_INF_LRO_END_1) { - pkt->m_flags = pkt->m_flags & 0xFFFD; - pkt->m_data = (u8 *)tcp; - pkt->m_len = pkt_length; - lro->os_buf_end->m_next = pkt; - lro->os_buf->m_pkthdr.len += pkt_length; - xgell_set_mbuf_cflags(lro->os_buf); - lro->os_buf->m_flags |= M_EOR; - - /* Post-Read sync */ - bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, - BUS_DMASYNC_POSTREAD); + SLIST_FOREACH(lro_session, &lldev->lro_active, next) { + if(lro_session->source_port == header_tcp->th_sport && + lro_session->dest_port == header_tcp->th_dport && + lro_session->source_ip == header_ip->ip_src.s_addr && + lro_session->dest_ip == header_ip->ip_dst.s_addr) { - /* Send it up */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, lro->os_buf); - mtx_lock(&lldev->xge_lock); + /* Unmatched sequence number, flush LRO session */ + if(__predict_false(seq != lro_session->next_seq)) { + SLIST_REMOVE(&lldev->lro_active, lro_session, + xge_lro_entry_t, next); + xge_lro_flush(lldev, lro_session); + goto _exit; + } - xge_hal_lro_close_session(lro); + /* Handle timestamp option */ + if(tcp_options) { + u32 tsval = ntohl(*(ptr + 1)); + if(__predict_false(lro_session->tsval > tsval || + *(ptr + 2) == 0)) { + goto _exit; + } + lro_session->tsval = tsval; + lro_session->tsecr = *(ptr + 2); + } + + lro_session->next_seq += tcp_data_len; + lro_session->ack_seq = header_tcp->th_ack; + lro_session->window = header_tcp->th_win; + + /* If TCP data/payload is of 0 size, free mbuf */ + if(tcp_data_len == 0) { + m_freem(m_head); + status = XGE_HAL_OK; + goto _exit; + } + + lro_session->append_cnt++; + lro_session->len += tcp_data_len; + + /* Adjust mbuf so that m_data points to payload than headers */ + m_adj(m_head, hlen); + + /* Append this packet to LRO accumulated packet */ + lro_session->m_tail->m_next = m_head; + lro_session->m_tail = buffer_tail; + + /* Flush if LRO packet is exceeding maximum size */ + if(lro_session->len > + (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) { + SLIST_REMOVE(&lldev->lro_active, lro_session, + xge_lro_entry_t, next); + xge_lro_flush(lldev, lro_session); + } + status = XGE_HAL_OK; + goto _exit; + } } - else if(status == XGE_HAL_INF_LRO_END_3) { - pkt->m_flags |= M_PKTHDR; - pkt->m_len = pkt_length; - pkt->m_pkthdr.len = pkt_length; - lro_end3->os_buf = lro_end3->os_buf_end = pkt; - lro->os_buf->m_flags |= M_EOR; - xgell_set_mbuf_cflags(lro->os_buf); - /* Post-Read sync */ - bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, - BUS_DMASYNC_POSTREAD); + if(SLIST_EMPTY(&lldev->lro_free)) + goto _exit; + + /* Start a new LRO session */ + lro_session = SLIST_FIRST(&lldev->lro_free); + SLIST_REMOVE_HEAD(&lldev->lro_free, next); + SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next); + lro_session->source_port = header_tcp->th_sport; + lro_session->dest_port = header_tcp->th_dport; + lro_session->source_ip = header_ip->ip_src.s_addr; + lro_session->dest_ip = header_ip->ip_dst.s_addr; + lro_session->next_seq = seq + tcp_data_len; + lro_session->mss = tcp_data_len; + lro_session->ack_seq = header_tcp->th_ack; + lro_session->window = header_tcp->th_win; - /* Send it up */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, lro->os_buf); - mtx_lock(&lldev->xge_lock); - 
xge_hal_lro_close_session(lro); + lro_session->lro_header_ip = header_ip; + + /* Handle timestamp option */ + if(tcp_options) { + lro_session->timestamp = 1; + lro_session->tsval = ntohl(*(ptr + 1)); + lro_session->tsecr = *(ptr + 2); } - else if((status == XGE_HAL_INF_LRO_UNCAPABLE) || - (status == XGE_HAL_INF_LRO_SESSIONS_XCDED)) { - pkt->m_flags |= M_PKTHDR; - pkt->m_len = pkt_length; - pkt->m_pkthdr.len = pkt_length; - /* Post-Read sync */ + lro_session->len = tot_len; + lro_session->m_head = m_head; + lro_session->m_tail = buffer_tail; + status = XGE_HAL_OK; + +_exit: + return status; +} + +/** + * xge_accumulate_large_rx + * Accumulate packets to form a large LRO packet based on various conditions + * + * @lldev Per-adapter Data + * @pkt Current packet + * @pkt_length Packet Length + * @rxd_priv Rx Descriptor Private Data + */ +void +xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length, + xge_rx_priv_t *rxd_priv) +{ + if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD); - - /* Send it up */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, pkt); - mtx_lock(&lldev->xge_lock); + (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt); } } -#endif -/****************************************** - * Function: xgell_rx_compl - * Parameters: Channel handle, descriptor, - * transfer code, userdata - * (not used) - * Return: HAL status code - * Description: If the interrupt is because - * of a received frame or if - * the receive ring contains - * fresh as yet un-processed - * frames, this function is - * called. - ******************************************/ +/** + * xge_rx_compl + * If the interrupt is due to received frame (Rx completion), send it up + * + * @channelh Ring Channel Handle + * @dtr Current Descriptor + * @t_code Transfer Code indicating success or error + * @userdata Per-adapter Data + * + * Returns XGE_HAL_OK or HAL error enums + */ xge_hal_status_e -xgell_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, +xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, void *userdata) { - xge_hal_dtr_info_t ext_info; - xge_hal_status_e status_code; struct ifnet *ifnetp; - device_t dev; - int index; - mbuf_t mbuf_up = NULL; - xgell_rx_priv_t *rxd_priv = NULL, old_rxd_priv; - u16 vlan_tag; - -// ENTER_FUNCTION - + xge_rx_priv_t *rxd_priv = NULL; + mbuf_t mbuf_up = NULL; + xge_hal_status_e status = XGE_HAL_OK; + xge_hal_dtr_info_t ext_info; + int index; + u16 vlan_tag; /*get the user data portion*/ - xgelldev_t *lldev = xge_hal_channel_userdata(channelh); + xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(!lldev) { - xge_ctrace(XGE_TRACE, "xgeX: %s: Failed to get user data", - __FUNCTION__); - return XGE_HAL_FAIL; + XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL); } - dev = lldev->device; - mtx_assert((&lldev->xge_lock), MA_OWNED); + XGE_DRV_STATS(rx_completions); /* get the interface pointer */ ifnetp = lldev->ifnetp; do { + XGE_DRV_STATS(rx_desc_compl); + if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) { - return XGE_HAL_FAIL; + status = XGE_HAL_FAIL; + goto _exit; } if(t_code) { xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code); + XGE_DRV_STATS(rx_tcode); xge_hal_device_handle_tcode(channelh, dtr, t_code); xge_hal_ring_dtr_post(channelh,dtr); continue; } /* Get the private data for this descriptor*/ - rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, + rxd_priv = (xge_rx_priv_t *) 
xge_hal_ring_dtr_private(channelh, dtr); if(!rxd_priv) { - xge_trace(XGE_ERR, "Failed to get descriptor private data"); - return XGE_HAL_FAIL; + XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit, + XGE_HAL_FAIL); } - /* Taking backup of rxd_priv structure details of current packet */ - xge_os_memcpy(&old_rxd_priv, rxd_priv, sizeof(xgell_rx_priv_t)); - - /* Prepare one buffer to send it to upper layer -- since the upper - * layer frees the buffer do not use rxd_priv->buffer - * Meanwhile prepare a new buffer, do mapping, use it in the - * current descriptor and post descriptor back to ring channel */ + /* + * Prepare one buffer to send it to upper layer -- since the upper + * layer frees the buffer do not use rxd_priv->buffer. Meanwhile + * prepare a new buffer, do mapping, use it in the current + * descriptor and post descriptor back to ring channel + */ mbuf_up = rxd_priv->bufferArray[0]; /* Gets details of mbuf i.e., packet length */ xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv); - status_code = + status = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ? - xgell_get_buf(dtr, rxd_priv, lldev, 0) : - xgell_get_buf_3b_5b(dtr, rxd_priv, lldev); + xge_get_buf(dtr, rxd_priv, lldev, 0) : + xge_get_buf_3b_5b(dtr, rxd_priv, lldev); - if(status_code != XGE_HAL_OK) { + if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "No memory"); + XGE_DRV_STATS(rx_no_buf); /* - * Do not deliver the received buffer to the stack. Instead, - * Re-post the descriptor with the same buffer + * Unable to allocate buffer. Instead of discarding, post + * descriptor back to channel for future processing of same + * packet. */ - - /* Get back previous rxd_priv structure before posting */ - xge_os_memcpy(rxd_priv, &old_rxd_priv, sizeof(xgell_rx_priv_t)); - xge_hal_ring_dtr_post(channelh, dtr); continue; } @@ -2608,136 +2789,80 @@ xgell_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, /* Get the extended information */ xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info); - if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { - /* - * As we have allocated a new mbuf for this descriptor, post - * this descriptor with new mbuf back to ring channel - */ - vlan_tag = ext_info.vlan; - xge_hal_ring_dtr_post(channelh, dtr); - if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) && - (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) && - (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) && - (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) { - /* set Checksum Flag */ - xgell_set_mbuf_cflags(mbuf_up); -#ifdef XGE_FEATURE_LRO - if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { - xgell_accumulate_large_rx(&ext_info, mbuf_up, - mbuf_up->m_len, lldev, rxd_priv); - } -#else - /* Post-Read sync for buffers*/ - bus_dmamap_sync(lldev->dma_tag_rx, - rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD); - - /* Send it up */ - mtx_unlock(&lldev->xge_lock); - (*ifnetp->if_input)(ifnetp, mbuf_up); - mtx_lock(&lldev->xge_lock); -#endif + /* + * As we have allocated a new mbuf for this descriptor, post this + * descriptor with new mbuf back to ring channel + */ + vlan_tag = ext_info.vlan; + xge_hal_ring_dtr_post(channelh, dtr); + if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) && + (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) && + (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) && + (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) { + + /* set Checksum Flag */ + xge_set_mbuf_cflags(mbuf_up); + + if(lldev->enabled_lro) { + xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len, + 
rxd_priv);
 }
 else {
- /*
- * Packet with erroneous checksum , let the upper layer
- * deal with it
- */
-
- /* Post-Read sync for buffers*/
- bus_dmamap_sync(lldev->dma_tag_rx,
- rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
-
-#ifdef XGE_FEATURE_LRO
- xgell_lro_flush_sessions(lldev);
-#endif
-
- if (vlan_tag) {
- mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
- mbuf_up->m_flags |= M_VLANTAG;
+ /* Post-Read sync for buffers*/
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
 }
- /* Send it up */
- mtx_unlock(&lldev->xge_lock);
- (*ifnetp->if_input)(ifnetp, mbuf_up);
- mtx_lock(&lldev->xge_lock);
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
 }
 }
 else {
 /*
- * As we have allocated a new mbuf for this descriptor, post
- * this descriptor with new mbuf back to ring channel
+ * Packet with erroneous checksum, let the upper layer deal
+ * with it
 */
- xge_hal_ring_dtr_post(channelh, dtr);
- if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
- (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
- (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
- (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
- /* set Checksum Flag */
- xgell_set_mbuf_cflags(mbuf_up);
-#ifdef XGE_FEATURE_LRO
- if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
- xgell_accumulate_large_rx(&ext_info, mbuf_up,
- mbuf_up->m_len, lldev, rxd_priv);
- }
-#else
- /* Post-Read sync for buffers*/
- for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
- /* Post-Read sync */
- bus_dmamap_sync(lldev->dma_tag_rx,
- rxd_priv->dmainfo[index].dma_map,
- BUS_DMASYNC_POSTREAD);
- }
- /* Send it up */
- mtx_unlock(&lldev->xge_lock);
- (*ifnetp->if_input)(ifnetp, mbuf_up);
- mtx_lock(&lldev->xge_lock);
-#endif
+ /* Post-Read sync for buffers*/
+ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
+ bus_dmamap_sync(lldev->dma_tag_rx,
+ rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
 }
- else {
- /*
- * Packet with erroneous checksum , let the upper layer
- * deal with it
- */
- for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
- /* Post-Read sync */
- bus_dmamap_sync(lldev->dma_tag_rx,
- rxd_priv->dmainfo[index].dma_map,
- BUS_DMASYNC_POSTREAD);
- }
-#ifdef XGE_FEATURE_LRO
- xgell_lro_flush_sessions(lldev);
-#endif
- /* Send it up */
- mtx_unlock(&lldev->xge_lock);
- (*ifnetp->if_input)(ifnetp, mbuf_up);
- mtx_lock(&lldev->xge_lock);
+ if(vlan_tag) {
+ mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
+ mbuf_up->m_flags |= M_VLANTAG;
 }
+
+ if(lldev->enabled_lro)
+ xge_lro_flush_sessions(lldev);
+
+ (*ifnetp->if_input)(ifnetp, mbuf_up);
 }
 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) == XGE_HAL_OK);
-#ifdef XGE_FEATURE_LRO
- xgell_lro_flush_sessions(lldev);
-#endif
-// LEAVE_FUNCTION
+ if(lldev->enabled_lro)
+ xge_lro_flush_sessions(lldev);
- return XGE_HAL_OK;
+_exit:
+ return status;
 }
-/******************************************
- * Function: xge_ring_dtr_get
- * Parameters: mbuf pointer, channel handler
- * descriptot, Per adapter xgelldev_t
- * structure pointer,
- * Rx private structure
- * Return: HAL status code
- * Description: Updates the mbuf lengths
- * depending on packet lengths. 
- ******************************************/ +/** + * xge_ring_dtr_get + * Get descriptors + * + * @mbuf_up Packet to send up + * @channelh Ring Channel Handle + * @dtr Descriptor + * @lldev Per-adapter Data + * @rxd_priv Rx Descriptor Private Data + * + * Returns XGE_HAL_OK or HAL error enums + */ int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr, - xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv) + xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv) { mbuf_t m; int pkt_length[5]={0,0}, pkt_len=0; @@ -2803,111 +2928,116 @@ xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr, mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0]; } -return XGE_HAL_OK; + return XGE_HAL_OK; } +/** + * xge_flush_txds + * Flush Tx descriptors + * + * @channelh Channel handle + */ +static void inline +xge_flush_txds(xge_hal_channel_h channelh) +{ + xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); + xge_hal_dtr_h tx_dtr; + xge_tx_priv_t *tx_priv; + struct ifnet *ifnetp = lldev->ifnetp; + u8 t_code; + + ifnetp->if_timer = 0; + while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code) + == XGE_HAL_OK) { + XGE_DRV_STATS(tx_desc_compl); + if(t_code) { + xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code); + XGE_DRV_STATS(tx_tcode); + xge_hal_device_handle_tcode(channelh, tx_dtr, t_code); + } + + tx_priv = xge_hal_fifo_dtr_private(tx_dtr); + bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map); + m_freem(tx_priv->buffer); + tx_priv->buffer = NULL; + xge_hal_fifo_dtr_free(channelh, tx_dtr); + } +} -/****************************************** - * Function: xge_send - * Parameters: Pointer to ifnet structure - * Return: None - * Description: Transmit entry point - ******************************************/ +/** + * xge_send + * Transmit function + * + * @ifnetp Interface Handle + */ void xge_send(struct ifnet *ifnetp) { - xgelldev_t *lldev = ifnetp->if_softc; + int qindex = 0; + xge_lldev_t *lldev = ifnetp->if_softc; - mtx_lock(&lldev->xge_lock); - xge_send_locked(ifnetp); - mtx_unlock(&lldev->xge_lock); + for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { + if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) { + XGE_DRV_STATS(tx_lock_fail); + break; + } + xge_send_locked(ifnetp, qindex); + mtx_unlock(&lldev->mtx_tx[qindex]); + } } -void -xge_send_locked(struct ifnet *ifnetp) +static void inline +xge_send_locked(struct ifnet *ifnetp, int qindex) { xge_hal_dtr_h dtr; - static bus_dma_segment_t segs[MAX_SEGS]; - xge_hal_status_e status_code; + static bus_dma_segment_t segs[XGE_MAX_SEGS]; + xge_hal_status_e status; unsigned int max_fragments; - xgelldev_t *lldev = ifnetp->if_softc; - xge_hal_channel_h channelh = lldev->fifo_channel_0; + xge_lldev_t *lldev = ifnetp->if_softc; + xge_hal_channel_h channelh = lldev->fifo_channel[qindex]; mbuf_t m_head = NULL; mbuf_t m_buf = NULL; - xgell_tx_priv_t *ll_tx_priv = NULL; + xge_tx_priv_t *ll_tx_priv = NULL; register unsigned int count = 0; unsigned int nsegs = 0; u16 vlan_tag; max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags; - mtx_assert((&lldev->xge_lock), MA_OWNED); - /* If device is not initialized, return */ - if((!lldev->initialized) || - (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))) { - xge_trace(XGE_ERR, "Device is not initialized"); - return; - } + if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))) + goto _exit; + + XGE_DRV_STATS(tx_calls); /* - * Get the number of free descriptors in the FIFO channel and return if - * the count is less than the XGELL_TX_LEVEL_LOW 
-- the low threshold + * This loop will be executed for each packet in the kernel maintained + * queue -- each packet can be with fragments as an mbuf chain */ - count = xge_hal_channel_dtr_count(channelh); - if(count <= XGELL_TX_LEVEL_LOW) { - ifnetp->if_drv_flags |= IFF_DRV_OACTIVE; - xge_trace(XGE_TRACE, "Free descriptor count %d/%d at low threshold", - count, XGELL_TX_LEVEL_LOW); - - /* Serialized -- through queue */ - xge_queue_produce_context(xge_hal_device_queue(lldev->devh), - XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev); - return; - } - - /* This loop will be executed for each packet in the kernel maintained - * queue -- each packet can be with fragments as an mbuf chain */ - while((ifnetp->if_snd.ifq_head) && - (xge_hal_channel_dtr_count(channelh) > XGELL_TX_LEVEL_LOW)) { + for(;;) { IF_DEQUEUE(&ifnetp->if_snd, m_head); + if(m_head == NULL) break; - for(count = 0, m_buf = m_head; m_buf != NULL; - m_buf = m_buf->m_next) { - if(m_buf->m_len) { - count += 1; - } + for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) { + if(m_buf->m_len) count += 1; } if(count >= max_fragments) { m_buf = m_defrag(m_head, M_DONTWAIT); - if(m_buf != NULL) { - m_head = m_buf; - } + if(m_buf != NULL) m_head = m_buf; + XGE_DRV_STATS(tx_defrag); } /* Reserve descriptors */ - status_code = xge_hal_fifo_dtr_reserve(channelh, &dtr); - if(status_code) { - switch(status_code) { - case XGE_HAL_INF_CHANNEL_IS_NOT_READY: - xge_trace(XGE_ERR, "Channel is not ready"); - break; - - case XGE_HAL_INF_OUT_OF_DESCRIPTORS: - xge_trace(XGE_ERR, "Out of descriptors"); - break; - - default: - xge_trace(XGE_ERR, - "Reserving (Tx) descriptors failed. Status %d", - status_code); - } - goto out2; - break; + status = xge_hal_fifo_dtr_reserve(channelh, &dtr); + if(status != XGE_HAL_OK) { + XGE_DRV_STATS(tx_no_txd); + xge_flush_txds(channelh); + goto _exit1; } - vlan_tag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0; + vlan_tag = + (m_head->m_flags & M_VLANTAG) ? 
m_head->m_pkthdr.ether_vtag : 0; xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag); /* Update Tx private structure for this descriptor */ @@ -2922,28 +3052,33 @@ xge_send_locked(struct ifnet *ifnetp) */ if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx, ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) { - xge_trace(XGE_ERR, "DMA map load with segments failed"); - goto out2; + xge_trace(XGE_TRACE, "DMA map load failed"); + XGE_DRV_STATS(tx_map_fail); + goto _exit1; } + if(lldev->driver_stats.tx_max_frags < nsegs) + lldev->driver_stats.tx_max_frags = nsegs; + /* Set descriptor buffer for header and each fragment/segment */ count = 0; do { xge_hal_fifo_dtr_buffer_set(channelh, dtr, count, (dma_addr_t)htole64(segs[count].ds_addr), segs[count].ds_len); - count = count + 1; + count++; } while(count < nsegs); /* Pre-write Sync of mapping */ bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map, BUS_DMASYNC_PREWRITE); -#ifdef XGE_FEATURE_TSO - if((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0) { - xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz); - } -#endif + if((lldev->enabled_tso) && + (m_head->m_pkthdr.csum_flags & CSUM_TSO)) { + XGE_DRV_STATS(tx_tso); + xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz); + } + /* Checksum */ if(ifnetp->if_hwassist > 0) { xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN @@ -2952,120 +3087,131 @@ xge_send_locked(struct ifnet *ifnetp) /* Post descriptor to FIFO channel */ xge_hal_fifo_dtr_post(channelh, dtr); + XGE_DRV_STATS(tx_posted); /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter) * listener so that we can use tools like tcpdump */ ETHER_BPF_MTAP(ifnetp, m_head); } - goto out1; -out2: + ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE); + goto _exit; +_exit1: /* Prepend the packet back to queue */ IF_PREPEND(&ifnetp->if_snd, m_head); -out1: + ifnetp->if_drv_flags |= IFF_DRV_OACTIVE; + + xge_queue_produce_context(xge_hal_device_queue(lldev->devh), + XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh); + XGE_DRV_STATS(tx_again); + +_exit: ifnetp->if_timer = 15; } -/****************************************** - * Function: xgell_get_buf - * Parameters: Per adapter xgelldev_t - * structure pointer, descriptor, - * Rx private structure, rxd_priv buffer - * buffer index for mapping - * Return: HAL status code - * Description: Gets buffer from system mbuf - * buffer pool. - ******************************************/ +/** + * xge_get_buf + * Allocates new mbufs to be placed into descriptors + * + * @dtrh Descriptor Handle + * @rxd_priv Rx Descriptor Private Data + * @lldev Per-adapter Data + * @index Buffer Index (if multi-buffer mode) + * + * Returns XGE_HAL_OK or HAL error enums + */ int -xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev, int index) +xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv, + xge_lldev_t *lldev, int index) { register mbuf_t mp = NULL; struct ifnet *ifnetp = lldev->ifnetp; - int retValue = XGE_HAL_OK; - bus_addr_t paddr; - int BUFLEN = 0, CLUSTLEN = 0; + int status = XGE_HAL_OK; + int buffer_size = 0, cluster_size = 0, count; + bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map; + bus_dma_segment_t segs[3]; - if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { - CLUSTLEN = MJUMPAGESIZE; - BUFLEN = MJUMPAGESIZE; + buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ? 
+ ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE : + lldev->rxd_mbuf_len[index]; + + if(buffer_size <= MCLBYTES) { + cluster_size = MCLBYTES; + mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); } else { - BUFLEN = lldev->rxd_mbuf_len[index]; - if(BUFLEN < MCLBYTES) { - CLUSTLEN = MCLBYTES; - } - else { - CLUSTLEN = MJUMPAGESIZE; + cluster_size = MJUMPAGESIZE; + if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) && + (buffer_size > MJUMPAGESIZE)) { + cluster_size = MJUM9BYTES; } + mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, cluster_size); } - - /* Get mbuf with attached cluster */ - mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, CLUSTLEN); if(!mp) { xge_trace(XGE_ERR, "Out of memory to allocate mbuf"); - retValue = XGE_HAL_FAIL; + status = XGE_HAL_FAIL; goto getbuf_out; } /* Update mbuf's length, packet length and receive interface */ - mp->m_len = mp->m_pkthdr.len = BUFLEN; + mp->m_len = mp->m_pkthdr.len = buffer_size; mp->m_pkthdr.rcvif = ifnetp; - /* Unload DMA map of mbuf in current descriptor */ - bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map); - /* Load DMA map */ - if(bus_dmamap_load(lldev->dma_tag_rx , rxd_priv->dmainfo[index].dma_map, - mtod(mp, void*), mp->m_len, dmamap_cb , &paddr , 0)) { - xge_trace(XGE_ERR, "Loading DMA map failed"); + if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map, + mp, segs, &count, BUS_DMA_NOWAIT)) { + XGE_DRV_STATS(rx_map_fail); m_freem(mp); - retValue = XGE_HAL_FAIL; - goto getbuf_out; + XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL); } /* Update descriptor private data */ rxd_priv->bufferArray[index] = mp; - rxd_priv->dmainfo[index].dma_phyaddr = htole64(paddr); + rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr); + rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map; + lldev->extra_dma_map = map; /* Pre-Read/Write sync */ - bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD); + + /* Unload DMA map of mbuf in current descriptor */ + bus_dmamap_unload(lldev->dma_tag_rx, map); /* Set descriptor buffer */ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr, - MJUMPAGESIZE); + cluster_size); } getbuf_out: - return retValue; + return status; } -/****************************************** - * Function: xgell_get_buf_3b_5b - * Parameters: Per adapter xgelldev_t - * structure pointer, descriptor, - * Rx private structure - * Return: HAL status code - * Description: Gets buffers from system mbuf - * buffer pool. 
- ******************************************/ +/** + * xge_get_buf_3b_5b + * Allocates new mbufs to be placed into descriptors (in multi-buffer modes) + * + * @dtrh Descriptor Handle + * @rxd_priv Rx Descriptor Private Data + * @lldev Per-adapter Data + * + * Returns XGE_HAL_OK or HAL error enums + */ int -xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev) +xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv, + xge_lldev_t *lldev) { bus_addr_t dma_pointers[5]; int dma_sizes[5]; - int retValue = XGE_HAL_OK, index; + int status = XGE_HAL_OK, index; int newindex = 0; for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { - retValue = xgell_get_buf(dtrh, rxd_priv, lldev, index); - if(retValue != XGE_HAL_OK) { + status = xge_get_buf(dtrh, rxd_priv, lldev, index); + if(status != XGE_HAL_OK) { for(newindex = 0; newindex < index; newindex++) { m_freem(rxd_priv->bufferArray[newindex]); } - return retValue; + XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status); } } @@ -3094,40 +3240,46 @@ xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes); } - return retValue; +_exit: + return status; } -/****************************************** - * Function: xgell_tx_compl - * Parameters: Channel handle, descriptor, - * transfer code, - * userdata -> per adapter - * xgelldev_t structure as void * - * Return: HAL status code - * Description: If an interrupt was raised - * to indicate DMA complete of - * the Tx packet, this function - * is called. It identifies the - * last TxD whose buffer was - * freed and frees all skbs - * whose data have already DMA'ed - * into the NICs internal memory. - ******************************************/ +/** + * xge_tx_compl + * If the interrupt is due to Tx completion, free the sent buffer + * + * @channelh Channel Handle + * @dtr Descriptor + * @t_code Transfer Code indicating success or error + * @userdata Per-adapter Data + * + * Returns XGE_HAL_OK or HAL error enum + */ xge_hal_status_e -xgell_tx_compl(xge_hal_channel_h channelh, +xge_tx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, void *userdata) { - xgell_tx_priv_t *ll_tx_priv; - mbuf_t m_buffer; - xgelldev_t *lldev = (xgelldev_t *)userdata; - struct ifnet *ifnetp = lldev->ifnetp; + xge_tx_priv_t *ll_tx_priv = NULL; + xge_lldev_t *lldev = (xge_lldev_t *)userdata; + struct ifnet *ifnetp = lldev->ifnetp; + mbuf_t m_buffer = NULL; + int qindex = xge_hal_channel_id(channelh); + + mtx_lock(&lldev->mtx_tx[qindex]); + + XGE_DRV_STATS(tx_completions); ifnetp->if_timer = 0; - /* For each completed descriptor: Get private structure, free buffer, - * do unmapping, and free descriptor */ + /* + * For each completed descriptor: Get private structure, free buffer, + * do unmapping, and free descriptor + */ do { + XGE_DRV_STATS(tx_desc_compl); + if(t_code) { + XGE_DRV_STATS(tx_tcode); xge_trace(XGE_TRACE, "t_code %d", t_code); xge_hal_device_handle_tcode(channelh, dtr, t_code); } @@ -3140,140 +3292,128 @@ xgell_tx_compl(xge_hal_channel_h channelh, xge_hal_fifo_dtr_free(channelh, dtr); } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) == XGE_HAL_OK); + xge_send_locked(ifnetp, qindex); ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE; + mtx_unlock(&lldev->mtx_tx[qindex]); + return XGE_HAL_OK; } -/****************************************** - * Function: xgell_tx_initial_replenish - * Parameters: Channel handle, descriptor, - * index (not used), userdata - * (not used), channel - * 
open/close/reopen option. - * Return: HAL status code - * Description: Creates DMA maps to be used - * for Tx - ******************************************/ +/** + * xge_tx_initial_replenish + * Initially allocate buffers and set them into descriptors for later use + * + * @channelh Tx Channel Handle + * @dtrh Descriptor Handle + * @index + * @userdata Per-adapter Data + * @reopen Channel open/reopen option + * + * Returns XGE_HAL_OK or HAL error enums + */ xge_hal_status_e -xgell_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, +xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, int index, void *userdata, xge_hal_channel_reopen_e reopen) { - xgell_tx_priv_t *txd_priv = NULL; - int retValue = XGE_HAL_OK; - device_t dev = NULL; + xge_tx_priv_t *txd_priv = NULL; + int status = XGE_HAL_OK; /* Get the user data portion from channel handle */ - xgelldev_t *lldev = xge_hal_channel_userdata(channelh); + xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(lldev == NULL) { - xge_trace(XGE_ERR, "Failed to get user data"); - retValue = XGE_HAL_FAIL; - goto txinit_out; + XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out, + XGE_HAL_FAIL); } - dev = lldev->device; /* Get the private data */ - txd_priv = (xgell_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh); + txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh); if(txd_priv == NULL) { - xge_trace(XGE_ERR, "Failed to get descriptor private data"); - retValue = XGE_HAL_FAIL; - goto txinit_out; + XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out, + XGE_HAL_FAIL); } /* Create DMA map for this descriptor */ if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT, &txd_priv->dma_map)) { - xge_trace(XGE_ERR, "DMA map creation for Tx descriptor failed"); - retValue = XGE_HAL_FAIL; - goto txinit_out; + XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed", + txinit_out, XGE_HAL_FAIL); } txinit_out: - return retValue; + return status; } -/****************************************** - * Function: xgell_rx_initial_replenish - * Parameters: Channel handle, descriptor, - * ring index, userdata - * (not used), channel - * open/close/reopen option. - * Return: HAL status code - * Description: Replenish descriptor with - * rx_buffer in Rx buffer pool. 
- ******************************************/ +/** + * xge_rx_initial_replenish + * Initially allocate buffers and set them into descriptors for later use + * + * @channelh Tx Channel Handle + * @dtrh Descriptor Handle + * @index Ring Index + * @userdata Per-adapter Data + * @reopen Channel open/reopen option + * + * Returns XGE_HAL_OK or HAL error enums + */ xge_hal_status_e -xgell_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, +xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, int index, void *userdata, xge_hal_channel_reopen_e reopen) { - xgell_rx_priv_t *rxd_priv = NULL; - int retValue = XGE_HAL_OK; - struct ifnet *ifnetp; - device_t dev; - int index1, index2; + xge_rx_priv_t *rxd_priv = NULL; + int status = XGE_HAL_OK; + int index1 = 0, index2 = 0; /* Get the user data portion from channel handle */ - xgelldev_t *lldev = xge_hal_channel_userdata(channelh); + xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(lldev == NULL) { - xge_ctrace(XGE_ERR, "xgeX: %s: Failed to get user data", - __FUNCTION__); - retValue = XGE_HAL_FAIL; - goto rxinit_out; + XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out, + XGE_HAL_FAIL); } - dev = lldev->device; /* Get the private data */ - rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); + rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); if(rxd_priv == NULL) { - xge_trace(XGE_ERR, "Failed to get descriptor private data"); - retValue = XGE_HAL_FAIL; - goto rxinit_out; + XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out, + XGE_HAL_FAIL); } - rxd_priv->bufferArray = - malloc(((sizeof(rxd_priv->bufferArray)) * (lldev->rxd_mbuf_cnt)), - M_DEVBUF, M_NOWAIT); + rxd_priv->bufferArray = xge_os_malloc(NULL, + (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); if(rxd_priv->bufferArray == NULL) { - xge_trace(XGE_ERR, - "Failed to allocate buffers for Rxd private structure"); - retValue = XGE_HAL_FAIL; - goto rxinit_out; + XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out, + XGE_HAL_FAIL); } - ifnetp = lldev->ifnetp; - if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { /* Create DMA map for these descriptors*/ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT, &rxd_priv->dmainfo[0].dma_map)) { - xge_trace(XGE_ERR, - "DMA map creation for Rx descriptor failed"); - retValue = XGE_HAL_FAIL; - goto rxinit_err_out; + XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed", + rxinit_err_out, XGE_HAL_FAIL); } /* Get a buffer, attach it to this descriptor */ - retValue = xgell_get_buf(dtrh, rxd_priv, lldev, 0); + status = xge_get_buf(dtrh, rxd_priv, lldev, 0); } else { for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) { /* Create DMA map for this descriptor */ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT , &rxd_priv->dmainfo[index1].dma_map)) { - xge_trace(XGE_ERR, - "Jumbo DMA map creation for Rx descriptor failed"); for(index2 = index1 - 1; index2 >= 0; index2--) { bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[index2].dma_map); } - retValue = XGE_HAL_FAIL; - goto rxinit_err_out; + XGE_EXIT_ON_ERR( + "Jumbo DMA map creation for Rx descriptor failed", + rxinit_err_out, XGE_HAL_FAIL); } } - retValue = xgell_get_buf_3b_5b(dtrh, rxd_priv, lldev); + status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev); } - if(retValue != XGE_HAL_OK) { + if(status != XGE_HAL_OK) { for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) { bus_dmamap_destroy(lldev->dma_tag_rx, 
rxd_priv->dmainfo[index1].dma_map); @@ -3285,119 +3425,89 @@ xgell_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, } rxinit_err_out: - free(rxd_priv->bufferArray,M_DEVBUF); + xge_os_free(NULL, rxd_priv->bufferArray, + (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); rxinit_out: - return retValue; + return status; } -/****************************************** - * Function: xgell_rx_term - * Parameters: Channel handle, descriptor, - * descriptor state, userdata - * (not used), channel - * open/close/reopen option. - * Return: None - * Description: Called by HAL to terminate - * all DTRs for ring channels. - ******************************************/ +/** + * xge_rx_term + * During unload terminate and free all descriptors + * + * @channelh Rx Channel Handle + * @dtrh Rx Descriptor Handle + * @state Descriptor State + * @userdata Per-adapter Data + * @reopen Channel open/reopen option + */ void -xgell_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, +xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen) { - xgell_rx_priv_t *rxd_priv; - xgelldev_t *lldev; - struct ifnet *ifnetp; - device_t dev; - int index; - -// ENTER_FUNCTION + xge_rx_priv_t *rxd_priv = NULL; + xge_lldev_t *lldev = NULL; + int index = 0; /* Descriptor state is not "Posted" */ - if(state != XGE_HAL_DTR_STATE_POSTED) { - xge_ctrace(XGE_ERR, "xgeX: %s: Descriptor not posted\n", - __FUNCTION__); - goto rxterm_out; - } + if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out; /* Get the user data portion */ lldev = xge_hal_channel_userdata(channelh); - dev = lldev->device; - ifnetp = lldev->ifnetp; - /* Get the private data */ - rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); + rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); - if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { - /* Post-Read sync */ - bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, - BUS_DMASYNC_POSTREAD); - - /* Do unmapping and destory DMA map */ - bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map); - m_freem(rxd_priv->bufferArray[0]); - bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map); - } - else { - for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { - /* Post-Read sync */ + for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { + if(rxd_priv->dmainfo[index].dma_map != NULL) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD); - - /* Do unmapping and destory DMA map */ bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map); - + if(rxd_priv->bufferArray[index] != NULL) + m_free(rxd_priv->bufferArray[index]); bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map); - - /* Free the buffer */ - m_free(rxd_priv->bufferArray[index]); } } - free(rxd_priv->bufferArray,M_DEVBUF); + xge_os_free(NULL, rxd_priv->bufferArray, + (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); /* Free the descriptor */ xge_hal_ring_dtr_free(channelh, dtrh); rxterm_out: -// LEAVE_FUNCTION return; } - -/****************************************** - * Function: xgell_tx_term - * Parameters: Channel handle, descriptor, - * descriptor state, userdata - * (not used), channel - * open/close/reopen option. - * Return: None - * Description: Called by HAL to terminate - * all DTRs for fifo channels. 
- ******************************************/ +/** + * xge_tx_term + * During unload terminate and free all descriptors + * + * @channelh Rx Channel Handle + * @dtrh Rx Descriptor Handle + * @state Descriptor State + * @userdata Per-adapter Data + * @reopen Channel open/reopen option + */ void -xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, +xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen) { - xgell_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr); - xgelldev_t *lldev = (xgelldev_t *)userdata; - -// ENTER_FUNCTION + xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr); + xge_lldev_t *lldev = (xge_lldev_t *)userdata; /* Destroy DMA map */ bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map); - -// LEAVE_FUNCTION } -/****************************************** +/** * xge_methods * * FreeBSD device interface entry points - ******************************************/ + */ static device_method_t xge_methods[] = { DEVMETHOD(device_probe, xge_probe), DEVMETHOD(device_attach, xge_attach), @@ -3409,7 +3519,8 @@ static device_method_t xge_methods[] = { static driver_t xge_driver = { "nxge", xge_methods, - sizeof(xgelldev_t), + sizeof(xge_lldev_t), }; static devclass_t xge_devclass; DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0); + diff --git a/sys/dev/nxge/if_nxge.h b/sys/dev/nxge/if_nxge.h index 1e99a62..1cc1286 100644 --- a/sys/dev/nxge/if_nxge.h +++ b/sys/dev/nxge/if_nxge.h @@ -26,75 +26,52 @@ * $FreeBSD$ */ -/* - * if_xge.h - */ - #ifndef _IF_XGE_H #define _IF_XGE_H #include <dev/nxge/include/xgehal.h> #include <dev/nxge/xge-osdep.h> -#if defined(XGE_FEATURE_TSO) && (__FreeBSD_version < 700026) -#undef XGE_FEATURE_TSO -#endif - -#if defined(XGE_FEATURE_LRO) -#if __FreeBSD_version < 700047 -#undef XGE_FEATURE_LRO -#undef XGE_HAL_CONFIG_LRO -#else -#define XGE_HAL_CONFIG_LRO -#endif -#endif - -#ifdef FUNC_PRINT -#define ENTER_FUNCTION xge_os_printf("Enter\t==>[%s]\n", __FUNCTION__); -#define LEAVE_FUNCTION xge_os_printf("Leave\t<==[%s]\n", __FUNCTION__); -#else -#define ENTER_FUNCTION -#define LEAVE_FUNCTION -#endif - /* Printing description, Copyright */ -#define DRIVER_VERSION XGELL_VERSION_MAJOR"." \ - XGELL_VERSION_MINOR"." \ - XGELL_VERSION_FIX"." \ - XGELL_VERSION_BUILD -#define COPYRIGHT_STRING "Copyright(c) 2002-2007 Neterion Inc." -#define PRINT_COPYRIGHT xge_os_printf("%s", COPYRIGHT_STRING) +#define XGE_DRIVER_VERSION \ + XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \ + XGELL_VERSION_FIX"."XGELL_VERSION_BUILD +#define XGE_COPYRIGHT "Copyright(c) 2002-2007 Neterion Inc." /* Printing */ #define xge_trace(trace, fmt, args...) xge_debug_ll(trace, fmt, ## args); -#define xge_ctrace(trace, fmt...) 
xge_debug_ll(trace, fmt); - -#define BUFALIGN(buffer_length) \ - if((buffer_length % 128) != 0) { \ - buffer_length += (128 - (buffer_length % 128)); \ - } - -static inline void * -xge_malloc(unsigned long size) { - void *vaddr = malloc(size, M_DEVBUF, M_NOWAIT); - bzero(vaddr, size); - return vaddr; + +#define XGE_ALIGN_TO(buffer_length, to) { \ + if((buffer_length % to) != 0) { \ + buffer_length += (to - (buffer_length % to)); \ + } \ +} + +#define XGE_EXIT_ON_ERR(text, label, return_value) { \ + xge_trace(XGE_ERR, "%s (Status: %d)", text, return_value); \ + status = return_value; \ + goto label; \ +} + +#define XGE_SET_BUFFER_MODE_IN_RINGS(mode) { \ + for(index = 0; index < XGE_RING_COUNT; index++) \ + ring_config->queue[index].buffer_mode = mode; \ } -#define SINGLE_ALLOC 0 -#define MULTI_ALLOC 1 -#define SAVE 0 -#define RESTORE 1 -#define UP 1 -#define DOWN 0 #define XGE_DEFAULT_USER_HARDCODED -1 -#define MAX_MBUF_FRAGS 20 /* Maximum number of fragments */ -#define MAX_SEGS 100 /* Maximum number of segments */ -#define XGELL_TX_LEVEL_LOW 16 +#define XGE_MAX_SEGS 100 /* Maximum number of segments */ +#define XGE_TX_LEVEL_LOW 16 +#define XGE_FIFO_COUNT XGE_HAL_MIN_FIFO_NUM #define XGE_RING_COUNT XGE_HAL_MIN_RING_NUM -#define BUFFER_SIZE 20 +#define XGE_BUFFER_SIZE 20 +#define XGE_LRO_DEFAULT_ENTRIES 12 +#define XGE_BAUDRATE 1000000000 /* Default values to configuration parameters */ +#define XGE_DEFAULT_ENABLED_TSO 1 +#define XGE_DEFAULT_ENABLED_LRO 1 +#define XGE_DEFAULT_ENABLED_MSI 1 +#define XGE_DEFAULT_BUFFER_MODE 1 #define XGE_DEFAULT_INITIAL_MTU 1500 #define XGE_DEFAULT_LATENCY_TIMER -1 #define XGE_DEFAULT_MAX_SPLITS_TRANS -1 @@ -144,16 +121,56 @@ xge_malloc(unsigned long size) { #define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN 1 #define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US 250 +#define XGE_DRV_STATS(param) (lldev->driver_stats.param++) + +#define XGE_SAVE_PARAM(to, what, value) to.what = value; + +#define XGE_GET_PARAM(str_kenv, to, param, hardcode) { \ + static int param##__LINE__; \ + if(testenv(str_kenv) == 1) { \ + getenv_int(str_kenv, ¶m##__LINE__); \ + } \ + else { \ + param##__LINE__ = hardcode; \ + } \ + XGE_SAVE_PARAM(to, param, param##__LINE__); \ +} + +#define XGE_GET_PARAM_MAC(str_kenv, param, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).mac), param, hardcode); + +#define XGE_GET_PARAM_FIFO(str_kenv, param, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).fifo), param, hardcode); + +#define XGE_GET_PARAM_FIFO_QUEUE(str_kenv, param, qindex, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex]), param, \ + hardcode); + +#define XGE_GET_PARAM_FIFO_QUEUE_TTI(str_kenv, param, qindex, tindex, hardcode)\ + XGE_GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex].tti[tindex]), \ + param, hardcode); + +#define XGE_GET_PARAM_RING(str_kenv, param, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).ring), param, hardcode); + +#define XGE_GET_PARAM_RING_QUEUE(str_kenv, param, qindex, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex]), param, \ + hardcode); + +#define XGE_GET_PARAM_RING_QUEUE_RTI(str_kenv, param, qindex, hardcode) \ + XGE_GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex].rti), param, \ + hardcode); + /* Values to identify the requests from getinfo tool in ioctl */ #define XGE_QUERY_STATS 1 #define XGE_QUERY_PCICONF 2 -#define XGE_QUERY_INTRSTATS 3 +#define XGE_QUERY_DEVSTATS 3 #define XGE_QUERY_DEVCONF 4 #define XGE_READ_VERSION 5 -#define XGE_QUERY_TCODE 6 -#define XGE_SET_BUFFER_MODE_1 7 -#define XGE_SET_BUFFER_MODE_2 8 
-#define XGE_SET_BUFFER_MODE_3 9 +#define XGE_QUERY_SWSTATS 6 +#define XGE_QUERY_DRIVERSTATS 7 +#define XGE_SET_BUFFER_MODE_1 8 +#define XGE_SET_BUFFER_MODE_2 9 #define XGE_SET_BUFFER_MODE_5 10 #define XGE_QUERY_BUFFER_MODE 11 @@ -164,124 +181,233 @@ xge_malloc(unsigned long size) { typedef struct mbuf *mbuf_t; -typedef enum xgell_event_e { +typedef enum xge_lables { + xge_free_all = 0, + xge_free_mutex = 1, + xge_free_terminate_hal_driver = 2, + xge_free_hal_device = 3, + xge_free_pci_info = 4, + xge_free_bar0 = 5, + xge_free_bar0_resource = 6, + xge_free_bar1 = 7, + xge_free_bar1_resource = 8, + xge_free_irq_resource = 9, + xge_free_terminate_hal_device = 10, + xge_free_media_interface = 11, +} xge_lables_e; + +typedef enum xge_option { + XGE_CHANGE_LRO = 0, + XGE_SET_MTU = 1 +} xge_option_e; + +typedef enum xge_event_e { XGE_LL_EVENT_TRY_XMIT_AGAIN = XGE_LL_EVENT_BASE + 1, - XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2, -} xgell_event_e; + XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2 +} xge_event_e; + +typedef struct xge_msi_info { + u16 msi_control; /* MSI control 0x42 */ + u32 msi_lower_address; /* MSI lower address 0x44 */ + u32 msi_higher_address; /* MSI higher address 0x48 */ + u16 msi_data; /* MSI data */ +} xge_msi_info_t; + +typedef struct xge_driver_stats_t { + /* ISR statistics */ + u64 isr_filter; + u64 isr_line; + u64 isr_msi; + + /* Tx statistics */ + u64 tx_calls; + u64 tx_completions; + u64 tx_desc_compl; + u64 tx_tcode; + u64 tx_defrag; + u64 tx_no_txd; + u64 tx_map_fail; + u64 tx_max_frags; + u64 tx_tso; + u64 tx_posted; + u64 tx_again; + u64 tx_lock_fail; + + /* Rx statistics */ + u64 rx_completions; + u64 rx_desc_compl; + u64 rx_tcode; + u64 rx_no_buf; + u64 rx_map_fail; + + /* LRO statistics */ + u64 lro_uncapable; + u64 lro_begin; + u64 lro_end1; + u64 lro_end2; + u64 lro_end3; + u64 lro_append; + u64 lro_session_exceeded; + u64 lro_close; +} xge_driver_stats_t; + +typedef struct xge_lro_entry_t { + SLIST_ENTRY(xge_lro_entry_t) next; + struct mbuf *m_head; + struct mbuf *m_tail; + struct ip *lro_header_ip; + int timestamp; + u32 tsval; + u32 tsecr; + u32 source_ip; + u32 dest_ip; + u32 next_seq; + u32 ack_seq; + u32 len; + u32 data_csum; + u16 window; + u16 source_port; + u16 dest_port; + u16 append_cnt; + u16 mss; +} xge_lro_entry_t; + +SLIST_HEAD(lro_head, xge_lro_entry_t); /* Adapter structure */ -typedef struct xgelldev { +typedef struct xge_lldev_t { device_t device; /* Device */ struct ifnet *ifnetp; /* Interface ifnet structure */ struct resource *irq; /* Resource structure for IRQ */ void *irqhandle; /* IRQ handle */ - pci_info_t *pdev; - struct ifmedia xge_media; /* In-kernel representation of a */ - /* single supported media type */ + xge_pci_info_t *pdev; /* PCI info */ xge_hal_device_t *devh; /* HAL: Device Handle */ - xge_hal_channel_h ring_channel[XGE_HAL_MAX_FIFO_NUM]; - /* Ring channel */ - xge_hal_channel_h fifo_channel_0; /* FIFO channel */ - struct mtx xge_lock; /* Mutex - Default */ + struct mtx mtx_drv; /* Mutex - Driver */ + struct mtx mtx_tx[XGE_FIFO_COUNT]; + /* Mutex - Tx */ + char mtx_name_drv[16];/*Mutex Name - Driver */ + char mtx_name_tx[16][XGE_FIFO_COUNT]; + /* Mutex Name - Tx */ struct callout timer; /* Timer for polling */ - struct xge_hal_stats_hw_info_t *hwstats; /* Hardware Statistics */ - int saved_regs[16]; /* To save register space */ - int xge_mtu; /* MTU */ - int initialized; /* Flag: Initialized or not */ + struct ifmedia media; /* In-kernel representation of a */ + /* single supported media type */ + 
xge_hal_channel_h fifo_channel[XGE_FIFO_COUNT]; + /* FIFO channels */ + xge_hal_channel_h ring_channel[XGE_RING_COUNT]; + /* Ring channels */ bus_dma_tag_t dma_tag_tx; /* Tag for dtr dma mapping (Tx) */ bus_dma_tag_t dma_tag_rx; /* Tag for dtr dma mapping (Rx) */ + bus_dmamap_t extra_dma_map; /* Extra DMA map for Rx */ + xge_msi_info_t msi_info; /* MSI info */ + xge_driver_stats_t driver_stats; /* Driver statistics */ + int initialized; /* Flag: Initialized or not */ int all_multicast; /* All multicast flag */ int macaddr_count; /* Multicast address count */ int in_detach; /* To avoid ioctl during detach */ int buffer_mode; /* Buffer Mode */ int rxd_mbuf_cnt; /* Number of buffers used */ int rxd_mbuf_len[5];/* Buffer lengths */ -} xgelldev_t; + int enabled_tso; /* Flag: TSO Enabled */ + int enabled_lro; /* Flag: LRO Enabled */ + int enabled_msi; /* Flag: MSI Enabled */ + int mtu; /* Interface MTU */ + int lro_num; /* Number of LRO sessions */ + struct lro_head lro_active; /* Active LRO sessions */ + struct lro_head lro_free; /* Free LRO sessions */ +} xge_lldev_t; /* Rx descriptor private structure */ -typedef struct { - mbuf_t *bufferArray; - struct xge_dma_mbuf dmainfo[5]; -} xgell_rx_priv_t; +typedef struct xge_rx_priv_t { + mbuf_t *bufferArray; + xge_dma_mbuf_t dmainfo[5]; +} xge_rx_priv_t; /* Tx descriptor private structure */ -typedef struct { +typedef struct xge_tx_priv_t { mbuf_t buffer; bus_dmamap_t dma_map; -} xgell_tx_priv_t; +} xge_tx_priv_t; /* BAR0 Register */ -typedef struct barregister { - char option[2]; +typedef struct xge_register_t { + char option[2]; u64 offset; u64 value; -}bar0reg_t; +}xge_register_t; -void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev); +void xge_init_params(xge_hal_device_config_t *, device_t); void xge_init(void *); -void xge_init_locked(void *); -void xge_stop(xgelldev_t *); -void freeResources(device_t, int); -void xgell_callback_link_up(void *); -void xgell_callback_link_down(void *); -void xgell_callback_crit_err(void *, xge_hal_event_e, u64); -void xgell_callback_event(xge_queue_item_t *); +void xge_device_init(xge_lldev_t *, xge_hal_channel_reopen_e); +void xge_device_stop(xge_lldev_t *, xge_hal_channel_reopen_e); +void xge_stop(xge_lldev_t *); +void xge_resources_free(device_t, xge_lables_e); +void xge_callback_link_up(void *); +void xge_callback_link_down(void *); +void xge_callback_crit_err(void *, xge_hal_event_e, u64); +void xge_callback_event(xge_queue_item_t *); int xge_ifmedia_change(struct ifnet *); void xge_ifmedia_status(struct ifnet *, struct ifmediareq *); int xge_ioctl(struct ifnet *, unsigned long, caddr_t); +int xge_ioctl_stats(xge_lldev_t *, struct ifreq *); +int xge_ioctl_registers(xge_lldev_t *, struct ifreq *); void xge_timer(void *); -int xge_intr_filter(void *); -void xge_intr(void *); -int xgell_rx_open(int, xgelldev_t *, xge_hal_channel_reopen_e); -int xgell_tx_open(xgelldev_t *, xge_hal_channel_reopen_e); -int xgell_channel_close(xgelldev_t *, xge_hal_channel_reopen_e); -int xgell_channel_open(xgelldev_t *, xge_hal_channel_reopen_e); -xge_hal_status_e xgell_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *); -xge_hal_status_e xgell_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *); -xge_hal_status_e xgell_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h, - int, void *, xge_hal_channel_reopen_e); -xge_hal_status_e xgell_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h, - int, void *, xge_hal_channel_reopen_e); -void xgell_rx_term(xge_hal_channel_h, xge_hal_dtr_h, 
xge_hal_dtr_state_e, - void *, xge_hal_channel_reopen_e); -void xgell_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e, - void *, xge_hal_channel_reopen_e); -void xgell_set_mbuf_cflags(mbuf_t); +int xge_isr_filter(void *); +void xge_isr_line(void *); +void xge_isr_msi(void *); +void xge_enable_msi(xge_lldev_t *); +int xge_rx_open(int, xge_lldev_t *, xge_hal_channel_reopen_e); +int xge_tx_open(xge_lldev_t *, xge_hal_channel_reopen_e); +void xge_channel_close(xge_lldev_t *, xge_hal_channel_reopen_e); +int xge_channel_open(xge_lldev_t *, xge_hal_channel_reopen_e); +xge_hal_status_e xge_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *); +xge_hal_status_e xge_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *); +xge_hal_status_e xge_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h, + int, void *, xge_hal_channel_reopen_e); +xge_hal_status_e xge_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h, + int, void *, xge_hal_channel_reopen_e); +void xge_rx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e, + void *, xge_hal_channel_reopen_e); +void xge_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e, + void *, xge_hal_channel_reopen_e); +void xge_set_mbuf_cflags(mbuf_t); void xge_send(struct ifnet *); -void xge_send_locked(struct ifnet *); -int xgell_get_multimode_normalbuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev); -int xgell_get_multimode_jumbobuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev, int lock); -int xgell_get_second_buffer(xgell_rx_priv_t *rxd_priv, xgelldev_t *lldev); -int xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev, int index); -int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr, - xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv); -int xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv, - xgelldev_t *lldev); +static void inline xge_send_locked(struct ifnet *, int); +int xge_get_buf(xge_hal_dtr_h, xge_rx_priv_t *, xge_lldev_t *, int); +int xge_ring_dtr_get(mbuf_t, xge_hal_channel_h, xge_hal_dtr_h, xge_lldev_t *, + xge_rx_priv_t *); +int xge_get_buf_3b_5b(xge_hal_dtr_h, xge_rx_priv_t *, xge_lldev_t *); void dmamap_cb(void *, bus_dma_segment_t *, int, int); -void xgell_reset(xgelldev_t *); -void xge_setmulti(xgelldev_t *); -void xge_enable_promisc(xgelldev_t *); -void xge_disable_promisc(xgelldev_t *); -int changeMtu(xgelldev_t *, int); -int changeBufmode(xgelldev_t *, int); +void xge_reset(xge_lldev_t *); +void xge_setmulti(xge_lldev_t *); +void xge_enable_promisc(xge_lldev_t *); +void xge_disable_promisc(xge_lldev_t *); +int xge_change_mtu(xge_lldev_t *, int); +void xge_buffer_mode_init(xge_lldev_t *, int); void xge_initialize(device_t, xge_hal_channel_reopen_e); void xge_terminate(device_t, xge_hal_channel_reopen_e); -void if_up_locked(xgelldev_t *); -void if_down_locked(xgelldev_t *); int xge_probe(device_t); int xge_driver_initialize(void); void xge_media_init(device_t); void xge_pci_space_save(device_t); void xge_pci_space_restore(device_t); +void xge_msi_info_save(xge_lldev_t *); +void xge_msi_info_restore(xge_lldev_t *); int xge_attach(device_t); int xge_interface_setup(device_t); int xge_detach(device_t); int xge_shutdown(device_t); -int xge_suspend(device_t); -int xge_resume(device_t); +void xge_mutex_init(xge_lldev_t *); +void xge_mutex_destroy(xge_lldev_t *); +void xge_print_info(xge_lldev_t *); +void xge_lro_flush_sessions(xge_lldev_t *); +void xge_rx_buffer_sizes_set(xge_lldev_t *, int, int); +void 
xge_accumulate_large_rx(xge_lldev_t *, struct mbuf *, int, + xge_rx_priv_t *); +xge_hal_status_e xge_create_dma_tags(device_t); +void xge_add_sysctl_handlers(xge_lldev_t *); +void xge_confirm_changes(xge_lldev_t *, xge_option_e); +static int xge_lro_accumulate(xge_lldev_t *, struct mbuf *); +static void xge_lro_flush(xge_lldev_t *, xge_lro_entry_t *); #endif // _IF_XGE_H diff --git a/sys/dev/nxge/include/build-version.h b/sys/dev/nxge/include/build-version.h index b9b5e00..33fb24d 100644 --- a/sys/dev/nxge/include/build-version.h +++ b/sys/dev/nxge/include/build-version.h @@ -1,6 +1,6 @@ -/* $FreeBSD$ */ #ifndef BUILD_VERSION_H #define BUILD_VERSION_H /* Do not edit! Automatically generated when released.*/ -#define GENERATED_BUILD_VERSION "10294" +/* $FreeBSD$ */ +#define GENERATED_BUILD_VERSION "11230" #endif /* BUILD_VERSION_H */ diff --git a/sys/dev/nxge/include/version.h b/sys/dev/nxge/include/version.h index 0a212f4..47d4fc1 100644 --- a/sys/dev/nxge/include/version.h +++ b/sys/dev/nxge/include/version.h @@ -26,26 +26,18 @@ * $FreeBSD$ */ -/* - * FileName : version.h - * - * Description: versioning file - * - * Created: 3 September 2004 - */ - #ifndef VERSION_H #define VERSION_H #include <dev/nxge/include/build-version.h> -#define XGE_HAL_VERSION_MAJOR "2" -#define XGE_HAL_VERSION_MINOR "5" -#define XGE_HAL_VERSION_FIX "0" -#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION +#define XGE_HAL_VERSION_MAJOR "2" +#define XGE_HAL_VERSION_MINOR "0" +#define XGE_HAL_VERSION_FIX "9" +#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION #define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\ - XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD -#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION + XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD +#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION /* Link Layer versioning */ #include <dev/nxge/xgell-version.h> diff --git a/sys/dev/nxge/include/xge-debug.h b/sys/dev/nxge/include/xge-debug.h index a4efbcb..74b9756 100644 --- a/sys/dev/nxge/include/xge-debug.h +++ b/sys/dev/nxge/include/xge-debug.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xge-debug.h - * - * Description: debug facilities - * - * Created: 6 May 2004 - */ - #ifndef XGE_DEBUG_H #define XGE_DEBUG_H @@ -54,10 +46,11 @@ __EXTERN_BEGIN_DECLS #ifdef XGE_DEBUG_FP -#define XGE_DEBUG_FP_DEVICE 0x1 +#define XGE_DEBUG_FP_DEVICE 0x1 #define XGE_DEBUG_FP_CHANNEL 0x2 -#define XGE_DEBUG_FP_FIFO 0x4 -#define XGE_DEBUG_FP_RING 0x8 +#define XGE_DEBUG_FP_FIFO 0x4 +#define XGE_DEBUG_FP_RING 0x8 +#define XGE_DEBUG_FP_ALL 0xff #endif /** @@ -125,15 +118,6 @@ extern int *g_level; #define XGE_COMPONENT_HAL_QUEUE 0x00000040 #define XGE_COMPONENT_HAL_INTERRUPT 0x00000080 #define XGE_COMPONENT_HAL_STATS 0x00000100 -#ifdef XGEHAL_RNIC -#define XGE_COMPONENT_HAL_DMQ 0x00000200 -#define XGE_COMPONENT_HAL_UMQ 0x00000400 -#define XGE_COMPONENT_HAL_SQ 0x00000800 -#define XGE_COMPONENT_HAL_SRQ 0x00001000 -#define XGE_COMPONENT_HAL_CQRQ 0x00002000 -#define XGE_COMPONENT_HAL_POOL 0x00004000 -#define XGE_COMPONENT_HAL_BITMAP 0x00008000 -#endif /* space for CORE_XXX */ #define XGE_COMPONENT_OSDEP 0x10000000 @@ -146,9 +130,9 @@ extern int *g_level; #ifndef __GNUC__ #ifdef XGE_TRACE_INTO_CIRCULAR_ARR - #define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt) + #define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt) #else - #define xge_trace_aux(fmt) xge_os_vaprintf(fmt) + #define xge_trace_aux(fmt) xge_os_vaprintf(fmt) #endif /** @@ -164,24 +148,24 @@ extern int 
*g_level; */ #define xge_debug(module, level, fmt) { \ if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \ - (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \ - level >= *g_level && module & *(unsigned int *)g_module_mask) { \ - xge_trace_aux(fmt); \ + (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \ + level >= *g_level && module & *(unsigned int *)g_module_mask) { \ + xge_trace_aux(fmt); \ } \ } #else /* __GNUC__ */ #ifdef XGE_TRACE_INTO_CIRCULAR_ARR - #define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt) + #define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt) #else - #define xge_trace_aux(fmt...) xge_os_printf(fmt) + #define xge_trace_aux(fmt...) xge_os_printf(fmt) #endif #define xge_debug(module, level, fmt...) { \ if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \ - (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \ - level >= *g_level && module & *(unsigned int *)g_module_mask) { \ - xge_trace_aux(fmt); \ + (level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \ + level >= *g_level && module & *(unsigned int *)g_module_mask) { \ + xge_trace_aux(fmt); \ } \ } #endif /* __GNUC__ */ @@ -353,136 +337,6 @@ static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {} #endif /* __GNUC__ */ #endif -#ifdef XGEHAL_RNIC - -#if (XGE_COMPONENT_HAL_DMQ & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_DMQ; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_dmq(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_DMQ, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_dmq(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_UMQ & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_UMQ; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_umq(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_UMQ, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_umq(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_SQ & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_SQ; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_sq(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_SQ, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_sq(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_SRQ & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_SRQ; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_srq(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_SRQ, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) 
{} -#else /* __GNUC__ */ -#define xge_debug_srq(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_CQRQ & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_CQRQ; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_cqrq(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_CQRQ, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_cqrq(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_POOL & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_POOL; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_pool(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_POOL, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_pool(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#if (XGE_COMPONENT_HAL_BITMAP & XGE_DEBUG_MODULE_MASK) -#ifndef __GNUC__ -static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) { - u32 module = XGE_COMPONENT_HAL_BITMAP; - xge_debug(module, level, fmt); -} -#else /* __GNUC__ */ -#define xge_debug_bitmap(level, fmt...) \ - xge_debug(XGE_COMPONENT_HAL_BITMAP, level, fmt) -#endif /* __GNUC__ */ -#else -#ifndef __GNUC__ -static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {} -#else /* __GNUC__ */ -#define xge_debug_bitmap(level, fmt...) -#endif /* __GNUC__ */ -#endif - -#endif - #if (XGE_COMPONENT_OSDEP & XGE_DEBUG_MODULE_MASK) #ifndef __GNUC__ static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) { @@ -531,13 +385,6 @@ static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {} -static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_hal(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {} static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {} @@ -557,7 +404,7 @@ static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {} * time. 
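For reference, a minimal sketch of how the per-component wrappers above are used (illustrative only, not part of this patch; it assumes the nxge include chain is on the include path and that XGE_TRACE is one of the xge_debug_level_e values referenced by the filter):

#include <dev/nxge/include/xge-debug.h>

static void
example_trace(int qid)
{
	/*
	 * Emitted only when every gate in xge_debug() passes:
	 * XGE_COMPONENT_OSDEP set in XGE_DEBUG_TRACE_MASK (or in
	 * XGE_DEBUG_ERR_MASK for XGE_ERR messages), the level at or above
	 * the runtime *g_level, and the component bit enabled in
	 * *g_module_mask.
	 */
	xge_debug_osdep(XGE_TRACE, "example: queue %d serviced", qid);
}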
*/ #define xge_assert(test) { \ - if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \ + if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \ __FILE__, __LINE__); } #else #define xge_assert(test) diff --git a/sys/dev/nxge/include/xge-defs.h b/sys/dev/nxge/include/xge-defs.h index 744a6b9..17bc907 100644 --- a/sys/dev/nxge/include/xge-defs.h +++ b/sys/dev/nxge/include/xge-defs.h @@ -26,35 +26,27 @@ * $FreeBSD$ */ -/* - * FileName : xge-defs.h - * - * Description: global definitions - * - * Created: 13 May 2004 - */ - #ifndef XGE_DEFS_H #define XGE_DEFS_H -#define XGE_PCI_VENDOR_ID 0x17D5 -#define XGE_PCI_DEVICE_ID_XENA_1 0x5731 -#define XGE_PCI_DEVICE_ID_XENA_2 0x5831 -#define XGE_PCI_DEVICE_ID_HERC_1 0x5732 -#define XGE_PCI_DEVICE_ID_HERC_2 0x5832 -#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733 -#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833 +#define XGE_PCI_VENDOR_ID 0x17D5 +#define XGE_PCI_DEVICE_ID_XENA_1 0x5731 +#define XGE_PCI_DEVICE_ID_XENA_2 0x5831 +#define XGE_PCI_DEVICE_ID_HERC_1 0x5732 +#define XGE_PCI_DEVICE_ID_HERC_2 0x5832 +#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733 +#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833 -#define XGE_DRIVER_NAME "Xge driver" -#define XGE_DRIVER_VENDOR "Neterion, Inc" -#define XGE_CHIP_FAMILY "Xframe" -#define XGE_SUPPORTED_MEDIA_0 "Fiber" +#define XGE_DRIVER_NAME "Xge driver" +#define XGE_DRIVER_VENDOR "Neterion, Inc" +#define XGE_CHIP_FAMILY "Xframe" +#define XGE_SUPPORTED_MEDIA_0 "Fiber" #include <dev/nxge/include/version.h> #if defined(__cplusplus) -#define __EXTERN_BEGIN_DECLS extern "C" { -#define __EXTERN_END_DECLS } +#define __EXTERN_BEGIN_DECLS extern "C" { +#define __EXTERN_END_DECLS } #else #define __EXTERN_BEGIN_DECLS #define __EXTERN_END_DECLS @@ -67,7 +59,7 @@ __EXTERN_BEGIN_DECLS /*---------------------------- DMA attributes ------------------------------*/ /* XGE_OS_DMA_REQUIRES_SYNC - should be defined or - NOT defined in the Makefile */ + NOT defined in the Makefile */ #define XGE_OS_DMA_CACHELINE_ALIGNED 0x1 /* Either STREAMING or CONSISTENT should be used. 
The combination of both or none is invalid */ @@ -77,7 +69,7 @@ __EXTERN_BEGIN_DECLS /*---------------------------- common stuffs -------------------------------*/ -#define XGE_OS_LLXFMT "%llx" +#define XGE_OS_LLXFMT "%llx" #define XGE_OS_NEWLINE "\n" #ifdef XGE_OS_MEMORY_CHECK typedef struct { @@ -87,56 +79,56 @@ typedef struct { int line; } xge_os_malloc_t; -#define XGE_OS_MALLOC_CNT_MAX 64*1024 +#define XGE_OS_MALLOC_CNT_MAX 64*1024 extern xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX]; extern int g_malloc_cnt; #define XGE_OS_MEMORY_CHECK_MALLOC(_vaddr, _size, _file, _line) { \ if (_vaddr) { \ - int i; \ - for (i=0; i<g_malloc_cnt; i++) { \ - if (g_malloc_arr[i].ptr == NULL) { \ - break; \ - } \ - } \ - if (i == g_malloc_cnt) { \ - g_malloc_cnt++; \ - if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \ - xge_os_bug("g_malloc_cnt exceed %d", \ - XGE_OS_MALLOC_CNT_MAX); \ - } \ - } \ - g_malloc_arr[i].ptr = _vaddr; \ - g_malloc_arr[i].size = _size; \ - g_malloc_arr[i].file = _file; \ - g_malloc_arr[i].line = _line; \ - for (i=0; i<_size; i++) { \ - *((char *)_vaddr+i) = 0x5a; \ - } \ + int index_mem_chk; \ + for (index_mem_chk=0; index_mem_chk < g_malloc_cnt; index_mem_chk++) { \ + if (g_malloc_arr[index_mem_chk].ptr == NULL) { \ + break; \ + } \ + } \ + if (index_mem_chk == g_malloc_cnt) { \ + g_malloc_cnt++; \ + if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \ + xge_os_bug("g_malloc_cnt exceed %d", \ + XGE_OS_MALLOC_CNT_MAX); \ + } \ + } \ + g_malloc_arr[index_mem_chk].ptr = _vaddr; \ + g_malloc_arr[index_mem_chk].size = _size; \ + g_malloc_arr[index_mem_chk].file = _file; \ + g_malloc_arr[index_mem_chk].line = _line; \ + for (index_mem_chk=0; index_mem_chk<_size; index_mem_chk++) { \ + *((char *)_vaddr+index_mem_chk) = 0x5a; \ + } \ } \ } #define XGE_OS_MEMORY_CHECK_FREE(_vaddr, _check_size) { \ - int i; \ - for (i=0; i<XGE_OS_MALLOC_CNT_MAX; i++) { \ - if (g_malloc_arr[i].ptr == _vaddr) { \ - g_malloc_arr[i].ptr = NULL; \ - if(_check_size && g_malloc_arr[i].size!=_check_size) { \ - xge_os_printf("OSPAL: freeing with wrong " \ - "size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \ - (int)_check_size, \ - g_malloc_arr[i].file, \ - g_malloc_arr[i].line, \ - (unsigned long long)(ulong_t) \ - g_malloc_arr[i].ptr, \ - g_malloc_arr[i].size); \ - } \ - break; \ - } \ + int index_mem_chk; \ + for (index_mem_chk=0; index_mem_chk < XGE_OS_MALLOC_CNT_MAX; index_mem_chk++) { \ + if (g_malloc_arr[index_mem_chk].ptr == _vaddr) { \ + g_malloc_arr[index_mem_chk].ptr = NULL; \ + if(_check_size && g_malloc_arr[index_mem_chk].size!=_check_size) { \ + xge_os_printf("OSPAL: freeing with wrong " \ + "size %d! 
allocated at %s:%d:"XGE_OS_LLXFMT":%d", \ + (int)_check_size, \ + g_malloc_arr[index_mem_chk].file, \ + g_malloc_arr[index_mem_chk].line, \ + (unsigned long long)(ulong_t) \ + g_malloc_arr[index_mem_chk].ptr, \ + g_malloc_arr[index_mem_chk].size); \ + } \ + break; \ + } \ } \ - if (i == XGE_OS_MALLOC_CNT_MAX) { \ - xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \ - (unsigned long long)(ulong_t)_vaddr); \ + if (index_mem_chk == XGE_OS_MALLOC_CNT_MAX) { \ + xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \ + (unsigned long long)(ulong_t)_vaddr); \ } \ } #else diff --git a/sys/dev/nxge/include/xge-list.h b/sys/dev/nxge/include/xge-list.h index c49424d..544a623 100644 --- a/sys/dev/nxge/include/xge-list.h +++ b/sys/dev/nxge/include/xge-list.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xge-list.h - * - * Description: Generic bi-directional linked list implementation - * - * Created: 14 May 2004 - */ - #ifndef XGE_LIST_H #define XGE_LIST_H @@ -76,9 +68,9 @@ static inline void xge_list_init (xge_list_t *header) */ static inline int xge_list_is_empty(xge_list_t *header) { - xge_assert(header != NULL); + xge_assert(header != NULL); - return header->next == header; + return header->next == header; } /** @@ -96,9 +88,9 @@ static inline xge_list_t *xge_list_first_get(xge_list_t *header) xge_assert(header->prev != NULL); if(header->next == header) - return NULL; + return NULL; else - return header->next; + return header->next; } /** @@ -131,7 +123,7 @@ static inline void xge_list_remove(xge_list_t *item) * See also: xge_list_remove(), xge_list_insert_before(), xge_list_t{}. */ static inline void xge_list_insert (xge_list_t *new_item, - xge_list_t *prev_item) + xge_list_t *prev_item) { xge_assert(new_item != NULL); xge_assert(prev_item != NULL); @@ -151,7 +143,7 @@ static inline void xge_list_insert (xge_list_t *new_item, * Insert new item (new_item) before given item (next_item). */ static inline void xge_list_insert_before (xge_list_t *new_item, - xge_list_t *next_item) + xge_list_t *next_item) { xge_assert(new_item != NULL); xge_assert(next_item != NULL); @@ -165,34 +157,34 @@ static inline void xge_list_insert_before (xge_list_t *new_item, #define xge_list_for_each(_p, _h) \ for (_p = (_h)->next, xge_os_prefetch(_p->next); _p != (_h); \ - _p = _p->next, xge_os_prefetch(_p->next)) + _p = _p->next, xge_os_prefetch(_p->next)) #define xge_list_for_each_safe(_p, _n, _h) \ - for (_p = (_h)->next, _n = _p->next; _p != (_h); \ - _p = _n, _n = _p->next) + for (_p = (_h)->next, _n = _p->next; _p != (_h); \ + _p = _n, _n = _p->next) #ifdef __GNUC__ /** * xge_container_of - Given a member, return the containing structure. - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. * * Cast a member of a structure out to the containing structure. 
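For reference, a minimal sketch of the list helpers above together with the xge_container_of() macro defined just below (illustrative only, not part of this patch; assumes the nxge include chain):

#include <dev/nxge/include/xge-list.h>

typedef struct example_event_t {
	int        code;
	xge_list_t node;            /* embedded linkage */
} example_event_t;

static void
example_init_and_insert(xge_list_t *head, example_event_t *ev)
{
	xge_list_init(head);
	xge_list_insert(&ev->node, head);   /* new item goes after head */
}

static int
example_count_nonzero(xge_list_t *head)
{
	xge_list_t *p;
	int n = 0;

	xge_list_for_each(p, head) {
		example_event_t *ev =
		    xge_container_of(p, example_event_t, node);
		if (ev->code != 0)
			n++;
	}
	return n;
}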
*/ -#define xge_container_of(ptr, type, member) ({ \ - __typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );}) +#define xge_container_of(ptr, type, member) ({ \ + __typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );}) #else /* type unsafe version */ #define xge_container_of(ptr, type, member) \ - ((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member))) + ((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member))) #endif /** * xge_offsetof - Offset of the member in the containing structure. - * @t: struct name. - * @m: the name of the member within the struct. + * @t: struct name. + * @m: the name of the member within the struct. * * Return the offset of the member @m in the structure @t. */ diff --git a/sys/dev/nxge/include/xge-os-pal.h b/sys/dev/nxge/include/xge-os-pal.h index 5c92fe6..166cadb 100644 --- a/sys/dev/nxge/include/xge-os-pal.h +++ b/sys/dev/nxge/include/xge-os-pal.h @@ -26,15 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xge-os-pal.h - * - * Description: top-level header file. works just like switching between - * os-depndent parts - * - * Created: 6st May 2004 - */ - #ifndef XGE_OS_PAL_H #define XGE_OS_PAL_H @@ -46,10 +37,6 @@ __EXTERN_BEGIN_DECLS /* platform specific header */ #include <dev/nxge/xge-osdep.h> -#ifdef XGEHAL_RNIC -#define IN -#define OUT -#endif #if !defined(XGE_OS_PLATFORM_64BIT) && !defined(XGE_OS_PLATFORM_32BIT) #error "either 32bit or 64bit switch must be defined!" @@ -60,20 +47,20 @@ __EXTERN_BEGIN_DECLS #endif #if defined(XGE_OS_PLATFORM_64BIT) -#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a +#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a #else -#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a +#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a #endif -#define XGE_OS_TRACE_MSGBUF_MAX 512 +#define XGE_OS_TRACE_MSGBUF_MAX 512 typedef struct xge_os_tracebuf_t { - int wrapped_once; /* circular buffer been wrapped */ - int timestamp; /* whether timestamps are enabled */ - volatile int offset; /* offset within the tracebuf */ - int size; /* total size of trace buffer */ - char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */ - int msgbuf_max; /* actual size of msg buffer */ - char *data; /* pointer to data buffer */ + int wrapped_once; /* circular buffer been wrapped */ + int timestamp; /* whether timestamps are enabled */ + volatile int offset; /* offset within the tracebuf */ + int size; /* total size of trace buffer */ + char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */ + int msgbuf_max; /* actual size of msg buffer */ + char *data; /* pointer to data buffer */ } xge_os_tracebuf_t; extern xge_os_tracebuf_t *g_xge_os_tracebuf; @@ -86,42 +73,42 @@ extern char *dmesg_start; int msgsize = xge_os_strlen(tb->msg) + 2; \ int offset = tb->offset; \ if (msgsize != 2 && msgsize < tb->msgbuf_max) { \ - int leftsize = tb->size - offset; \ - if ((msgsize + tb->msgbuf_max) > leftsize) { \ - xge_os_memzero(tb->data + offset, leftsize); \ - offset = 0; \ - tb->wrapped_once = 1; \ - } \ - xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \ - *(tb->data + offset + msgsize-1) = '\n'; \ - *(tb->data + offset + msgsize) = 0; \ - offset += msgsize; \ - tb->offset = offset; \ - dmesg_start = tb->data + offset; \ - *tb->msg = 0; \ + int leftsize = tb->size - offset; \ + if ((msgsize + tb->msgbuf_max) > leftsize) { \ + xge_os_memzero(tb->data + offset, leftsize); \ + offset = 0; \ + tb->wrapped_once = 
1; \ + } \ + xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \ + *(tb->data + offset + msgsize-1) = '\n'; \ + *(tb->data + offset + msgsize) = 0; \ + offset += msgsize; \ + tb->offset = offset; \ + dmesg_start = tb->data + offset; \ + *tb->msg = 0; \ } \ } #define xge_os_vatrace(tb, fmt) { \ if (tb != NULL) { \ - char *_p = tb->msg; \ - if (tb->timestamp) { \ - xge_os_timestamp(tb->msg); \ - _p = tb->msg + xge_os_strlen(tb->msg); \ - } \ - xge_os_vasprintf(_p, fmt); \ - __xge_trace(tb); \ + char *_p = tb->msg; \ + if (tb->timestamp) { \ + xge_os_timestamp(tb->msg); \ + _p = tb->msg + xge_os_strlen(tb->msg); \ + } \ + xge_os_vasprintf(_p, fmt); \ + __xge_trace(tb); \ } \ } #ifdef __GNUC__ #define xge_os_trace(tb, fmt...) { \ if (tb != NULL) { \ - if (tb->timestamp) { \ - xge_os_timestamp(tb->msg); \ - } \ - xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \ - __xge_trace(tb); \ + if (tb->timestamp) { \ + xge_os_timestamp(tb->msg); \ + } \ + xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \ + __xge_trace(tb); \ } \ } #endif /* __GNUC__ */ diff --git a/sys/dev/nxge/include/xge-os-template.h b/sys/dev/nxge/include/xge-os-template.h deleted file mode 100644 index 4d50e6e..0000000 --- a/sys/dev/nxge/include/xge-os-template.h +++ /dev/null @@ -1,614 +0,0 @@ -/*- - * Copyright (c) 2002-2007 Neterion, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -/* - * FileName : xge-os-template.h - * - * Description: Template for creating platform-dependent "glue" code. - * - * Created: 6 May 2004 - */ - -#ifndef XGE_OS_TEMPLATE_H -#define XGE_OS_TEMPLATE_H - -#ifndef TEMPLATE -# error "should not be compiled for platforms other than TEMPLATE..." -#endif - -/* ------------------------- includes and defines ------------------------- */ - -/* - * Note: - * - * - on some operating systems like Linux & FreeBSD, there is a macro - * by using which it is possible to determine endiennes automatically - */ -#define XGE_OS_HOST_BIG_ENDIAN TEMPLATE - -#define XGE_OS_HOST_PAGE_SIZE TEMPLATE - -/* ---------------------- fixed size primitive types -----------------------*/ - -/* - * Note: - * - * - u## - means ## bits unsigned int/long - * - all names must be preserved since HAL using them. 
- * - ulong_t is platform specific, i.e. for 64bit - 64bit size, for - * 32bit - 32bit size - */ -#define TEMPLATE u8 -#define TEMPLATE u16 -#define TEMPLATE u32 -#define TEMPLATE u64 -#define TEMPLATE ulong_t -#define TEMPLATE ptrdiff_t -#define TEMPLATE dma_addr_t -#define TEMPLATE spinlock_t -typedef TEMPLATE pci_dev_h; -typedef TEMPLATE pci_reg_h; -typedef TEMPLATE pci_dma_h; -typedef TEMPLATE pci_irq_h; -typedef TEMPLATE pci_cfg_h; -typedef TEMPLATE pci_dma_acc_h; - -/* -------------------------- "libc" functionality -------------------------*/ - -/* - * Note: - * - * - "libc" functionality maps one-to-one to be posix-like - */ -/* Note: use typedef: xge_os_memzero(void* mem, int size); */ -#define xge_os_memzero TEMPLATE - -/* Note: the 1st argument MUST be destination, like in: - * void *memcpy(void *dest, const void *src, size_t n); - */ -#define xge_os_memcpy TEMPLATE - -/* Note: should accept format (the 1st argument) and a variable - * number of arguments thereafter.. */ -#define xge_os_printf(fmt...) TEMPLATE - -#define xge_os_vasprintf(buf, fmt...) TEMPLATE - -#define xge_os_sprintf(buf, fmt, ...) TEMPLATE - -#define xge_os_timestamp(buf) TEMPLATE - -#define xge_os_println TEMPLATE - -/* -------------------- synchronization primitives -------------------------*/ - -/* - * Note: - * - * - use spin_lock in interrupts or in threads when there is no races - * with interrupt - * - use spin_lock_irqsave in threads if there is a race with interrupt - * - use spin_lock_irqsave for nested locks - */ - -/* - * Initialize the spin lock. - */ -#define xge_os_spin_lock_init(lockp, ctxh) TEMPLATE -/* - * Initialize the spin lock (IRQ version). - */ -#define xge_os_spin_lock_init_irq(lockp, ctxh) TEMPLATE -/* - * Destroy the lock. - */ -#define xge_os_spin_lock_destroy(lockp, ctxh) TEMPLATE - -/* - * Destroy the lock (IRQ version). - */ -#define xge_os_spin_lock_destroy_irq(lockp, ctxh) TEMPLATE -/* - * Acquire the lock. - */ -#define xge_os_spin_lock(lockp) TEMPLATE -/* - * Release the lock. - */ -#define xge_os_spin_unlock(lockp) TEMPLATE -/* - * Acquire the lock(IRQ version). - */ -#define xge_os_spin_lock_irq(lockp, flags) TEMPLATE -/* - * Release the lock(IRQ version). - */ -#define xge_os_spin_unlock_irq(lockp, flags) TEMPLATE -/* - * Write memory barrier. - */ -#define xge_os_wmb() TEMPLATE -/* - * Delay (in micro seconds). - */ -#define xge_os_udelay(us) TEMPLATE -/* - * Delay (in milli seconds). - */ -#define xge_os_mdelay(ms) TEMPLATE -/* - * Compare and exchange. - */ -#define xge_os_cmpxchg(targetp, cmp, newval) TEMPLATE - - - -/* ------------------------- misc primitives -------------------------------*/ - -#define xge_os_prefetch TEMPLATE -#define xge_os_prefetchw TEMPLATE -#define xge_os_bug(fmt...) TEMPLATE - -/* -------------------------- compiler stuffs ------------------------------*/ - -#define __xge_os_attr_cacheline_aligned TEMPLATE - -/* ---------------------- memory primitives --------------------------------*/ - -/** - * xge_os_malloc - Allocate non DMA-able memory. - * @pdev: Device context. Some OSs require device context to perform - * operations on memory. - * @size: Size to allocate. - * - * Allocate @size bytes of memory. This allocation can sleep, and - * therefore, and therefore it requires process context. In other words, - * xge_os_malloc() cannot be called from the interrupt context. - * Use xge_os_free() to free the allocated block. - * - * Returns: Pointer to allocated memory, NULL - on failure. - * - * See also: xge_os_free(). 
- */ -static inline void *xge_os_malloc(IN pci_dev_h pdev, - IN unsigned long size) -{ TEMPLATE; } - -/** - * xge_os_free - Free non DMA-able memory. - * @pdev: Device context. Some OSs require device context to perform - * operations on memory. - * @vaddr: Address of the allocated memory block. - * @size: Some OS's require to provide size on free - * - * Free the memory area obtained via xge_os_malloc(). - * This call may also sleep, and therefore it cannot be used inside - * interrupt. - * - * See also: xge_os_malloc(). - */ -static inline void xge_os_free(IN pci_dev_h pdev, - IN const void *vaddr, - IN unsigned long size) -{ TEMPLATE; } - -/** - * xge_os_vaddr - Get Virtual address for the given physical address. - * @pdev: Device context. Some OSs require device context to perform - * operations on memory. - * @vaddr: Physical Address of the memory block. - * @size: Some OS's require to provide size - * - * Get the virtual address for physical address. - * This call may also sleep, and therefore it cannot be used inside - * interrupt. - * - * See also: xge_os_malloc(). - */ -static inline void xge_os_vaddr(IN pci_dev_h pdev, - IN const void *vaddr, - IN unsigned long size) -{ TEMPLATE; } - -/** - * xge_os_dma_malloc - Allocate DMA-able memory. - * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. - * @size: Size (in bytes) to allocate. - * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, - * XGE_OS_DMA_STREAMING, - * XGE_OS_DMA_CONSISTENT - * Note that the last two flags are mutually exclusive. - * @p_dmah: Handle used to map the memory onto the corresponding device memory - * space. See xge_os_dma_map(). The handle is an out-parameter - * returned by the function. - * @p_dma_acch: One more DMA handle used subsequently to free the - * DMA object (via xge_os_dma_free()). - * Note that this and the previous handle have - * physical meaning for Solaris; on Windows and Linux the - * corresponding value will be simply a pointer to PCI device. - * The value is returned by this function. - * - * Allocate DMA-able contiguous memory block of the specified @size. - * This memory can be subsequently freed using xge_os_dma_free(). - * Note: can be used inside interrupt context. - * - * Returns: Pointer to allocated memory(DMA-able), NULL on failure. - * - */ -static inline void *xge_os_dma_malloc(IN pci_dev_h pdev, - IN unsigned long size, - IN int dma_flags, - OUT pci_dma_h *p_dmah, - OUT pci_dma_acc_h *p_dma_acch) -{ TEMPLATE; } - -/** - * xge_os_dma_free - Free previously allocated DMA-able memory. - * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. - * @vaddr: Virtual address of the DMA-able memory. - * @p_dma_acch: DMA handle used to free the resource. - * @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc(). - * - * Free DMA-able memory originally allocated by xge_os_dma_malloc(). - * Note: can be used inside interrupt. - * See also: xge_os_dma_malloc(). - */ -static inline void xge_os_dma_free (IN pci_dev_h pdev, - IN const void *vaddr, - IN pci_dma_acc_h *p_dma_acch, - IN pci_dma_h *p_dmah) -{ TEMPLATE; } - -/* ----------------------- io/pci/dma primitives ---------------------------*/ - -#define XGE_OS_DMA_DIR_TODEVICE TEMPLATE -#define XGE_OS_DMA_DIR_FROMDEVICE TEMPLATE -#define XGE_OS_DMA_DIR_BIDIRECTIONAL TEMPLATE - -/** - * xge_os_pci_read8 - Read one byte from device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. 
- * @where: Offset in the PCI configuration space. - * @val: Address of the result. - * - * Read byte value from the specified @regh PCI configuration space at the - * specified offset = @where. - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_read8(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u8 *val) -{ TEMPLATE; } - -/** - * xge_os_pci_write8 - Write one byte into device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. - * @where: Offset in the PCI configuration space. - * @val: Value to write. - * - * Write byte value into the specified PCI configuration space - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_write8(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u8 val) -{ TEMPLATE; } - -/** - * xge_os_pci_read16 - Read 16bit word from device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. - * @where: Offset in the PCI configuration space. - * @val: Address of the 16bit result. - * - * Read 16bit value from the specified PCI configuration space at the - * specified offset. - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_read16(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u16 *val) -{ TEMPLATE; } - -/** - * xge_os_pci_write16 - Write 16bit word into device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. - * @where: Offset in the PCI configuration space. - * @val: Value to write. - * - * Write 16bit value into the specified @offset in PCI - * configuration space. - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_write16(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u16 val) -{ TEMPLATE; } - -/** - * xge_os_pci_read32 - Read 32bit word from device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. - * @where: Offset in the PCI configuration space. - * @val: Address of 32bit result. - * - * Read 32bit value from the specified PCI configuration space at the - * specified offset. - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_read32(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u32 *val) -{ TEMPLATE; } - -/** - * xge_os_pci_write32 - Write 32bit word into device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. - * @cfgh: PCI configuration space handle. - * @where: Offset in the PCI configuration space. - * @val: Value to write. - * - * Write 32bit value into the specified @offset in PCI - * configuration space. - * Returns: 0 - success, non-zero - failure. - */ -static inline int xge_os_pci_write32(IN pci_dev_h pdev, - IN pci_cfg_h cfgh, - IN int where, - IN u32 val) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO.. - * @regh: PCI configuration space handle. - * @addr: Address in device memory space. - * - * Returns: 1 byte value read from the specified (mapped) memory space address. 
- */ -static inline u8 xge_os_pio_mem_read8(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_write64 - Write 1 byte into device memory mapped - * space. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO.. - * @regh: PCI configuration space handle. - * @val: Value to write. - * @addr: Address in device memory space. - * - * Write byte value into the specified (mapped) device memory space. - */ -static inline void xge_os_pio_mem_write8(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN u8 val, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_read16 - Read 16bit from device memory mapped space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @addr: Address in device memory space. - * - * Returns: 16bit value read from the specified (mapped) memory space address. - */ -static inline u16 xge_os_pio_mem_read16(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN void *addr) -{ -TEMPLATE; } - -/** - * xge_os_pio_mem_write16 - Write 16bit into device memory mapped space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @val: Value to write. - * @addr: Address in device memory space. - * - * Write 16bit value into the specified (mapped) device memory space. - */ -static inline void xge_os_pio_mem_write16(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN u16 val, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_read32 - Read 32bit from device memory mapped space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @addr: Address in device memory space. - * - * Returns: 32bit value read from the specified (mapped) memory space address. - */ -static inline u32 xge_os_pio_mem_read32(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_write32 - Write 32bit into device memory space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @val: Value to write. - * @addr: Address in device memory space. - * - * Write 32bit value into the specified (mapped) device memory space. - */ -static inline void xge_os_pio_mem_write32(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN u32 val, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_read64 - Read 64bit from device memory mapped space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @addr: Address in device memory space. - * - * Returns: 64bit value read from the specified (mapped) memory space address. - */ -static inline u64 xge_os_pio_mem_read64(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_pio_mem_write64 - Write 64bit into device memory space. - * @pdev: Device context. Some OSs require device context to perform - * PIO. - * @regh: PCI configuration space handle. - * @val: Value to write. - * @addr: Address in device memory space. - * - * Write 64bit value into the specified (mapped) device memory space. - */ -static inline void xge_os_pio_mem_write64(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN u64 val, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_flush_bridge - Flush the bridge. - * @pdev: Device context. Some OSs require device context to perform - * PIO. 
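Although this template file is being removed, the xge_os_pio_mem_*() accessors it documents remain the portable interface the HAL uses for BAR-mapped registers (implemented per OS in xge-osdep.h). A minimal sketch with a placeholder offset (illustrative only, not part of this patch):

#include <dev/nxge/include/xge-os-pal.h>

static u64
example_read_reg(pci_dev_h pdev, pci_reg_h regh, void *bar0, int offset)
{
	/* offset is a placeholder; real offsets come from the adapter
	 * register map headers. */
	return xge_os_pio_mem_read64(pdev, regh, (char *)bar0 + offset);
}

static void
example_write_reg(pci_dev_h pdev, pci_reg_h regh, void *bar0, int offset,
    u64 val)
{
	xge_os_pio_mem_write64(pdev, regh, val, (char *)bar0 + offset);
}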
- * @regh: PCI configuration space handle. - * @addr: Address in device memory space. - * - * Flush the bridge. - */ -static inline void xge_os_flush_bridge(IN pci_dev_h pdev, - IN pci_reg_h regh, - IN void *addr) -{ TEMPLATE; } - -/** - * xge_os_dma_map - Map DMA-able memory block to, or from, or - * to-and-from device. - * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. - * @dmah: DMA handle used to map the memory block. Obtained via - * xge_os_dma_malloc(). - * @vaddr: Virtual address of the DMA-able memory. - * @size: Size (in bytes) to be mapped. - * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) - * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, - * XGE_OS_DMA_STREAMING, - * XGE_OS_DMA_CONSISTENT - * Note that the last two flags are mutually exclusive. - * - * Map a single memory block. - * - * Returns: DMA address of the memory block, - * XGE_OS_INVALID_DMA_ADDR on failure. - * - * See also: xge_os_dma_malloc(), xge_os_dma_unmap(), - * xge_os_dma_sync(). - */ -static inline dma_addr_t xge_os_dma_map(IN pci_dev_h pdev, - IN pci_dma_h dmah, - IN void *vaddr, - IN size_t size, - IN int dir, - IN int dma_flags) -{ TEMPLATE; } - -/** - * xge_os_dma_unmap - Unmap DMA-able memory. - * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. - * @dmah: DMA handle used to map the memory block. Obtained via - * xge_os_dma_malloc(). - * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map(). - * @size: Size (in bytes) to be unmapped. - * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) - * - * Unmap a single DMA-able memory block that was previously mapped - * using xge_os_dma_map(). - * See also: xge_os_dma_malloc(), xge_os_dma_map(). - */ -static inline void xge_os_dma_unmap(IN pci_dev_h pdev, - IN pci_dma_h dmah, - IN dma_addr_t dma_addr, - IN size_t size, - IN int dir) -{ TEMPLATE; } - -/** - * xge_os_dma_sync - Synchronize mapped memory. - * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. - * @dmah: DMA handle used to map the memory block. Obtained via - * xge_os_dma_malloc(). - * @dma_addr: DMA address of the block. Obtained via xge_os_dma_map(). - * @dma_offset: Offset from start of the blocke. Used by Solaris only. - * @length: Size of the block. - * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) - * - * Make physical and CPU memory consistent for a single - * streaming mode DMA translation. - * This API compiles to NOP on cache-coherent platforms. - * On non cache-coherent platforms, depending on the direction - * of the "sync" operation, this API will effectively - * either invalidate CPU cache (that might contain old data), - * or flush CPU cache to update physical memory. - * See also: xge_os_dma_malloc(), xge_os_dma_map(), - * xge_os_dma_unmap(). 
- */ -static inline void xge_os_dma_sync(IN pci_dev_h pdev, - IN pci_dma_h dmah, - IN dma_addr_t dma_addr, - IN u64 dma_offset, - IN size_t length, - IN int dir) -{ TEMPLATE; } - -#endif /* XGE_OS_TEMPLATE_H */ diff --git a/sys/dev/nxge/include/xge-queue.h b/sys/dev/nxge/include/xge-queue.h index 6745888..c38ffb8 100644 --- a/sys/dev/nxge/include/xge-queue.h +++ b/sys/dev/nxge/include/xge-queue.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xge-queue.h - * - * Description: serialized event queue - * - * Created: 7 June 2004 - */ - #ifndef XGE_QUEUE_H #define XGE_QUEUE_H @@ -44,8 +36,8 @@ __EXTERN_BEGIN_DECLS -#define XGE_QUEUE_BUF_SIZE 0x1000 -#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16 +#define XGE_QUEUE_BUF_SIZE 0x1000 +#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16 /** * enum xge_queue_status_e - Enumerates return codes of the xge_queue @@ -61,11 +53,11 @@ __EXTERN_BEGIN_DECLS * and xge_queue_produce() APIs. */ typedef enum xge_queue_status_e { - XGE_QUEUE_OK = 0, - XGE_QUEUE_IS_FULL = 1, - XGE_QUEUE_IS_EMPTY = 2, - XGE_QUEUE_OUT_OF_MEMORY = 3, - XGE_QUEUE_NOT_ENOUGH_SPACE = 4 + XGE_QUEUE_OK = 0, + XGE_QUEUE_IS_FULL = 1, + XGE_QUEUE_IS_EMPTY = 2, + XGE_QUEUE_OUT_OF_MEMORY = 3, + XGE_QUEUE_NOT_ENOUGH_SPACE = 4 } xge_queue_status_e; typedef void* xge_queue_h; @@ -86,11 +78,11 @@ typedef void* xge_queue_h; * See also: xge_queue_t{}. */ typedef struct xge_queue_item_t { - xge_list_t item; - xge_hal_event_e event_type; - int data_size; - int is_critical; - void *context; + xge_list_t item; + xge_hal_event_e event_type; + int data_size; + int is_critical; + void *context; } xge_queue_item_t; /** @@ -114,7 +106,7 @@ typedef void (*xge_queued_f) (void *data, int event_type); * produce/consume operations. * @lock: Lock for queue operations(syncronization purpose). * @pages_initial:Number of pages to be initially allocated at the time - * of queue creation. + * of queue creation. * @pages_max: Max number of pages that can be allocated in the queue. * @pages_current: Number of pages currently allocated * @list_head: Points to the list of queue elements that are produced, but yet @@ -135,26 +127,26 @@ typedef void (*xge_queued_f) (void *data, int event_type); * See also: xge_queue_item_t{}. 
*/ typedef struct xge_queue_t { - void *start_ptr; - void *end_ptr; - void *head_ptr; - void *tail_ptr; - spinlock_t lock; - unsigned int pages_initial; - unsigned int pages_max; - unsigned int pages_current; - xge_list_t list_head; - pci_dev_h pdev; - pci_irq_h irqh; - xge_queued_f queued_func; - void *queued_data; - int has_critical_event; + void *start_ptr; + void *end_ptr; + void *head_ptr; + void *tail_ptr; + spinlock_t lock; + unsigned int pages_initial; + unsigned int pages_max; + unsigned int pages_current; + xge_list_t list_head; + pci_dev_h pdev; + pci_irq_h irqh; + xge_queued_f queued_func; + void *queued_data; + int has_critical_event; } xge_queue_t; /* ========================== PUBLIC API ================================= */ xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial, - int pages_max, xge_queued_f queued_func, void *queued_data); + int pages_max, xge_queued_f queued_func, void *queued_data); void xge_queue_destroy(xge_queue_h queueh); @@ -162,7 +154,7 @@ void* xge_queue_item_data(xge_queue_item_t *item); xge_queue_status_e xge_queue_produce(xge_queue_h queueh, int event_type, void *context, - int is_critical, const int data_size, void *data); + int is_critical, const int data_size, void *data); static inline xge_queue_status_e xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) { @@ -170,7 +162,7 @@ xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) { } xge_queue_status_e xge_queue_consume(xge_queue_h queueh, int data_max_size, - xge_queue_item_t *item); + xge_queue_item_t *item); void xge_queue_flush(xge_queue_h queueh); diff --git a/sys/dev/nxge/include/xgehal-channel.h b/sys/dev/nxge/include/xgehal-channel.h index 8d82530..d786649 100644 --- a/sys/dev/nxge/include/xgehal-channel.h +++ b/sys/dev/nxge/include/xgehal-channel.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-channel.h - * - * Description: HAL channel object functionality - * - * Created: 19 May 2004 - */ - #ifndef XGE_HAL_CHANNEL_H #define XGE_HAL_CHANNEL_H @@ -80,9 +72,9 @@ typedef enum xge_hal_channel_type_e { * Channel opening flags. Reserved for future usage. */ typedef enum xge_hal_channel_flag_e { - XGE_HAL_CHANNEL_FLAG_NONE = 0x0, - XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1, - XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2 + XGE_HAL_CHANNEL_FLAG_NONE = 0x0, + XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1, + XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2 } xge_hal_channel_flag_e; /** @@ -101,10 +93,10 @@ typedef enum xge_hal_channel_flag_e { * See also: xge_hal_channel_dtr_term_f{}. */ typedef enum xge_hal_dtr_state_e { - XGE_HAL_DTR_STATE_NONE = 0, - XGE_HAL_DTR_STATE_AVAIL = 1, - XGE_HAL_DTR_STATE_POSTED = 2, - XGE_HAL_DTR_STATE_FREED = 3 + XGE_HAL_DTR_STATE_NONE = 0, + XGE_HAL_DTR_STATE_AVAIL = 1, + XGE_HAL_DTR_STATE_POSTED = 2, + XGE_HAL_DTR_STATE_FREED = 3 } xge_hal_dtr_state_e; /** @@ -120,8 +112,8 @@ typedef enum xge_hal_dtr_state_e { * the memory (including DMA-able memory) used for channel operation. */ typedef enum xge_hal_channel_reopen_e { - XGE_HAL_CHANNEL_RESET_ONLY = 1, - XGE_HAL_CHANNEL_OC_NORMAL = 2 + XGE_HAL_CHANNEL_RESET_ONLY = 1, + XGE_HAL_CHANNEL_OC_NORMAL = 2 } xge_hal_channel_reopen_e; /** @@ -168,8 +160,8 @@ typedef enum xge_hal_channel_reopen_e { * xge_hal_ring_dtr_next_completed(), xge_hal_channel_dtr_term_f{}. 
*/ typedef xge_hal_status_e (*xge_hal_channel_callback_f) - (xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - u8 t_code, void *userdata); + (xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + u8 t_code, void *userdata); /** * function xge_hal_channel_dtr_init_f - Initialize descriptor callback. @@ -192,11 +184,11 @@ typedef xge_hal_status_e (*xge_hal_channel_callback_f) * See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_term_f{}. */ typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f) - (xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, - int index, - void *userdata, - xge_hal_channel_reopen_e reopen); + (xge_hal_channel_h channelh, + xge_hal_dtr_h dtrh, + int index, + void *userdata, + xge_hal_channel_reopen_e reopen); /** * function xge_hal_channel_dtr_term_f - Terminate descriptor callback. @@ -220,10 +212,10 @@ typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f) * See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_init_f{}. */ typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, - xge_hal_dtr_state_e state, - void *userdata, - xge_hal_channel_reopen_e reopen); + xge_hal_dtr_h dtrh, + xge_hal_dtr_state_e state, + void *userdata, + xge_hal_channel_reopen_e reopen); /** @@ -257,18 +249,15 @@ typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh, * Usage: See ex_open{}. */ typedef struct xge_hal_channel_attr_t { - xge_hal_channel_type_e type; -#ifdef XGEHAL_RNIC - u32 vp_id; -#endif - int post_qid; - int compl_qid; - xge_hal_channel_callback_f callback; - xge_hal_channel_dtr_init_f dtr_init; - xge_hal_channel_dtr_term_f dtr_term; - void *userdata; - int per_dtr_space; - xge_hal_channel_flag_e flags; + xge_hal_channel_type_e type; + int post_qid; + int compl_qid; + xge_hal_channel_callback_f callback; + xge_hal_channel_dtr_init_f dtr_init; + xge_hal_channel_dtr_term_f dtr_term; + void *userdata; + int per_dtr_space; + xge_hal_channel_flag_e flags; } xge_hal_channel_attr_t; /* @@ -353,73 +342,70 @@ typedef struct xge_hal_channel_attr_t { */ typedef struct { /* complete/free section */ - xge_list_t item; - xge_hal_channel_callback_f callback; - void **free_arr; - int length; - int free_length; + xge_list_t item; + xge_hal_channel_callback_f callback; + void **free_arr; + int length; + int free_length; #if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) || \ - defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) - spinlock_t free_lock; + defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) + spinlock_t free_lock; #endif - int compl_index; - unsigned int usage_cnt; - unsigned int poll_bytes; - int unused0; + int compl_index; + unsigned int usage_cnt; + unsigned int poll_bytes; /* reserve/post data path section */ + int terminating; #ifdef __XGE_WIN__ - int __xge_os_attr_cacheline_aligned - post_index; + int __xge_os_attr_cacheline_aligned + post_index; #else - int post_index - __xge_os_attr_cacheline_aligned; + int post_index + __xge_os_attr_cacheline_aligned; #endif - spinlock_t reserve_lock; - spinlock_t post_lock; + spinlock_t reserve_lock; + spinlock_t post_lock; - void **reserve_arr; - int reserve_length; - int reserve_threshold; - int reserve_top; + void **reserve_arr; + int reserve_length; + int reserve_threshold; + int reserve_top; int unused1; /* common section */ - xge_hal_device_h devh; + xge_hal_device_h devh; pci_dev_h pdev; - pci_reg_h regh0; - pci_reg_h regh1; - void *userdata; - void **work_arr; - void **saved_arr; - void **orig_arr; - 
xge_hal_stats_channel_info_t stats; + pci_reg_h regh0; + pci_reg_h regh1; + void *userdata; + void **work_arr; + void **saved_arr; + void **orig_arr; + xge_hal_stats_channel_info_t stats; /* slow section */ - xge_hal_channel_type_e type; -#ifdef XGEHAL_RNIC - u32 vp_id; -#endif - int post_qid; - int compl_qid; - xge_hal_channel_flag_e flags; - int reserve_initial; - int reserve_max; - int is_open; - int per_dtr_space; - xge_hal_channel_dtr_term_f dtr_term; - xge_hal_channel_dtr_init_f dtr_init; + xge_hal_channel_type_e type; + int post_qid; + int compl_qid; + xge_hal_channel_flag_e flags; + int reserve_initial; + int reserve_max; + int is_open; + int per_dtr_space; + xge_hal_channel_dtr_term_f dtr_term; + xge_hal_channel_dtr_init_f dtr_init; /* MSI stuff */ - u32 msi_msg; - u8 rti; - u8 tti; + u32 msi_msg; + u8 rti; + u8 tti; u16 unused2; /* MSI-X stuff */ - u64 msix_address; - u32 msix_data; - int msix_idx; - volatile int in_interrupt; - unsigned int magic; + u64 msix_address; + u32 msix_data; + int msix_idx; + volatile int in_interrupt; + unsigned int magic; #ifdef __XGE_WIN__ } __xge_os_attr_cacheline_aligned xge_hal_channel_t ; #else @@ -430,17 +416,14 @@ typedef struct { xge_hal_status_e __hal_channel_initialize(xge_hal_channel_h channelh, - xge_hal_channel_attr_t *attr, void **reserve_arr, - int reserve_initial, int reserve_max, int reserve_threshold); + xge_hal_channel_attr_t *attr, void **reserve_arr, + int reserve_initial, int reserve_max, int reserve_threshold); void __hal_channel_terminate(xge_hal_channel_h channelh); xge_hal_channel_t* __hal_channel_allocate(xge_hal_device_h devh, int post_qid, -#ifdef XGEHAL_RNIC - u32 vp_id, -#endif - xge_hal_channel_type_e type); + xge_hal_channel_type_e type); void __hal_channel_free(xge_hal_channel_t *channel); @@ -468,7 +451,7 @@ __hal_channel_dtr_dealloc(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int offset); + int offset); /* ========================== CHANNEL PUBLIC API ========================= */ @@ -483,7 +466,7 @@ xge_hal_channel_id(xge_hal_channel_h channelh); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment, - int copy_size); + int copy_size); #else /* XGE_FASTPATH_EXTERN */ #define __HAL_STATIC_CHANNEL static @@ -493,14 +476,14 @@ xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment, xge_hal_status_e xge_hal_channel_open(xge_hal_device_h hldev, xge_hal_channel_attr_t *attr, - xge_hal_channel_h *channel, - xge_hal_channel_reopen_e reopen); + xge_hal_channel_h *channel, + xge_hal_channel_reopen_e reopen); void xge_hal_channel_close(xge_hal_channel_h channelh, - xge_hal_channel_reopen_e reopen); + xge_hal_channel_reopen_e reopen); void xge_hal_channel_abort(xge_hal_channel_h channelh, - xge_hal_channel_reopen_e reopen); + xge_hal_channel_reopen_e reopen); __EXTERN_END_DECLS diff --git a/sys/dev/nxge/include/xgehal-config.h b/sys/dev/nxge/include/xgehal-config.h index c7bde29..c320b4a 100644 --- a/sys/dev/nxge/include/xgehal-config.h +++ b/sys/dev/nxge/include/xgehal-config.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-config.h - * - * Description: Xframe configuration. 
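For reference, the attribute block above is filled in and handed to xge_hal_channel_open() roughly as follows (illustrative only, not part of this patch; the completion stub, the channel-type constant, and XGE_HAL_OK are assumed from the wider HAL headers):

#include <dev/nxge/include/xgehal-channel.h>

static xge_hal_status_e
example_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, u8 t_code,
    void *userdata)
{
	/* Per-descriptor completion processing would go here. */
	return XGE_HAL_OK;
}

static xge_hal_status_e
example_open_channel(xge_hal_device_h hldev, void *drvdata,
    xge_hal_channel_h *channelp)
{
	xge_hal_channel_attr_t attr;

	attr.type          = XGE_HAL_CHANNEL_TYPE_RING;  /* assumed constant */
	attr.post_qid      = 0;
	attr.compl_qid     = 0;
	attr.callback      = example_compl;
	attr.dtr_init      = NULL;   /* or an xge_hal_channel_dtr_init_f */
	attr.dtr_term      = NULL;   /* or an xge_hal_channel_dtr_term_f */
	attr.userdata      = drvdata;
	attr.per_dtr_space = 0;      /* size of per-descriptor private area */
	attr.flags         = XGE_HAL_CHANNEL_FLAG_NONE;

	return xge_hal_channel_open(hldev, &attr, channelp,
	    XGE_HAL_CHANNEL_OC_NORMAL);
}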
- * - * Created: 14 May 2004 - */ - #ifndef XGE_HAL_CONFIG_H #define XGE_HAL_CONFIG_H @@ -43,14 +35,10 @@ __EXTERN_BEGIN_DECLS -#define XGE_HAL_DEFAULT_USE_HARDCODE -1 +#define XGE_HAL_DEFAULT_USE_HARDCODE -1 -#ifdef XGEHAL_RNIC -#define XGE_HAL_MAX_VIRTUAL_PATHS 17 -#else -#define XGE_HAL_MAX_VIRTUAL_PATHS 8 -#endif -#define XGE_HAL_MAX_INTR_PER_VP 4 +#define XGE_HAL_MAX_VIRTUAL_PATHS 8 +#define XGE_HAL_MAX_INTR_PER_VP 4 /** @@ -91,51 +79,51 @@ __EXTERN_BEGIN_DECLS */ typedef struct xge_hal_tti_config_t { - int enabled; -#define XGE_HAL_TTI_ENABLE 1 -#define XGE_HAL_TTI_DISABLE 0 + int enabled; +#define XGE_HAL_TTI_ENABLE 1 +#define XGE_HAL_TTI_DISABLE 0 /* Line utilization interrupts */ - int urange_a; -#define XGE_HAL_MIN_TX_URANGE_A 0 -#define XGE_HAL_MAX_TX_URANGE_A 100 + int urange_a; +#define XGE_HAL_MIN_TX_URANGE_A 0 +#define XGE_HAL_MAX_TX_URANGE_A 100 - int ufc_a; -#define XGE_HAL_MIN_TX_UFC_A 0 -#define XGE_HAL_MAX_TX_UFC_A 65535 + int ufc_a; +#define XGE_HAL_MIN_TX_UFC_A 0 +#define XGE_HAL_MAX_TX_UFC_A 65535 - int urange_b; -#define XGE_HAL_MIN_TX_URANGE_B 0 -#define XGE_HAL_MAX_TX_URANGE_B 100 + int urange_b; +#define XGE_HAL_MIN_TX_URANGE_B 0 +#define XGE_HAL_MAX_TX_URANGE_B 100 - int ufc_b; -#define XGE_HAL_MIN_TX_UFC_B 0 -#define XGE_HAL_MAX_TX_UFC_B 65535 + int ufc_b; +#define XGE_HAL_MIN_TX_UFC_B 0 +#define XGE_HAL_MAX_TX_UFC_B 65535 - int urange_c; -#define XGE_HAL_MIN_TX_URANGE_C 0 -#define XGE_HAL_MAX_TX_URANGE_C 100 + int urange_c; +#define XGE_HAL_MIN_TX_URANGE_C 0 +#define XGE_HAL_MAX_TX_URANGE_C 100 - int ufc_c; -#define XGE_HAL_MIN_TX_UFC_C 0 -#define XGE_HAL_MAX_TX_UFC_C 65535 + int ufc_c; +#define XGE_HAL_MIN_TX_UFC_C 0 +#define XGE_HAL_MAX_TX_UFC_C 65535 - int ufc_d; -#define XGE_HAL_MIN_TX_UFC_D 0 -#define XGE_HAL_MAX_TX_UFC_D 65535 + int ufc_d; +#define XGE_HAL_MIN_TX_UFC_D 0 +#define XGE_HAL_MAX_TX_UFC_D 65535 - int timer_val_us; -#define XGE_HAL_MIN_TX_TIMER_VAL 0 -#define XGE_HAL_MAX_TX_TIMER_VAL 65535 + int timer_val_us; +#define XGE_HAL_MIN_TX_TIMER_VAL 0 +#define XGE_HAL_MAX_TX_TIMER_VAL 65535 - int timer_ac_en; -#define XGE_HAL_MIN_TX_TIMER_AC_EN 0 -#define XGE_HAL_MAX_TX_TIMER_AC_EN 1 + int timer_ac_en; +#define XGE_HAL_MIN_TX_TIMER_AC_EN 0 +#define XGE_HAL_MAX_TX_TIMER_AC_EN 1 - int timer_ci_en; -#define XGE_HAL_MIN_TX_TIMER_CI_EN 0 -#define XGE_HAL_MAX_TX_TIMER_CI_EN 1 + int timer_ci_en; +#define XGE_HAL_MIN_TX_TIMER_CI_EN 0 +#define XGE_HAL_MAX_TX_TIMER_CI_EN 1 } xge_hal_tti_config_t; @@ -171,41 +159,41 @@ typedef struct xge_hal_tti_config_t { */ typedef struct xge_hal_rti_config_t { - int urange_a; -#define XGE_HAL_MIN_RX_URANGE_A 0 -#define XGE_HAL_MAX_RX_URANGE_A 127 + int urange_a; +#define XGE_HAL_MIN_RX_URANGE_A 0 +#define XGE_HAL_MAX_RX_URANGE_A 127 - int ufc_a; -#define XGE_HAL_MIN_RX_UFC_A 0 -#define XGE_HAL_MAX_RX_UFC_A 65535 + int ufc_a; +#define XGE_HAL_MIN_RX_UFC_A 0 +#define XGE_HAL_MAX_RX_UFC_A 65535 - int urange_b; -#define XGE_HAL_MIN_RX_URANGE_B 0 -#define XGE_HAL_MAX_RX_URANGE_B 127 + int urange_b; +#define XGE_HAL_MIN_RX_URANGE_B 0 +#define XGE_HAL_MAX_RX_URANGE_B 127 - int ufc_b; -#define XGE_HAL_MIN_RX_UFC_B 0 -#define XGE_HAL_MAX_RX_UFC_B 65535 + int ufc_b; +#define XGE_HAL_MIN_RX_UFC_B 0 +#define XGE_HAL_MAX_RX_UFC_B 65535 - int urange_c; -#define XGE_HAL_MIN_RX_URANGE_C 0 -#define XGE_HAL_MAX_RX_URANGE_C 127 + int urange_c; +#define XGE_HAL_MIN_RX_URANGE_C 0 +#define XGE_HAL_MAX_RX_URANGE_C 127 - int ufc_c; -#define XGE_HAL_MIN_RX_UFC_C 0 -#define XGE_HAL_MAX_RX_UFC_C 65535 + int ufc_c; +#define XGE_HAL_MIN_RX_UFC_C 0 
+#define XGE_HAL_MAX_RX_UFC_C 65535 - int ufc_d; -#define XGE_HAL_MIN_RX_UFC_D 0 -#define XGE_HAL_MAX_RX_UFC_D 65535 + int ufc_d; +#define XGE_HAL_MIN_RX_UFC_D 0 +#define XGE_HAL_MAX_RX_UFC_D 65535 - int timer_ac_en; -#define XGE_HAL_MIN_RX_TIMER_AC_EN 0 -#define XGE_HAL_MAX_RX_TIMER_AC_EN 1 + int timer_ac_en; +#define XGE_HAL_MIN_RX_TIMER_AC_EN 0 +#define XGE_HAL_MAX_RX_TIMER_AC_EN 1 - int timer_val_us; -#define XGE_HAL_MIN_RX_TIMER_VAL 0 -#define XGE_HAL_MAX_RX_TIMER_VAL 65535 + int timer_val_us; +#define XGE_HAL_MIN_RX_TIMER_VAL 0 +#define XGE_HAL_MAX_RX_TIMER_VAL 65535 } xge_hal_rti_config_t; @@ -234,36 +222,36 @@ typedef struct xge_hal_rti_config_t { * See also: xge_hal_fifo_config_t{} */ typedef struct xge_hal_fifo_queue_t { - int max; - int initial; -#define XGE_HAL_MIN_FIFO_QUEUE_LENGTH 2 -#define XGE_HAL_MAX_FIFO_QUEUE_LENGTH 8192 + int max; + int initial; +#define XGE_HAL_MIN_FIFO_QUEUE_LENGTH 2 +#define XGE_HAL_MAX_FIFO_QUEUE_LENGTH 8192 - int intr; -#define XGE_HAL_MIN_FIFO_QUEUE_INTR 0 -#define XGE_HAL_MAX_FIFO_QUEUE_INTR 1 + int intr; +#define XGE_HAL_MIN_FIFO_QUEUE_INTR 0 +#define XGE_HAL_MAX_FIFO_QUEUE_INTR 1 - int intr_vector; -#define XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR 0 -#define XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR 64 + int intr_vector; +#define XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR 0 +#define XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR 64 - int no_snoop_bits; -#define XGE_HAL_MIN_FIFO_QUEUE_NO_SNOOP_DISABLED 0 -#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_TXD 1 -#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_BUFFER 2 -#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_ALL 3 + int no_snoop_bits; +#define XGE_HAL_MIN_FIFO_QUEUE_NO_SNOOP_DISABLED 0 +#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_TXD 1 +#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_BUFFER 2 +#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_ALL 3 - int priority; -#define XGE_HAL_MIN_FIFO_PRIORITY 0 -#define XGE_HAL_MAX_FIFO_PRIORITY 63 + int priority; +#define XGE_HAL_MIN_FIFO_PRIORITY 0 +#define XGE_HAL_MAX_FIFO_PRIORITY 63 - int configured; -#define XGE_HAL_MIN_FIFO_CONFIGURED 0 -#define XGE_HAL_MAX_FIFO_CONFIGURED 1 + int configured; +#define XGE_HAL_MIN_FIFO_CONFIGURED 0 +#define XGE_HAL_MAX_FIFO_CONFIGURED 1 -#define XGE_HAL_MAX_FIFO_TTI_NUM 7 -#define XGE_HAL_MAX_FIFO_TTI_RING_0 56 - xge_hal_tti_config_t tti[XGE_HAL_MAX_FIFO_TTI_NUM]; +#define XGE_HAL_MAX_FIFO_TTI_NUM 7 +#define XGE_HAL_MAX_FIFO_TTI_RING_0 56 + xge_hal_tti_config_t tti[XGE_HAL_MAX_FIFO_TTI_NUM]; } xge_hal_fifo_queue_t; @@ -293,30 +281,30 @@ typedef struct xge_hal_fifo_queue_t { * See also: xge_hal_ring_queue_t{}. 
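For reference, a sketch of filling the transmit and receive interrupt-moderation blocks above; every value is a placeholder chosen inside the documented MIN/MAX bounds, and useful settings depend on the workload (illustrative only, not part of this patch):

static void
example_fill_intr_moderation(xge_hal_tti_config_t *tti,
    xge_hal_rti_config_t *rti)
{
	/* Transmit side. */
	tti->enabled      = XGE_HAL_TTI_ENABLE;
	tti->urange_a     = 10;      /* 0..100   */
	tti->ufc_a        = 4;       /* 0..65535 */
	tti->urange_b     = 20;
	tti->ufc_b        = 8;
	tti->urange_c     = 50;
	tti->ufc_c        = 16;
	tti->ufc_d        = 32;
	tti->timer_val_us = 8000;    /* 0..65535 */
	tti->timer_ac_en  = XGE_HAL_MAX_TX_TIMER_AC_EN;
	tti->timer_ci_en  = XGE_HAL_MIN_TX_TIMER_CI_EN;

	/* Receive side; urange_* is bounded by 127 here. */
	rti->urange_a     = 10;
	rti->ufc_a        = 1;
	rti->urange_b     = 20;
	rti->ufc_b        = 8;
	rti->urange_c     = 50;
	rti->ufc_c        = 16;
	rti->ufc_d        = 32;
	rti->timer_ac_en  = XGE_HAL_MAX_RX_TIMER_AC_EN;
	rti->timer_val_us = 100;     /* 0..65535 */
}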
*/ typedef struct xge_hal_fifo_config_t { - int max_frags; -#define XGE_HAL_MIN_FIFO_FRAGS 1 -#define XGE_HAL_MAX_FIFO_FRAGS 256 + int max_frags; +#define XGE_HAL_MIN_FIFO_FRAGS 1 +#define XGE_HAL_MAX_FIFO_FRAGS 256 - int reserve_threshold; -#define XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD 0 -#define XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD 8192 + int reserve_threshold; +#define XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD 0 +#define XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD 8192 - int memblock_size; -#define XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE 4096 -#define XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE 131072 + int memblock_size; +#define XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE 4096 +#define XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE 131072 - int alignment_size; -#define XGE_HAL_MIN_ALIGNMENT_SIZE 0 -#define XGE_HAL_MAX_ALIGNMENT_SIZE 65536 + int alignment_size; +#define XGE_HAL_MIN_ALIGNMENT_SIZE 0 +#define XGE_HAL_MAX_ALIGNMENT_SIZE 65536 - int max_aligned_frags; + int max_aligned_frags; /* range: (1, @max_frags) */ -#define XGE_HAL_MIN_FIFO_NUM 1 -#define XGE_HAL_MAX_FIFO_NUM_HERC 8 -#define XGE_HAL_MAX_FIFO_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1) -#define XGE_HAL_MAX_FIFO_NUM (XGE_HAL_MAX_VIRTUAL_PATHS) - xge_hal_fifo_queue_t queue[XGE_HAL_MAX_FIFO_NUM]; +#define XGE_HAL_MIN_FIFO_NUM 1 +#define XGE_HAL_MAX_FIFO_NUM_HERC 8 +#define XGE_HAL_MAX_FIFO_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1) +#define XGE_HAL_MAX_FIFO_NUM (XGE_HAL_MAX_VIRTUAL_PATHS) + xge_hal_fifo_queue_t queue[XGE_HAL_MAX_FIFO_NUM]; } xge_hal_fifo_config_t; /** @@ -326,9 +314,9 @@ typedef struct xge_hal_fifo_config_t { * @src: Port is Source (default Destination) */ typedef struct xge_hal_rts_port_t { - int num; - int udp; - int src; + int num; + int udp; + int src; } xge_hal_rts_port_t; /** @@ -371,7 +359,7 @@ typedef struct xge_hal_rts_port_t { * Rx descriptors. Any subset of 8 available rings can be * "configured". * @rts_mac_en: 1 - To enable Receive MAC address steering. - * 0 - To disable Receive MAC address steering. + * 0 - To disable Receive MAC address steering. * @rth_en: TBD * @rts_port_en: TBD * @rts_ports: TBD @@ -384,68 +372,68 @@ typedef struct xge_hal_rts_port_t { * See also: xge_hal_fifo_config_t{}. 
*/ typedef struct xge_hal_ring_queue_t { - int max; - int initial; -#define XGE_HAL_MIN_RING_QUEUE_BLOCKS 1 -#define XGE_HAL_MAX_RING_QUEUE_BLOCKS 64 + int max; + int initial; +#define XGE_HAL_MIN_RING_QUEUE_BLOCKS 1 +#define XGE_HAL_MAX_RING_QUEUE_BLOCKS 64 - int buffer_mode; -#define XGE_HAL_RING_QUEUE_BUFFER_MODE_1 1 -#define XGE_HAL_RING_QUEUE_BUFFER_MODE_2 2 -#define XGE_HAL_RING_QUEUE_BUFFER_MODE_3 3 -#define XGE_HAL_RING_QUEUE_BUFFER_MODE_5 5 + int buffer_mode; +#define XGE_HAL_RING_QUEUE_BUFFER_MODE_1 1 +#define XGE_HAL_RING_QUEUE_BUFFER_MODE_2 2 +#define XGE_HAL_RING_QUEUE_BUFFER_MODE_3 3 +#define XGE_HAL_RING_QUEUE_BUFFER_MODE_5 5 - int dram_size_mb; -#define XGE_HAL_MIN_RING_QUEUE_SIZE 0 -#define XGE_HAL_MAX_RING_QUEUE_SIZE_XENA 64 -#define XGE_HAL_MAX_RING_QUEUE_SIZE_HERC 32 + int dram_size_mb; +#define XGE_HAL_MIN_RING_QUEUE_SIZE 0 +#define XGE_HAL_MAX_RING_QUEUE_SIZE_XENA 64 +#define XGE_HAL_MAX_RING_QUEUE_SIZE_HERC 32 - int intr_vector; -#define XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR 0 -#define XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR 64 + int intr_vector; +#define XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR 0 +#define XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR 64 - int backoff_interval_us; -#define XGE_HAL_MIN_BACKOFF_INTERVAL_US 1 -#define XGE_HAL_MAX_BACKOFF_INTERVAL_US 125000 + int backoff_interval_us; +#define XGE_HAL_MIN_BACKOFF_INTERVAL_US 1 +#define XGE_HAL_MAX_BACKOFF_INTERVAL_US 125000 - int max_frm_len; -#define XGE_HAL_MIN_MAX_FRM_LEN -1 -#define XGE_HAL_MAX_MAX_FRM_LEN 9622 + int max_frm_len; +#define XGE_HAL_MIN_MAX_FRM_LEN -1 +#define XGE_HAL_MAX_MAX_FRM_LEN 9622 - int priority; -#define XGE_HAL_MIN_RING_PRIORITY 0 -#define XGE_HAL_MAX_RING_PRIORITY 7 + int priority; +#define XGE_HAL_MIN_RING_PRIORITY 0 +#define XGE_HAL_MAX_RING_PRIORITY 7 - int no_snoop_bits; -#define XGE_HAL_MIN_RING_QUEUE_NO_SNOOP_DISABLED 0 -#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_RXD 1 -#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_BUFFER 2 -#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_ALL 3 + int no_snoop_bits; +#define XGE_HAL_MIN_RING_QUEUE_NO_SNOOP_DISABLED 0 +#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_RXD 1 +#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_BUFFER 2 +#define XGE_HAL_MAX_RING_QUEUE_NO_SNOOP_ALL 3 - int indicate_max_pkts; -#define XGE_HAL_MIN_RING_INDICATE_MAX_PKTS 1 -#define XGE_HAL_MAX_RING_INDICATE_MAX_PKTS 65536 + int indicate_max_pkts; +#define XGE_HAL_MIN_RING_INDICATE_MAX_PKTS 1 +#define XGE_HAL_MAX_RING_INDICATE_MAX_PKTS 65536 - int configured; -#define XGE_HAL_MIN_RING_CONFIGURED 0 -#define XGE_HAL_MAX_RING_CONFIGURED 1 + int configured; +#define XGE_HAL_MIN_RING_CONFIGURED 0 +#define XGE_HAL_MAX_RING_CONFIGURED 1 - int rts_mac_en; -#define XGE_HAL_MIN_RING_RTS_MAC_EN 0 -#define XGE_HAL_MAX_RING_RTS_MAC_EN 1 + int rts_mac_en; +#define XGE_HAL_MIN_RING_RTS_MAC_EN 0 +#define XGE_HAL_MAX_RING_RTS_MAC_EN 1 - int rth_en; -#define XGE_HAL_MIN_RING_RTH_EN 0 -#define XGE_HAL_MAX_RING_RTH_EN 1 + int rth_en; +#define XGE_HAL_MIN_RING_RTH_EN 0 +#define XGE_HAL_MAX_RING_RTH_EN 1 - int rts_port_en; -#define XGE_HAL_MIN_RING_RTS_PORT_EN 0 -#define XGE_HAL_MAX_RING_RTS_PORT_EN 1 + int rts_port_en; +#define XGE_HAL_MIN_RING_RTS_PORT_EN 0 +#define XGE_HAL_MAX_RING_RTS_PORT_EN 1 -#define XGE_HAL_MAX_STEERABLE_PORTS 32 - xge_hal_rts_port_t rts_ports[XGE_HAL_MAX_STEERABLE_PORTS]; +#define XGE_HAL_MAX_STEERABLE_PORTS 32 + xge_hal_rts_port_t rts_ports[XGE_HAL_MAX_STEERABLE_PORTS]; - xge_hal_rti_config_t rti; + xge_hal_rti_config_t rti; } xge_hal_ring_queue_t; @@ -465,23 +453,23 @@ typedef struct xge_hal_ring_queue_t { */ typedef 
struct xge_hal_ring_config_t { - int memblock_size; -#define XGE_HAL_MIN_RING_MEMBLOCK_SIZE 4096 -#define XGE_HAL_MAX_RING_MEMBLOCK_SIZE 131072 + int memblock_size; +#define XGE_HAL_MIN_RING_MEMBLOCK_SIZE 4096 +#define XGE_HAL_MAX_RING_MEMBLOCK_SIZE 131072 - int scatter_mode; -#define XGE_HAL_RING_QUEUE_SCATTER_MODE_A 0 -#define XGE_HAL_RING_QUEUE_SCATTER_MODE_B 1 + int scatter_mode; +#define XGE_HAL_RING_QUEUE_SCATTER_MODE_A 0 +#define XGE_HAL_RING_QUEUE_SCATTER_MODE_B 1 - int strip_vlan_tag; -#define XGE_HAL_RING_DONOT_STRIP_VLAN_TAG 0 -#define XGE_HAL_RING_STRIP_VLAN_TAG 1 + int strip_vlan_tag; +#define XGE_HAL_RING_DONOT_STRIP_VLAN_TAG 0 +#define XGE_HAL_RING_STRIP_VLAN_TAG 1 -#define XGE_HAL_MIN_RING_NUM 1 -#define XGE_HAL_MAX_RING_NUM_HERC 8 -#define XGE_HAL_MAX_RING_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1) -#define XGE_HAL_MAX_RING_NUM (XGE_HAL_MAX_VIRTUAL_PATHS) - xge_hal_ring_queue_t queue[XGE_HAL_MAX_RING_NUM]; +#define XGE_HAL_MIN_RING_NUM 1 +#define XGE_HAL_MAX_RING_NUM_HERC 8 +#define XGE_HAL_MAX_RING_NUM_TITAN (XGE_HAL_MAX_VIRTUAL_PATHS - 1) +#define XGE_HAL_MAX_RING_NUM (XGE_HAL_MAX_VIRTUAL_PATHS) + xge_hal_ring_queue_t queue[XGE_HAL_MAX_RING_NUM]; } xge_hal_ring_config_t; @@ -520,103 +508,50 @@ typedef struct xge_hal_ring_config_t { * corresponding include file. */ typedef struct xge_hal_mac_config_t { - int media; -#define XGE_HAL_MIN_MEDIA 0 -#define XGE_HAL_MEDIA_SR 0 -#define XGE_HAL_MEDIA_SW 1 -#define XGE_HAL_MEDIA_LR 2 -#define XGE_HAL_MEDIA_LW 3 -#define XGE_HAL_MEDIA_ER 4 -#define XGE_HAL_MEDIA_EW 5 -#define XGE_HAL_MAX_MEDIA 5 - - int tmac_util_period; -#define XGE_HAL_MIN_TMAC_UTIL_PERIOD 0 -#define XGE_HAL_MAX_TMAC_UTIL_PERIOD 15 - - int rmac_util_period; -#define XGE_HAL_MIN_RMAC_UTIL_PERIOD 0 -#define XGE_HAL_MAX_RMAC_UTIL_PERIOD 15 - - int rmac_bcast_en; -#define XGE_HAL_MIN_RMAC_BCAST_EN 0 -#define XGE_HAL_MAX_RMAC_BCAST_EN 1 - - int rmac_pause_gen_en; -#define XGE_HAL_MIN_RMAC_PAUSE_GEN_EN 0 -#define XGE_HAL_MAX_RMAC_PAUSE_GEN_EN 1 - - int rmac_pause_rcv_en; -#define XGE_HAL_MIN_RMAC_PAUSE_RCV_EN 0 -#define XGE_HAL_MAX_RMAC_PAUSE_RCV_EN 1 - - int rmac_pause_time; -#define XGE_HAL_MIN_RMAC_HIGH_PTIME 16 -#define XGE_HAL_MAX_RMAC_HIGH_PTIME 65535 - - int mc_pause_threshold_q0q3; -#define XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3 0 -#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3 254 - - int mc_pause_threshold_q4q7; -#define XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7 0 -#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7 254 + int media; +#define XGE_HAL_MIN_MEDIA 0 +#define XGE_HAL_MEDIA_SR 0 +#define XGE_HAL_MEDIA_SW 1 +#define XGE_HAL_MEDIA_LR 2 +#define XGE_HAL_MEDIA_LW 3 +#define XGE_HAL_MEDIA_ER 4 +#define XGE_HAL_MEDIA_EW 5 +#define XGE_HAL_MAX_MEDIA 5 + + int tmac_util_period; +#define XGE_HAL_MIN_TMAC_UTIL_PERIOD 0 +#define XGE_HAL_MAX_TMAC_UTIL_PERIOD 15 + + int rmac_util_period; +#define XGE_HAL_MIN_RMAC_UTIL_PERIOD 0 +#define XGE_HAL_MAX_RMAC_UTIL_PERIOD 15 + + int rmac_bcast_en; +#define XGE_HAL_MIN_RMAC_BCAST_EN 0 +#define XGE_HAL_MAX_RMAC_BCAST_EN 1 + + int rmac_pause_gen_en; +#define XGE_HAL_MIN_RMAC_PAUSE_GEN_EN 0 +#define XGE_HAL_MAX_RMAC_PAUSE_GEN_EN 1 + + int rmac_pause_rcv_en; +#define XGE_HAL_MIN_RMAC_PAUSE_RCV_EN 0 +#define XGE_HAL_MAX_RMAC_PAUSE_RCV_EN 1 + + int rmac_pause_time; +#define XGE_HAL_MIN_RMAC_HIGH_PTIME 16 +#define XGE_HAL_MAX_RMAC_HIGH_PTIME 65535 + + int mc_pause_threshold_q0q3; +#define XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3 0 +#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3 254 + + int mc_pause_threshold_q4q7; +#define 
XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7 0 +#define XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7 254 } xge_hal_mac_config_t; -#ifdef XGEHAL_RNIC - -/* - * struct xge_hal_vp_config_t - Configuration of virtual path - * @vp_id: Virtual Path Id - * @vp_valid: Flag specifies if the configuration is valid - * @bitmap_intr_num: Interrupt Number associated with the bitmap - * @nce_oid_start: The start of the NCE ID range - * @nce_oid_end: The end of the NCE ID range - * @session_oid_start: The start of the Session ID range - * @session_oid_end: The end of the Session ID range - * @srq_oid_start: The start of the HSRQ ID range - * @srq_oid_end: The end of the SRQ ID range - * @cqrq_oid_start: The start of the CQRQ range - * @cqrq_oid_end: The end of the CQRQ range - * @umq_length: Length of up message queue - * @umq_int_ctrl: Interrupt control for up Message queue - * @umq_gen_compl: Generate completion for up message queue - * @dmq_length: Length of down message queue - * @dmq_int_ctrl: Interrupt control for down Message queue - * @dmq_gen_compl: Generate completion for up message queue - * - * This structure is used by the driver to pass the configuration parameters to - * configure Virtual Path. - */ -typedef struct xge_hal_vp_config_t{ - u32 vp_id; - u32 vp_valid; -#define XGE_HAL_VP_CONFIG_INVALID 0 -#define XGE_HAL_VP_CONFIG_VALID 1 - - int bitmap_intr_num; -#define XGE_HAL_BITMAP_INTR_NUM_MIN 0 -#define XGE_HAL_BITMAP_INTR_NUM_MAX 3 - - u32 nce_oid_start; - u32 nce_oid_end; - u32 session_oid_start; - u32 session_oid_end; - u32 srq_oid_start; - u32 srq_oid_end; - u32 cqrq_oid_start; - u32 cqrq_oid_end; - u32 umq_length; - u32 umq_int_ctrl; - u32 umq_gen_compl; - u32 dmq_length; - u32 dmq_int_ctrl; - u32 dmq_gen_compl; -}xge_hal_vp_config_t; - -#endif - /** * struct xge_hal_device_config_t - Device configuration. * @mtu: Current mtu size. @@ -668,9 +603,9 @@ typedef struct xge_hal_vp_config_t{ * @rth_bucket_size: RTH bucket width (in bits). For valid range please see * xge_hal_device_config_t{} in the driver sources. * @rth_spdm_en: Enable Receive Traffic Hashing(RTH) using SPDM(Socket Pair - * Direct Match). + * Direct Match). * @rth_spdm_use_l4: Set to 1, if the L4 ports are used in the calculation of - * hash value in the RTH SPDM based steering. + * hash value in the RTH SPDM based steering. * @rxufca_intr_thres: (TODO) * @rxufca_lo_lim: (TODO) * @rxufca_hi_lim: (TODO) @@ -686,7 +621,7 @@ typedef struct xge_hal_vp_config_t{ * stable in order for the adapter to declare "LINK UP". * The enumerated settings (see Xframe-II UG) are: * 0 ........... instantaneous - * 1 ........... 500 ´s + * 1 ........... 500 ³s * 2 ........... 1 ms * 3 ........... 64 ms * 4 ........... 256 ms @@ -716,9 +651,9 @@ typedef struct xge_hal_vp_config_t{ * automatic adapter refill operations. * @refill_threshold_low:This field provides a hysteresis lower bound for * automatic adapter refill operations. - * @eol_policy:This field sets the policy for handling the end of list condition.
- * 2'b00 - When EOL is reached,poll until last block wrapper size is no longer 0.
- * 2'b01 - Send UMQ message when EOL is reached.
+ * @eol_policy:This field sets the policy for handling the end of list condition. + * 2'b00 - When EOL is reached,poll until last block wrapper size is no longer 0. + * 2'b01 - Send UMQ message when EOL is reached. * 2'b1x - Poll until the poll_count_max is reached and if still EOL,send UMQ message * @eol_poll_count_max:sets the maximum number of times the queue manager will poll for * a non-zero block wrapper before giving up and sending a UMQ message @@ -746,205 +681,170 @@ typedef struct xge_hal_vp_config_t{ * xge_hal_mac_config_t{}. */ typedef struct xge_hal_device_config_t { - int mtu; -#define XGE_HAL_MIN_INITIAL_MTU XGE_HAL_MIN_MTU -#define XGE_HAL_MAX_INITIAL_MTU XGE_HAL_MAX_MTU - - int isr_polling_cnt; -#define XGE_HAL_MIN_ISR_POLLING_CNT 0 -#define XGE_HAL_MAX_ISR_POLLING_CNT 65536 - - int latency_timer; -#define XGE_HAL_USE_BIOS_DEFAULT_LATENCY -1 -#define XGE_HAL_MIN_LATENCY_TIMER 8 -#define XGE_HAL_MAX_LATENCY_TIMER 255 - - int napi_weight; -#define XGE_HAL_DEF_NAPI_WEIGHT 64 - - int max_splits_trans; -#define XGE_HAL_USE_BIOS_DEFAULT_SPLITS -1 -#define XGE_HAL_ONE_SPLIT_TRANSACTION 0 -#define XGE_HAL_TWO_SPLIT_TRANSACTION 1 -#define XGE_HAL_THREE_SPLIT_TRANSACTION 2 -#define XGE_HAL_FOUR_SPLIT_TRANSACTION 3 -#define XGE_HAL_EIGHT_SPLIT_TRANSACTION 4 -#define XGE_HAL_TWELVE_SPLIT_TRANSACTION 5 -#define XGE_HAL_SIXTEEN_SPLIT_TRANSACTION 6 -#define XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION 7 - - int mmrb_count; -#define XGE_HAL_DEFAULT_BIOS_MMRB_COUNT -1 -#define XGE_HAL_MIN_MMRB_COUNT 0 /* 512b */ -#define XGE_HAL_MAX_MMRB_COUNT 3 /* 4k */ - - int shared_splits; -#define XGE_HAL_MIN_SHARED_SPLITS 0 -#define XGE_HAL_MAX_SHARED_SPLITS 31 - - int stats_refresh_time_sec; -#define XGE_HAL_STATS_REFRESH_DISABLE 0 -#define XGE_HAL_MIN_STATS_REFRESH_TIME 1 -#define XGE_HAL_MAX_STATS_REFRESH_TIME 300 - - int pci_freq_mherz; -#define XGE_HAL_PCI_FREQ_MHERZ_33 33 -#define XGE_HAL_PCI_FREQ_MHERZ_66 66 -#define XGE_HAL_PCI_FREQ_MHERZ_100 100 -#define XGE_HAL_PCI_FREQ_MHERZ_133 133 -#define XGE_HAL_PCI_FREQ_MHERZ_266 266 - - int intr_mode; -#define XGE_HAL_INTR_MODE_IRQLINE 0 -#define XGE_HAL_INTR_MODE_MSI 1 -#define XGE_HAL_INTR_MODE_MSIX 2 - - int sched_timer_us; -#define XGE_HAL_SCHED_TIMER_DISABLED 0 -#define XGE_HAL_SCHED_TIMER_MIN 0 -#define XGE_HAL_SCHED_TIMER_MAX 0xFFFFF - - int sched_timer_one_shot; -#define XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE 0 -#define XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE 1 - - xge_hal_ring_config_t ring; - xge_hal_mac_config_t mac; - xge_hal_fifo_config_t fifo; - - int dump_on_serr; -#define XGE_HAL_DUMP_ON_SERR_DISABLE 0 -#define XGE_HAL_DUMP_ON_SERR_ENABLE 1 - - int dump_on_eccerr; -#define XGE_HAL_DUMP_ON_ECCERR_DISABLE 0 -#define XGE_HAL_DUMP_ON_ECCERR_ENABLE 1 - - int dump_on_parityerr; -#define XGE_HAL_DUMP_ON_PARITYERR_DISABLE 0 -#define XGE_HAL_DUMP_ON_PARITYERR_ENABLE 1 - - int rth_en; -#define XGE_HAL_RTH_DISABLE 0 -#define XGE_HAL_RTH_ENABLE 1 - - int rth_bucket_size; -#define XGE_HAL_MIN_RTH_BUCKET_SIZE 1 -#define XGE_HAL_MAX_RTH_BUCKET_SIZE 8 - - int rth_spdm_en; -#define XGE_HAL_RTH_SPDM_DISABLE 0 -#define XGE_HAL_RTH_SPDM_ENABLE 1 - - int rth_spdm_use_l4; -#define XGE_HAL_RTH_SPDM_USE_L4 1 - - int rxufca_intr_thres; -#define XGE_HAL_RXUFCA_INTR_THRES_MIN 1 -#define XGE_HAL_RXUFCA_INTR_THRES_MAX 4096 - - int rxufca_lo_lim; -#define XGE_HAL_RXUFCA_LO_LIM_MIN 1 -#define XGE_HAL_RXUFCA_LO_LIM_MAX 16 - - int rxufca_hi_lim; -#define XGE_HAL_RXUFCA_HI_LIM_MIN 1 -#define XGE_HAL_RXUFCA_HI_LIM_MAX 256 - - int rxufca_lbolt_period; -#define 
XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN 1 -#define XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX 1024 - - int link_valid_cnt; -#define XGE_HAL_LINK_VALID_CNT_MIN 0 -#define XGE_HAL_LINK_VALID_CNT_MAX 127 - - int link_retry_cnt; -#define XGE_HAL_LINK_RETRY_CNT_MIN 0 -#define XGE_HAL_LINK_RETRY_CNT_MAX 127 - - int link_stability_period; -#define XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD 2 /* 1ms */ -#define XGE_HAL_MIN_LINK_STABILITY_PERIOD 0 /* instantaneous */ -#define XGE_HAL_MAX_LINK_STABILITY_PERIOD 7 /* 2s */ - - int device_poll_millis; -#define XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS 1000 -#define XGE_HAL_MIN_DEVICE_POLL_MILLIS 1 -#define XGE_HAL_MAX_DEVICE_POLL_MILLIS 100000 - - int no_isr_events; -#define XGE_HAL_NO_ISR_EVENTS_MIN 0 -#define XGE_HAL_NO_ISR_EVENTS_MAX 1 - - int lro_sg_size; -#define XGE_HAL_LRO_DEFAULT_SG_SIZE 10 -#define XGE_HAL_LRO_MIN_SG_SIZE 1 -#define XGE_HAL_LRO_MAX_SG_SIZE 64 - - int lro_frm_len; -#define XGE_HAL_LRO_DEFAULT_FRM_LEN 65536 -#define XGE_HAL_LRO_MIN_FRM_LEN 4096 -#define XGE_HAL_LRO_MAX_FRM_LEN 65536 - - int bimodal_interrupts; -#define XGE_HAL_BIMODAL_INTR_MIN -1 -#define XGE_HAL_BIMODAL_INTR_MAX 1 + int mtu; +#define XGE_HAL_MIN_INITIAL_MTU XGE_HAL_MIN_MTU +#define XGE_HAL_MAX_INITIAL_MTU XGE_HAL_MAX_MTU + + int isr_polling_cnt; +#define XGE_HAL_MIN_ISR_POLLING_CNT 0 +#define XGE_HAL_MAX_ISR_POLLING_CNT 65536 + + int latency_timer; +#define XGE_HAL_USE_BIOS_DEFAULT_LATENCY -1 +#define XGE_HAL_MIN_LATENCY_TIMER 8 +#define XGE_HAL_MAX_LATENCY_TIMER 255 + + int napi_weight; +#define XGE_HAL_DEF_NAPI_WEIGHT 64 + + int max_splits_trans; +#define XGE_HAL_USE_BIOS_DEFAULT_SPLITS -1 +#define XGE_HAL_ONE_SPLIT_TRANSACTION 0 +#define XGE_HAL_TWO_SPLIT_TRANSACTION 1 +#define XGE_HAL_THREE_SPLIT_TRANSACTION 2 +#define XGE_HAL_FOUR_SPLIT_TRANSACTION 3 +#define XGE_HAL_EIGHT_SPLIT_TRANSACTION 4 +#define XGE_HAL_TWELVE_SPLIT_TRANSACTION 5 +#define XGE_HAL_SIXTEEN_SPLIT_TRANSACTION 6 +#define XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION 7 + + int mmrb_count; +#define XGE_HAL_DEFAULT_BIOS_MMRB_COUNT -1 +#define XGE_HAL_MIN_MMRB_COUNT 0 /* 512b */ +#define XGE_HAL_MAX_MMRB_COUNT 3 /* 4k */ + + int shared_splits; +#define XGE_HAL_MIN_SHARED_SPLITS 0 +#define XGE_HAL_MAX_SHARED_SPLITS 31 + + int stats_refresh_time_sec; +#define XGE_HAL_STATS_REFRESH_DISABLE 0 +#define XGE_HAL_MIN_STATS_REFRESH_TIME 1 +#define XGE_HAL_MAX_STATS_REFRESH_TIME 300 + + int pci_freq_mherz; +#define XGE_HAL_PCI_FREQ_MHERZ_33 33 +#define XGE_HAL_PCI_FREQ_MHERZ_66 66 +#define XGE_HAL_PCI_FREQ_MHERZ_100 100 +#define XGE_HAL_PCI_FREQ_MHERZ_133 133 +#define XGE_HAL_PCI_FREQ_MHERZ_266 266 + + int intr_mode; +#define XGE_HAL_INTR_MODE_IRQLINE 0 +#define XGE_HAL_INTR_MODE_MSI 1 +#define XGE_HAL_INTR_MODE_MSIX 2 + + int sched_timer_us; +#define XGE_HAL_SCHED_TIMER_DISABLED 0 +#define XGE_HAL_SCHED_TIMER_MIN 0 +#define XGE_HAL_SCHED_TIMER_MAX 0xFFFFF + + int sched_timer_one_shot; +#define XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE 0 +#define XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE 1 + + xge_hal_ring_config_t ring; + xge_hal_mac_config_t mac; + xge_hal_fifo_config_t fifo; + + int dump_on_serr; +#define XGE_HAL_DUMP_ON_SERR_DISABLE 0 +#define XGE_HAL_DUMP_ON_SERR_ENABLE 1 + + int dump_on_eccerr; +#define XGE_HAL_DUMP_ON_ECCERR_DISABLE 0 +#define XGE_HAL_DUMP_ON_ECCERR_ENABLE 1 + + int dump_on_parityerr; +#define XGE_HAL_DUMP_ON_PARITYERR_DISABLE 0 +#define XGE_HAL_DUMP_ON_PARITYERR_ENABLE 1 + + int rth_en; +#define XGE_HAL_RTH_DISABLE 0 +#define XGE_HAL_RTH_ENABLE 1 + + int rth_bucket_size; +#define XGE_HAL_MIN_RTH_BUCKET_SIZE 1 +#define 
XGE_HAL_MAX_RTH_BUCKET_SIZE 8 + + int rth_spdm_en; +#define XGE_HAL_RTH_SPDM_DISABLE 0 +#define XGE_HAL_RTH_SPDM_ENABLE 1 + + int rth_spdm_use_l4; +#define XGE_HAL_RTH_SPDM_USE_L4 1 + + int rxufca_intr_thres; +#define XGE_HAL_RXUFCA_INTR_THRES_MIN 1 +#define XGE_HAL_RXUFCA_INTR_THRES_MAX 4096 + + int rxufca_lo_lim; +#define XGE_HAL_RXUFCA_LO_LIM_MIN 1 +#define XGE_HAL_RXUFCA_LO_LIM_MAX 16 + + int rxufca_hi_lim; +#define XGE_HAL_RXUFCA_HI_LIM_MIN 1 +#define XGE_HAL_RXUFCA_HI_LIM_MAX 256 + + int rxufca_lbolt_period; +#define XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN 1 +#define XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX 1024 + + int link_valid_cnt; +#define XGE_HAL_LINK_VALID_CNT_MIN 0 +#define XGE_HAL_LINK_VALID_CNT_MAX 127 + + int link_retry_cnt; +#define XGE_HAL_LINK_RETRY_CNT_MIN 0 +#define XGE_HAL_LINK_RETRY_CNT_MAX 127 + + int link_stability_period; +#define XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD 2 /* 1ms */ +#define XGE_HAL_MIN_LINK_STABILITY_PERIOD 0 /* instantaneous */ +#define XGE_HAL_MAX_LINK_STABILITY_PERIOD 7 /* 2s */ + + int device_poll_millis; +#define XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS 1000 +#define XGE_HAL_MIN_DEVICE_POLL_MILLIS 1 +#define XGE_HAL_MAX_DEVICE_POLL_MILLIS 100000 + + int no_isr_events; +#define XGE_HAL_NO_ISR_EVENTS_MIN 0 +#define XGE_HAL_NO_ISR_EVENTS_MAX 1 + + int lro_sg_size; +#define XGE_HAL_LRO_DEFAULT_SG_SIZE 10 +#define XGE_HAL_LRO_MIN_SG_SIZE 1 +#define XGE_HAL_LRO_MAX_SG_SIZE 64 + + int lro_frm_len; +#define XGE_HAL_LRO_DEFAULT_FRM_LEN 65536 +#define XGE_HAL_LRO_MIN_FRM_LEN 4096 +#define XGE_HAL_LRO_MAX_FRM_LEN 65536 + + int bimodal_interrupts; +#define XGE_HAL_BIMODAL_INTR_MIN -1 +#define XGE_HAL_BIMODAL_INTR_MAX 1 + + int bimodal_timer_lo_us; +#define XGE_HAL_BIMODAL_TIMER_LO_US_MIN 1 +#define XGE_HAL_BIMODAL_TIMER_LO_US_MAX 127 + + int bimodal_timer_hi_us; +#define XGE_HAL_BIMODAL_TIMER_HI_US_MIN 128 +#define XGE_HAL_BIMODAL_TIMER_HI_US_MAX 65535 - int bimodal_timer_lo_us; -#define XGE_HAL_BIMODAL_TIMER_LO_US_MIN 1 -#define XGE_HAL_BIMODAL_TIMER_LO_US_MAX 127 + int rts_mac_en; +#define XGE_HAL_RTS_MAC_DISABLE 0 +#define XGE_HAL_RTS_MAC_ENABLE 1 - int bimodal_timer_hi_us; -#define XGE_HAL_BIMODAL_TIMER_HI_US_MIN 128 -#define XGE_HAL_BIMODAL_TIMER_HI_US_MAX 65535 + int rts_qos_en; +#define XGE_HAL_RTS_QOS_DISABLE 0 +#define XGE_HAL_RTS_QOS_ENABLE 1 - int rts_mac_en; -#define XGE_HAL_RTS_MAC_DISABLE 0 -#define XGE_HAL_RTS_MAC_ENABLE 1 - - int rts_qos_en; -#define XGE_HAL_RTS_QOS_DISABLE 0 -#define XGE_HAL_RTS_QOS_ENABLE 1 - - int rts_port_en; -#define XGE_HAL_RTS_PORT_DISABLE 0 -#define XGE_HAL_RTS_PORT_ENABLE 1 - -#ifdef XGEHAL_RNIC - - xge_hal_vp_config_t vp_config[XGE_HAL_MAX_VIRTUAL_PATHS]; - - int max_cqe_groups; -#define XGE_HAL_MAX_CQE_GROUPS_MIN 1 -#define XGE_HAL_MAX_CQE_GROUPS_MAX 16 - - int max_num_wqe_od_groups; -#define XGE_HAL_MAX_NUM_OD_GROUPS_MIN 1 -#define XGE_HAL_MAX_NUM_OD_GROUPS_MAX 16 - - int no_wqe_threshold; -#define XGE_HAL_NO_WQE_THRESHOLD_MIN 1 -#define XGE_HAL_NO_WQE_THRESHOLD_MAX 16 - - int refill_threshold_high; -#define XGE_HAL_REFILL_THRESHOLD_HIGH_MIN 1 -#define XGE_HAL_REFILL_THRESHOLD_HIGH_MAX 16 - - int refill_threshold_low; -#define XGE_HAL_REFILL_THRESHOLD_LOW_MIN 1 -#define XGE_HAL_REFILL_THRESHOLD_LOW_MAX 16 - - int ack_blk_limit; -#define XGE_HAL_ACK_BLOCK_LIMIT_MIN 1 -#define XGE_HAL_ACK_BLOCK_LIMIT_MAX 16 - - int poll_or_doorbell; -#define XGE_HAL_POLL_OR_DOORBELL_POLL 1 -#define XGE_HAL_POLL_OR_DOORBELL_DOORBELL 0 - - -#endif + int rts_port_en; +#define XGE_HAL_RTS_PORT_DISABLE 0 +#define XGE_HAL_RTS_PORT_ENABLE 1 } 
xge_hal_device_config_t; @@ -971,23 +871,23 @@ typedef struct xge_hal_device_config_t { * See also: xge_hal_device_poll() */ typedef struct xge_hal_driver_config_t { - int queue_size_initial; -#define XGE_HAL_MIN_QUEUE_SIZE_INITIAL 1 -#define XGE_HAL_MAX_QUEUE_SIZE_INITIAL 16 + int queue_size_initial; +#define XGE_HAL_MIN_QUEUE_SIZE_INITIAL 1 +#define XGE_HAL_MAX_QUEUE_SIZE_INITIAL 16 - int queue_size_max; -#define XGE_HAL_MIN_QUEUE_SIZE_MAX 1 -#define XGE_HAL_MAX_QUEUE_SIZE_MAX 16 + int queue_size_max; +#define XGE_HAL_MIN_QUEUE_SIZE_MAX 1 +#define XGE_HAL_MAX_QUEUE_SIZE_MAX 16 #ifdef XGE_TRACE_INTO_CIRCULAR_ARR - int tracebuf_size; -#define XGE_HAL_MIN_CIRCULAR_ARR 4096 -#define XGE_HAL_MAX_CIRCULAR_ARR 1048576 -#define XGE_HAL_DEF_CIRCULAR_ARR XGE_OS_HOST_PAGE_SIZE - - int tracebuf_timestamp_en; -#define XGE_HAL_MIN_TIMESTAMP_EN 0 -#define XGE_HAL_MAX_TIMESTAMP_EN 1 + int tracebuf_size; +#define XGE_HAL_MIN_CIRCULAR_ARR 4096 +#define XGE_HAL_MAX_CIRCULAR_ARR 1048576 +#define XGE_HAL_DEF_CIRCULAR_ARR XGE_OS_HOST_PAGE_SIZE + + int tracebuf_timestamp_en; +#define XGE_HAL_MIN_TIMESTAMP_EN 0 +#define XGE_HAL_MAX_TIMESTAMP_EN 1 #endif } xge_hal_driver_config_t; diff --git a/sys/dev/nxge/include/xgehal-device.h b/sys/dev/nxge/include/xgehal-device.h index 22bc792..31d99b4 100644 --- a/sys/dev/nxge/include/xgehal-device.h +++ b/sys/dev/nxge/include/xgehal-device.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-device.h - * - * Description: HAL device object functionality - * - * Created: 14 May 2004 - */ - #ifndef XGE_HAL_DEVICE_H #define XGE_HAL_DEVICE_H @@ -45,18 +37,6 @@ #include <dev/nxge/include/xgehal-channel.h> #include <dev/nxge/include/xgehal-stats.h> #include <dev/nxge/include/xgehal-ring.h> -#ifdef XGEHAL_RNIC -#include "xgehal-common-regs.h" -#include "xgehal-pcicfg-mgmt-regs.h" -#include "xgehal-mrpcim-regs.h" -#include "xgehal-srpcim-regs.h" -#include "xgehal-vpath-regs.h" -#include "xgehal-bitmap.h" -#include "xgehal-virtualpath.h" -#include "xgehal-lbwrapper.h" -#include "xgehal-blockpool.h" -#include "xgehal-regpool.h" -#endif __EXTERN_BEGIN_DECLS @@ -65,18 +45,18 @@ __EXTERN_BEGIN_DECLS #define XGE_HAL_CARD_HERC_VPD_ADDR 0x80 #define XGE_HAL_VPD_READ_COMPLETE 0x80 #define XGE_HAL_VPD_BUFFER_SIZE 128 -#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500 -#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500 -#define XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS 500 -#define XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS 50 -#define XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS 250 -#define XGE_HAL_DEVICE_SPDM_READY_WAIT_MAX_MILLIS 250 /* TODO */ - -#define XGE_HAL_MAGIC 0x12345678 -#define XGE_HAL_DEAD 0xDEADDEAD +#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500 +#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500 +#define XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS 500 +#define XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS 50 +#define XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS 250 +#define XGE_HAL_DEVICE_SPDM_READY_WAIT_MAX_MILLIS 250 /* TODO */ + +#define XGE_HAL_MAGIC 0x12345678 +#define XGE_HAL_DEAD 0xDEADDEAD #define XGE_HAL_DUMP_BUF_SIZE 0x4000 -#define XGE_HAL_LRO_MAX_BUCKETS 32 +#define XGE_HAL_LRO_MAX_BUCKETS 32 /** * enum xge_hal_card_e - Xframe adapter type. @@ -92,10 +72,10 @@ __EXTERN_BEGIN_DECLS * See also: xge_hal_device_check_id(). 
*/ typedef enum xge_hal_card_e { - XGE_HAL_CARD_UNKNOWN = 0, - XGE_HAL_CARD_XENA = 1, - XGE_HAL_CARD_HERC = 2, - XGE_HAL_CARD_TITAN = 3, + XGE_HAL_CARD_UNKNOWN = 0, + XGE_HAL_CARD_XENA = 1, + XGE_HAL_CARD_HERC = 2, + XGE_HAL_CARD_TITAN = 3, } xge_hal_card_e; /** @@ -113,15 +93,15 @@ typedef enum xge_hal_card_e { * mapped memories. Also, includes a pointer to OS-specific PCI device object. */ typedef struct xge_hal_device_attr_t { - pci_reg_h regh0; - pci_reg_h regh1; - pci_reg_h regh2; - char *bar0; - char *bar1; - char *bar2; - pci_irq_h irqh; - pci_cfg_h cfgh; - pci_dev_h pdev; + pci_reg_h regh0; + pci_reg_h regh1; + pci_reg_h regh2; + char *bar0; + char *bar1; + char *bar2; + pci_irq_h irqh; + pci_cfg_h cfgh; + pci_dev_h pdev; } xge_hal_device_attr_t; /** @@ -140,96 +120,96 @@ typedef enum xge_hal_device_link_state_e { /** * enum xge_hal_pci_mode_e - PIC bus speed and mode specific enumeration. - * @XGE_HAL_PCI_33MHZ_MODE: 33 MHZ pci mode. - * @XGE_HAL_PCI_66MHZ_MODE: 66 MHZ pci mode. - * @XGE_HAL_PCIX_M1_66MHZ_MODE: PCIX M1 66MHZ mode. - * @XGE_HAL_PCIX_M1_100MHZ_MODE: PCIX M1 100MHZ mode. - * @XGE_HAL_PCIX_M1_133MHZ_MODE: PCIX M1 133MHZ mode. - * @XGE_HAL_PCIX_M2_66MHZ_MODE: PCIX M2 66MHZ mode. - * @XGE_HAL_PCIX_M2_100MHZ_MODE: PCIX M2 100MHZ mode. - * @XGE_HAL_PCIX_M2_133MHZ_MODE: PCIX M3 133MHZ mode. - * @XGE_HAL_PCIX_M1_RESERVED: PCIX M1 reserved mode. - * @XGE_HAL_PCIX_M1_66MHZ_NS: PCIX M1 66MHZ mode not supported. - * @XGE_HAL_PCIX_M1_100MHZ_NS: PCIX M1 100MHZ mode not supported. - * @XGE_HAL_PCIX_M1_133MHZ_NS: PCIX M1 133MHZ not supported. - * @XGE_HAL_PCIX_M2_RESERVED: PCIX M2 reserved. - * @XGE_HAL_PCIX_533_RESERVED: PCIX 533 reserved. - * @XGE_HAL_PCI_BASIC_MODE: PCI basic mode, XENA specific value. - * @XGE_HAL_PCIX_BASIC_MODE: PCIX basic mode, XENA specific value. - * @XGE_HAL_PCI_INVALID_MODE: Invalid PCI or PCIX mode. + * @XGE_HAL_PCI_33MHZ_MODE: 33 MHZ pci mode. + * @XGE_HAL_PCI_66MHZ_MODE: 66 MHZ pci mode. + * @XGE_HAL_PCIX_M1_66MHZ_MODE: PCIX M1 66MHZ mode. + * @XGE_HAL_PCIX_M1_100MHZ_MODE: PCIX M1 100MHZ mode. + * @XGE_HAL_PCIX_M1_133MHZ_MODE: PCIX M1 133MHZ mode. + * @XGE_HAL_PCIX_M2_66MHZ_MODE: PCIX M2 66MHZ mode. + * @XGE_HAL_PCIX_M2_100MHZ_MODE: PCIX M2 100MHZ mode. + * @XGE_HAL_PCIX_M2_133MHZ_MODE: PCIX M3 133MHZ mode. + * @XGE_HAL_PCIX_M1_RESERVED: PCIX M1 reserved mode. + * @XGE_HAL_PCIX_M1_66MHZ_NS: PCIX M1 66MHZ mode not supported. + * @XGE_HAL_PCIX_M1_100MHZ_NS: PCIX M1 100MHZ mode not supported. + * @XGE_HAL_PCIX_M1_133MHZ_NS: PCIX M1 133MHZ not supported. + * @XGE_HAL_PCIX_M2_RESERVED: PCIX M2 reserved. + * @XGE_HAL_PCIX_533_RESERVED: PCIX 533 reserved. + * @XGE_HAL_PCI_BASIC_MODE: PCI basic mode, XENA specific value. + * @XGE_HAL_PCIX_BASIC_MODE: PCIX basic mode, XENA specific value. + * @XGE_HAL_PCI_INVALID_MODE: Invalid PCI or PCIX mode. 
* */ typedef enum xge_hal_pci_mode_e { - XGE_HAL_PCI_33MHZ_MODE = 0x0, - XGE_HAL_PCI_66MHZ_MODE = 0x1, - XGE_HAL_PCIX_M1_66MHZ_MODE = 0x2, - XGE_HAL_PCIX_M1_100MHZ_MODE = 0x3, - XGE_HAL_PCIX_M1_133MHZ_MODE = 0x4, - XGE_HAL_PCIX_M2_66MHZ_MODE = 0x5, - XGE_HAL_PCIX_M2_100MHZ_MODE = 0x6, - XGE_HAL_PCIX_M2_133MHZ_MODE = 0x7, - XGE_HAL_PCIX_M1_RESERVED = 0x8, - XGE_HAL_PCIX_M1_66MHZ_NS = 0xA, - XGE_HAL_PCIX_M1_100MHZ_NS = 0xB, - XGE_HAL_PCIX_M1_133MHZ_NS = 0xC, - XGE_HAL_PCIX_M2_RESERVED = 0xD, - XGE_HAL_PCIX_533_RESERVED = 0xE, - XGE_HAL_PCI_BASIC_MODE = 0x10, - XGE_HAL_PCIX_BASIC_MODE = 0x11, - XGE_HAL_PCI_INVALID_MODE = 0x12, + XGE_HAL_PCI_33MHZ_MODE = 0x0, + XGE_HAL_PCI_66MHZ_MODE = 0x1, + XGE_HAL_PCIX_M1_66MHZ_MODE = 0x2, + XGE_HAL_PCIX_M1_100MHZ_MODE = 0x3, + XGE_HAL_PCIX_M1_133MHZ_MODE = 0x4, + XGE_HAL_PCIX_M2_66MHZ_MODE = 0x5, + XGE_HAL_PCIX_M2_100MHZ_MODE = 0x6, + XGE_HAL_PCIX_M2_133MHZ_MODE = 0x7, + XGE_HAL_PCIX_M1_RESERVED = 0x8, + XGE_HAL_PCIX_M1_66MHZ_NS = 0xA, + XGE_HAL_PCIX_M1_100MHZ_NS = 0xB, + XGE_HAL_PCIX_M1_133MHZ_NS = 0xC, + XGE_HAL_PCIX_M2_RESERVED = 0xD, + XGE_HAL_PCIX_533_RESERVED = 0xE, + XGE_HAL_PCI_BASIC_MODE = 0x10, + XGE_HAL_PCIX_BASIC_MODE = 0x11, + XGE_HAL_PCI_INVALID_MODE = 0x12, } xge_hal_pci_mode_e; /** * enum xge_hal_pci_bus_frequency_e - PCI bus frequency enumeration. - * @XGE_HAL_PCI_BUS_FREQUENCY_33MHZ: PCI bus frequency 33MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_250MHZ: PCI bus frequency 250MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ - * @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value. + * @XGE_HAL_PCI_BUS_FREQUENCY_33MHZ: PCI bus frequency 33MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_250MHZ: PCI bus frequency 250MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ + * @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value. * */ typedef enum xge_hal_pci_bus_frequency_e { - XGE_HAL_PCI_BUS_FREQUENCY_33MHZ = 33, - XGE_HAL_PCI_BUS_FREQUENCY_66MHZ = 66, - XGE_HAL_PCI_BUS_FREQUENCY_100MHZ = 100, - XGE_HAL_PCI_BUS_FREQUENCY_133MHZ = 133, - XGE_HAL_PCI_BUS_FREQUENCY_200MHZ = 200, - XGE_HAL_PCI_BUS_FREQUENCY_250MHZ = 250, - XGE_HAL_PCI_BUS_FREQUENCY_266MHZ = 266, - XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN = 0 + XGE_HAL_PCI_BUS_FREQUENCY_33MHZ = 33, + XGE_HAL_PCI_BUS_FREQUENCY_66MHZ = 66, + XGE_HAL_PCI_BUS_FREQUENCY_100MHZ = 100, + XGE_HAL_PCI_BUS_FREQUENCY_133MHZ = 133, + XGE_HAL_PCI_BUS_FREQUENCY_200MHZ = 200, + XGE_HAL_PCI_BUS_FREQUENCY_250MHZ = 250, + XGE_HAL_PCI_BUS_FREQUENCY_266MHZ = 266, + XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN = 0 } xge_hal_pci_bus_frequency_e; /** * enum xge_hal_pci_bus_width_e - PCI bus width enumeration. - * @XGE_HAL_PCI_BUS_WIDTH_64BIT: 64 bit bus width. - * @XGE_HAL_PCI_BUS_WIDTH_32BIT: 32 bit bus width. + * @XGE_HAL_PCI_BUS_WIDTH_64BIT: 64 bit bus width. + * @XGE_HAL_PCI_BUS_WIDTH_32BIT: 32 bit bus width. * @XGE_HAL_PCI_BUS_WIDTH_UNKNOWN: unknown bus width. 
* */ typedef enum xge_hal_pci_bus_width_e { - XGE_HAL_PCI_BUS_WIDTH_64BIT = 0, - XGE_HAL_PCI_BUS_WIDTH_32BIT = 1, - XGE_HAL_PCI_BUS_WIDTH_UNKNOWN = 2, + XGE_HAL_PCI_BUS_WIDTH_64BIT = 0, + XGE_HAL_PCI_BUS_WIDTH_32BIT = 1, + XGE_HAL_PCI_BUS_WIDTH_UNKNOWN = 2, } xge_hal_pci_bus_width_e; #if defined (XGE_HAL_CONFIG_LRO) -#define IP_TOTAL_LENGTH_OFFSET 2 -#define IP_FAST_PATH_HDR_MASK 0x45 -#define TCP_FAST_PATH_HDR_MASK1 0x50 -#define TCP_FAST_PATH_HDR_MASK2 0x10 -#define TCP_FAST_PATH_HDR_MASK3 0x18 -#define IP_SOURCE_ADDRESS_OFFSET 12 -#define IP_DESTINATION_ADDRESS_OFFSET 16 -#define TCP_DESTINATION_PORT_OFFSET 2 -#define TCP_SOURCE_PORT_OFFSET 0 -#define TCP_DATA_OFFSET_OFFSET 12 -#define TCP_WINDOW_OFFSET 14 -#define TCP_SEQUENCE_NUMBER_OFFSET 4 -#define TCP_ACKNOWLEDGEMENT_NUMBER_OFFSET 8 +#define IP_TOTAL_LENGTH_OFFSET 2 +#define IP_FAST_PATH_HDR_MASK 0x45 +#define TCP_FAST_PATH_HDR_MASK1 0x50 +#define TCP_FAST_PATH_HDR_MASK2 0x10 +#define TCP_FAST_PATH_HDR_MASK3 0x18 +#define IP_SOURCE_ADDRESS_OFFSET 12 +#define IP_DESTINATION_ADDRESS_OFFSET 16 +#define TCP_DESTINATION_PORT_OFFSET 2 +#define TCP_SOURCE_PORT_OFFSET 0 +#define TCP_DATA_OFFSET_OFFSET 12 +#define TCP_WINDOW_OFFSET 14 +#define TCP_SEQUENCE_NUMBER_OFFSET 4 +#define TCP_ACKNOWLEDGEMENT_NUMBER_OFFSET 8 typedef struct tcplro { u16 source; @@ -263,43 +243,43 @@ typedef struct iplro { typedef struct lro { /* non-linear: contains scatter-gather list of xframe-mapped received buffers */ - OS_NETSTACK_BUF os_buf; - OS_NETSTACK_BUF os_buf_end; + OS_NETSTACK_BUF os_buf; + OS_NETSTACK_BUF os_buf_end; /* link layer header of the first frame; remains intack throughout the processing */ - u8 *ll_hdr; + u8 *ll_hdr; /* IP header - gets _collapsed_ */ - iplro_t *ip_hdr; + iplro_t *ip_hdr; /* transport header - gets _collapsed_ */ - tcplro_t *tcp_hdr; + tcplro_t *tcp_hdr; /* Next tcp sequence number */ - u32 tcp_next_seq_num; + u32 tcp_next_seq_num; /* Current tcp seq & ack */ - u32 tcp_seq_num; - u32 tcp_ack_num; + u32 tcp_seq_num; + u32 tcp_ack_num; /* total number of accumulated (so far) frames */ - int sg_num; + int sg_num; /* total data length */ - int total_length; + int total_length; /* receive side hash value, available from Hercules */ - u32 rth_value; + u32 rth_value; /* In use */ - u8 in_use; + u8 in_use; /* Total length of the fragments clubbed with the inital frame */ - u32 frags_len; + u32 frags_len; /* LRO frame contains time stamp, if (ts_off != -1) */ - int ts_off; - + int ts_off; + } lro_t; #endif @@ -323,9 +303,9 @@ typedef struct xge_hal_spdm_entry_t { #if defined(XGE_HAL_CONFIG_LRO) typedef struct { - lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS]; - int lro_next_idx; - lro_t *lro_recent; + lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS]; + int lro_next_idx; + lro_t *lro_recent; } xge_hal_lro_desc_t; #endif /* @@ -334,8 +314,8 @@ typedef struct { * Represents vpd capabilty structure */ typedef struct xge_hal_vpd_data_t { - u8 product_name[XGE_HAL_VPD_LENGTH]; - u8 serial_num[XGE_HAL_VPD_LENGTH]; + u8 product_name[XGE_HAL_VPD_LENGTH]; + u8 serial_num[XGE_HAL_VPD_LENGTH]; } xge_hal_vpd_data_t; /* @@ -344,82 +324,75 @@ typedef struct xge_hal_vpd_data_t { * HAL device object. Represents Xframe. 
*/ typedef struct { - unsigned int magic; - pci_reg_h regh0; - pci_reg_h regh1; - pci_reg_h regh2; - char *bar0; - char *isrbar0; - char *bar1; - char *bar2; - pci_irq_h irqh; - pci_cfg_h cfgh; - pci_dev_h pdev; - xge_hal_pci_config_t pci_config_space; - xge_hal_pci_config_t pci_config_space_bios; - xge_hal_device_config_t config; - xge_list_t free_channels; - xge_list_t fifo_channels; - xge_list_t ring_channels; -#ifdef XGEHAL_RNIC - __hal_bitmap_entry_t bitmap_table[XGE_HAL_MAX_BITMAP_BITS]; - __hal_virtualpath_t virtual_paths[XGE_HAL_MAX_VIRTUAL_PATHS]; - __hal_blockpool_t block_pool; - __hal_regpool_t reg_pool; -#endif - volatile int is_initialized; - volatile int terminating; - xge_hal_stats_t stats; - macaddr_t macaddr[1]; - xge_queue_h queueh; - volatile int mcast_refcnt; - int is_promisc; - volatile xge_hal_device_link_state_e link_state; - void *upper_layer_info; - xge_hal_device_attr_t orig_attr; - u16 device_id; - u8 revision; - int msi_enabled; - int hw_is_initialized; - u64 inject_serr; - u64 inject_ecc; - u8 inject_bad_tcode; - int inject_bad_tcode_for_chan_type; - int reset_needed_after_close; - int tti_enabled; - xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM]; - int bimodal_timer_val_us; - int bimodal_urange_a_en; - int bimodal_intr_cnt; - char *spdm_mem_base; - u16 spdm_max_entries; - xge_hal_spdm_entry_t **spdm_table; - spinlock_t spdm_lock; - u32 msi_mask; + unsigned int magic; + pci_reg_h regh0; + pci_reg_h regh1; + pci_reg_h regh2; + char *bar0; + char *isrbar0; + char *bar1; + char *bar2; + pci_irq_h irqh; + pci_cfg_h cfgh; + pci_dev_h pdev; + xge_hal_pci_config_t pci_config_space; + xge_hal_pci_config_t pci_config_space_bios; + xge_hal_device_config_t config; + xge_list_t free_channels; + xge_list_t fifo_channels; + xge_list_t ring_channels; + volatile int is_initialized; + volatile int terminating; + xge_hal_stats_t stats; + macaddr_t macaddr[1]; + xge_queue_h queueh; + volatile int mcast_refcnt; + int is_promisc; + volatile xge_hal_device_link_state_e link_state; + void *upper_layer_info; + xge_hal_device_attr_t orig_attr; + u16 device_id; + u8 revision; + int msi_enabled; + int hw_is_initialized; + u64 inject_serr; + u64 inject_ecc; + u8 inject_bad_tcode; + int inject_bad_tcode_for_chan_type; + int reset_needed_after_close; + int tti_enabled; + xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM]; + int bimodal_timer_val_us; + int bimodal_urange_a_en; + int bimodal_intr_cnt; + char *spdm_mem_base; + u16 spdm_max_entries; + xge_hal_spdm_entry_t **spdm_table; + spinlock_t spdm_lock; #if defined(XGE_HAL_CONFIG_LRO) - xge_hal_lro_desc_t lro_desc[XGE_HAL_MAX_RING_NUM]; + xge_hal_lro_desc_t lro_desc[XGE_HAL_MAX_RING_NUM]; #endif - spinlock_t xena_post_lock; + spinlock_t xena_post_lock; /* bimodal workload stats */ - int irq_workload_rxd[XGE_HAL_MAX_RING_NUM]; - int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM]; - int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM]; - int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM]; - int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM]; - int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM]; - - int mtu_first_time_set; - u64 rxufca_lbolt; - u64 rxufca_lbolt_time; - u64 rxufca_intr_thres; + int irq_workload_rxd[XGE_HAL_MAX_RING_NUM]; + int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM]; + int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM]; + int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM]; + int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM]; + int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM]; + + int mtu_first_time_set; + u64 rxufca_lbolt; + u64 rxufca_lbolt_time; + u64 
rxufca_intr_thres; char* dump_buf; - xge_hal_pci_mode_e pci_mode; + xge_hal_pci_mode_e pci_mode; xge_hal_pci_bus_frequency_e bus_frequency; - xge_hal_pci_bus_width_e bus_width; + xge_hal_pci_bus_width_e bus_width; xge_hal_vpd_data_t vpd_data; - volatile int in_poll; - u64 msix_vector_table[XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR]; + volatile int in_poll; + u64 msix_vector_table[XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR]; } xge_hal_device_t; @@ -445,13 +418,13 @@ __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason); xge_hal_status_e __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, - u16 spdm_entry, u64 *spdm_line_val); + u16 spdm_entry, u64 *spdm_line_val); void __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, - void *addr); + void *addr); void __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, - void *addr); + void *addr); void __hal_device_get_vpd_data(xge_hal_device_t *hldev); xge_hal_status_e @@ -483,7 +456,7 @@ __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason); xge_hal_status_e __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, int op, u64 mask, - int max_millis); + int max_millis); xge_hal_status_e __hal_device_rts_mac_configure(xge_hal_device_t *hldev); @@ -501,20 +474,20 @@ __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag); void __hal_device_msix_intr_endis(xge_hal_device_t *hldev, - xge_hal_channel_t *channel, int flag); + xge_hal_channel_t *channel, int flag); /* =========================== PUBLIC API ================================= */ unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev, - unsigned int time_ival); + unsigned int time_ival); xge_hal_status_e xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, - u32 itable_size); + u32 itable_size); void xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type, - u16 bucket_size); + u16 bucket_size); void xge_hal_rts_rth_init(xge_hal_device_t *hldev); @@ -539,6 +512,7 @@ xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index); int xge_hal_reinitialize_hw(xge_hal_device_t * hldev); +xge_hal_status_e xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev); /** * xge_hal_device_rti_reconfigure * @hldev: Hal Device @@ -602,7 +576,7 @@ xge_hal_device_in_poll(xge_hal_device_h devh) static inline void xge_hal_device_inject_ecc(xge_hal_device_h devh, u64 err_reg) { - ((xge_hal_device_t*)devh)->inject_ecc = err_reg; + ((xge_hal_device_t*)devh)->inject_ecc = err_reg; } @@ -622,7 +596,7 @@ xge_hal_device_inject_ecc(xge_hal_device_h devh, u64 err_reg) static inline void xge_hal_device_inject_serr(xge_hal_device_h devh, u64 err_reg) { - ((xge_hal_device_t*)devh)->inject_serr = err_reg; + ((xge_hal_device_t*)devh)->inject_serr = err_reg; } @@ -645,11 +619,11 @@ xge_hal_device_inject_serr(xge_hal_device_h devh, u64 err_reg) static inline void xge_hal_device_inject_bad_tcode(xge_hal_device_h devh, int chan_type, u8 t_code) { - ((xge_hal_device_t*)devh)->inject_bad_tcode_for_chan_type = chan_type; - ((xge_hal_device_t*)devh)->inject_bad_tcode = t_code; + ((xge_hal_device_t*)devh)->inject_bad_tcode_for_chan_type = chan_type; + ((xge_hal_device_t*)devh)->inject_bad_tcode = t_code; } -void xge_hal_device_msi_enable(xge_hal_device_h devh); +void xge_hal_device_msi_enable(xge_hal_device_h devh); /* * xge_hal_device_msi_mode - Is MSI enabled? 
@@ -753,7 +727,7 @@ static inline xge_hal_status_e xge_hal_device_mtu_check(xge_hal_device_t *hldev, int new_mtu) { if ((new_mtu < XGE_HAL_MIN_MTU) || (new_mtu > XGE_HAL_MAX_MTU)) { - return XGE_HAL_ERR_INVALID_MTU_SIZE; + return XGE_HAL_ERR_INVALID_MTU_SIZE; } return XGE_HAL_OK; @@ -766,20 +740,20 @@ void xge_hal_device_bcast_disable(xge_hal_device_h devh); void xge_hal_device_terminating(xge_hal_device_h devh); xge_hal_status_e xge_hal_device_initialize(xge_hal_device_t *hldev, - xge_hal_device_attr_t *attr, xge_hal_device_config_t *config); + xge_hal_device_attr_t *attr, xge_hal_device_config_t *config); void xge_hal_device_terminate(xge_hal_device_t *hldev); xge_hal_status_e xge_hal_device_reset(xge_hal_device_t *hldev); xge_hal_status_e xge_hal_device_macaddr_get(xge_hal_device_t *hldev, - int index, macaddr_t *macaddr); + int index, macaddr_t *macaddr); xge_hal_status_e xge_hal_device_macaddr_set(xge_hal_device_t *hldev, - int index, macaddr_t macaddr); + int index, macaddr_t macaddr); xge_hal_status_e xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, - int index); + int index); int xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted); @@ -804,14 +778,14 @@ xge_hal_status_e xge_hal_device_disable(xge_hal_device_t *hldev); xge_hal_status_e xge_hal_device_enable(xge_hal_device_t *hldev); xge_hal_status_e xge_hal_device_handle_tcode(xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, - u8 t_code); + xge_hal_dtr_h dtrh, + u8 t_code); xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh, - xge_hal_device_link_state_e *ls); + xge_hal_device_link_state_e *ls); void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, - int one_shot); + int one_shot); void xge_hal_device_poll(xge_hal_device_h devh); @@ -821,18 +795,18 @@ int xge_hal_device_is_slot_freeze(xge_hal_device_h devh); xge_hal_status_e xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, - xge_hal_pci_bus_frequency_e *bus_frequency, - xge_hal_pci_bus_width_e *bus_width); + xge_hal_pci_bus_frequency_e *bus_frequency, + xge_hal_pci_bus_width_e *bus_width); xge_hal_status_e xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, - xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, - u8 is_tcp, u8 is_ipv4, u8 tgt_queue); + xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, + u8 is_tcp, u8 is_ipv4, u8 tgt_queue); xge_hal_status_e xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, - xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, - u8 is_tcp, u8 is_ipv4); + xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, + u8 is_tcp, u8 is_ipv4); xge_hal_status_e xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index); @@ -850,7 +824,7 @@ u64 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg); /* Some function protoypes for MSI implementation. 
*/ xge_hal_status_e xge_hal_channel_msi_set (xge_hal_channel_h channelh, int msi, - u32 msg_val); + u32 msg_val); void xge_hal_mask_msi(xge_hal_device_t *hldev); @@ -869,6 +843,9 @@ xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id); #if defined(XGE_HAL_CONFIG_LRO) xge_hal_status_e xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev); + +void +xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev); #endif #if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_DEVICE) @@ -910,7 +887,7 @@ xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0); __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh, - char *bar1); + char *bar1); __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_mask_tx(xge_hal_device_t *hldev); @@ -972,44 +949,41 @@ __hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_lro_capable(u8 *buffer, iplro_t **ip, tcplro_t **tcp, - xge_hal_dtr_info_t *ext_info); + xge_hal_dtr_info_t *ext_info); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_get_lro_session(u8 *eth_hdr, iplro_t *ip, tcplro_t *tcp, lro_t **lro, - xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, - xge_hal_lro_desc_t *ring_lro, lro_t **lro_end3); + xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, + xge_hal_lro_desc_t *ring_lro, lro_t **lro_end3); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_lro_under_optimal_thresh(iplro_t *ip, tcplro_t *tcp, lro_t *lro, - xge_hal_device_t *hldev); + xge_hal_device_t *hldev); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_collapse_ip_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro, - xge_hal_device_t *hldev); + xge_hal_device_t *hldev); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_collapse_tcp_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro, - xge_hal_device_t *hldev); + xge_hal_device_t *hldev); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_append_lro(iplro_t *ip, tcplro_t **tcp, u32 *seg_len, lro_t *lro, - xge_hal_device_t *hldev); + xge_hal_device_t *hldev); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp, - u32 *seglen, lro_t **p_lro, - xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, - lro_t **lro_end3); + u32 *seglen, lro_t **p_lro, + xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, + lro_t **lro_end3); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen, - lro_t **lro, xge_hal_dtr_info_t *ext_info, - xge_hal_device_t *hldev, lro_t **lro_end3); + lro_t **lro, xge_hal_dtr_info_t *ext_info, + xge_hal_device_t *hldev, lro_t **lro_end3); -void -xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev); - -__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * +__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * @@ -1017,11 +991,11 @@ xge_hal_lro_get_next_session(xge_hal_device_t *hldev); __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro, - xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, - int slot, u32 tcp_seg_len, int ts_off); + xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, + int slot, u32 tcp_seg_len, int ts_off); __HAL_STATIC_CHANNEL 
__HAL_INLINE_CHANNEL int -__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro); +__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro); #endif #else /* XGE_FASTPATH_EXTERN */ diff --git a/sys/dev/nxge/include/xgehal-driver.h b/sys/dev/nxge/include/xgehal-driver.h index e669368..507031b 100644 --- a/sys/dev/nxge/include/xgehal-driver.h +++ b/sys/dev/nxge/include/xgehal-driver.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-driver.h - * - * Description: HAL driver object functionality - * - * Created: 14 May 2004 - */ - #ifndef XGE_HAL_DRIVER_H #define XGE_HAL_DRIVER_H @@ -47,7 +39,7 @@ __EXTERN_BEGIN_DECLS /* maximum number of events consumed in a syncle poll() cycle */ -#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5 +#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5 /** @@ -112,7 +104,7 @@ typedef void (*xge_uld_link_down_f) (void *userdata); * xge_hal_device_private_set(), xge_hal_driver_initialize(). */ typedef void (*xge_uld_crit_err_f) (void *userdata, xge_hal_event_e type, - u64 ext_data); + u64 ext_data); /** * function xge_uld_event_queued_f - Event-enqueued notification @@ -216,15 +208,15 @@ typedef void (*xge_uld_xpak_alarm_log_f) (xge_hal_device_h devh, xge_hal_xpak_al * See also: xge_hal_driver_initialize(). */ typedef struct xge_hal_uld_cbs_t { - xge_uld_link_up_f link_up; - xge_uld_link_down_f link_down; - xge_uld_crit_err_f crit_err; - xge_uld_event_f event; - xge_uld_event_queued_f event_queued; + xge_uld_link_up_f link_up; + xge_uld_link_down_f link_down; + xge_uld_crit_err_f crit_err; + xge_uld_event_f event; + xge_uld_event_queued_f event_queued; xge_uld_before_device_poll_f before_device_poll; - xge_uld_after_device_poll_f after_device_poll; - xge_uld_sched_timer_cb_f sched_timer; - xge_uld_xpak_alarm_log_f xpak_alarm_log; + xge_uld_after_device_poll_f after_device_poll; + xge_uld_sched_timer_cb_f sched_timer; + xge_uld_xpak_alarm_log_f xpak_alarm_log; } xge_hal_uld_cbs_t; /** @@ -238,19 +230,19 @@ typedef struct xge_hal_uld_cbs_t { * @uld_callbacks: Upper-layer driver callbacks. See xge_hal_uld_cbs_t{}. * @debug_module_mask: 32bit mask that defines which components of the * driver are to be traced. The trace-able components are: - * XGE_COMPONENT_HAL_CONFIG 0x1 - * XGE_COMPONENT_HAL_FIFO 0x2 - * XGE_COMPONENT_HAL_RING 0x4 - * XGE_COMPONENT_HAL_CHANNEL 0x8 - * XGE_COMPONENT_HAL_DEVICE 0x10 - * XGE_COMPONENT_HAL_MM 0x20 - * XGE_COMPONENT_HAL_QUEUE 0x40 - * XGE_COMPONENT_HAL_STATS 0x100 - * XGE_COMPONENT_OSDEP 0x1000 - * XGE_COMPONENT_LL 0x2000 - * XGE_COMPONENT_TOE 0x4000 - * XGE_COMPONENT_RDMA 0x8000 - * XGE_COMPONENT_ALL 0xffffffff + * XGE_COMPONENT_HAL_CONFIG 0x1 + * XGE_COMPONENT_HAL_FIFO 0x2 + * XGE_COMPONENT_HAL_RING 0x4 + * XGE_COMPONENT_HAL_CHANNEL 0x8 + * XGE_COMPONENT_HAL_DEVICE 0x10 + * XGE_COMPONENT_HAL_MM 0x20 + * XGE_COMPONENT_HAL_QUEUE 0x40 + * XGE_COMPONENT_HAL_STATS 0x100 + * XGE_COMPONENT_OSDEP 0x1000 + * XGE_COMPONENT_LL 0x2000 + * XGE_COMPONENT_TOE 0x4000 + * XGE_COMPONENT_RDMA 0x8000 + * XGE_COMPONENT_ALL 0xffffffff * The @debug_module_mask allows to switch off and on tracing at runtime. * In addition, the traces for the same trace-able components can be * compiled out, based on the same mask provided via Makefile. @@ -259,18 +251,18 @@ typedef struct xge_hal_uld_cbs_t { * HAL (driver) object. There is a single instance of this structure per HAL. 
*/ typedef struct xge_hal_driver_t { - xge_hal_driver_config_t config; + xge_hal_driver_config_t config; int is_initialized; xge_hal_uld_cbs_t uld_callbacks; - u32 debug_module_mask; - int debug_level; + u32 debug_module_mask; + int debug_level; } xge_hal_driver_t; extern xge_hal_driver_t *g_xge_hal_driver; static inline int xge_hal_driver_is_initialized(void) { - return g_xge_hal_driver->is_initialized; + return g_xge_hal_driver->is_initialized; } static inline int @@ -283,7 +275,7 @@ static inline void xge_hal_driver_debug_module_mask_set(u32 new_mask) { #if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \ - (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0) + (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0) g_xge_hal_driver->debug_module_mask = new_mask; g_module_mask = (unsigned long *)&g_xge_hal_driver->debug_module_mask; #endif @@ -296,14 +288,14 @@ static inline void xge_hal_driver_debug_level_set(int new_level) { #if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \ - (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0) + (defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0) g_xge_hal_driver->debug_level = new_level; g_level = &g_xge_hal_driver->debug_level; #endif } xge_hal_status_e xge_hal_driver_initialize(xge_hal_driver_config_t *config, - xge_hal_uld_cbs_t *uld_callbacks); + xge_hal_uld_cbs_t *uld_callbacks); void xge_hal_driver_terminate(void); diff --git a/sys/dev/nxge/include/xgehal-event.h b/sys/dev/nxge/include/xgehal-event.h index 7d560d2..1613aae 100644 --- a/sys/dev/nxge/include/xgehal-event.h +++ b/sys/dev/nxge/include/xgehal-event.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-event.h - * - * Description: event types - * - * Created: 7 June 2004 - */ - #ifndef XGE_HAL_EVENT_H #define XGE_HAL_EVENT_H @@ -41,8 +33,8 @@ __EXTERN_BEGIN_DECLS -#define XGE_HAL_EVENT_BASE 0 -#define XGE_LL_EVENT_BASE 100 +#define XGE_HAL_EVENT_BASE 0 +#define XGE_LL_EVENT_BASE 100 /** * enum xge_hal_event_e - Enumerates slow-path HAL events. @@ -69,15 +61,15 @@ __EXTERN_BEGIN_DECLS * xge_uld_link_down_f{}. 
*/ typedef enum xge_hal_event_e { - XGE_HAL_EVENT_UNKNOWN = 0, + XGE_HAL_EVENT_UNKNOWN = 0, /* HAL events */ - XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1, - XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2, - XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3, - XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4, - XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5, - XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6, - XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7, + XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1, + XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2, + XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3, + XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4, + XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5, + XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6, + XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7, } xge_hal_event_e; __EXTERN_END_DECLS diff --git a/sys/dev/nxge/include/xgehal-fifo.h b/sys/dev/nxge/include/xgehal-fifo.h index 6de6048..5f082d6 100644 --- a/sys/dev/nxge/include/xgehal-fifo.h +++ b/sys/dev/nxge/include/xgehal-fifo.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-fifo.h - * - * Description: Tx fifo object functionality - * - * Created: 19 May 2004 - */ - #ifndef XGE_HAL_FIFO_H #define XGE_HAL_FIFO_H @@ -44,11 +36,11 @@ __EXTERN_BEGIN_DECLS /* HW fifo configuration */ -#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65 -#define XGE_HAL_FIFO_MAX_WRR 5 -#define XGE_HAL_FIFO_MAX_PARTITION 4 -#define XGE_HAL_FIFO_MAX_WRR_STATE 36 -#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000 +#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65 +#define XGE_HAL_FIFO_MAX_WRR 5 +#define XGE_HAL_FIFO_MAX_PARTITION 4 +#define XGE_HAL_FIFO_MAX_WRR_STATE 36 +#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000 /* HW FIFO Weight Calender */ #define XGE_HAL_FIFO_WRR_0 0x0706050407030602ULL @@ -77,14 +69,14 @@ typedef struct { /* Bad TxDL transfer codes */ -#define XGE_HAL_TXD_T_CODE_OK 0x0 -#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1 -#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2 -#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3 -#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5 -#define XGE_HAL_TXD_T_CODE_PARITY 0x7 -#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA -#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF +#define XGE_HAL_TXD_T_CODE_OK 0x0 +#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1 +#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2 +#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3 +#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5 +#define XGE_HAL_TXD_T_CODE_PARITY 0x7 +#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA +#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF /** @@ -105,16 +97,16 @@ typedef struct { typedef struct xge_hal_fifo_txd_t { u64 control_1; #define XGE_HAL_TXD_LIST_OWN_XENA BIT(7) -#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15)) +#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15)) #define XGE_HAL_GET_TXD_T_CODE(val) ((val & XGE_HAL_TXD_T_CODE)>>48) #define XGE_HAL_SET_TXD_T_CODE(x, val) (x |= (((u64)val & 0xF) << 48)) #define XGE_HAL_TXD_GATHER_CODE (BIT(22) | BIT(23)) #define XGE_HAL_TXD_GATHER_CODE_FIRST BIT(22) #define XGE_HAL_TXD_GATHER_CODE_LAST BIT(23) -#define XGE_HAL_TXD_NO_LSO 0 -#define XGE_HAL_TXD_UDF_COF 1 -#define XGE_HAL_TXD_TCP_LSO 2 -#define XGE_HAL_TXD_UDP_LSO 3 +#define XGE_HAL_TXD_NO_LSO 0 +#define XGE_HAL_TXD_UDF_COF 1 +#define XGE_HAL_TXD_TCP_LSO 2 +#define XGE_HAL_TXD_UDP_LSO 3 #define XGE_HAL_TXD_LSO_COF_CTRL(val) vBIT(val,30,2) #define XGE_HAL_TXD_TCP_LSO_MSS(val) vBIT(val,34,14) #define XGE_HAL_TXD_BUFFER0_SIZE(val) vBIT(val,48,16) @@ -165,17 +157,17 @@ typedef xge_hal_fifo_txd_t* 
xge_hal_fifo_txdl_t; * Note: The structure is cache line aligned. */ typedef struct xge_hal_fifo_t { - xge_hal_channel_t channel; - spinlock_t *post_lock_ptr; - xge_hal_fifo_hw_pair_t *hw_pair; - xge_hal_fifo_config_t *config; - int no_snoop_bits; - int txdl_per_memblock; - u64 interrupt_type; - int txdl_size; - int priv_size; - xge_hal_mempool_t *mempool; - int align_size; + xge_hal_channel_t channel; + spinlock_t *post_lock_ptr; + xge_hal_fifo_hw_pair_t *hw_pair; + xge_hal_fifo_config_t *config; + int no_snoop_bits; + int txdl_per_memblock; + u64 interrupt_type; + int txdl_size; + int priv_size; + xge_hal_mempool_t *mempool; + int align_size; } __xge_os_attr_cacheline_aligned xge_hal_fifo_t; /** @@ -228,30 +220,30 @@ typedef struct xge_hal_fifo_t { * See also: xge_hal_ring_rxd_priv_t{}. */ typedef struct xge_hal_fifo_txdl_priv_t { - dma_addr_t dma_addr; - pci_dma_h dma_handle; - ptrdiff_t dma_offset; - int frags; - char *align_vaddr_start; - char *align_vaddr; - dma_addr_t align_dma_addr; - pci_dma_h align_dma_handle; - pci_dma_acc_h align_dma_acch; - ptrdiff_t align_dma_offset; - int align_used_frags; - int alloc_frags; - int dang_frags; - unsigned int bytes_sent; - int unused; - xge_hal_fifo_txd_t *dang_txdl; - struct xge_hal_fifo_txdl_priv_t *next_txdl_priv; - xge_hal_fifo_txd_t *first_txdp; - void *memblock; + dma_addr_t dma_addr; + pci_dma_h dma_handle; + ptrdiff_t dma_offset; + int frags; + char *align_vaddr_start; + char *align_vaddr; + dma_addr_t align_dma_addr; + pci_dma_h align_dma_handle; + pci_dma_acc_h align_dma_acch; + ptrdiff_t align_dma_offset; + int align_used_frags; + int alloc_frags; + int dang_frags; + unsigned int bytes_sent; + int unused; + xge_hal_fifo_txd_t *dang_txdl; + struct xge_hal_fifo_txdl_priv_t *next_txdl_priv; + xge_hal_fifo_txd_t *first_txdp; + void *memblock; #ifdef XGE_DEBUG_ASSERT - xge_hal_mempool_dma_t *dma_object; + xge_hal_mempool_dma_t *dma_object; #endif #ifdef XGE_OS_MEMORY_CHECK - int allocated; + int allocated; #endif } xge_hal_fifo_txdl_priv_t; @@ -268,7 +260,7 @@ xge_hal_fifo_get_max_frags_cnt(xge_hal_channel_h channelh) /* ========================= FIFO PRIVATE API ============================= */ xge_hal_status_e __hal_fifo_open(xge_hal_channel_h channelh, - xge_hal_channel_attr_t *attr); + xge_hal_channel_attr_t *attr); void __hal_fifo_close(xge_hal_channel_h channelh); @@ -289,16 +281,20 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - u64 ctrl_1); + u64 ctrl_1); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void __hal_fifo_txdl_restore_many(xge_hal_channel_h channelh, - xge_hal_fifo_txd_t *txdp, int txdl_count); + xge_hal_fifo_txd_t *txdp, int txdl_count); /* ========================= FIFO PUBLIC API ============================== */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh); +__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e +xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, + const int frags); + __HAL_STATIC_FIFO __HAL_INLINE_FIFO void* xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh); @@ -307,38 +303,38 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh); __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channel, int dtr_sp_size, - xge_hal_dtr_h dtr_sp); + xge_hal_dtr_h dtr_sp); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void -xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, 
xge_hal_dtr_h dtrh); +xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num, - xge_hal_dtr_h dtrs[]); + xge_hal_dtr_h dtrs[]); __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, - u8 *t_code); + u8 *t_code); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void -xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr); +xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int frag_idx, dma_addr_t dma_pointer, int size); + int frag_idx, dma_addr_t dma_pointer, int size); __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, int frag_idx, void *vaddr, - dma_addr_t dma_pointer, int size, int misaligned_size); + xge_hal_dtr_h dtrh, int frag_idx, void *vaddr, + dma_addr_t dma_pointer, int size, int misaligned_size); __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - void *vaddr, int size); + void *vaddr, int size); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int frag_idx); + int frag_idx); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss); @@ -347,7 +343,7 @@ __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits); __HAL_STATIC_FIFO __HAL_INLINE_FIFO void -xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag); +xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag); __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh); diff --git a/sys/dev/nxge/include/xgehal-mgmt.h b/sys/dev/nxge/include/xgehal-mgmt.h index 061320e..93e60a2 100644 --- a/sys/dev/nxge/include/xgehal-mgmt.h +++ b/sys/dev/nxge/include/xgehal-mgmt.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-mgmt.h - * - * Description: management API - * - * Created: 1 September 2004 - */ - #ifndef XGE_HAL_MGMT_H #define XGE_HAL_MGMT_H @@ -68,88 +60,88 @@ __EXTERN_BEGIN_DECLS * @transponder_temperature: TODO */ typedef struct xge_hal_mgmt_about_info_t { - u16 vendor; - u16 device; - u16 subsys_vendor; - u16 subsys_device; - u8 board_rev; - char vendor_name[16]; - char chip_name[16]; - char media[16]; - char hal_major[4]; - char hal_minor[4]; - char hal_fix[4]; - char hal_build[16]; - char ll_major[4]; - char ll_minor[4]; - char ll_fix[4]; - char ll_build[16]; - u32 transponder_temperature; + u16 vendor; + u16 device; + u16 subsys_vendor; + u16 subsys_device; + u8 board_rev; + char vendor_name[16]; + char chip_name[16]; + char media[16]; + char hal_major[4]; + char hal_minor[4]; + char hal_fix[4]; + char hal_build[16]; + char ll_major[4]; + char ll_minor[4]; + char ll_fix[4]; + char ll_build[16]; + u32 transponder_temperature; } xge_hal_mgmt_about_info_t; -typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t; -typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t; -typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t; -typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t; -typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t; -typedef 
xge_hal_device_config_t xge_hal_mgmt_device_config_t; -typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t; -typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t; +typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t; +typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t; +typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t; +typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t; +typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t; +typedef xge_hal_device_config_t xge_hal_mgmt_device_config_t; +typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t; +typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t; xge_hal_status_e xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info, - int size); + int size); xge_hal_status_e xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats, - int size); + int size); xge_hal_status_e xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out); xge_hal_status_e xge_hal_mgmt_pcim_stats(xge_hal_device_h devh, - xge_hal_mgmt_pcim_stats_t *pcim_stats, int size); + xge_hal_mgmt_pcim_stats_t *pcim_stats, int size); xge_hal_status_e xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size, - char *out); + char *out); xge_hal_status_e xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *hw_stats, - int size); + int size); xge_hal_status_e xge_hal_mgmt_device_stats(xge_hal_device_h devh, - xge_hal_mgmt_device_stats_t *device_stats, int size); + xge_hal_mgmt_device_stats_t *device_stats, int size); xge_hal_status_e xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh, - xge_hal_mgmt_channel_stats_t *channel_stats, int size); + xge_hal_mgmt_channel_stats_t *channel_stats, int size); xge_hal_status_e xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset, - u64 *value); + u64 *value); xge_hal_status_e -xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset, - u64 value); +xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset, + u64 value); xge_hal_status_e xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset, - int bits, u32 *value); + int bits, u32 *value); xge_hal_status_e xge_hal_mgmt_device_config(xge_hal_device_h devh, - xge_hal_mgmt_device_config_t *dev_config, int size); + xge_hal_mgmt_device_config_t *dev_config, int size); xge_hal_status_e xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config, - int size); + int size); xge_hal_status_e xge_hal_mgmt_pci_config(xge_hal_device_h devh, - xge_hal_mgmt_pci_config_t *pci_config, int size); + xge_hal_mgmt_pci_config_t *pci_config, int size); xge_hal_status_e xge_hal_pma_loopback( xge_hal_device_h devh, int enable ); @@ -199,7 +191,7 @@ __hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value); #ifdef XGE_TRACE_INTO_CIRCULAR_ARR xge_hal_status_e xge_hal_mgmt_trace_read(char *buffer, unsigned buf_size, unsigned *offset, - unsigned *read_length); + unsigned *read_length); #endif void @@ -215,8 +207,8 @@ xge_hal_flick_link_led(xge_hal_device_h devh); * given its Sub system ID. */ #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \ - ((((subid >= 0x600B) && (subid <= 0x600D)) || \ - ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) + ((((subid >= 0x600B) && (subid <= 0x600D)) || \ + ((subid >= 0x640B) && (subid <= 0x640D))) ? 
1 : 0) #define CHECKBIT(value, nbit) (value & (1 << nbit)) #ifdef XGE_HAL_USE_MGMT_AUX diff --git a/sys/dev/nxge/include/xgehal-mgmtaux.h b/sys/dev/nxge/include/xgehal-mgmtaux.h index 6d4922e..93a9865 100644 --- a/sys/dev/nxge/include/xgehal-mgmtaux.h +++ b/sys/dev/nxge/include/xgehal-mgmtaux.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-mgmtaux.h - * - * Description: management auxiliary API - * - * Created: 1 September 2004 - */ - #ifndef XGE_HAL_MGMTAUX_H #define XGE_HAL_MGMTAUX_H @@ -41,54 +33,54 @@ __EXTERN_BEGIN_DECLS -#define XGE_HAL_AUX_SEPA ' ' +#define XGE_HAL_AUX_SEPA ' ' xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); + char *retbuf, int *retsize); -xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); +xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize); -xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); +xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize); + int bufsize, char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); + char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); + char *retbuf, int *retsize); -xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh, - unsigned int offset, int bufsize, char *retbuf, - int *retsize); +xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh, + unsigned int offset, int bufsize, char *retbuf, + int *retsize); xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh, - unsigned int offset, u64 value); + unsigned int offset, u64 value); xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh, - unsigned int offset, int bufsize, char *retbuf, - int *retsize); + unsigned int offset, int bufsize, char *retbuf, + int *retsize); -xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); +xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize); + int bufsize, char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize); + char *retbuf, int *retsize); xge_hal_status_e xge_hal_aux_device_dump(xge_hal_device_h devh); xge_hal_status_e xge_hal_aux_driver_config_read(int bufsize, char *retbuf, - int *retsize); + int *retsize); xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize); + int bufsize, char *retbuf, int *retsize); __EXTERN_END_DECLS diff --git a/sys/dev/nxge/include/xgehal-mm.h b/sys/dev/nxge/include/xgehal-mm.h index 5a8f836..7b43de4 100644 --- a/sys/dev/nxge/include/xgehal-mm.h +++ b/sys/dev/nxge/include/xgehal-mm.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-mm.h - * - * Description: memory pool object - * - * Created: 28 May 2004 - */ - #ifndef XGE_HAL_MM_H #define XGE_HAL_MM_H @@ -51,9 +43,9 @@ typedef void* xge_hal_mempool_h; caller. 
*/ typedef struct xge_hal_mempool_dma_t { - dma_addr_t addr; - pci_dma_h handle; - pci_dma_acc_h acc_handle; + dma_addr_t addr; + pci_dma_h handle; + pci_dma_acc_h acc_handle; } xge_hal_mempool_dma_t; /* @@ -67,32 +59,32 @@ typedef struct xge_hal_mempool_dma_t { * Memory pool allocation/deallocation callback. */ typedef xge_hal_status_e (*xge_hal_mempool_item_f) (xge_hal_mempool_h mempoolh, - void *memblock, int memblock_index, - xge_hal_mempool_dma_t *dma_object, void *item, - int index, int is_last, void *userdata); + void *memblock, int memblock_index, + xge_hal_mempool_dma_t *dma_object, void *item, + int index, int is_last, void *userdata); /* * struct xge_hal_mempool_t - Memory pool. */ typedef struct xge_hal_mempool_t { - xge_hal_mempool_item_f item_func_alloc; - xge_hal_mempool_item_f item_func_free; - void *userdata; - void **memblocks_arr; - void **memblocks_priv_arr; - xge_hal_mempool_dma_t *memblocks_dma_arr; - pci_dev_h pdev; - int memblock_size; - int memblocks_max; - int memblocks_allocated; - int item_size; - int items_max; - int items_initial; - int items_current; - int items_per_memblock; - void **items_arr; - void **shadow_items_arr; - int items_priv_size; + xge_hal_mempool_item_f item_func_alloc; + xge_hal_mempool_item_f item_func_free; + void *userdata; + void **memblocks_arr; + void **memblocks_priv_arr; + xge_hal_mempool_dma_t *memblocks_dma_arr; + pci_dev_h pdev; + int memblock_size; + int memblocks_max; + int memblocks_allocated; + int item_size; + int items_max; + int items_initial; + int items_current; + int items_per_memblock; + void **items_arr; + void **shadow_items_arr; + int items_priv_size; } xge_hal_mempool_t; /* @@ -110,7 +102,7 @@ __hal_mempool_item(xge_hal_mempool_t *mempool, int index) */ static inline void* __hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx, - void *item, int *memblock_item_idx) + void *item, int *memblock_item_idx) { ptrdiff_t offset; void *memblock = mempool->memblocks_arr[memblock_idx]; @@ -124,7 +116,7 @@ __hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx, xge_assert((*memblock_item_idx) < mempool->items_per_memblock); return (char*)mempool->memblocks_priv_arr[memblock_idx] + - (*memblock_item_idx) * mempool->items_priv_size; + (*memblock_item_idx) * mempool->items_priv_size; } /* @@ -159,12 +151,12 @@ __hal_mempool_memblock_dma(xge_hal_mempool_t *mempool, int memblock_idx) } xge_hal_status_e __hal_mempool_grow(xge_hal_mempool_t *mempool, - int num_allocate, int *num_allocated); + int num_allocate, int *num_allocated); xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size, - int item_size, int private_size, int items_initial, - int items_max, xge_hal_mempool_item_f item_func_alloc, - xge_hal_mempool_item_f item_func_free, void *userdata); + int item_size, int private_size, int items_initial, + int items_max, xge_hal_mempool_item_f item_func_alloc, + xge_hal_mempool_item_f item_func_free, void *userdata); void __hal_mempool_destroy(xge_hal_mempool_t *mempool); diff --git a/sys/dev/nxge/include/xgehal-regs.h b/sys/dev/nxge/include/xgehal-regs.h index 89a2c4a..a20e083 100644 --- a/sys/dev/nxge/include/xgehal-regs.h +++ b/sys/dev/nxge/include/xgehal-regs.h @@ -26,17 +26,11 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-regs.h - * - * Description: Xframe mem-mapped register space - * - * Created: 14 May 2004 - */ - #ifndef XGE_HAL_REGS_H #define XGE_HAL_REGS_H +__EXTERN_BEGIN_DECLS + typedef struct { /* General Control-Status Registers */ @@ -53,14 +47,14 @@ typedef struct { #define 
XGE_HAL_GEN_INTR_RXXGXS BIT(36) #define XGE_HAL_GEN_INTR_RXTRAFFIC BIT(40) #define XGE_HAL_GEN_ERROR_INTR (XGE_HAL_GEN_INTR_TXPIC | \ - XGE_HAL_GEN_INTR_RXPIC | \ - XGE_HAL_GEN_INTR_TXDMA | \ - XGE_HAL_GEN_INTR_RXDMA | \ - XGE_HAL_GEN_INTR_TXMAC | \ - XGE_HAL_GEN_INTR_RXMAC | \ - XGE_HAL_GEN_INTR_TXXGXS | \ - XGE_HAL_GEN_INTR_RXXGXS | \ - XGE_HAL_GEN_INTR_MC) + XGE_HAL_GEN_INTR_RXPIC | \ + XGE_HAL_GEN_INTR_TXDMA | \ + XGE_HAL_GEN_INTR_RXDMA | \ + XGE_HAL_GEN_INTR_TXMAC | \ + XGE_HAL_GEN_INTR_RXMAC | \ + XGE_HAL_GEN_INTR_TXXGXS | \ + XGE_HAL_GEN_INTR_RXXGXS | \ + XGE_HAL_GEN_INTR_MC) u64 general_int_mask; @@ -74,17 +68,17 @@ typedef struct { #define XGE_HAL_SW_RESET_EOI vBIT(0xA5,16,8) #define XGE_HAL_SW_RESET_XGXS vBIT(0xA5,24,8) #define XGE_HAL_SW_RESET_ALL (XGE_HAL_SW_RESET_XENA | \ - XGE_HAL_SW_RESET_FLASH | \ - XGE_HAL_SW_RESET_EOI | \ - XGE_HAL_SW_RESET_XGXS) + XGE_HAL_SW_RESET_FLASH | \ + XGE_HAL_SW_RESET_EOI | \ + XGE_HAL_SW_RESET_XGXS) /* The SW_RESET register must read this value after a successful reset. */ #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) -#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA500000000ULL -#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A500000000ULL +#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA500000000ULL +#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A500000000ULL #else -#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA5000000ULL -#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A50000ULL +#define XGE_HAL_SW_RESET_RAW_VAL_XENA 0xA5000000ULL +#define XGE_HAL_SW_RESET_RAW_VAL_HERC 0xA5A50000ULL #endif @@ -115,22 +109,22 @@ typedef struct { #define XGE_HAL_ADAPTER_ECC_EN BIT(55) u64 serr_source; -#define XGE_HAL_SERR_SOURCE_PIC BIT(0) +#define XGE_HAL_SERR_SOURCE_PIC BIT(0) #define XGE_HAL_SERR_SOURCE_TXDMA BIT(1) #define XGE_HAL_SERR_SOURCE_RXDMA BIT(2) -#define XGE_HAL_SERR_SOURCE_MAC BIT(3) -#define XGE_HAL_SERR_SOURCE_MC BIT(4) -#define XGE_HAL_SERR_SOURCE_XGXS BIT(5) -#define XGE_HAL_SERR_SOURCE_ANY (XGE_HAL_SERR_SOURCE_PIC | \ - XGE_HAL_SERR_SOURCE_TXDMA | \ - XGE_HAL_SERR_SOURCE_RXDMA | \ - XGE_HAL_SERR_SOURCE_MAC | \ - XGE_HAL_SERR_SOURCE_MC | \ - XGE_HAL_SERR_SOURCE_XGXS) - - u64 pci_info; -#define XGE_HAL_PCI_INFO vBIT(0xF,0,4) -#define XGE_HAL_PCI_32_BIT BIT(8) +#define XGE_HAL_SERR_SOURCE_MAC BIT(3) +#define XGE_HAL_SERR_SOURCE_MC BIT(4) +#define XGE_HAL_SERR_SOURCE_XGXS BIT(5) +#define XGE_HAL_SERR_SOURCE_ANY (XGE_HAL_SERR_SOURCE_PIC | \ + XGE_HAL_SERR_SOURCE_TXDMA | \ + XGE_HAL_SERR_SOURCE_RXDMA | \ + XGE_HAL_SERR_SOURCE_MAC | \ + XGE_HAL_SERR_SOURCE_MC | \ + XGE_HAL_SERR_SOURCE_XGXS) + + u64 pci_info; +#define XGE_HAL_PCI_INFO vBIT(0xF,0,4) +#define XGE_HAL_PCI_32_BIT BIT(8) u8 unused0_1[0x160 - 0x128]; @@ -206,9 +200,9 @@ typedef struct { u64 msi_pending_reg; u64 misc_int_reg; -#define XGE_HAL_MISC_INT_REG_DP_ERR_INT BIT(0) -#define XGE_HAL_MISC_INT_REG_LINK_DOWN_INT BIT(1) -#define XGE_HAL_MISC_INT_REG_LINK_UP_INT BIT(2) +#define XGE_HAL_MISC_INT_REG_DP_ERR_INT BIT(0) +#define XGE_HAL_MISC_INT_REG_LINK_DOWN_INT BIT(1) +#define XGE_HAL_MISC_INT_REG_LINK_UP_INT BIT(2) u64 misc_int_mask; u64 misc_alarms; @@ -266,14 +260,14 @@ typedef struct { u64 scheduled_int_ctrl; #define XGE_HAL_SCHED_INT_CTRL_TIMER_EN BIT(0) #define XGE_HAL_SCHED_INT_CTRL_ONE_SHOT BIT(1) -#define XGE_HAL_SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6) -#define XGE_HAL_SCHED_INT_PERIOD(val) vBIT(val,32,32) -#define XGE_HAL_SCHED_INT_PERIOD_MASK 0xFFFFFFFF00000000ULL +#define XGE_HAL_SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6) +#define XGE_HAL_SCHED_INT_PERIOD(val) vBIT(val,32,32) 
+#define XGE_HAL_SCHED_INT_PERIOD_MASK 0xFFFFFFFF00000000ULL u64 txreqtimeout; -#define XGE_HAL_TXREQTO_VAL(val) vBIT(val,0,32) -#define XGE_HAL_TXREQTO_EN BIT(63) +#define XGE_HAL_TXREQTO_VAL(val) vBIT(val,0,32) +#define XGE_HAL_TXREQTO_EN BIT(63) u64 statsreqtimeout; #define XGE_HAL_STATREQTO_VAL(n) TBD @@ -285,25 +279,25 @@ typedef struct { u64 write_retry_acceleration; u64 xmsi_control; -#define XGE_HAL_XMSI_EN BIT(0) -#define XGE_HAL_XMSI_DIS_TINT_SERR BIT(1) -#define XGE_HAL_XMSI_BYTE_COUNT(val) vBIT(val,13,3) +#define XGE_HAL_XMSI_EN BIT(0) +#define XGE_HAL_XMSI_DIS_TINT_SERR BIT(1) +#define XGE_HAL_XMSI_BYTE_COUNT(val) vBIT(val,13,3) u64 xmsi_access; -#define XGE_HAL_XMSI_WR_RDN BIT(7) -#define XGE_HAL_XMSI_STROBE BIT(15) -#define XGE_HAL_XMSI_NO(val) vBIT(val,26,6) +#define XGE_HAL_XMSI_WR_RDN BIT(7) +#define XGE_HAL_XMSI_STROBE BIT(15) +#define XGE_HAL_XMSI_NO(val) vBIT(val,26,6) u64 xmsi_address; u64 xmsi_data; u64 rx_mat; -#define XGE_HAL_SET_RX_MAT(ring, msi) vBIT(msi, (8 * ring), 8) +#define XGE_HAL_SET_RX_MAT(ring, msi) vBIT(msi, (8 * ring), 8) u8 unused6[0x8]; u64 tx_mat[8]; -#define XGE_HAL_SET_TX_MAT(fifo, msi) vBIT(msi, (8 * fifo), 8) +#define XGE_HAL_SET_TX_MAT(fifo, msi) vBIT(msi, (8 * fifo), 8) u64 xmsi_mask_reg; @@ -314,64 +308,64 @@ typedef struct { #define XGE_HAL_STAT_CFG_ONE_SHOT_EN BIT(1) #define XGE_HAL_STAT_CFG_STAT_NS_EN BIT(8) #define XGE_HAL_STAT_CFG_STAT_RO BIT(9) -#define XGE_HAL_XENA_PER_SEC 0x208d5 -#define XGE_HAL_SET_UPDT_PERIOD(n) vBIT(n,32,32) +#define XGE_HAL_XENA_PER_SEC 0x208d5 +#define XGE_HAL_SET_UPDT_PERIOD(n) vBIT(n,32,32) u64 stat_addr; /* General Configuration */ u64 mdio_control; -#define XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(n) vBIT(n,0,16) -#define XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(n) vBIT(n,19,5) -#define XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(n) vBIT(n,27,5) -#define XGE_HAL_MDIO_CONTROL_MMD_DATA(n) vBIT(n,32,16) -#define XGE_HAL_MDIO_CONTROL_MMD_CTRL(n) vBIT(n,56,4) -#define XGE_HAL_MDIO_CONTROL_MMD_OP(n) vBIT(n,60,2) -#define XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(n) ((n>>16)&0xFFFF) -#define XGE_HAL_MDIO_MMD_PMA_DEV_ADDR 0x01 -#define XGE_HAL_MDIO_DOM_REG_ADDR 0xA100 -#define XGE_HAL_MDIO_ALARM_FLAGS_ADDR 0xA070 -#define XGE_HAL_MDIO_WARN_FLAGS_ADDR 0xA074 -#define XGE_HAL_MDIO_CTRL_START 0xE -#define XGE_HAL_MDIO_OP_ADDRESS 0x0 -#define XGE_HAL_MDIO_OP_WRITE 0x1 -#define XGE_HAL_MDIO_OP_READ 0x3 -#define XGE_HAL_MDIO_OP_READ_POST_INCREMENT 0x2 -#define XGE_HAL_MDIO_ALARM_TEMPHIGH 0x0080 -#define XGE_HAL_MDIO_ALARM_TEMPLOW 0x0040 -#define XGE_HAL_MDIO_ALARM_BIASHIGH 0x0008 -#define XGE_HAL_MDIO_ALARM_BIASLOW 0x0004 -#define XGE_HAL_MDIO_ALARM_POUTPUTHIGH 0x0002 -#define XGE_HAL_MDIO_ALARM_POUTPUTLOW 0x0001 -#define XGE_HAL_MDIO_WARN_TEMPHIGH 0x0080 -#define XGE_HAL_MDIO_WARN_TEMPLOW 0x0040 -#define XGE_HAL_MDIO_WARN_BIASHIGH 0x0008 -#define XGE_HAL_MDIO_WARN_BIASLOW 0x0004 -#define XGE_HAL_MDIO_WARN_POUTPUTHIGH 0x0002 -#define XGE_HAL_MDIO_WARN_POUTPUTLOW 0x0001 +#define XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(n) vBIT(n,0,16) +#define XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(n) vBIT(n,19,5) +#define XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(n) vBIT(n,27,5) +#define XGE_HAL_MDIO_CONTROL_MMD_DATA(n) vBIT(n,32,16) +#define XGE_HAL_MDIO_CONTROL_MMD_CTRL(n) vBIT(n,56,4) +#define XGE_HAL_MDIO_CONTROL_MMD_OP(n) vBIT(n,60,2) +#define XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(n) ((n>>16)&0xFFFF) +#define XGE_HAL_MDIO_MMD_PMA_DEV_ADDR 0x01 +#define XGE_HAL_MDIO_DOM_REG_ADDR 0xA100 +#define XGE_HAL_MDIO_ALARM_FLAGS_ADDR 0xA070 +#define XGE_HAL_MDIO_WARN_FLAGS_ADDR 0xA074 +#define 
XGE_HAL_MDIO_CTRL_START 0xE +#define XGE_HAL_MDIO_OP_ADDRESS 0x0 +#define XGE_HAL_MDIO_OP_WRITE 0x1 +#define XGE_HAL_MDIO_OP_READ 0x3 +#define XGE_HAL_MDIO_OP_READ_POST_INCREMENT 0x2 +#define XGE_HAL_MDIO_ALARM_TEMPHIGH 0x0080 +#define XGE_HAL_MDIO_ALARM_TEMPLOW 0x0040 +#define XGE_HAL_MDIO_ALARM_BIASHIGH 0x0008 +#define XGE_HAL_MDIO_ALARM_BIASLOW 0x0004 +#define XGE_HAL_MDIO_ALARM_POUTPUTHIGH 0x0002 +#define XGE_HAL_MDIO_ALARM_POUTPUTLOW 0x0001 +#define XGE_HAL_MDIO_WARN_TEMPHIGH 0x0080 +#define XGE_HAL_MDIO_WARN_TEMPLOW 0x0040 +#define XGE_HAL_MDIO_WARN_BIASHIGH 0x0008 +#define XGE_HAL_MDIO_WARN_BIASLOW 0x0004 +#define XGE_HAL_MDIO_WARN_POUTPUTHIGH 0x0002 +#define XGE_HAL_MDIO_WARN_POUTPUTLOW 0x0001 u64 dtx_control; u64 i2c_control; -#define XGE_HAL_I2C_CONTROL_DEV_ID(id) vBIT(id,1,3) -#define XGE_HAL_I2C_CONTROL_ADDR(addr) vBIT(addr,5,11) -#define XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2) -#define XGE_HAL_I2C_CONTROL_READ BIT(24) -#define XGE_HAL_I2C_CONTROL_NACK BIT(25) -#define XGE_HAL_I2C_CONTROL_CNTL_START vBIT(0xE,28,4) -#define XGE_HAL_I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4)) -#define XGE_HAL_I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF) -#define XGE_HAL_I2C_CONTROL_SET_DATA(val) vBIT(val,32,32) +#define XGE_HAL_I2C_CONTROL_DEV_ID(id) vBIT(id,1,3) +#define XGE_HAL_I2C_CONTROL_ADDR(addr) vBIT(addr,5,11) +#define XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2) +#define XGE_HAL_I2C_CONTROL_READ BIT(24) +#define XGE_HAL_I2C_CONTROL_NACK BIT(25) +#define XGE_HAL_I2C_CONTROL_CNTL_START vBIT(0xE,28,4) +#define XGE_HAL_I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4)) +#define XGE_HAL_I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF) +#define XGE_HAL_I2C_CONTROL_SET_DATA(val) vBIT(val,32,32) u64 beacon_control; u64 misc_control; -#define XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(val) vBIT(val,29,3) +#define XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(val) vBIT(val,29,3) #define XGE_HAL_MISC_CONTROL_EXT_REQ_EN BIT(1) -#define XGE_HAL_MISC_CONTROL_LINK_FAULT BIT(0) +#define XGE_HAL_MISC_CONTROL_LINK_FAULT BIT(0) u64 xfb_control; u64 gpio_control; -#define XGE_HAL_GPIO_CTRL_GPIO_0 BIT(8) +#define XGE_HAL_GPIO_CTRL_GPIO_0 BIT(8) u64 txfifo_dw_mask; u64 split_table_line_no; @@ -387,17 +381,17 @@ typedef struct { u64 txp_status; u64 txp_err_context; u64 spdm_bir_offset; -#define XGE_HAL_SPDM_PCI_BAR_NUM(spdm_bir_offset) \ - (u8)(spdm_bir_offset >> 61) +#define XGE_HAL_SPDM_PCI_BAR_NUM(spdm_bir_offset) \ + (u8)(spdm_bir_offset >> 61) #define XGE_HAL_SPDM_PCI_BAR_OFFSET(spdm_bir_offset) \ - (u32)((spdm_bir_offset >> 32) & 0x1FFFFFFF) + (u32)((spdm_bir_offset >> 32) & 0x1FFFFFFF) u64 spdm_overwrite; #define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_ENTRY(spdm_overwrite) \ - (u8)((spdm_overwrite >> 48) & 0xff) + (u8)((spdm_overwrite >> 48) & 0xff) #define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_DW(spdm_overwrite) \ - (u8)((spdm_overwrite >> 40) & 0x3) + (u8)((spdm_overwrite >> 40) & 0x3) #define XGE_HAL_SPDM_OVERWRITE_ERR_SPDM_LINE(spdm_overwrite) \ - (u8)((spdm_overwrite >> 32) & 0x7) + (u8)((spdm_overwrite >> 32) & 0x7) u64 cfg_addr_on_dperr; u64 pif_addr_on_dperr; u64 tags_in_use; @@ -409,9 +403,9 @@ typedef struct { u64 spdm_structure; #define XGE_HAL_SPDM_MAX_ENTRIES(spdm_structure) (u16)(spdm_structure >> 48) #define XGE_HAL_SPDM_INT_QW_PER_ENTRY(spdm_structure) \ - (u8)((spdm_structure >> 40) & 0xff) + (u8)((spdm_structure >> 40) & 0xff) #define XGE_HAL_SPDM_PCI_QW_PER_ENTRY(spdm_structure) \ - (u8)((spdm_structure >> 32) & 0xff) + (u8)((spdm_structure >> 32) & 0xff) u64 txdw_ptr_cnt_0; u64 
txdw_ptr_cnt_1; @@ -435,73 +429,73 @@ typedef struct { /* TxDMA registers */ u64 txdma_int_status; u64 txdma_int_mask; -#define XGE_HAL_TXDMA_PFC_INT BIT(0) -#define XGE_HAL_TXDMA_TDA_INT BIT(1) -#define XGE_HAL_TXDMA_PCC_INT BIT(2) -#define XGE_HAL_TXDMA_TTI_INT BIT(3) -#define XGE_HAL_TXDMA_LSO_INT BIT(4) -#define XGE_HAL_TXDMA_TPA_INT BIT(5) -#define XGE_HAL_TXDMA_SM_INT BIT(6) +#define XGE_HAL_TXDMA_PFC_INT BIT(0) +#define XGE_HAL_TXDMA_TDA_INT BIT(1) +#define XGE_HAL_TXDMA_PCC_INT BIT(2) +#define XGE_HAL_TXDMA_TTI_INT BIT(3) +#define XGE_HAL_TXDMA_LSO_INT BIT(4) +#define XGE_HAL_TXDMA_TPA_INT BIT(5) +#define XGE_HAL_TXDMA_SM_INT BIT(6) u64 pfc_err_reg; -#define XGE_HAL_PFC_ECC_SG_ERR BIT(7) -#define XGE_HAL_PFC_ECC_DB_ERR BIT(15) -#define XGE_HAL_PFC_SM_ERR_ALARM BIT(23) -#define XGE_HAL_PFC_MISC_0_ERR BIT(31) -#define XGE_HAL_PFC_MISC_1_ERR BIT(32) -#define XGE_HAL_PFC_PCIX_ERR BIT(39) +#define XGE_HAL_PFC_ECC_SG_ERR BIT(7) +#define XGE_HAL_PFC_ECC_DB_ERR BIT(15) +#define XGE_HAL_PFC_SM_ERR_ALARM BIT(23) +#define XGE_HAL_PFC_MISC_0_ERR BIT(31) +#define XGE_HAL_PFC_MISC_1_ERR BIT(32) +#define XGE_HAL_PFC_PCIX_ERR BIT(39) u64 pfc_err_mask; u64 pfc_err_alarm; u64 tda_err_reg; -#define XGE_HAL_TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8) -#define XGE_HAL_TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8) -#define XGE_HAL_TDA_SM0_ERR_ALARM BIT(22) -#define XGE_HAL_TDA_SM1_ERR_ALARM BIT(23) -#define XGE_HAL_TDA_PCIX_ERR BIT(39) +#define XGE_HAL_TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8) +#define XGE_HAL_TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8) +#define XGE_HAL_TDA_SM0_ERR_ALARM BIT(22) +#define XGE_HAL_TDA_SM1_ERR_ALARM BIT(23) +#define XGE_HAL_TDA_PCIX_ERR BIT(39) u64 tda_err_mask; u64 tda_err_alarm; u64 pcc_err_reg; -#define XGE_HAL_PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8) -#define XGE_HAL_PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8) -#define XGE_HAL_PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8) -#define XGE_HAL_PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8) -#define XGE_HAL_PCC_SM_ERR_ALARM vBIT(0xff,32,8) -#define XGE_HAL_PCC_WR_ERR_ALARM vBIT(0xff,40,8) -#define XGE_HAL_PCC_N_SERR vBIT(0xff,48,8) -#define XGE_HAL_PCC_ENABLE_FOUR vBIT(0x0F,0,8) -#define XGE_HAL_PCC_6_COF_OV_ERR BIT(56) -#define XGE_HAL_PCC_7_COF_OV_ERR BIT(57) -#define XGE_HAL_PCC_6_LSO_OV_ERR BIT(58) -#define XGE_HAL_PCC_7_LSO_OV_ERR BIT(59) +#define XGE_HAL_PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8) +#define XGE_HAL_PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8) +#define XGE_HAL_PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8) +#define XGE_HAL_PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8) +#define XGE_HAL_PCC_SM_ERR_ALARM vBIT(0xff,32,8) +#define XGE_HAL_PCC_WR_ERR_ALARM vBIT(0xff,40,8) +#define XGE_HAL_PCC_N_SERR vBIT(0xff,48,8) +#define XGE_HAL_PCC_ENABLE_FOUR vBIT(0x0F,0,8) +#define XGE_HAL_PCC_6_COF_OV_ERR BIT(56) +#define XGE_HAL_PCC_7_COF_OV_ERR BIT(57) +#define XGE_HAL_PCC_6_LSO_OV_ERR BIT(58) +#define XGE_HAL_PCC_7_LSO_OV_ERR BIT(59) u64 pcc_err_mask; u64 pcc_err_alarm; u64 tti_err_reg; -#define XGE_HAL_TTI_ECC_SG_ERR BIT(7) -#define XGE_HAL_TTI_ECC_DB_ERR BIT(15) -#define XGE_HAL_TTI_SM_ERR_ALARM BIT(23) +#define XGE_HAL_TTI_ECC_SG_ERR BIT(7) +#define XGE_HAL_TTI_ECC_DB_ERR BIT(15) +#define XGE_HAL_TTI_SM_ERR_ALARM BIT(23) u64 tti_err_mask; u64 tti_err_alarm; u64 lso_err_reg; -#define XGE_HAL_LSO6_SEND_OFLOW BIT(12) -#define XGE_HAL_LSO7_SEND_OFLOW BIT(13) -#define XGE_HAL_LSO6_ABORT BIT(14) -#define XGE_HAL_LSO7_ABORT BIT(15) -#define XGE_HAL_LSO6_SM_ERR_ALARM BIT(22) -#define XGE_HAL_LSO7_SM_ERR_ALARM BIT(23) +#define XGE_HAL_LSO6_SEND_OFLOW BIT(12) +#define XGE_HAL_LSO7_SEND_OFLOW BIT(13) +#define XGE_HAL_LSO6_ABORT BIT(14) +#define 
XGE_HAL_LSO7_ABORT BIT(15) +#define XGE_HAL_LSO6_SM_ERR_ALARM BIT(22) +#define XGE_HAL_LSO7_SM_ERR_ALARM BIT(23) u64 lso_err_mask; u64 lso_err_alarm; u64 tpa_err_reg; -#define XGE_HAL_TPA_TX_FRM_DROP BIT(7) -#define XGE_HAL_TPA_SM_ERR_ALARM BIT(23) +#define XGE_HAL_TPA_TX_FRM_DROP BIT(7) +#define XGE_HAL_TPA_SM_ERR_ALARM BIT(23) u64 tpa_err_mask; u64 tpa_err_alarm; u64 sm_err_reg; -#define XGE_HAL_SM_SM_ERR_ALARM BIT(15) +#define XGE_HAL_SM_SM_ERR_ALARM BIT(15) u64 sm_err_mask; u64 sm_err_alarm; @@ -512,7 +506,7 @@ typedef struct { /* Tx FIFO controller */ #define XGE_HAL_X_MAX_FIFOS 8 -#define XGE_HAL_X_FIFO_MAX_LEN 0x1FFF /*8191 */ +#define XGE_HAL_X_FIFO_MAX_LEN 0x1FFF /*8191 */ u64 tx_fifo_partition_0; #define XGE_HAL_TX_FIFO_PARTITION_EN BIT(0) #define XGE_HAL_TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3) @@ -538,14 +532,14 @@ typedef struct { #define XGE_HAL_TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3) #define XGE_HAL_TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13) -#define XGE_HAL_TX_FIFO_PARTITION_PRI_0 0 /* highest */ +#define XGE_HAL_TX_FIFO_PARTITION_PRI_0 0 /* highest */ #define XGE_HAL_TX_FIFO_PARTITION_PRI_1 1 #define XGE_HAL_TX_FIFO_PARTITION_PRI_2 2 #define XGE_HAL_TX_FIFO_PARTITION_PRI_3 3 #define XGE_HAL_TX_FIFO_PARTITION_PRI_4 4 #define XGE_HAL_TX_FIFO_PARTITION_PRI_5 5 #define XGE_HAL_TX_FIFO_PARTITION_PRI_6 6 -#define XGE_HAL_TX_FIFO_PARTITION_PRI_7 7 /* lowest */ +#define XGE_HAL_TX_FIFO_PARTITION_PRI_7 7 /* lowest */ u64 tx_w_round_robin_0; u64 tx_w_round_robin_1; @@ -579,7 +573,7 @@ typedef struct { #define XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR BIT(1) #define XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI BIT(2) #define XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL BIT(3) -#define XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR BIT(6) +#define XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR BIT(6) /* Recent add, used only debug purposes. 
*/ u64 pcc_enable; @@ -612,52 +606,52 @@ typedef struct { #define XGE_HAL_RXDMA_INT_RTI_INT_M BIT(3) u64 rda_err_reg; -#define XGE_HAL_RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8) -#define XGE_HAL_RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8) -#define XGE_HAL_RDA_FRM_ECC_SG_ERR BIT(23) -#define XGE_HAL_RDA_FRM_ECC_DB_N_AERR BIT(31) -#define XGE_HAL_RDA_SM1_ERR_ALARM BIT(38) -#define XGE_HAL_RDA_SM0_ERR_ALARM BIT(39) -#define XGE_HAL_RDA_MISC_ERR BIT(47) -#define XGE_HAL_RDA_PCIX_ERR BIT(55) -#define XGE_HAL_RDA_RXD_ECC_DB_SERR BIT(63) +#define XGE_HAL_RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8) +#define XGE_HAL_RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8) +#define XGE_HAL_RDA_FRM_ECC_SG_ERR BIT(23) +#define XGE_HAL_RDA_FRM_ECC_DB_N_AERR BIT(31) +#define XGE_HAL_RDA_SM1_ERR_ALARM BIT(38) +#define XGE_HAL_RDA_SM0_ERR_ALARM BIT(39) +#define XGE_HAL_RDA_MISC_ERR BIT(47) +#define XGE_HAL_RDA_PCIX_ERR BIT(55) +#define XGE_HAL_RDA_RXD_ECC_DB_SERR BIT(63) u64 rda_err_mask; u64 rda_err_alarm; u64 rc_err_reg; -#define XGE_HAL_RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8) -#define XGE_HAL_RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8) -#define XGE_HAL_RC_FTC_ECC_SG_ERR BIT(23) -#define XGE_HAL_RC_FTC_ECC_DB_ERR BIT(31) -#define XGE_HAL_RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8) -#define XGE_HAL_RC_FTC_SM_ERR_ALARM BIT(47) -#define XGE_HAL_RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8) +#define XGE_HAL_RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8) +#define XGE_HAL_RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8) +#define XGE_HAL_RC_FTC_ECC_SG_ERR BIT(23) +#define XGE_HAL_RC_FTC_ECC_DB_ERR BIT(31) +#define XGE_HAL_RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8) +#define XGE_HAL_RC_FTC_SM_ERR_ALARM BIT(47) +#define XGE_HAL_RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8) u64 rc_err_mask; u64 rc_err_alarm; u64 prc_pcix_err_reg; -#define XGE_HAL_PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8) -#define XGE_HAL_PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8) -#define XGE_HAL_PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8) -#define XGE_HAL_PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8) -#define XGE_HAL_PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8) -#define XGE_HAL_PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8) +#define XGE_HAL_PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8) +#define XGE_HAL_PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8) +#define XGE_HAL_PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8) +#define XGE_HAL_PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8) +#define XGE_HAL_PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8) +#define XGE_HAL_PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8) u64 prc_pcix_err_mask; u64 prc_pcix_err_alarm; u64 rpa_err_reg; -#define XGE_HAL_RPA_ECC_SG_ERR BIT(7) -#define XGE_HAL_RPA_ECC_DB_ERR BIT(15) -#define XGE_HAL_RPA_FLUSH_REQUEST BIT(22) -#define XGE_HAL_RPA_SM_ERR_ALARM BIT(23) -#define XGE_HAL_RPA_CREDIT_ERR BIT(31) +#define XGE_HAL_RPA_ECC_SG_ERR BIT(7) +#define XGE_HAL_RPA_ECC_DB_ERR BIT(15) +#define XGE_HAL_RPA_FLUSH_REQUEST BIT(22) +#define XGE_HAL_RPA_SM_ERR_ALARM BIT(23) +#define XGE_HAL_RPA_CREDIT_ERR BIT(31) u64 rpa_err_mask; u64 rpa_err_alarm; u64 rti_err_reg; -#define XGE_HAL_RTI_ECC_SG_ERR BIT(7) -#define XGE_HAL_RTI_ECC_DB_ERR BIT(15) -#define XGE_HAL_RTI_SM_ERR_ALARM BIT(23) +#define XGE_HAL_RTI_ECC_SG_ERR BIT(7) +#define XGE_HAL_RTI_ECC_DB_ERR BIT(15) +#define XGE_HAL_RTI_SM_ERR_ALARM BIT(23) u64 rti_err_mask; u64 rti_err_alarm; @@ -674,14 +668,14 @@ typedef struct { #define XGE_HAL_RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3) #define XGE_HAL_RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3) -#define XGE_HAL_RX_QUEUE_PRI_0 0 /* highest */ +#define XGE_HAL_RX_QUEUE_PRI_0 0 /* highest */ #define XGE_HAL_RX_QUEUE_PRI_1 1 #define XGE_HAL_RX_QUEUE_PRI_2 2 #define XGE_HAL_RX_QUEUE_PRI_3 3 #define XGE_HAL_RX_QUEUE_PRI_4 4 #define XGE_HAL_RX_QUEUE_PRI_5 5 #define 
XGE_HAL_RX_QUEUE_PRI_6 6 -#define XGE_HAL_RX_QUEUE_PRI_7 7 /* lowest */ +#define XGE_HAL_RX_QUEUE_PRI_7 7 /* lowest */ u64 rx_w_round_robin_0; u64 rx_w_round_robin_1; @@ -771,21 +765,21 @@ typedef struct { #define XGE_HAL_MAC_INT_STATUS_RMAC_INT BIT(1) u64 mac_tmac_err_reg; -#define XGE_HAL_TMAC_ECC_DB_ERR BIT(15) -#define XGE_HAL_TMAC_TX_BUF_OVRN BIT(23) -#define XGE_HAL_TMAC_TX_CRI_ERR BIT(31) -#define XGE_HAL_TMAC_TX_SM_ERR BIT(39) +#define XGE_HAL_TMAC_ECC_DB_ERR BIT(15) +#define XGE_HAL_TMAC_TX_BUF_OVRN BIT(23) +#define XGE_HAL_TMAC_TX_CRI_ERR BIT(31) +#define XGE_HAL_TMAC_TX_SM_ERR BIT(39) u64 mac_tmac_err_mask; u64 mac_tmac_err_alarm; u64 mac_rmac_err_reg; -#define XGE_HAL_RMAC_RX_BUFF_OVRN BIT(0) -#define XGE_HAL_RMAC_RTH_SPDM_ECC_SG_ERR BIT(0) -#define XGE_HAL_RMAC_RTS_ECC_DB_ERR BIT(0) -#define XGE_HAL_RMAC_ECC_DB_ERR BIT(0) -#define XGE_HAL_RMAC_RTH_SPDM_ECC_DB_ERR BIT(0) -#define XGE_HAL_RMAC_LINK_STATE_CHANGE_INT BIT(0) -#define XGE_HAL_RMAC_RX_SM_ERR BIT(39) +#define XGE_HAL_RMAC_RX_BUFF_OVRN BIT(0) +#define XGE_HAL_RMAC_RTH_SPDM_ECC_SG_ERR BIT(0) +#define XGE_HAL_RMAC_RTS_ECC_DB_ERR BIT(0) +#define XGE_HAL_RMAC_ECC_DB_ERR BIT(0) +#define XGE_HAL_RMAC_RTH_SPDM_ECC_DB_ERR BIT(0) +#define XGE_HAL_RMAC_LINK_STATE_CHANGE_INT BIT(0) +#define XGE_HAL_RMAC_RX_SM_ERR BIT(39) u64 mac_rmac_err_mask; u64 mac_rmac_err_alarm; @@ -846,7 +840,7 @@ typedef struct { u8 unused16[0x8]; /* - u64 rmac_addr_cfg; + u64 rmac_addr_cfg; #define XGE_HAL_RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n) #define XGE_HAL_RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n) #define XGE_HAL_RMAC_ADDR_BCAST_EN vBIT(0)_48 @@ -874,12 +868,12 @@ typedef struct { #define XGE_HAL_MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4) #define XGE_HAL_MAC_LINK_UTIL_DISABLE (XGE_HAL_MAC_TX_LINK_UTIL_DISABLE | \ - XGE_HAL_MAC_RX_LINK_UTIL_DISABLE) + XGE_HAL_MAC_RX_LINK_UTIL_DISABLE) u64 rmac_invalid_ipg; /* rx traffic steering */ -#define XGE_HAL_MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14) +#define XGE_HAL_MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14) u64 rts_frm_len_n[8]; u64 rts_qos_steering; @@ -891,12 +885,12 @@ typedef struct { u64 rts_q_alternates; u64 rts_default_q; -#define XGE_HAL_RTS_DEFAULT_Q(n) vBIT(n,5,3) +#define XGE_HAL_RTS_DEFAULT_Q(n) vBIT(n,5,3) u64 rts_ctrl; #define XGE_HAL_RTS_CTRL_IGNORE_SNAP_OUI BIT(2) #define XGE_HAL_RTS_CTRL_IGNORE_LLC_CTRL BIT(3) -#define XGE_HAL_RTS_CTRL_ENHANCED_MODE BIT(7) +#define XGE_HAL_RTS_CTRL_ENHANCED_MODE BIT(7) u64 rts_pn_cam_ctrl; #define XGE_HAL_RTS_PN_CAM_CTRL_WE BIT(7) @@ -1024,7 +1018,7 @@ typedef struct { u8 unused17_2[0x700 - 0x5F0]; u64 mac_debug_ctrl; -#define XGE_HAL_MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL +#define XGE_HAL_MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL u8 unused18[0x2800 - 0x2708]; @@ -1073,8 +1067,8 @@ typedef struct { #define XGE_HAL_RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8) u64 mc_rldram_mrs; -#define XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE BIT(39) -#define XGE_HAL_MC_RLDRAM_MRS_ENABLE BIT(47) +#define XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE BIT(39) +#define XGE_HAL_MC_RLDRAM_MRS_ENABLE BIT(47) u64 mc_rldram_interleave; @@ -1087,11 +1081,11 @@ typedef struct { u64 mc_rldram_ref_per; u8 unused21[0x220 - 0x208]; u64 mc_rldram_test_ctrl; -#define XGE_HAL_MC_RLDRAM_TEST_MODE BIT(47) -#define XGE_HAL_MC_RLDRAM_TEST_WRITE BIT(7) -#define XGE_HAL_MC_RLDRAM_TEST_GO BIT(15) -#define XGE_HAL_MC_RLDRAM_TEST_DONE BIT(23) -#define XGE_HAL_MC_RLDRAM_TEST_PASS BIT(31) +#define XGE_HAL_MC_RLDRAM_TEST_MODE BIT(47) +#define XGE_HAL_MC_RLDRAM_TEST_WRITE BIT(7) +#define XGE_HAL_MC_RLDRAM_TEST_GO BIT(15) +#define 
XGE_HAL_MC_RLDRAM_TEST_DONE BIT(23) +#define XGE_HAL_MC_RLDRAM_TEST_PASS BIT(31) u8 unused22[0x240 - 0x228]; u64 mc_rldram_test_add; @@ -1147,16 +1141,16 @@ typedef struct { #define XGE_HAL_XGXS_INT_MASK_RXGXS BIT(1) u64 xgxs_txgxs_err_reg; -#define XGE_HAL_TXGXS_ECC_SG_ERR BIT(7) -#define XGE_HAL_TXGXS_ECC_DB_ERR BIT(15) -#define XGE_HAL_TXGXS_ESTORE_UFLOW BIT(31) -#define XGE_HAL_TXGXS_TX_SM_ERR BIT(39) +#define XGE_HAL_TXGXS_ECC_SG_ERR BIT(7) +#define XGE_HAL_TXGXS_ECC_DB_ERR BIT(15) +#define XGE_HAL_TXGXS_ESTORE_UFLOW BIT(31) +#define XGE_HAL_TXGXS_TX_SM_ERR BIT(39) u64 xgxs_txgxs_err_mask; u64 xgxs_txgxs_err_alarm; u64 xgxs_rxgxs_err_reg; -#define XGE_HAL_RXGXS_ESTORE_OFLOW BIT(7) -#define XGE_HAL_RXGXS_RX_SM_ERR BIT(39) +#define XGE_HAL_RXGXS_ESTORE_OFLOW BIT(7) +#define XGE_HAL_RXGXS_RX_SM_ERR BIT(39) u64 xgxs_rxgxs_err_mask; u64 xgxs_rxgxs_err_alarm; @@ -1185,193 +1179,195 @@ typedef struct { /* Using this strcture to calculate offsets */ typedef struct xge_hal_pci_config_le_t { - u16 vendor_id; // 0x00 - u16 device_id; // 0x02 + u16 vendor_id; // 0x00 + u16 device_id; // 0x02 - u16 command; // 0x04 - u16 status; // 0x06 + u16 command; // 0x04 + u16 status; // 0x06 - u8 revision; // 0x08 - u8 pciClass[3]; // 0x09 + u8 revision; // 0x08 + u8 pciClass[3]; // 0x09 - u8 cache_line_size; // 0x0c - u8 latency_timer; // 0x0d - u8 header_type; // 0x0e - u8 bist; // 0x0f + u8 cache_line_size; // 0x0c + u8 latency_timer; // 0x0d + u8 header_type; // 0x0e + u8 bist; // 0x0f - u32 base_addr0_lo; // 0x10 - u32 base_addr0_hi; // 0x14 + u32 base_addr0_lo; // 0x10 + u32 base_addr0_hi; // 0x14 - u32 base_addr1_lo; // 0x18 - u32 base_addr1_hi; // 0x1C + u32 base_addr1_lo; // 0x18 + u32 base_addr1_hi; // 0x1C - u32 not_Implemented1; // 0x20 - u32 not_Implemented2; // 0x24 + u32 not_Implemented1; // 0x20 + u32 not_Implemented2; // 0x24 - u32 cardbus_cis_pointer; // 0x28 + u32 cardbus_cis_pointer; // 0x28 - u16 subsystem_vendor_id; // 0x2c - u16 subsystem_id; // 0x2e + u16 subsystem_vendor_id; // 0x2c + u16 subsystem_id; // 0x2e - u32 rom_base; // 0x30 - u8 capabilities_pointer; // 0x34 - u8 rsvd_35[3]; // 0x35 - u32 rsvd_38; // 0x38 + u32 rom_base; // 0x30 + u8 capabilities_pointer; // 0x34 + u8 rsvd_35[3]; // 0x35 + u32 rsvd_38; // 0x38 - u8 interrupt_line; // 0x3c - u8 interrupt_pin; // 0x3d - u8 min_grant; // 0x3e - u8 max_latency; // 0x3f + u8 interrupt_line; // 0x3c + u8 interrupt_pin; // 0x3d + u8 min_grant; // 0x3e + u8 max_latency; // 0x3f - u8 msi_cap_id; // 0x40 - u8 msi_next_ptr; // 0x41 - u16 msi_control; // 0x42 - u32 msi_lower_address; // 0x44 - u32 msi_higher_address; // 0x48 - u16 msi_data; // 0x4c - u16 msi_unused; // 0x4e + u8 msi_cap_id; // 0x40 + u8 msi_next_ptr; // 0x41 + u16 msi_control; // 0x42 + u32 msi_lower_address; // 0x44 + u32 msi_higher_address; // 0x48 + u16 msi_data; // 0x4c + u16 msi_unused; // 0x4e - u8 vpd_cap_id; // 0x50 - u8 vpd_next_cap; // 0x51 - u16 vpd_addr; // 0x52 - u32 vpd_data; // 0x54 + u8 vpd_cap_id; // 0x50 + u8 vpd_next_cap; // 0x51 + u16 vpd_addr; // 0x52 + u32 vpd_data; // 0x54 - u8 rsvd_b0[8]; // 0x58 + u8 rsvd_b0[8]; // 0x58 - u8 pcix_cap; // 0x60 - u8 pcix_next_cap; // 0x61 - u16 pcix_command; // 0x62 + u8 pcix_cap; // 0x60 + u8 pcix_next_cap; // 0x61 + u16 pcix_command; // 0x62 - u32 pcix_status; // 0x64 + u32 pcix_status; // 0x64 - u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68]; + u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68]; } xge_hal_pci_config_le_t; // 0x100 typedef struct xge_hal_pci_config_t { #ifdef XGE_OS_HOST_BIG_ENDIAN - 
u16 device_id; // 0x02 - u16 vendor_id; // 0x00 + u16 device_id; // 0x02 + u16 vendor_id; // 0x00 - u16 status; // 0x06 - u16 command; // 0x04 + u16 status; // 0x06 + u16 command; // 0x04 - u8 pciClass[3]; // 0x09 - u8 revision; // 0x08 + u8 pciClass[3]; // 0x09 + u8 revision; // 0x08 - u8 bist; // 0x0f - u8 header_type; // 0x0e - u8 latency_timer; // 0x0d - u8 cache_line_size; // 0x0c + u8 bist; // 0x0f + u8 header_type; // 0x0e + u8 latency_timer; // 0x0d + u8 cache_line_size; // 0x0c - u32 base_addr0_lo; // 0x10 - u32 base_addr0_hi; // 0x14 + u32 base_addr0_lo; // 0x10 + u32 base_addr0_hi; // 0x14 - u32 base_addr1_lo; // 0x18 - u32 base_addr1_hi; // 0x1C + u32 base_addr1_lo; // 0x18 + u32 base_addr1_hi; // 0x1C - u32 not_Implemented1; // 0x20 - u32 not_Implemented2; // 0x24 + u32 not_Implemented1; // 0x20 + u32 not_Implemented2; // 0x24 - u32 cardbus_cis_pointer; // 0x28 + u32 cardbus_cis_pointer; // 0x28 - u16 subsystem_id; // 0x2e - u16 subsystem_vendor_id; // 0x2c + u16 subsystem_id; // 0x2e + u16 subsystem_vendor_id; // 0x2c - u32 rom_base; // 0x30 - u8 rsvd_35[3]; // 0x35 - u8 capabilities_pointer; // 0x34 - u32 rsvd_38; // 0x38 + u32 rom_base; // 0x30 + u8 rsvd_35[3]; // 0x35 + u8 capabilities_pointer; // 0x34 + u32 rsvd_38; // 0x38 - u8 max_latency; // 0x3f - u8 min_grant; // 0x3e - u8 interrupt_pin; // 0x3d - u8 interrupt_line; // 0x3c + u8 max_latency; // 0x3f + u8 min_grant; // 0x3e + u8 interrupt_pin; // 0x3d + u8 interrupt_line; // 0x3c - u16 msi_control; // 0x42 - u8 msi_next_ptr; // 0x41 - u8 msi_cap_id; // 0x40 - u32 msi_lower_address; // 0x44 - u32 msi_higher_address; // 0x48 - u16 msi_unused; // 0x4e - u16 msi_data; // 0x4c + u16 msi_control; // 0x42 + u8 msi_next_ptr; // 0x41 + u8 msi_cap_id; // 0x40 + u32 msi_lower_address; // 0x44 + u32 msi_higher_address; // 0x48 + u16 msi_unused; // 0x4e + u16 msi_data; // 0x4c - u16 vpd_addr; // 0x52 - u8 vpd_next_cap; // 0x51 - u8 vpd_cap_id; // 0x50 - u32 vpd_data; // 0x54 + u16 vpd_addr; // 0x52 + u8 vpd_next_cap; // 0x51 + u8 vpd_cap_id; // 0x50 + u32 vpd_data; // 0x54 - u8 rsvd_b0[8]; // 0x58 + u8 rsvd_b0[8]; // 0x58 - u16 pcix_command; // 0x62 - u8 pcix_next_cap; // 0x61 - u8 pcix_cap; // 0x60 + u16 pcix_command; // 0x62 + u8 pcix_next_cap; // 0x61 + u8 pcix_cap; // 0x60 - u32 pcix_status; // 0x64 + u32 pcix_status; // 0x64 #else - u16 vendor_id; // 0x00 - u16 device_id; // 0x02 + u16 vendor_id; // 0x00 + u16 device_id; // 0x02 - u16 command; // 0x04 - u16 status; // 0x06 + u16 command; // 0x04 + u16 status; // 0x06 - u8 revision; // 0x08 - u8 pciClass[3]; // 0x09 + u8 revision; // 0x08 + u8 pciClass[3]; // 0x09 - u8 cache_line_size; // 0x0c - u8 latency_timer; // 0x0d - u8 header_type; // 0x0e - u8 bist; // 0x0f + u8 cache_line_size; // 0x0c + u8 latency_timer; // 0x0d + u8 header_type; // 0x0e + u8 bist; // 0x0f - u32 base_addr0_lo; // 0x10 - u32 base_addr0_hi; // 0x14 + u32 base_addr0_lo; // 0x10 + u32 base_addr0_hi; // 0x14 - u32 base_addr1_lo; // 0x18 - u32 base_addr1_hi; // 0x1C + u32 base_addr1_lo; // 0x18 + u32 base_addr1_hi; // 0x1C - u32 not_Implemented1; // 0x20 - u32 not_Implemented2; // 0x24 + u32 not_Implemented1; // 0x20 + u32 not_Implemented2; // 0x24 - u32 cardbus_cis_pointer; // 0x28 + u32 cardbus_cis_pointer; // 0x28 - u16 subsystem_vendor_id; // 0x2c - u16 subsystem_id; // 0x2e + u16 subsystem_vendor_id; // 0x2c + u16 subsystem_id; // 0x2e - u32 rom_base; // 0x30 - u8 capabilities_pointer; // 0x34 - u8 rsvd_35[3]; // 0x35 - u32 rsvd_38; // 0x38 + u32 rom_base; // 0x30 + u8 capabilities_pointer; // 0x34 + 
u8 rsvd_35[3]; // 0x35 + u32 rsvd_38; // 0x38 - u8 interrupt_line; // 0x3c - u8 interrupt_pin; // 0x3d - u8 min_grant; // 0x3e - u8 max_latency; // 0x3f + u8 interrupt_line; // 0x3c + u8 interrupt_pin; // 0x3d + u8 min_grant; // 0x3e + u8 max_latency; // 0x3f - u8 msi_cap_id; // 0x40 - u8 msi_next_ptr; // 0x41 - u16 msi_control; // 0x42 - u32 msi_lower_address; // 0x44 - u32 msi_higher_address; // 0x48 - u16 msi_data; // 0x4c - u16 msi_unused; // 0x4e + u8 msi_cap_id; // 0x40 + u8 msi_next_ptr; // 0x41 + u16 msi_control; // 0x42 + u32 msi_lower_address; // 0x44 + u32 msi_higher_address; // 0x48 + u16 msi_data; // 0x4c + u16 msi_unused; // 0x4e - u8 vpd_cap_id; // 0x50 - u8 vpd_next_cap; // 0x51 - u16 vpd_addr; // 0x52 - u32 vpd_data; // 0x54 + u8 vpd_cap_id; // 0x50 + u8 vpd_next_cap; // 0x51 + u16 vpd_addr; // 0x52 + u32 vpd_data; // 0x54 - u8 rsvd_b0[8]; // 0x58 + u8 rsvd_b0[8]; // 0x58 - u8 pcix_cap; // 0x60 - u8 pcix_next_cap; // 0x61 - u16 pcix_command; // 0x62 + u8 pcix_cap; // 0x60 + u8 pcix_next_cap; // 0x61 + u16 pcix_command; // 0x62 - u32 pcix_status; // 0x64 + u32 pcix_status; // 0x64 #endif - u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68]; + u8 rsvd_b1[XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE-0x68]; } xge_hal_pci_config_t; // 0x100 -#define XGE_HAL_REG_SPACE sizeof(xge_hal_pci_bar0_t) -#define XGE_HAL_EEPROM_SIZE (0x01 << 11) +#define XGE_HAL_REG_SPACE sizeof(xge_hal_pci_bar0_t) +#define XGE_HAL_EEPROM_SIZE (0x01 << 11) + +__EXTERN_END_DECLS #endif /* XGE_HAL_REGS_H */ diff --git a/sys/dev/nxge/include/xgehal-ring.h b/sys/dev/nxge/include/xgehal-ring.h index c3efdf0..0e5b7e6 100644 --- a/sys/dev/nxge/include/xgehal-ring.h +++ b/sys/dev/nxge/include/xgehal-ring.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-ring.h - * - * Description: HAL Rx ring object functionality - * - * Created: 19 May 2004 - */ - #ifndef XGE_HAL_RING_H #define XGE_HAL_RING_H @@ -44,52 +36,52 @@ __EXTERN_BEGIN_DECLS /* HW ring configuration */ -#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000 +#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000 -#define XGE_HAL_RXD_T_CODE_OK 0x0 -#define XGE_HAL_RXD_T_CODE_PARITY 0x1 -#define XGE_HAL_RXD_T_CODE_ABORT 0x2 -#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3 -#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4 +#define XGE_HAL_RXD_T_CODE_OK 0x0 +#define XGE_HAL_RXD_T_CODE_PARITY 0x1 +#define XGE_HAL_RXD_T_CODE_ABORT 0x2 +#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3 +#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4 #define XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO 0x5 -#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6 -#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7 -#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8 -#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC -#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF +#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6 +#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7 +#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8 +#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC +#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF -#define XGE_HAL_RING_USE_MTU -1 +#define XGE_HAL_RING_USE_MTU -1 /* control_1 and control_2 formatting - same for all buffer modes */ #define XGE_HAL_RXD_GET_L3_CKSUM(control_1) ((u16)(control_1>>16) & 0xFFFF) #define XGE_HAL_RXD_GET_L4_CKSUM(control_1) ((u16)(control_1 & 0xFFFF)) -#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16) +#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16) #define XGE_HAL_RXD_SET_VLAN_TAG(control_2, val) control_2 |= (u16)val -#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF)) +#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF)) -#define 
XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */ +#define XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */ #define XGE_HAL_RXD_NOT_COMPLETED BIT(0) /* control_2 */ -#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15)) -#define XGE_HAL_RXD_GET_T_CODE(control_1) \ - ((control_1 & XGE_HAL_RXD_T_CODE)>>48) +#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15)) +#define XGE_HAL_RXD_GET_T_CODE(control_1) \ + ((control_1 & XGE_HAL_RXD_T_CODE)>>48) #define XGE_HAL_RXD_SET_T_CODE(control_1, val) \ - (control_1 |= (((u64)val & 0xF) << 48)) - -#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2) -#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8) -#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \ - (u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37)) -#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \ - (u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32) -#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24) -#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27) -#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28) -#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29) -#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30) -#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31) + (control_1 |= (((u64)val & 0xF) << 48)) + +#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2) +#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8) +#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \ + (u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37)) +#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \ + (u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32) +#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24) +#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27) +#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28) +#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29) +#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30) +#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31) #define XGE_HAL_RXD_FRAME_TCP_OR_UDP (XGE_HAL_RXD_FRAME_PROTO_TCP | \ - XGE_HAL_RXD_FRAME_PROTO_UDP) + XGE_HAL_RXD_FRAME_PROTO_UDP) /** * enum xge_hal_frame_type_e - Ethernet frame format. @@ -101,10 +93,10 @@ __EXTERN_BEGIN_DECLS * Ethernet frame format. */ typedef enum xge_hal_frame_type_e { - XGE_HAL_FRAME_TYPE_DIX = 0x0, - XGE_HAL_FRAME_TYPE_LLC = 0x1, - XGE_HAL_FRAME_TYPE_SNAP = 0x2, - XGE_HAL_FRAME_TYPE_IPX = 0x3, + XGE_HAL_FRAME_TYPE_DIX = 0x0, + XGE_HAL_FRAME_TYPE_LLC = 0x1, + XGE_HAL_FRAME_TYPE_SNAP = 0x2, + XGE_HAL_FRAME_TYPE_IPX = 0x3, } xge_hal_frame_type_e; /** @@ -120,14 +112,14 @@ typedef enum xge_hal_frame_type_e { * Higher layer ethernet protocols and options. 
*/ typedef enum xge_hal_frame_proto_e { - XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80, - XGE_HAL_FRAME_PROTO_IPV4 = 0x10, - XGE_HAL_FRAME_PROTO_IPV6 = 0x08, - XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04, - XGE_HAL_FRAME_PROTO_TCP = 0x02, - XGE_HAL_FRAME_PROTO_UDP = 0x01, - XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \ - XGE_HAL_FRAME_PROTO_UDP) + XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80, + XGE_HAL_FRAME_PROTO_IPV4 = 0x10, + XGE_HAL_FRAME_PROTO_IPV6 = 0x08, + XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04, + XGE_HAL_FRAME_PROTO_TCP = 0x02, + XGE_HAL_FRAME_PROTO_UDP = 0x01, + XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \ + XGE_HAL_FRAME_PROTO_UDP) } xge_hal_frame_proto_e; /* @@ -137,12 +129,12 @@ typedef struct { u64 host_control; u64 control_1; u64 control_2; -#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) -#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16) +#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) +#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16) #define XGE_HAL_RXD_1_GET_BUFFER0_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,0,16))>>48) + (int)((Control_2 & vBIT(0xFFFF,0,16))>>48) #define XGE_HAL_RXD_1_GET_RTH_VALUE(Control_2) \ - (u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16) + (u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16) u64 buffer0_ptr; } xge_hal_ring_rxd_1_t; @@ -154,20 +146,20 @@ typedef struct { u64 control_1; u64 control_2; -#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8) -#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8) -#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) -#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16) -#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) -#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16) +#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8) +#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8) +#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) +#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16) +#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) +#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16) #define XGE_HAL_RXD_3_GET_BUFFER0_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFF,8,8))>>48) + (int)((Control_2 & vBIT(0xFF,8,8))>>48) #define XGE_HAL_RXD_3_GET_BUFFER1_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,16,16))>>32) + (int)((Control_2 & vBIT(0xFFFF,16,16))>>32) #define XGE_HAL_RXD_3_GET_BUFFER2_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,32,16))>>16) + (int)((Control_2 & vBIT(0xFFFF,32,16))>>16) u64 buffer0_ptr; u64 buffer1_ptr; @@ -187,33 +179,33 @@ typedef struct { #endif -#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16) -#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16) -#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16) -#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16) +#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16) +#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16) +#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16) +#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16) #define XGE_HAL_RXD_5_GET_BUFFER3_SIZE(Control_3) \ - (int)((Control_3 & vBIT(0xFFFF,32,16))>>16) + (int)((Control_3 & vBIT(0xFFFF,32,16))>>16) #define XGE_HAL_RXD_5_GET_BUFFER4_SIZE(Control_3) \ - (int)((Control_3 & vBIT(0xFFFF,48,16))) + (int)((Control_3 & vBIT(0xFFFF,48,16))) u64 control_1; u64 control_2; -#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) -#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16) 
-#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) -#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16) -#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) -#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16) +#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) +#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16) +#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) +#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16) +#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) +#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16) #define XGE_HAL_RXD_5_GET_BUFFER0_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,0,16))>>48) + (int)((Control_2 & vBIT(0xFFFF,0,16))>>48) #define XGE_HAL_RXD_5_GET_BUFFER1_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,16,16))>>32) + (int)((Control_2 & vBIT(0xFFFF,16,16))>>32) #define XGE_HAL_RXD_5_GET_BUFFER2_SIZE(Control_2) \ - (int)((Control_2 & vBIT(0xFFFF,32,16))>>16) + (int)((Control_2 & vBIT(0xFFFF,32,16))>>16) u64 buffer0_ptr; u64 buffer1_ptr; u64 buffer2_ptr; @@ -222,32 +214,32 @@ typedef struct { } xge_hal_ring_rxd_5_t; #define XGE_HAL_RXD_GET_RTH_SPDM_HIT(Control_1) \ - (u8)((Control_1 & BIT(18))>>45) + (u8)((Control_1 & BIT(18))>>45) #define XGE_HAL_RXD_GET_RTH_IT_HIT(Control_1) \ - (u8)((Control_1 & BIT(19))>>44) + (u8)((Control_1 & BIT(19))>>44) #define XGE_HAL_RXD_GET_RTH_HASH_TYPE(Control_1) \ - (u8)((Control_1 & vBIT(0xF,20,4))>>40) - -#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0 -#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1 -#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2 -#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3 -#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4 -#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5 -#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6 -#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7 -#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8 -#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9 + (u8)((Control_1 & vBIT(0xF,20,4))>>40) + +#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0 +#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1 +#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2 +#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3 +#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4 +#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5 +#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6 +#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7 +#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8 +#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9 typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE]; -#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8 -#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0 +#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8 +#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0 #define XGE_HAL_RING_RXD_SIZEOF(n) \ (n==1 ? sizeof(xge_hal_ring_rxd_1_t) : \ - (n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \ - sizeof(xge_hal_ring_rxd_5_t))) + (n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \ + sizeof(xge_hal_ring_rxd_5_t))) #define XGE_HAL_RING_RXDS_PER_BLOCK(n) \ (n==1 ? 127 : (n==3 ? 85 : 63)) @@ -274,14 +266,14 @@ typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE]; * purposes. */ typedef struct xge_hal_ring_rxd_priv_t { - dma_addr_t dma_addr; - pci_dma_h dma_handle; - ptrdiff_t dma_offset; + dma_addr_t dma_addr; + pci_dma_h dma_handle; + ptrdiff_t dma_offset; #ifdef XGE_DEBUG_ASSERT - xge_hal_mempool_dma_t *dma_object; + xge_hal_mempool_dma_t *dma_object; #endif #ifdef XGE_OS_MEMORY_CHECK - int allocated; + int allocated; #endif } xge_hal_ring_rxd_priv_t; @@ -317,17 +309,17 @@ typedef struct xge_hal_ring_rxd_priv_t { * CPU cache performance. 
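The descriptor GET/SET macros in this header depend on the HAL's MSB-first bit numbering: BIT(loc) and vBIT(val, loc, sz) treat bit 0 as the most significant bit of a 64-bit word, which is why a 16-bit field at offset 0 is recovered with a right shift of 48 and why XGE_HAL_RXD_GET_RTH_SPDM_HIT shifts BIT(18) down by 45 (63 - 18). A minimal standalone sketch of that convention follows; the macro bodies are copied from this patch and the sample length is arbitrary.

/*
 * Demonstrates the MSB-first bit numbering used by the RXD field macros.
 * Compiles stand-alone; not part of the driver sources.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define BIT(loc)           (0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))

int
main(void)
{
	/* Buffer-0 size occupies big-endian bits 0..15, i.e. the top 16 bits. */
	u64 control_2 = vBIT(1514, 0, 16);	/* as in ..._SET_BUFFER0_SIZE(1514) */
	int size = (int)((control_2 & vBIT(0xFFFF, 0, 16)) >> 48);

	printf("buffer0 size = %d\n", size);	/* prints 1514 */
	printf("BIT(18) == 1ULL << 45: %d\n", BIT(18) == (1ULL << 45));
	return (0);
}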
*/ typedef struct xge_hal_ring_t { - xge_hal_channel_t channel; - int buffer_mode; - int indicate_max_pkts; - xge_hal_ring_config_t *config; - int rxd_size; - int rxd_priv_size; - int rxds_per_block; - xge_hal_mempool_t *mempool; - int rxdblock_priv_size; - void **reserved_rxds_arr; - int cmpl_cnt; + xge_hal_channel_t channel; + int buffer_mode; + int indicate_max_pkts; + xge_hal_ring_config_t *config; + int rxd_size; + int rxd_priv_size; + int rxds_per_block; + xge_hal_mempool_t *mempool; + int rxdblock_priv_size; + void **reserved_rxds_arr; + int cmpl_cnt; } __xge_os_attr_cacheline_aligned xge_hal_ring_t; /** @@ -343,7 +335,7 @@ typedef struct xge_hal_ring_t { * corrupted. * @frame: See xge_hal_frame_type_e{}. * @proto: Reporting bits for various higher-layer protocols, including (but - * note restricted to) TCP and UDP. See xge_hal_frame_proto_e{}. + * note restricted to) TCP and UDP. See xge_hal_frame_proto_e{}. * @vlan: VLAN tag extracted from the received frame. * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Xframe II * hardware if RTH is enabled. @@ -355,22 +347,22 @@ typedef struct xge_hal_ring_t { * @reserved_pad: Unused byte. */ typedef struct xge_hal_dtr_info_t { - int l3_cksum; - int l4_cksum; - int frame; /* zero or more of xge_hal_frame_type_e flags */ - int proto; /* zero or more of xge_hal_frame_proto_e flags */ - int vlan; - u32 rth_value; - u8 rth_it_hit; - u8 rth_spdm_hit; - u8 rth_hash_type; - u8 reserved_pad; + int l3_cksum; + int l4_cksum; + int frame; /* zero or more of xge_hal_frame_type_e flags */ + int proto; /* zero or more of xge_hal_frame_proto_e flags */ + int vlan; + u32 rth_value; + u8 rth_it_hit; + u8 rth_spdm_hit; + u8 rth_hash_type; + u8 reserved_pad; } xge_hal_dtr_info_t; /* ========================== RING PRIVATE API ============================ */ xge_hal_status_e __hal_ring_open(xge_hal_channel_h channelh, - xge_hal_channel_attr_t *attr); + xge_hal_channel_attr_t *attr); void __hal_ring_close(xge_hal_channel_h channelh); @@ -383,7 +375,7 @@ void __hal_ring_prc_enable(xge_hal_channel_h channelh); void __hal_ring_prc_disable(xge_hal_channel_h channelh); xge_hal_status_e __hal_ring_initial_replenish(xge_hal_channel_t *channel, - xge_hal_channel_reopen_e reopen); + xge_hal_channel_reopen_e reopen); #if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_RING) #define __HAL_STATIC_RING @@ -400,7 +392,7 @@ __hal_ring_block_next_pointer(xge_hal_ring_block_t *block); __HAL_STATIC_RING __HAL_INLINE_RING void __hal_ring_block_next_pointer_set(xge_hal_ring_block_t*block, - dma_addr_t dma_next); + dma_addr_t dma_next); __HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t* __hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh); @@ -414,31 +406,31 @@ __HAL_STATIC_RING __HAL_INLINE_RING void* xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); __HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size); +xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - xge_hal_dtr_info_t *ext_info); + xge_hal_dtr_info_t *ext_info); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t *dma_pointer, int *pkt_length); + dma_addr_t *dma_pointer, int *pkt_length); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, 
dma_addr_t dma_pointers[], - int sizes[]); + int sizes[]); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t dma_pointers[], int sizes[]); + dma_addr_t dma_pointers[], int sizes[]); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], - int sizes[]); + int sizes[]); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t dma_pointer[], int sizes[]); + dma_addr_t dma_pointer[], int sizes[]); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); @@ -454,12 +446,12 @@ xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); __HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, - u8 *t_code); + u8 *t_code); __HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh); -__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e +__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh); #else /* XGE_FASTPATH_EXTERN */ diff --git a/sys/dev/nxge/include/xgehal-stats.h b/sys/dev/nxge/include/xgehal-stats.h index ffe0e6e..ec093e7 100644 --- a/sys/dev/nxge/include/xgehal-stats.h +++ b/sys/dev/nxge/include/xgehal-stats.h @@ -26,15 +26,6 @@ * $FreeBSD$ */ -/* - xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; - * FileName : xgehal-stats.h - * - * Description: HW statistics object - * - * Created: 2 June 2004 - */ - #ifndef XGE_HAL_STATS_H #define XGE_HAL_STATS_H @@ -524,7 +515,7 @@ typedef struct xge_hal_stats_hw_info_t { u32 wr_disc_cnt; u32 rd_rtry_wr_ack_cnt; -/* DMA Transaction statistics. */ +/* DMA Transaction statistics. */ u32 txp_wr_cnt; u32 txd_rd_cnt; u32 txd_wr_cnt; @@ -696,7 +687,7 @@ typedef struct xge_hal_stats_hw_info_t { u32 wr_disc_cnt; u32 wr_rtry_cnt; -/* PCI/PCI-X Write / DMA Transaction statistics. */ +/* PCI/PCI-X Write / DMA Transaction statistics. */ u32 txp_wr_cnt; u32 rd_rtry_wr_ack_cnt; u32 txd_wr_cnt; @@ -805,34 +796,34 @@ typedef struct xge_hal_stats_hw_info_t { * @total_posts_dtrs_many: Total number of posts on the channel that involving * more than one descriptor. * @total_posts_frags_many: Total number of fragments posted on the channel - * during post requests of multiple descriptors. + * during post requests of multiple descriptors. * @total_posts_dang_dtrs: Total number of posts on the channel involving * dangling descriptors. * @total_posts_dang_frags: Total number of dangling fragments posted on the channel - * during post request containing multiple descriptors. + * during post request containing multiple descriptors. * * HAL channel counters. * See also: xge_hal_stats_device_info_t{}. 
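The ring fast-path declarations above are normally driven from the receive completion path: the driver walks completed descriptors, inspects the per-descriptor xge_hal_dtr_info_t, and recycles each descriptor. Below is a minimal sketch assuming 1-buffer mode; the function name and the polling loop are illustrative only, and t_code error handling plus buffer replenishment are omitted.

#include <dev/nxge/include/xgehal.h>

static void
example_rx_completion(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;
	xge_hal_dtr_info_t ext_info;
	dma_addr_t dma_addr;
	int pkt_length;
	u8 t_code;

	while(xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
	    XGE_HAL_OK) {
		xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma_addr, &pkt_length);
		xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);

		/* Checksum results are meaningful only for TCP/UDP frames. */
		if((ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
		    (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
		    (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK)) {
			/* mark the mbuf checksum-verified, pass it up (not shown) */
		}

		xge_hal_ring_dtr_free(channelh, dtrh);
	}
}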
*/ typedef struct xge_hal_stats_channel_info_t { - u32 full_cnt; - u32 usage_max; - u32 reserve_free_swaps_cnt; - u32 avg_compl_per_intr_cnt; - u32 total_compl_cnt; - u32 total_posts; - u32 total_posts_many; - u32 total_buffers; - u32 copied_frags; - u32 copied_buffers; - u32 avg_buffers_per_post; - u32 avg_buffer_size; - u32 avg_post_size; - u32 ring_bump_cnt; - u32 total_posts_dtrs_many; - u32 total_posts_frags_many; - u32 total_posts_dang_dtrs; - u32 total_posts_dang_frags; + u32 full_cnt; + u32 usage_max; + u32 reserve_free_swaps_cnt; + u32 avg_compl_per_intr_cnt; + u32 total_compl_cnt; + u32 total_posts; + u32 total_posts_many; + u32 total_buffers; + u32 copied_frags; + u32 copied_buffers; + u32 avg_buffers_per_post; + u32 avg_buffer_size; + u32 avg_post_size; + u32 ring_bump_cnt; + u32 total_posts_dtrs_many; + u32 total_posts_frags_many; + u32 total_posts_dang_dtrs; + u32 total_posts_dang_frags; } xge_hal_stats_channel_info_t; /** @@ -843,10 +834,10 @@ typedef struct xge_hal_stats_channel_info_t { * @tick_period: tick count for each cycle */ typedef struct xge_hal_xpak_counter_t { - u32 excess_temp; - u32 excess_bias_current; - u32 excess_laser_output; - u32 tick_period; + u32 excess_temp; + u32 excess_bias_current; + u32 excess_laser_output; + u32 tick_period; } xge_hal_xpak_counter_t; /** @@ -865,18 +856,18 @@ typedef struct xge_hal_xpak_counter_t { * @warn_laser_output_power_low: warn_laser_output_power_low count value */ typedef struct xge_hal_stats_xpak_t { - u16 alarm_transceiver_temp_high; - u16 alarm_transceiver_temp_low; - u16 alarm_laser_bias_current_high; - u16 alarm_laser_bias_current_low; - u16 alarm_laser_output_power_high; - u16 alarm_laser_output_power_low; - u16 warn_transceiver_temp_high; - u16 warn_transceiver_temp_low; - u16 warn_laser_bias_current_high; - u16 warn_laser_bias_current_low; - u16 warn_laser_output_power_high; - u16 warn_laser_output_power_low; + u16 alarm_transceiver_temp_high; + u16 alarm_transceiver_temp_low; + u16 alarm_laser_bias_current_high; + u16 alarm_laser_bias_current_low; + u16 alarm_laser_output_power_high; + u16 alarm_laser_output_power_low; + u16 warn_transceiver_temp_high; + u16 warn_transceiver_temp_low; + u16 warn_laser_bias_current_high; + u16 warn_laser_bias_current_low; + u16 warn_laser_output_power_high; + u16 warn_laser_output_power_low; } xge_hal_stats_xpak_t; @@ -955,83 +946,55 @@ typedef struct xge_hal_stats_sw_err_t { * See also: xge_hal_stats_channel_info_t{}. 
*/ typedef struct xge_hal_stats_device_info_t { - u32 rx_traffic_intr_cnt; - u32 tx_traffic_intr_cnt; - u32 txpic_intr_cnt; - u32 txdma_intr_cnt; - u32 pfc_err_cnt; - u32 tda_err_cnt; - u32 pcc_err_cnt; - u32 tti_err_cnt; - u32 lso_err_cnt; - u32 tpa_err_cnt; - u32 sm_err_cnt; - u32 txmac_intr_cnt; - u32 mac_tmac_err_cnt; - u32 txxgxs_intr_cnt; - u32 xgxs_txgxs_err_cnt; - u32 rxpic_intr_cnt; - u32 rxdma_intr_cnt; - u32 rc_err_cnt; - u32 rpa_err_cnt; - u32 rda_err_cnt; - u32 rti_err_cnt; - u32 rxmac_intr_cnt; - u32 mac_rmac_err_cnt; - u32 rxxgxs_intr_cnt; - u32 xgxs_rxgxs_err_cnt; - u32 mc_intr_cnt; - u32 not_traffic_intr_cnt; - u32 not_xge_intr_cnt; - u32 traffic_intr_cnt; - u32 total_intr_cnt; - u32 soft_reset_cnt; - u32 rxufca_hi_adjust_cnt; - u32 rxufca_lo_adjust_cnt; - u32 bimodal_hi_adjust_cnt; - u32 bimodal_lo_adjust_cnt; + u32 rx_traffic_intr_cnt; + u32 tx_traffic_intr_cnt; + u32 txpic_intr_cnt; + u32 txdma_intr_cnt; + u32 pfc_err_cnt; + u32 tda_err_cnt; + u32 pcc_err_cnt; + u32 tti_err_cnt; + u32 lso_err_cnt; + u32 tpa_err_cnt; + u32 sm_err_cnt; + u32 txmac_intr_cnt; + u32 mac_tmac_err_cnt; + u32 txxgxs_intr_cnt; + u32 xgxs_txgxs_err_cnt; + u32 rxpic_intr_cnt; + u32 rxdma_intr_cnt; + u32 rc_err_cnt; + u32 rpa_err_cnt; + u32 rda_err_cnt; + u32 rti_err_cnt; + u32 rxmac_intr_cnt; + u32 mac_rmac_err_cnt; + u32 rxxgxs_intr_cnt; + u32 xgxs_rxgxs_err_cnt; + u32 mc_intr_cnt; + u32 not_traffic_intr_cnt; + u32 not_xge_intr_cnt; + u32 traffic_intr_cnt; + u32 total_intr_cnt; + u32 soft_reset_cnt; + u32 rxufca_hi_adjust_cnt; + u32 rxufca_lo_adjust_cnt; + u32 bimodal_hi_adjust_cnt; + u32 bimodal_lo_adjust_cnt; #ifdef XGE_HAL_CONFIG_LRO - u32 tot_frms_lroised; - u32 tot_lro_sessions; - u32 lro_frm_len_exceed_cnt; - u32 lro_sg_exceed_cnt; - u32 lro_out_of_seq_pkt_cnt; - u32 lro_dup_pkt_cnt; + u32 tot_frms_lroised; + u32 tot_lro_sessions; + u32 lro_frm_len_exceed_cnt; + u32 lro_sg_exceed_cnt; + u32 lro_out_of_seq_pkt_cnt; + u32 lro_dup_pkt_cnt; #endif } xge_hal_stats_device_info_t; -#ifdef XGEHAL_RNIC - -/** - * struct xge_hal_vp_statistics_t - Virtual Path Statistics - * - * @no_nces: Number of NCEs on Adapter in this VP - * @no_sqs: Number of SQs on Adapter in this VP - * @no_srqs: Number of SRQs on Adapter in this VP - * @no_cqrqs: Number of CQRQs on Adapter in this VP - * @no_tcp_sessions: Number of TCP sessions on Adapter in this VP - * @no_lro_sessions: Number of LRO sessions on Adapter in this VP - * @no_spdm_sessions: Number of SPDM sessions on Adapter in this VP - * - * This structure contains fields to keep statistics of virtual path - */ -typedef struct xge_hal_vp_statistics_t { - u32 no_nces; - u32 no_sqs; - u32 no_srqs; - u32 no_cqrqs; - u32 no_tcp_sessions; - u32 no_lro_sessions; - u32 no_spdm_sessions; -}xge_hal_vp_statistics_t; - -#endif - - /* ========================== XFRAME ER STATISTICS ======================== */ -#define XGE_HAL_MAC_LINKS 3 -#define XGE_HAL_MAC_AGGREGATORS 2 -#define XGE_HAL_VPATHS 17 +#define XGE_HAL_MAC_LINKS 3 +#define XGE_HAL_MAC_AGGREGATORS 2 +#define XGE_HAL_VPATHS 17 /** * struct xge_hal_stats_link_info_t - XGMAC statistics for a link * @@ -1207,97 +1170,97 @@ typedef struct xge_hal_vp_statistics_t { * queue for mac the link. 
*/ typedef struct xge_hal_stats_link_info_t { - u64 tx_frms; - u64 tx_ttl_eth_octets; - u64 tx_data_octets; - u64 tx_mcst_frms; - u64 tx_bcst_frms; - u64 tx_ucst_frms; - u64 tx_tagged_frms; - u64 tx_vld_ip; - u64 tx_vld_ip_octets; - u64 tx_icmp; - u64 tx_tcp; - u64 tx_rst_tcp; - u64 tx_udp; - u64 tx_unknown_protocol; - u64 tx_parse_error; - u64 tx_pause_ctrl_frms; - u64 tx_lacpdu_frms; - u64 tx_marker_pdu_frms; - u64 tx_marker_resp_pdu_frms; - u64 tx_drop_ip; - u64 tx_xgmii_char1_match; - u64 tx_xgmii_char2_match; - u64 tx_xgmii_column1_match; - u64 tx_xgmii_column2_match; - u64 tx_drop_frms; - u64 tx_any_err_frms; - u64 rx_ttl_frms; - u64 rx_vld_frms; - u64 rx_offld_frms; - u64 rx_ttl_eth_octets; - u64 rx_data_octets; - u64 rx_offld_octets; - u64 rx_vld_mcst_frms; - u64 rx_vld_bcst_frms; - u64 rx_accepted_ucst_frms; - u64 rx_accepted_nucst_frms; - u64 rx_tagged_frms; - u64 rx_long_frms; - u64 rx_usized_frms; - u64 rx_osized_frms; - u64 rx_frag_frms; - u64 rx_jabber_frms; - u64 rx_ttl_64_frms; - u64 rx_ttl_65_127_frms; - u64 rx_ttl_128_255_frms; - u64 rx_ttl_256_511_frms; - u64 rx_ttl_512_1023_frms; - u64 rx_ttl_1024_1518_frms; - u64 rx_ttl_1519_4095_frms; - u64 rx_ttl_40956_8191_frms; - u64 rx_ttl_8192_max_frms; - u64 rx_ttl_gt_max_frms; - u64 rx_ip; - u64 rx_ip_octets; - u64 rx_hdr_err_ip; - u64 rx_icmp; - u64 rx_tcp; - u64 rx_udp; - u64 rx_err_tcp; - u64 rx_pause_cnt; - u64 rx_pause_ctrl_frms; - u64 rx_unsup_ctrl_frms; - u64 rx_in_rng_len_err_frms; - u64 rx_out_rng_len_err_frms; - u64 rx_drop_frms; - u64 rx_discarded_frms; - u64 rx_drop_ip; - u64 rx_err_drp_udp; - u64 rx_lacpdu_frms; - u64 rx_marker_pdu_frms; - u64 rx_marker_resp_pdu_frms; - u64 rx_unknown_pdu_frms; - u64 rx_illegal_pdu_frms; - u64 rx_fcs_discard; - u64 rx_len_discard; - u64 rx_pf_discard; - u64 rx_trash_discard; - u64 rx_rts_discard; - u64 rx_wol_discard; - u64 rx_red_discard; - u64 rx_ingm_full_discard; - u64 rx_xgmii_data_err_cnt; - u64 rx_xgmii_ctrl_err_cnt; - u64 rx_xgmii_err_sym; - u64 rx_xgmii_char1_match; - u64 rx_xgmii_char2_match; - u64 rx_xgmii_column1_match; - u64 rx_xgmii_column2_match; - u64 rx_local_fault; - u64 rx_remote_fault; - u64 rx_queue_full; + u64 tx_frms; + u64 tx_ttl_eth_octets; + u64 tx_data_octets; + u64 tx_mcst_frms; + u64 tx_bcst_frms; + u64 tx_ucst_frms; + u64 tx_tagged_frms; + u64 tx_vld_ip; + u64 tx_vld_ip_octets; + u64 tx_icmp; + u64 tx_tcp; + u64 tx_rst_tcp; + u64 tx_udp; + u64 tx_unknown_protocol; + u64 tx_parse_error; + u64 tx_pause_ctrl_frms; + u64 tx_lacpdu_frms; + u64 tx_marker_pdu_frms; + u64 tx_marker_resp_pdu_frms; + u64 tx_drop_ip; + u64 tx_xgmii_char1_match; + u64 tx_xgmii_char2_match; + u64 tx_xgmii_column1_match; + u64 tx_xgmii_column2_match; + u64 tx_drop_frms; + u64 tx_any_err_frms; + u64 rx_ttl_frms; + u64 rx_vld_frms; + u64 rx_offld_frms; + u64 rx_ttl_eth_octets; + u64 rx_data_octets; + u64 rx_offld_octets; + u64 rx_vld_mcst_frms; + u64 rx_vld_bcst_frms; + u64 rx_accepted_ucst_frms; + u64 rx_accepted_nucst_frms; + u64 rx_tagged_frms; + u64 rx_long_frms; + u64 rx_usized_frms; + u64 rx_osized_frms; + u64 rx_frag_frms; + u64 rx_jabber_frms; + u64 rx_ttl_64_frms; + u64 rx_ttl_65_127_frms; + u64 rx_ttl_128_255_frms; + u64 rx_ttl_256_511_frms; + u64 rx_ttl_512_1023_frms; + u64 rx_ttl_1024_1518_frms; + u64 rx_ttl_1519_4095_frms; + u64 rx_ttl_40956_8191_frms; + u64 rx_ttl_8192_max_frms; + u64 rx_ttl_gt_max_frms; + u64 rx_ip; + u64 rx_ip_octets; + u64 rx_hdr_err_ip; + u64 rx_icmp; + u64 rx_tcp; + u64 rx_udp; + u64 rx_err_tcp; + u64 rx_pause_cnt; + u64 rx_pause_ctrl_frms; + u64 
rx_unsup_ctrl_frms; + u64 rx_in_rng_len_err_frms; + u64 rx_out_rng_len_err_frms; + u64 rx_drop_frms; + u64 rx_discarded_frms; + u64 rx_drop_ip; + u64 rx_err_drp_udp; + u64 rx_lacpdu_frms; + u64 rx_marker_pdu_frms; + u64 rx_marker_resp_pdu_frms; + u64 rx_unknown_pdu_frms; + u64 rx_illegal_pdu_frms; + u64 rx_fcs_discard; + u64 rx_len_discard; + u64 rx_pf_discard; + u64 rx_trash_discard; + u64 rx_rts_discard; + u64 rx_wol_discard; + u64 rx_red_discard; + u64 rx_ingm_full_discard; + u64 rx_xgmii_data_err_cnt; + u64 rx_xgmii_ctrl_err_cnt; + u64 rx_xgmii_err_sym; + u64 rx_xgmii_char1_match; + u64 rx_xgmii_char2_match; + u64 rx_xgmii_column1_match; + u64 rx_xgmii_column2_match; + u64 rx_local_fault; + u64 rx_remote_fault; + u64 rx_queue_full; }xge_hal_stats_link_info_t; /** @@ -1323,18 +1286,18 @@ typedef struct xge_hal_stats_link_info_t { * the aggregator. */ typedef struct xge_hal_stats_aggr_info_t { - u64 tx_frms; - u64 tx_mcst_frms; - u64 tx_bcst_frms; - u64 tx_discarded_frms; - u64 tx_errored_frms; - u64 rx_frms; - u64 rx_data_octets; - u64 rx_mcst_frms; - u64 rx_bcst_frms; - u64 rx_discarded_frms; - u64 rx_errored_frms; - u64 rx_unknown_protocol_frms; + u64 tx_frms; + u64 tx_mcst_frms; + u64 tx_bcst_frms; + u64 tx_discarded_frms; + u64 tx_errored_frms; + u64 rx_frms; + u64 rx_data_octets; + u64 rx_mcst_frms; + u64 rx_bcst_frms; + u64 rx_discarded_frms; + u64 rx_errored_frms; + u64 rx_unknown_protocol_frms; }xge_hal_stats_aggr_info_t; /** @@ -1439,60 +1402,60 @@ typedef struct xge_hal_stats_aggr_info_t { * the vpath. */ typedef struct xge_hal_stats_vpath_info_t { - u64 tx_frms; - u64 tx_ttl_eth_octets; - u64 tx_data_octets; - u64 tx_mcst_frms; - u64 tx_bcst_frms; - u64 tx_ucst_frms; - u64 tx_tagged_frms; - u64 tx_vld_ip; - u64 tx_vld_ip_octets; - u64 tx_icmp; - u64 tx_tcp; - u64 tx_rst_tcp; - u64 tx_udp; - u64 tx_unknown_protocol; - u64 tx_parse_error; - u64 rx_ttl_frms; - u64 rx_vld_frms; - u64 rx_offld_frms; - u64 rx_ttl_eth_octets; - u64 rx_data_octets; - u64 rx_offld_octets; - u64 rx_vld_mcst_frms; - u64 rx_vld_bcst_frms; - u64 rx_accepted_ucst_frms; - u64 rx_accepted_nucst_frms; - u64 rx_tagged_frms; - u64 rx_long_frms; - u64 rx_usized_frms; - u64 rx_osized_frms; - u64 rx_frag_frms; - u64 rx_jabber_frms; - u64 rx_ttl_64_frms; - u64 rx_ttl_65_127_frms; - u64 rx_ttl_128_255_frms; - u64 rx_ttl_256_511_frms; - u64 rx_ttl_512_1023_frms; - u64 rx_ttl_1024_1518_frms; - u64 rx_ttl_1519_4095_frms; - u64 rx_ttl_40956_8191_frms; - u64 rx_ttl_8192_max_frms; - u64 rx_ttl_gt_max_frms; - u64 rx_ip; - u64 rx_accepted_ip; - u64 rx_ip_octets; - u64 rx_hdr_err_ip; - u64 rx_icmp; - u64 rx_tcp; - u64 rx_udp; - u64 rx_err_tcp; - u64 rx_mpa_ok_frms; - u64 rx_mpa_crc_fail_frms; - u64 rx_mpa_mrk_fail_frms; - u64 rx_mpa_len_fail_frms; - u64 rx_wol_frms; + u64 tx_frms; + u64 tx_ttl_eth_octets; + u64 tx_data_octets; + u64 tx_mcst_frms; + u64 tx_bcst_frms; + u64 tx_ucst_frms; + u64 tx_tagged_frms; + u64 tx_vld_ip; + u64 tx_vld_ip_octets; + u64 tx_icmp; + u64 tx_tcp; + u64 tx_rst_tcp; + u64 tx_udp; + u64 tx_unknown_protocol; + u64 tx_parse_error; + u64 rx_ttl_frms; + u64 rx_vld_frms; + u64 rx_offld_frms; + u64 rx_ttl_eth_octets; + u64 rx_data_octets; + u64 rx_offld_octets; + u64 rx_vld_mcst_frms; + u64 rx_vld_bcst_frms; + u64 rx_accepted_ucst_frms; + u64 rx_accepted_nucst_frms; + u64 rx_tagged_frms; + u64 rx_long_frms; + u64 rx_usized_frms; + u64 rx_osized_frms; + u64 rx_frag_frms; + u64 rx_jabber_frms; + u64 rx_ttl_64_frms; + u64 rx_ttl_65_127_frms; + u64 rx_ttl_128_255_frms; + u64 rx_ttl_256_511_frms; + u64 
rx_ttl_512_1023_frms; + u64 rx_ttl_1024_1518_frms; + u64 rx_ttl_1519_4095_frms; + u64 rx_ttl_40956_8191_frms; + u64 rx_ttl_8192_max_frms; + u64 rx_ttl_gt_max_frms; + u64 rx_ip; + u64 rx_accepted_ip; + u64 rx_ip_octets; + u64 rx_hdr_err_ip; + u64 rx_icmp; + u64 rx_tcp; + u64 rx_udp; + u64 rx_err_tcp; + u64 rx_mpa_ok_frms; + u64 rx_mpa_crc_fail_frms; + u64 rx_mpa_mrk_fail_frms; + u64 rx_mpa_len_fail_frms; + u64 rx_wol_frms; }xge_hal_stats_vpath_info_t; /** @@ -1503,8 +1466,8 @@ typedef struct xge_hal_stats_vpath_info_t { * See also: xge_hal_stats_link_info_t{}, xge_hal_stats_aggr_info_t{}. */ typedef struct xge_hal_stats_pcim_info_t { - xge_hal_stats_link_info_t link_info[XGE_HAL_MAC_LINKS]; - xge_hal_stats_aggr_info_t aggr_info[XGE_HAL_MAC_AGGREGATORS]; + xge_hal_stats_link_info_t link_info[XGE_HAL_MAC_LINKS]; + xge_hal_stats_aggr_info_t aggr_info[XGE_HAL_MAC_AGGREGATORS]; }xge_hal_stats_pcim_info_t; /** @@ -1541,35 +1504,35 @@ typedef struct xge_hal_stats_pcim_info_t { * See also: xge_hal_stats_channel_info_t{}. */ typedef struct xge_hal_stats_t { - /* handles */ - xge_hal_device_h devh; - dma_addr_t dma_addr; - pci_dma_h hw_info_dmah; - pci_dma_acc_h hw_info_dma_acch; + /* handles */ + xge_hal_device_h devh; + dma_addr_t dma_addr; + pci_dma_h hw_info_dmah; + pci_dma_acc_h hw_info_dma_acch; - /* HAL device hardware statistics */ - xge_hal_stats_hw_info_t *hw_info; - xge_hal_stats_hw_info_t hw_info_saved; - xge_hal_stats_hw_info_t hw_info_latest; + /* HAL device hardware statistics */ + xge_hal_stats_hw_info_t *hw_info; + xge_hal_stats_hw_info_t hw_info_saved; + xge_hal_stats_hw_info_t hw_info_latest; /* HAL device hardware statistics for XFRAME ER */ - xge_hal_stats_pcim_info_t *pcim_info; - xge_hal_stats_pcim_info_t *pcim_info_saved; - xge_hal_stats_pcim_info_t *pcim_info_latest; + xge_hal_stats_pcim_info_t *pcim_info; + xge_hal_stats_pcim_info_t *pcim_info_saved; + xge_hal_stats_pcim_info_t *pcim_info_latest; - /* HAL device "soft" stats */ + /* HAL device "soft" stats */ xge_hal_stats_sw_err_t sw_dev_err_stats; xge_hal_stats_device_info_t sw_dev_info_stats; - /* flags */ - int is_initialized; - int is_enabled; + /* flags */ + int is_initialized; + int is_enabled; } xge_hal_stats_t; /* ========================== STATS PRIVATE API ========================= */ xge_hal_status_e __hal_stats_initialize(xge_hal_stats_t *stats, - xge_hal_device_h devh); + xge_hal_device_h devh); void __hal_stats_terminate(xge_hal_stats_t *stats); @@ -1582,16 +1545,16 @@ void __hal_stats_soft_reset(xge_hal_device_h devh, int reset_all); /* ========================== STATS PUBLIC API ========================= */ xge_hal_status_e xge_hal_stats_hw(xge_hal_device_h devh, - xge_hal_stats_hw_info_t **hw_info); + xge_hal_stats_hw_info_t **hw_info); xge_hal_status_e xge_hal_stats_pcim(xge_hal_device_h devh, - xge_hal_stats_pcim_info_t **pcim_info); + xge_hal_stats_pcim_info_t **pcim_info); xge_hal_status_e xge_hal_stats_device(xge_hal_device_h devh, - xge_hal_stats_device_info_t **device_info); + xge_hal_stats_device_info_t **device_info); xge_hal_status_e xge_hal_stats_channel(xge_hal_channel_h channelh, - xge_hal_stats_channel_info_t **channel_info); + xge_hal_stats_channel_info_t **channel_info); xge_hal_status_e xge_hal_stats_reset(xge_hal_device_h devh); diff --git a/sys/dev/nxge/include/xgehal-types.h b/sys/dev/nxge/include/xgehal-types.h index ec1942b..58dd091 100644 --- a/sys/dev/nxge/include/xgehal-types.h +++ b/sys/dev/nxge/include/xgehal-types.h @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : 
xgehal-types.h - * - * Description: HAL commonly used types and enumerations - * - * Created: 19 May 2004 - */ - #ifndef XGE_HAL_TYPES_H #define XGE_HAL_TYPES_H @@ -44,44 +36,44 @@ __EXTERN_BEGIN_DECLS /* * BIT(loc) - set bit at offset */ -#define BIT(loc) (0x8000000000000000ULL >> (loc)) +#define BIT(loc) (0x8000000000000000ULL >> (loc)) /* * vBIT(val, loc, sz) - set bits at offset */ -#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz))) -#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz))) +#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz))) +#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz))) /* * bVALx(bits, loc) - Get the value of x bits at location */ -#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1) -#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3) -#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7) -#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF) -#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F) -#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F) -#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F) -#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF) -#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF) -#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF) -#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF) -#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF) -#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF) -#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF) -#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF) -#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF) -#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF) -#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF) -#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF) -#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF) -#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF) -#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF) -#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF) - -#define XGE_HAL_BASE_INF 100 -#define XGE_HAL_BASE_ERR 200 -#define XGE_HAL_BASE_BADCFG 300 +#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1) +#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3) +#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7) +#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF) +#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F) +#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F) +#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F) +#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF) +#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF) +#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF) +#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF) +#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF) +#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF) +#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF) +#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF) +#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF) +#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) 
& 0xFFFFFFFFF) +#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF) +#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF) +#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF) +#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF) +#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF) +#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF) + +#define XGE_HAL_BASE_INF 100 +#define XGE_HAL_BASE_ERR 200 +#define XGE_HAL_BASE_BADCFG 300 #define XGE_HAL_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL @@ -356,176 +348,176 @@ __EXTERN_BEGIN_DECLS * */ typedef enum xge_hal_status_e { - XGE_HAL_OK = 0, - XGE_HAL_FAIL = 1, - XGE_HAL_COMPLETIONS_REMAIN = 2, + XGE_HAL_OK = 0, + XGE_HAL_FAIL = 1, + XGE_HAL_COMPLETIONS_REMAIN = 2, XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS = XGE_HAL_BASE_INF + 1, - XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2, - XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3, - XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4, - XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5, - XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6, - XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7, - XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8, - XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9, - XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10, - XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11, - XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12, - XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13, - XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14, - XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15, - XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1, - XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4, - XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5, - XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6, - XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7, - XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8, - XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9, - XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10, - XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11, - XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12, - XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13, - XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14, - XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15, - XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16, - XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17, - XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18, - XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19, - XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20, - XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21, - XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22, - XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23, - XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24, - XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25, - XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26, + XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2, + XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3, + XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4, + XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5, + XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6, + XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7, + XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8, + XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9, + XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10, + 
XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11, + XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12, + XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13, + XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14, + XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15, + XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1, + XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4, + XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5, + XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6, + XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7, + XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8, + XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9, + XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10, + XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11, + XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12, + XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13, + XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14, + XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15, + XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16, + XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17, + XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18, + XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19, + XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20, + XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21, + XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22, + XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23, + XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24, + XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25, + XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26, XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT= XGE_HAL_BASE_ERR + 27, - XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28, - XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29, - XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30, - XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32, - XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33, - - XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1, - XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2, - XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3, - XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4, - XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5, - XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6, - XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8, - XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9, - XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10, - XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11, - XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12, - XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13, - XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14, - XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15, - XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16, - XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17, - XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18, - XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19, + XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28, + XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29, + XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30, + XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32, + XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33, + + XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1, + XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2, + XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3, + XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4, + XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5, + XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6, + XGE_HAL_BADCFG_TX_UFC_D = 
XGE_HAL_BASE_BADCFG + 8, + XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9, + XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10, + XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11, + XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12, + XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13, + XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14, + XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15, + XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16, + XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17, + XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18, + XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19, XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH = XGE_HAL_BASE_BADCFG + 20, - XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21, - XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG + 22, - XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23, - XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24, - XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25, - XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26, - XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27, - XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28, - XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29, - XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30, - XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31, - XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32, - XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG +33, - XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34, - XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35, - XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37, - XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38, - XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39, - XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40, - XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41, - XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42, - XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43, - XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44, - XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45, - XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46, - XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47, - XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48, - XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49, - XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50, - XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51, - XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52, - XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53, - XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54, - XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55, - XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56, - XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57, - XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58, - XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59, - XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60, - XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61, - XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62, - XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63, - XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64, - XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65, + XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21, + XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG 
+ 22, + XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23, + XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24, + XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25, + XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26, + XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27, + XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28, + XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29, + XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30, + XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31, + XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32, + XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG +33, + XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34, + XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35, + XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37, + XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38, + XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39, + XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40, + XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41, + XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42, + XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43, + XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44, + XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45, + XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46, + XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47, + XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48, + XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49, + XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50, + XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51, + XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52, + XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53, + XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54, + XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55, + XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56, + XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57, + XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58, + XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59, + XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60, + XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61, + XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62, + XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63, + XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64, + XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65, XGE_HAL_BADCFG_DEVICE_POLL_MILLIS = XGE_HAL_BASE_BADCFG + 66, - XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67, - XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68, - XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69, - XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70, - XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71, - XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72, - XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73, - XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74, - XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75, - XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76, - XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77, - XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78, - XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79, - XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80, - XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = 
XGE_HAL_BASE_BADCFG + 81, - XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82, - XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83, - XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84, - XGE_HAL_EOF_TRACE_BUF = -1 + XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67, + XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68, + XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69, + XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70, + XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71, + XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72, + XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73, + XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74, + XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75, + XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76, + XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77, + XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78, + XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79, + XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80, + XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 81, + XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82, + XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83, + XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84, + XGE_HAL_EOF_TRACE_BUF = -1 } xge_hal_status_e; -#define XGE_HAL_ETH_ALEN 6 +#define XGE_HAL_ETH_ALEN 6 typedef u8 macaddr_t[XGE_HAL_ETH_ALEN]; -#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100 +#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100 /* frames sizes */ -#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14 -#define XGE_HAL_HEADER_802_2_SIZE 3 -#define XGE_HAL_HEADER_SNAP_SIZE 5 -#define XGE_HAL_HEADER_VLAN_SIZE 4 +#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14 +#define XGE_HAL_HEADER_802_2_SIZE 3 +#define XGE_HAL_HEADER_SNAP_SIZE 5 +#define XGE_HAL_HEADER_VLAN_SIZE 4 #define XGE_HAL_MAC_HEADER_MAX_SIZE \ - (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \ - XGE_HAL_HEADER_802_2_SIZE + \ - XGE_HAL_HEADER_SNAP_SIZE) + (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \ + XGE_HAL_HEADER_802_2_SIZE + \ + XGE_HAL_HEADER_SNAP_SIZE) -#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64) +#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64) /* 32bit alignments */ -#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2 -#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2 -#define XGE_HAL_HEADER_802_2_ALIGN 3 -#define XGE_HAL_HEADER_SNAP_ALIGN 1 +#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2 +#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2 +#define XGE_HAL_HEADER_802_2_ALIGN 3 +#define XGE_HAL_HEADER_SNAP_ALIGN 1 -#define XGE_HAL_L3_CKSUM_OK 0xFFFF -#define XGE_HAL_L4_CKSUM_OK 0xFFFF -#define XGE_HAL_MIN_MTU 46 -#define XGE_HAL_MAX_MTU 9600 -#define XGE_HAL_DEFAULT_MTU 1500 +#define XGE_HAL_L3_CKSUM_OK 0xFFFF +#define XGE_HAL_L4_CKSUM_OK 0xFFFF +#define XGE_HAL_MIN_MTU 46 +#define XGE_HAL_MAX_MTU 9600 +#define XGE_HAL_DEFAULT_MTU 1500 -#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920 +#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920 -#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */ -#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */ +#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */ +#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */ -#define XGE_HAL_MAX_MSIX_MESSAGES 64 +#define XGE_HAL_MAX_MSIX_MESSAGES 64 #define XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR XGE_HAL_MAX_MSIX_MESSAGES * 2 /* Highest level interrupt blocks */ #define XGE_HAL_TX_PIC_INTR (0x0001<<0) @@ -541,17 +533,17 @@ typedef u8 
macaddr_t[XGE_HAL_ETH_ALEN]; #define XGE_HAL_MC_INTR (0x0001<<10) #define XGE_HAL_SCHED_INTR (0x0001<<11) #define XGE_HAL_ALL_INTRS (XGE_HAL_TX_PIC_INTR | \ - XGE_HAL_TX_DMA_INTR | \ - XGE_HAL_TX_MAC_INTR | \ - XGE_HAL_TX_XGXS_INTR | \ - XGE_HAL_TX_TRAFFIC_INTR | \ - XGE_HAL_RX_PIC_INTR | \ - XGE_HAL_RX_DMA_INTR | \ - XGE_HAL_RX_MAC_INTR | \ - XGE_HAL_RX_XGXS_INTR | \ - XGE_HAL_RX_TRAFFIC_INTR | \ - XGE_HAL_MC_INTR | \ - XGE_HAL_SCHED_INTR) + XGE_HAL_TX_DMA_INTR | \ + XGE_HAL_TX_MAC_INTR | \ + XGE_HAL_TX_XGXS_INTR | \ + XGE_HAL_TX_TRAFFIC_INTR | \ + XGE_HAL_RX_PIC_INTR | \ + XGE_HAL_RX_DMA_INTR | \ + XGE_HAL_RX_MAC_INTR | \ + XGE_HAL_RX_XGXS_INTR | \ + XGE_HAL_RX_TRAFFIC_INTR | \ + XGE_HAL_MC_INTR | \ + XGE_HAL_SCHED_INTR) #define XGE_HAL_GEN_MASK_INTR (0x0001<<12) /* Interrupt masks for the general interrupt mask register */ @@ -570,7 +562,7 @@ typedef u8 macaddr_t[XGE_HAL_ETH_ALEN]; #define XGE_HAL_RXTRAFFIC_INT_M BIT(40) /* MSI level Interrupts */ -#define XGE_HAL_MAX_MSIX_VECTORS (16) +#define XGE_HAL_MAX_MSIX_VECTORS (16) typedef struct xge_hal_ipv4 { u32 addr; @@ -586,33 +578,22 @@ typedef union xge_hal_ipaddr_t { }xge_hal_ipaddr_t; /* DMA level Interrupts */ -#define XGE_HAL_TXDMA_PFC_INT_M BIT(0) +#define XGE_HAL_TXDMA_PFC_INT_M BIT(0) /* PFC block interrupts */ -#define XGE_HAL_PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO +#define XGE_HAL_PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */ /* basic handles */ typedef void* xge_hal_device_h; typedef void* xge_hal_dtr_h; typedef void* xge_hal_channel_h; -#ifdef XGEHAL_RNIC -typedef void* xge_hal_towi_h; -typedef void* xge_hal_hw_wqe_h; -typedef void* xge_hal_hw_cqe_h; -typedef void* xge_hal_lro_wqe_h; -typedef void* xge_hal_lro_cqe_h; -typedef void* xge_hal_up_msg_h; -typedef void* xge_hal_down_msg_h; -typedef void* xge_hal_channel_callback_fh; -typedef void* xge_hal_msg_queueh; -typedef void* xge_hal_pblist_h; -#endif + /* * I2C device id. Used in I2C control register for accessing EEPROM device * memory. */ -#define XGE_DEV_ID 5 +#define XGE_DEV_ID 5 typedef enum xge_hal_xpak_alarm_type_e { XGE_HAL_XPAK_ALARM_EXCESS_TEMP = 1, diff --git a/sys/dev/nxge/include/xgehal.h b/sys/dev/nxge/include/xgehal.h index 4c3c08a..c864512 100644 --- a/sys/dev/nxge/include/xgehal.h +++ b/sys/dev/nxge/include/xgehal.h @@ -26,15 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal.h - * - * Description: Consolidated header. Upper layers should include it to - * avoid include order problems. 
- * - * Created: 14 May 2004 - */ - #ifndef XGE_HAL_H #define XGE_HAL_H diff --git a/sys/dev/nxge/xge-osdep.h b/sys/dev/nxge/xge-osdep.h index b2c448d..15adfe7 100644 --- a/sys/dev/nxge/xge-osdep.h +++ b/sys/dev/nxge/xge-osdep.h @@ -26,18 +26,12 @@ * $FreeBSD$ */ -/* - * xge-osdep.h - * - * Platform-dependent "glue" code - */ - #ifndef XGE_OSDEP_H #define XGE_OSDEP_H -/****************************************** +/** * Includes and defines - ******************************************/ + */ #include <sys/param.h> #include <sys/systm.h> #include <sys/mbuf.h> @@ -57,6 +51,9 @@ #include <sys/mutex.h> #include <sys/types.h> #include <sys/endian.h> +#include <sys/sysctl.h> +#include <sys/endian.h> +#include <sys/socket.h> #include <machine/bus.h> #include <machine/resource.h> #include <machine/clock.h> @@ -73,63 +70,70 @@ #include <net/if_var.h> #include <net/bpf.h> #include <net/if_types.h> - +#include <netinet/in_systm.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/tcp.h> #define XGE_OS_PLATFORM_64BIT #if BYTE_ORDER == BIG_ENDIAN -#define XGE_OS_HOST_BIG_ENDIAN 1 +#define XGE_OS_HOST_BIG_ENDIAN #elif BYTE_ORDER == LITTLE_ENDIAN -#define XGE_OS_HOST_LITTLE_ENDIAN 1 +#define XGE_OS_HOST_LITTLE_ENDIAN +#endif + +#define XGE_HAL_USE_5B_MODE + +#ifdef XGE_TRACE_ASSERT +#undef XGE_TRACE_ASSERT #endif -#define XGE_HAL_USE_5B_MODE 1 -#define XGE_HAL_PROCESS_LINK_INT_IN_ISR 1 #define OS_NETSTACK_BUF struct mbuf * #define XGE_LL_IP_FAST_CSUM(hdr, len) 0 +#ifndef __DECONST +#define __DECONST(type, var) ((type)(uintrptr_t)(const void *)(var)) +#endif + #define xge_os_ntohs ntohs #define xge_os_ntohl ntohl #define xge_os_htons htons #define xge_os_htonl htonl -#ifndef __DECONST -#define __DECONST(type, var) ((type)(uintrptr_t)(const void *)(var)) -#endif - -typedef struct busresources { - bus_space_tag_t bus_tag; /* DMA Tag */ - bus_space_handle_t bus_handle; /* Bus handle */ - struct resource *bar_start_addr;/* BAR start address */ -} busresource_t; - -typedef struct xge_dma_alloc { - bus_addr_t dma_phyaddr; /* Physical Address */ - caddr_t dma_viraddr; /* Virtual Address */ - bus_dma_tag_t dma_tag; /* DMA Tag */ - bus_dmamap_t dma_map; /* DMA Map */ - bus_dma_segment_t dma_segment; /* DMA Segment */ - bus_size_t dma_size; /* Size */ - int dma_nseg; /* Maximum scatter-gather segs. */ -} xdma; - -struct xge_dma_mbuf { - bus_addr_t dma_phyaddr; /* Physical Address */ - bus_dmamap_t dma_map; /* DMA Map */ -}; - -typedef struct pci_info { - device_t device; /* Device */ - struct resource *regmap0; /* Resource for BAR0 */ - struct resource *regmap1; /* Resource for BAR1 */ - void *bar0resource; /* BAR0 tag and handle */ - void *bar1resource; /* BAR1 tag and handle */ -} pci_info_t; - - -/****************************************** +typedef struct xge_bus_resource_t { + bus_space_tag_t bus_tag; /* DMA Tag */ + bus_space_handle_t bus_handle; /* Bus handle */ + struct resource *bar_start_addr;/* BAR start address */ +} xge_bus_resource_t; + +typedef struct xge_dma_alloc_t { + bus_addr_t dma_phyaddr; /* Physical Address */ + caddr_t dma_viraddr; /* Virtual Address */ + bus_dma_tag_t dma_tag; /* DMA Tag */ + bus_dmamap_t dma_map; /* DMA Map */ + bus_dma_segment_t dma_segment; /* DMA Segment */ + bus_size_t dma_size; /* Size */ + int dma_nseg; /* Maximum scatter-gather segs. 
*/ +} xge_dma_alloc_t; + +typedef struct xge_dma_mbuf_t { + bus_addr_t dma_phyaddr; /* Physical Address */ + bus_dmamap_t dma_map; /* DMA Map */ +}xge_dma_mbuf_t; + +typedef struct xge_pci_info { + device_t device; /* Device */ + struct resource *regmap0; /* Resource for BAR0 */ + struct resource *regmap1; /* Resource for BAR1 */ + void *bar0resource; /* BAR0 tag and handle */ + void *bar1resource; /* BAR1 tag and handle */ +} xge_pci_info_t; + + +/** * Fixed size primitive types - ******************************************/ + */ #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t @@ -139,16 +143,16 @@ typedef struct pci_info { #define ptrdiff_t ptrdiff_t typedef bus_addr_t dma_addr_t; typedef struct mtx spinlock_t; -typedef pci_info_t *pci_dev_h; -typedef busresource_t *pci_reg_h; -typedef struct xge_dma_alloc pci_dma_h; +typedef xge_pci_info_t *pci_dev_h; +typedef xge_bus_resource_t *pci_reg_h; +typedef xge_dma_alloc_t pci_dma_h; +typedef xge_dma_alloc_t pci_dma_acc_h; typedef struct resource *pci_irq_h; -typedef pci_info_t *pci_cfg_h; -typedef struct xge_dma_alloc pci_dma_acc_h; +typedef xge_pci_info_t *pci_cfg_h; -/****************************************** +/** * "libc" functionality - ******************************************/ + */ #define xge_os_memzero(addr, size) bzero(addr, size) #define xge_os_memcpy(dst, src, size) bcopy(src, dst, size) #define xge_os_memcmp memcmp @@ -156,79 +160,86 @@ typedef struct xge_dma_alloc pci_dma_acc_h; #define xge_os_strlen strlen #define xge_os_snprintf snprintf #define xge_os_sprintf sprintf -#define xge_os_printf(fmt...) { \ - printf(fmt); \ - printf("\n"); \ +#define xge_os_printf(fmt...) { \ + printf(fmt); \ + printf("\n"); \ } -#define xge_os_vaprintf(fmt) { \ - sprintf(fmt, fmt, "\n"); \ - va_list va; \ - va_start(va, fmt); \ - vprintf(fmt, va); \ - va_end(va); \ +#define xge_os_vaprintf(fmt) { \ + sprintf(fmt, fmt, "\n"); \ + va_list va; \ + va_start(va, fmt); \ + vprintf(fmt, va); \ + va_end(va); \ } -#define xge_os_vasprintf(buf, fmt) { \ - va_list va; \ - va_start(va, fmt); \ - (void) vaprintf(buf, fmt, va); \ - va_end(va); \ +#define xge_os_vasprintf(buf, fmt) { \ + va_list va; \ + va_start(va, fmt); \ + (void) vaprintf(buf, fmt, va); \ + va_end(va); \ } -#define xge_os_timestamp(buf) { \ - struct timeval current_time; \ - gettimeofday(¤t_time, 0); \ - sprintf(buf, "%08li.%08li: ", current_time.tv_sec, \ - current_time.tv_usec); \ +#define xge_os_timestamp(buf) { \ + struct timeval current_time; \ + gettimeofday(¤t_time, 0); \ + sprintf(buf, "%08li.%08li: ", current_time.tv_sec, \ + current_time.tv_usec); \ } #define xge_os_println xge_os_printf -/****************************************** +/** * Synchronization Primitives - ******************************************/ + */ /* Initialize the spin lock */ -#define xge_os_spin_lock_init(lockp, ctxh) \ - if(mtx_initialized(lockp) == 0) { \ - mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \ - } +#define xge_os_spin_lock_init(lockp, ctxh) { \ + if(mtx_initialized(lockp) == 0) { \ + mtx_init((lockp), "xge", NULL, MTX_DEF); \ + } \ +} /* Initialize the spin lock (IRQ version) */ -#define xge_os_spin_lock_init_irq(lockp, ctxh) \ - if(mtx_initialized(lockp) == 0) { \ - mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \ - } +#define xge_os_spin_lock_init_irq(lockp, ctxh) { \ + if(mtx_initialized(lockp) == 0) { \ + mtx_init((lockp), "xge", NULL, MTX_DEF); \ + } \ +} /* Destroy the lock */ -#define xge_os_spin_lock_destroy(lockp, ctxh) \ - if(mtx_initialized(lockp) != 0) { \ - 
mtx_destroy(lockp); \ - } +#define xge_os_spin_lock_destroy(lockp, ctxh) { \ + if(mtx_initialized(lockp) != 0) { \ + mtx_destroy(lockp); \ + } \ +} /* Destroy the lock (IRQ version) */ -#define xge_os_spin_lock_destroy_irq(lockp, ctxh) \ - if(mtx_initialized(lockp) != 0) { \ - mtx_destroy(lockp); \ - } +#define xge_os_spin_lock_destroy_irq(lockp, ctxh) { \ + if(mtx_initialized(lockp) != 0) { \ + mtx_destroy(lockp); \ + } \ +} /* Acquire the lock */ -#define xge_os_spin_lock(lockp) \ - if(mtx_owned(lockp) == 0) mtx_lock(lockp) +#define xge_os_spin_lock(lockp) { \ + if(mtx_owned(lockp) == 0) mtx_lock(lockp); \ +} /* Release the lock */ -#define xge_os_spin_unlock(lockp) mtx_unlock(lockp) +#define xge_os_spin_unlock(lockp) { \ + mtx_unlock(lockp); \ +} /* Acquire the lock (IRQ version) */ -#define xge_os_spin_lock_irq(lockp, flags) { \ - flags = MTX_QUIET; \ - if(mtx_owned(lockp) == 0) mtx_lock_flags(lockp, flags); \ +#define xge_os_spin_lock_irq(lockp, flags) { \ + flags = MTX_QUIET; \ + if(mtx_owned(lockp) == 0) mtx_lock_flags(lockp, flags); \ } /* Release the lock (IRQ version) */ -#define xge_os_spin_unlock_irq(lockp, flags) { \ - flags = MTX_QUIET; \ - mtx_unlock_flags(lockp, flags); \ +#define xge_os_spin_unlock_irq(lockp, flags) { \ + flags = MTX_QUIET; \ + mtx_unlock_flags(lockp, flags); \ } /* Write memory barrier */ @@ -241,65 +252,68 @@ typedef struct xge_dma_alloc pci_dma_acc_h; #define xge_os_mdelay(ms) DELAY(ms * 1000) /* Compare and exchange */ -//#define xge_os_cmpxchg(targetp, cmd, newval) +//#define xge_os_cmpxchg(targetp, cmd, newval) -/****************************************** +/** * Misc primitives - ******************************************/ + */ #define xge_os_unlikely(x) (x) -#define xge_os_prefetch(x) (x=x) -#define xge_os_prefetchw(x) (x=x) -#define xge_os_bug(fmt...) printf(fmt...) +#define xge_os_prefetch(x) (x=x) +#define xge_os_prefetchw(x) (x=x) +#define xge_os_bug(fmt...) printf(fmt) #define xge_os_htohs ntohs #define xge_os_ntohl ntohl #define xge_os_htons htons #define xge_os_htonl htonl -/****************************************** +/** * Compiler Stuffs - ******************************************/ -#define __xge_os_attr_cacheline_aligned + */ +#define __xge_os_attr_cacheline_aligned #define __xge_os_cacheline_size 32 -/****************************************** +/** * Memory Primitives - ******************************************/ + */ #define XGE_OS_INVALID_DMA_ADDR ((dma_addr_t)0) -/****************************************** - * xge_os_malloc - Allocate non DMA-able memory. +/** + * xge_os_malloc + * Allocate non DMA-able memory. * @pdev: Device context. * @size: Size to allocate. * - * Allocate @size bytes of memory. This allocation can sleep, and - * therefore, and therefore it requires process context. In other words, - * xge_os_malloc() cannot be called from the interrupt context. - * Use xge_os_free() to free the allocated block. + * Allocate @size bytes of memory. This allocation can sleep, and therefore, + * and therefore it requires process context. In other words, xge_os_malloc() + * cannot be called from the interrupt context. Use xge_os_free() to free the + * allocated block. * * Returns: Pointer to allocated memory, NULL - on failure. * * See also: xge_os_free(). 
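 *
 * A minimal usage sketch from a caller returning xge_hal_status_e; the
 * 512-byte scratch size is illustrative, only the allocate/free pairing
 * comes from this header:
 *
 *	void *scratch = xge_os_malloc(pdev, 512);
 *	if(scratch == NULL)
 *		return XGE_HAL_ERR_OUT_OF_MEMORY;
 *	... use the zero-filled block from process context only ...
 *	xge_os_free(pdev, scratch, 512);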
- ******************************************/ + */ static inline void * xge_os_malloc(pci_dev_h pdev, unsigned long size) { - void *vaddr = malloc((size), M_DEVBUF, M_NOWAIT); - xge_os_memzero(vaddr, size); - XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, file, line); - return (vaddr); + void *vaddr = malloc((size), M_DEVBUF, M_NOWAIT | M_ZERO); + if(vaddr != NULL) { + XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, __FILE__, __LINE__); + xge_os_memzero(vaddr, size); + } + return (vaddr); } -/****************************************** - * xge_os_free - Free non DMA-able memory. +/** + * xge_os_free + * Free non DMA-able memory. * @pdev: Device context. * @vaddr: Address of the allocated memory block. * @size: Some OS's require to provide size on free * - * Free the memory area obtained via xge_os_malloc(). - * This call may also sleep, and therefore it cannot be used inside - * interrupt. + * Free the memory area obtained via xge_os_malloc(). This call may also sleep, + * and therefore it cannot be used inside interrupt. * * See also: xge_os_malloc(). - ******************************************/ + */ static inline void xge_os_free(pci_dev_h pdev, const void *vaddr, unsigned long size) { XGE_OS_MEMORY_CHECK_FREE(vaddr, size); @@ -313,64 +327,65 @@ xge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { return; } -/****************************************** - * xge_os_dma_malloc - Allocate DMA-able memory. +/** + * xge_os_dma_malloc + * Allocate DMA-able memory. * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. * @size: Size (in bytes) to allocate. - * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, - * XGE_OS_DMA_STREAMING, - * XGE_OS_DMA_CONSISTENT - * Note that the last two flags are mutually exclusive. + * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, XGE_OS_DMA_STREAMING, + * XGE_OS_DMA_CONSISTENT (Note that the last two flags are mutually exclusive.) * @p_dmah: Handle used to map the memory onto the corresponding device memory - * space. See xge_os_dma_map(). The handle is an out-parameter - * returned by the function. - * @p_dma_acch: One more DMA handle used subsequently to free the - * DMA object (via xge_os_dma_free()). + * space. See xge_os_dma_map(). The handle is an out-parameter returned by the + * function. + * @p_dma_acch: One more DMA handle used subsequently to free the DMA object + * (via xge_os_dma_free()). * - * Allocate DMA-able contiguous memory block of the specified @size. - * This memory can be subsequently freed using xge_os_dma_free(). + * Allocate DMA-able contiguous memory block of the specified @size. This memory + * can be subsequently freed using xge_os_dma_free(). * Note: can be used inside interrupt context. * * Returns: Pointer to allocated memory(DMA-able), NULL on failure. 
- * - ******************************************/ + */ static inline void * xge_os_dma_malloc(pci_dev_h pdev, unsigned long size, int dma_flags, pci_dma_h *p_dmah, pci_dma_acc_h *p_dma_acch) { int retValue = bus_dma_tag_create( - bus_get_dma_tag(pdev->device), /* Parent */ + bus_get_dma_tag(pdev->device), /* Parent */ PAGE_SIZE, /* Alignment no specific alignment */ - 0, /* Bounds */ - BUS_SPACE_MAXADDR, /* Low Address */ - BUS_SPACE_MAXADDR, /* High Address */ - NULL, /* Filter */ - NULL, /* Filter arg */ - size, /* Max Size */ - 1, /* n segments */ - size, /* max segment size */ - BUS_DMA_ALLOCNOW, /* Flags */ - NULL, /* lockfunction */ - NULL, /* lock arg */ - &p_dmah->dma_tag); /* DMA tag */ + 0, /* Bounds */ + BUS_SPACE_MAXADDR, /* Low Address */ + BUS_SPACE_MAXADDR, /* High Address */ + NULL, /* Filter */ + NULL, /* Filter arg */ + size, /* Max Size */ + 1, /* n segments */ + size, /* max segment size */ + BUS_DMA_ALLOCNOW, /* Flags */ + NULL, /* lockfunction */ + NULL, /* lock arg */ + &p_dmah->dma_tag); /* DMA tag */ if(retValue != 0) { - xge_os_printf("bus_dma_tag_create failed\n"); + xge_os_printf("bus_dma_tag_create failed\n") goto fail_1; } p_dmah->dma_size = size; retValue = bus_dmamem_alloc(p_dmah->dma_tag, (void **)&p_dmah->dma_viraddr, BUS_DMA_NOWAIT, &p_dmah->dma_map); if(retValue != 0) { - xge_os_printf("bus_dmamem_alloc failed\n"); + xge_os_printf("bus_dmamem_alloc failed\n") goto fail_2; } + XGE_OS_MEMORY_CHECK_MALLOC(p_dmah->dma_viraddr, p_dmah->dma_size, + __FILE__, __LINE__); return(p_dmah->dma_viraddr); fail_2: bus_dma_tag_destroy(p_dmah->dma_tag); fail_1: return(NULL); } -/****************************************** - * xge_os_dma_free - Free previously allocated DMA-able memory. +/** + * xge_os_dma_free + * Free previously allocated DMA-able memory. * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. * @vaddr: Virtual address of the DMA-able memory. * @p_dma_acch: DMA handle used to free the resource. @@ -379,10 +394,11 @@ fail_1: return(NULL); * Free DMA-able memory originally allocated by xge_os_dma_malloc(). * Note: can be used inside interrupt. * See also: xge_os_dma_malloc(). - ******************************************/ + */ static inline void xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size, - pci_dma_acc_h *p_dma_acch, pci_dma_h *p_dmah) { + pci_dma_acc_h *p_dma_acch, pci_dma_h *p_dmah) +{ XGE_OS_MEMORY_CHECK_FREE(p_dmah->dma_viraddr, size); bus_dmamem_free(p_dmah->dma_tag, p_dmah->dma_viraddr, p_dmah->dma_map); bus_dma_tag_destroy(p_dmah->dma_tag); @@ -392,17 +408,18 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size, return; } -/****************************************** +/** * IO/PCI/DMA Primitives - ******************************************/ + */ #define XGE_OS_DMA_DIR_TODEVICE 0 #define XGE_OS_DMA_DIR_FROMDEVICE 1 #define XGE_OS_DMA_DIR_BIDIRECTIONAL 2 -/****************************************** - * xge_os_pci_read8 - Read one byte from device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. +/** + * xge_os_pci_read8 + * Read one byte from device PCI configuration. + * @pdev: Device context. Some OSs require device context to perform PIO and/or + * config space IO. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. * @val: Address of the result. 
@@ -410,26 +427,28 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size, * Read byte value from the specified @regh PCI configuration space at the * specified offset = @where. * Returns: 0 - success, non-zero - failure. - ******************************************/ -#define xge_os_pci_read8(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_read8(pdev, cfgh, where, val) \ (*(val) = pci_read_config(pdev->device, where, 1)) -/****************************************** - * xge_os_pci_write8 - Write one byte into device PCI configuration. - * @pdev: Device context. Some OSs require device context to perform - * PIO and/or config space IO. +/** + * xge_os_pci_write8 + * Write one byte into device PCI configuration. + * @pdev: Device context. Some OSs require device context to perform PIO and/or + * config space IO. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. * @val: Value to write. * * Write byte value into the specified PCI configuration space * Returns: 0 - success, non-zero - failure. - ******************************************/ -#define xge_os_pci_write8(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_write8(pdev, cfgh, where, val) \ pci_write_config(pdev->device, where, val, 1) -/****************************************** - * xge_os_pci_read16 - Read 16bit word from device PCI configuration. +/** + * xge_os_pci_read16 + * Read 16bit word from device PCI configuration. * @pdev: Device context. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. @@ -438,26 +457,27 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size, * Read 16bit value from the specified PCI configuration space at the * specified offset. * Returns: 0 - success, non-zero - failure. - ******************************************/ -#define xge_os_pci_read16(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_read16(pdev, cfgh, where, val) \ (*(val) = pci_read_config(pdev->device, where, 2)) -/****************************************** - * xge_os_pci_write16 - Write 16bit word into device PCI configuration. +/** + * xge_os_pci_write16 + * Write 16bit word into device PCI configuration. * @pdev: Device context. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. * @val: Value to write. * - * Write 16bit value into the specified @offset in PCI - * configuration space. + * Write 16bit value into the specified @offset in PCI configuration space. * Returns: 0 - success, non-zero - failure. - ******************************************/ -#define xge_os_pci_write16(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_write16(pdev, cfgh, where, val) \ pci_write_config(pdev->device, where, val, 2) -/****************************************** - * xge_os_pci_read32 - Read 32bit word from device PCI configuration. +/** + * xge_os_pci_read32 + * Read 32bit word from device PCI configuration. * @pdev: Device context. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. @@ -466,165 +486,176 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size, * Read 32bit value from the specified PCI configuration space at the * specified offset. * Returns: 0 - success, non-zero - failure. 
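 *
 * A short sketch; offsets 0x04 and 0x0d are the standard PCI command/status
 * dword and latency timer, and @cfgh is unused by these FreeBSD wrappers,
 * so the device handle is passed for both arguments:
 *
 *	u32 cmd_status;
 *	u8 lat_timer;
 *
 *	xge_os_pci_read32(pdev, pdev, 0x04, &cmd_status);
 *	xge_os_pci_read8(pdev, pdev, 0x0d, &lat_timer);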
- ******************************************/ -#define xge_os_pci_read32(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_read32(pdev, cfgh, where, val) \ (*(val) = pci_read_config(pdev->device, where, 4)) -/****************************************** - * xge_os_pci_write32 - Write 32bit word into device PCI configuration. +/** + * xge_os_pci_write32 + * Write 32bit word into device PCI configuration. * @pdev: Device context. * @cfgh: PCI configuration space handle. * @where: Offset in the PCI configuration space. * @val: Value to write. * - * Write 32bit value into the specified @offset in PCI - * configuration space. + * Write 32bit value into the specified @offset in PCI configuration space. * Returns: 0 - success, non-zero - failure. - ******************************************/ -#define xge_os_pci_write32(pdev, cfgh, where, val) \ + */ +#define xge_os_pci_write32(pdev, cfgh, where, val) \ pci_write_config(pdev->device, where, val, 4) -/****************************************** - * xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space. +/** + * xge_os_pio_mem_read8 + * Read 1 byte from device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @addr: Address in device memory space. * * Returns: 1 byte value read from the specified (mapped) memory space address. - ******************************************/ + */ static inline u8 xge_os_pio_mem_read8(pci_dev_h pdev, pci_reg_h regh, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t) + (((xge_bus_resource_t *)(regh))->bar_start_addr); return bus_space_read_1(tag, handle, (caddr_t)(addr) - addrss); } -/****************************************** - * xge_os_pio_mem_write8 - Write 1 byte into device memory mapped - * space. +/** + * xge_os_pio_mem_write8 + * Write 1 byte into device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @val: Value to write. * @addr: Address in device memory space. * * Write byte value into the specified (mapped) device memory space. - ******************************************/ + */ static inline void xge_os_pio_mem_write8(pci_dev_h pdev, pci_reg_h regh, u8 val, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t) + (((xge_bus_resource_t *)(regh))->bar_start_addr); bus_space_write_1(tag, handle, (caddr_t)(addr) - addrss, val); } -/****************************************** - * xge_os_pio_mem_read16 - Read 16bit from device memory mapped space. +/** + * xge_os_pio_mem_read16 + * Read 16bit from device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @addr: Address in device memory space. * * Returns: 16bit value read from the specified (mapped) memory space address. 
- ******************************************/ + */ static inline u16 xge_os_pio_mem_read16(pci_dev_h pdev, pci_reg_h regh, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t) + (((xge_bus_resource_t *)(regh))->bar_start_addr); return bus_space_read_2(tag, handle, (caddr_t)(addr) - addrss); } -/****************************************** - * xge_os_pio_mem_write16 - Write 16bit into device memory mapped space. +/** + * xge_os_pio_mem_write16 + * Write 16bit into device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @val: Value to write. * @addr: Address in device memory space. * * Write 16bit value into the specified (mapped) device memory space. - ******************************************/ + */ static inline void xge_os_pio_mem_write16(pci_dev_h pdev, pci_reg_h regh, u16 val, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t)(((xge_bus_resource_t *)(regh))->bar_start_addr); bus_space_write_2(tag, handle, (caddr_t)(addr) - addrss, val); } -/****************************************** - * xge_os_pio_mem_read32 - Read 32bit from device memory mapped space. +/** + * xge_os_pio_mem_read32 + * Read 32bit from device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @addr: Address in device memory space. * * Returns: 32bit value read from the specified (mapped) memory space address. - ******************************************/ + */ static inline u32 xge_os_pio_mem_read32(pci_dev_h pdev, pci_reg_h regh, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t) + (((xge_bus_resource_t *)(regh))->bar_start_addr); return bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss); } -/****************************************** - * xge_os_pio_mem_write32 - Write 32bit into device memory space. +/** + * xge_os_pio_mem_write32 + * Write 32bit into device memory space. * @pdev: Device context. * @regh: PCI configuration space handle. * @val: Value to write. * @addr: Address in device memory space. * * Write 32bit value into the specified (mapped) device memory space. 
- ******************************************/ + */ static inline void xge_os_pio_mem_write32(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) { bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t)(((xge_bus_resource_t *)(regh))->bar_start_addr); bus_space_write_4(tag, handle, (caddr_t)(addr) - addrss, val); } -/****************************************** - * xge_os_pio_mem_read64 - Read 64bit from device memory mapped space. +/** + * xge_os_pio_mem_read64 + * Read 64bit from device memory mapped space. * @pdev: Device context. * @regh: PCI configuration space handle. * @addr: Address in device memory space. * * Returns: 64bit value read from the specified (mapped) memory space address. - ******************************************/ + */ static inline u64 xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr) { u64 value1, value2; bus_space_tag_t tag = - (bus_space_tag_t)(((busresource_t *)regh)->bus_tag); + (bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag); bus_space_handle_t handle = - (bus_space_handle_t)(((busresource_t *)regh)->bus_handle); - caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr); + (bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle); + caddr_t addrss = (caddr_t) + (((xge_bus_resource_t *)(regh))->bar_start_addr); value1 = bus_space_read_4(tag, handle, (caddr_t)(addr) + 4 - addrss); value1 <<= 32; @@ -633,15 +664,16 @@ xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr) return value1; } -/****************************************** - * xge_os_pio_mem_write64 - Write 32bit into device memory space. +/** + * xge_os_pio_mem_write64 + * Write 32bit into device memory space. * @pdev: Device context. * @regh: PCI configuration space handle. * @val: Value to write. * @addr: Address in device memory space. * * Write 64bit value into the specified (mapped) device memory space. - ******************************************/ + */ static inline void xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr) { @@ -650,33 +682,29 @@ xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr) xge_os_pio_mem_write32(pdev, regh, val >> 32, ((caddr_t)(addr) + 4)); } -/****************************************** +/** * FIXME: document - ******************************************/ + */ #define xge_os_flush_bridge xge_os_pio_mem_read64 -/****************************************** - * xge_os_dma_map - Map DMA-able memory block to, or from, or - * to-and-from device. +/** + * xge_os_dma_map + * Map DMA-able memory block to, or from, or to-and-from device. * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. * @dmah: DMA handle used to map the memory block. Obtained via * xge_os_dma_malloc(). * @vaddr: Virtual address of the DMA-able memory. * @size: Size (in bytes) to be mapped. * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) - * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, - * XGE_OS_DMA_STREAMING, - * XGE_OS_DMA_CONSISTENT - * Note that the last two flags are mutually exclusive. + * @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, XGE_OS_DMA_STREAMING, + * XGE_OS_DMA_CONSISTENT (Note that the last two flags are mutually exclusive). 
* * Map a single memory block. * - * Returns: DMA address of the memory block, - * XGE_OS_INVALID_DMA_ADDR on failure. + * Returns: DMA address of the memory block, XGE_OS_INVALID_DMA_ADDR on failure. * - * See also: xge_os_dma_malloc(), xge_os_dma_unmap(), - * xge_os_dma_sync(). - ******************************************/ + * See also: xge_os_dma_malloc(), xge_os_dma_unmap(), xge_os_dma_sync(). + */ static inline dma_addr_t xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size, int dir, int dma_flags) @@ -685,14 +713,14 @@ xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size, bus_dmamap_load(dmah.dma_tag, dmah.dma_map, dmah.dma_viraddr, dmah.dma_size, xge_dmamap_cb, &dmah.dma_phyaddr, BUS_DMA_NOWAIT); if(retValue != 0) { - xge_os_printf("bus_dmamap_load_ failed\n"); + xge_os_printf("bus_dmamap_load_ failed\n") return XGE_OS_INVALID_DMA_ADDR; } dmah.dma_size = size; return dmah.dma_phyaddr; } -/****************************************** +/** * xge_os_dma_unmap - Unmap DMA-able memory. * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. * @dmah: DMA handle used to map the memory block. Obtained via @@ -701,10 +729,10 @@ xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size, * @size: Size (in bytes) to be unmapped. * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) * - * Unmap a single DMA-able memory block that was previously mapped - * using xge_os_dma_map(). + * Unmap a single DMA-able memory block that was previously mapped using + * xge_os_dma_map(). * See also: xge_os_dma_malloc(), xge_os_dma_map(). - ******************************************/ + */ static inline void xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, size_t size, int dir) @@ -713,7 +741,7 @@ xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, return; } -/****************************************** +/** * xge_os_dma_sync - Synchronize mapped memory. * @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory. * @dmah: DMA handle used to map the memory block. Obtained via @@ -723,16 +751,14 @@ xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, * @length: Size of the block. * @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.) * - * Make physical and CPU memory consistent for a single - * streaming mode DMA translation. - * This API compiles to NOP on cache-coherent platforms. - * On non cache-coherent platforms, depending on the direction - * of the "sync" operation, this API will effectively - * either invalidate CPU cache (that might contain old data), - * or flush CPU cache to update physical memory. + * Make physical and CPU memory consistent for a single streaming mode DMA + * translation. This API compiles to NOP on cache-coherent platforms. On + * non cache-coherent platforms, depending on the direction of the "sync" + * operation, this API will effectively either invalidate CPU cache (that might + * contain old data), or flush CPU cache to update physical memory. * See also: xge_os_dma_malloc(), xge_os_dma_map(), * xge_os_dma_unmap(). 
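 *
 * A sketch of the map/sync/unmap cycle for a block and pci_dma_h handle
 * obtained from xge_os_dma_malloc(); the size and transmit-only direction
 * are illustrative:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = xge_os_dma_map(pdev, dmah, block, PAGE_SIZE,
 *	              XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
 *	if(busaddr == XGE_OS_INVALID_DMA_ADDR)
 *		return XGE_HAL_FAIL;
 *	... CPU fills the block ...
 *	xge_os_dma_sync(pdev, dmah, busaddr, 0, PAGE_SIZE,
 *	    XGE_OS_DMA_DIR_TODEVICE);
 *	... device reads the block ...
 *	xge_os_dma_unmap(pdev, dmah, busaddr, PAGE_SIZE,
 *	    XGE_OS_DMA_DIR_TODEVICE);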
- ******************************************/ + */ static inline void xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, u64 dma_offset, size_t length, int dir) @@ -747,7 +773,7 @@ xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD; break; - case XGE_OS_DMA_DIR_BIDIRECTIONAL: + default: syncop = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREWRITE; break; } @@ -756,3 +782,4 @@ xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr, } #endif /* XGE_OSDEP_H */ + diff --git a/sys/dev/nxge/xgehal/xge-queue.c b/sys/dev/nxge/xgehal/xge-queue.c index 925f44f..a1a792e 100644 --- a/sys/dev/nxge/xgehal/xge-queue.c +++ b/sys/dev/nxge/xgehal/xge-queue.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xge-queue.c - * - * Description: serialized event queue - * - * Created: 7 June 2004 - */ - #include <dev/nxge/include/xge-queue.h> /** @@ -63,65 +55,65 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item) xge_queue_item_t *elem; if (xge_list_is_empty(&queue->list_head)) - return XGE_QUEUE_IS_EMPTY; + return XGE_QUEUE_IS_EMPTY; elem = (xge_queue_item_t *)queue->list_head.next; if (elem->data_size > data_max_size) - return XGE_QUEUE_NOT_ENOUGH_SPACE; + return XGE_QUEUE_NOT_ENOUGH_SPACE; xge_list_remove(&elem->item); real_size = elem->data_size + sizeof(xge_queue_item_t); if (queue->head_ptr == elem) { - queue->head_ptr = (char *)queue->head_ptr + real_size; - xge_debug_queue(XGE_TRACE, - "event_type: %d removing from the head: " - "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT - ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", - elem->event_type, - (u64)(ulong_t)queue->start_ptr, - (u64)(ulong_t)queue->head_ptr, - (u64)(ulong_t)queue->tail_ptr, - (u64)(ulong_t)queue->end_ptr, - (u64)(ulong_t)elem, - real_size); + queue->head_ptr = (char *)queue->head_ptr + real_size; + xge_debug_queue(XGE_TRACE, + "event_type: %d removing from the head: " + "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT + ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", + elem->event_type, + (u64)(ulong_t)queue->start_ptr, + (u64)(ulong_t)queue->head_ptr, + (u64)(ulong_t)queue->tail_ptr, + (u64)(ulong_t)queue->end_ptr, + (u64)(ulong_t)elem, + real_size); } else if ((char *)queue->tail_ptr - real_size == (char*)elem) { - queue->tail_ptr = (char *)queue->tail_ptr - real_size; - xge_debug_queue(XGE_TRACE, - "event_type: %d removing from the tail: " - "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT - ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", - elem->event_type, - (u64)(ulong_t)queue->start_ptr, - (u64)(ulong_t)queue->head_ptr, - (u64)(ulong_t)queue->tail_ptr, - (u64)(ulong_t)queue->end_ptr, - (u64)(ulong_t)elem, - real_size); + queue->tail_ptr = (char *)queue->tail_ptr - real_size; + xge_debug_queue(XGE_TRACE, + "event_type: %d removing from the tail: " + "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT + ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", + elem->event_type, + (u64)(ulong_t)queue->start_ptr, + (u64)(ulong_t)queue->head_ptr, + (u64)(ulong_t)queue->tail_ptr, + (u64)(ulong_t)queue->end_ptr, + (u64)(ulong_t)elem, + real_size); } else { - xge_debug_queue(XGE_TRACE, - "event_type: %d removing from the list: " - "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT - ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", - elem->event_type, - (u64)(ulong_t)queue->start_ptr, - (u64)(ulong_t)queue->head_ptr, - (u64)(ulong_t)queue->tail_ptr, - 
(u64)(ulong_t)queue->end_ptr, - (u64)(ulong_t)elem, - real_size); + xge_debug_queue(XGE_TRACE, + "event_type: %d removing from the list: " + "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT + ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", + elem->event_type, + (u64)(ulong_t)queue->start_ptr, + (u64)(ulong_t)queue->head_ptr, + (u64)(ulong_t)queue->tail_ptr, + (u64)(ulong_t)queue->end_ptr, + (u64)(ulong_t)elem, + real_size); } xge_assert(queue->tail_ptr >= queue->head_ptr); xge_assert(queue->tail_ptr >= queue->start_ptr && - queue->tail_ptr <= queue->end_ptr); + queue->tail_ptr <= queue->end_ptr); xge_assert(queue->head_ptr >= queue->start_ptr && - queue->head_ptr < queue->end_ptr); + queue->head_ptr < queue->end_ptr); xge_os_memcpy(item, elem, sizeof(xge_queue_item_t)); xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem), - elem->data_size); + elem->data_size); if (xge_list_is_empty(&queue->list_head)) { - /* reset buffer pointers just to be clean */ - queue->head_ptr = queue->tail_ptr = queue->start_ptr; + /* reset buffer pointers just to be clean */ + queue->head_ptr = queue->tail_ptr = queue->start_ptr; } return XGE_QUEUE_OK; } @@ -150,7 +142,7 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item) */ xge_queue_status_e xge_queue_produce(xge_queue_h queueh, int event_type, void *context, - int is_critical, const int data_size, void *data) + int is_critical, const int data_size, void *data) { xge_queue_t *queue = (xge_queue_t *)queueh; int real_size = data_size + sizeof(xge_queue_item_t); @@ -162,76 +154,76 @@ xge_queue_produce(xge_queue_h queueh, int event_type, void *context, xge_os_spin_lock_irq(&queue->lock, flags); if (is_critical && !queue->has_critical_event) { - unsigned char item_buf[sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; - xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf; - xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); + unsigned char item_buf[sizeof(xge_queue_item_t) + + XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; + xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf; + xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) + + XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); while (__queue_consume(queue, - XGE_DEFAULT_EVENT_MAX_DATA_SIZE, - item) != XGE_QUEUE_IS_EMPTY) - ; /* do nothing */ + XGE_DEFAULT_EVENT_MAX_DATA_SIZE, + item) != XGE_QUEUE_IS_EMPTY) + ; /* do nothing */ } try_again: if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) { - elem = (xge_queue_item_t *) queue->tail_ptr; - queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size); - xge_debug_queue(XGE_TRACE, - "event_type: %d adding to the tail: " - "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT - ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", - event_type, - (u64)(ulong_t)queue->start_ptr, - (u64)(ulong_t)queue->head_ptr, - (u64)(ulong_t)queue->tail_ptr, - (u64)(ulong_t)queue->end_ptr, - (u64)(ulong_t)elem, - real_size); + elem = (xge_queue_item_t *) queue->tail_ptr; + queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size); + xge_debug_queue(XGE_TRACE, + "event_type: %d adding to the tail: " + "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT + ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d", + event_type, + (u64)(ulong_t)queue->start_ptr, + (u64)(ulong_t)queue->head_ptr, + (u64)(ulong_t)queue->tail_ptr, + (u64)(ulong_t)queue->end_ptr, + (u64)(ulong_t)elem, + real_size); } else if ((char *)queue->head_ptr - real_size >= - (char 
*)queue->start_ptr) { - elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size); - queue->head_ptr = elem; - xge_debug_queue(XGE_TRACE, - "event_type: %d adding to the head: " - "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT - ":0x"XGE_OS_LLXFMT" length %d", - event_type, - (u64)(ulong_t)queue->start_ptr, - (u64)(ulong_t)queue->head_ptr, - (u64)(ulong_t)queue->tail_ptr, - (u64)(ulong_t)queue->end_ptr, - real_size); + (char *)queue->start_ptr) { + elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size); + queue->head_ptr = elem; + xge_debug_queue(XGE_TRACE, + "event_type: %d adding to the head: " + "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT + ":0x"XGE_OS_LLXFMT" length %d", + event_type, + (u64)(ulong_t)queue->start_ptr, + (u64)(ulong_t)queue->head_ptr, + (u64)(ulong_t)queue->tail_ptr, + (u64)(ulong_t)queue->end_ptr, + real_size); } else { - xge_queue_status_e status; - - if (queue->pages_current >= queue->pages_max) { - xge_os_spin_unlock_irq(&queue->lock, flags); - return XGE_QUEUE_IS_FULL; - } - - if (queue->has_critical_event) { - xge_os_spin_unlock_irq(&queue->lock, flags); - return XGE_QUEUE_IS_FULL; - } - - /* grow */ - status = __io_queue_grow(queueh); - if (status != XGE_QUEUE_OK) { - xge_os_spin_unlock_irq(&queue->lock, flags); - return status; - } - - goto try_again; + xge_queue_status_e status; + + if (queue->pages_current >= queue->pages_max) { + xge_os_spin_unlock_irq(&queue->lock, flags); + return XGE_QUEUE_IS_FULL; + } + + if (queue->has_critical_event) { + xge_os_spin_unlock_irq(&queue->lock, flags); + return XGE_QUEUE_IS_FULL; + } + + /* grow */ + status = __io_queue_grow(queueh); + if (status != XGE_QUEUE_OK) { + xge_os_spin_unlock_irq(&queue->lock, flags); + return status; + } + + goto try_again; } xge_assert(queue->tail_ptr >= queue->head_ptr); xge_assert(queue->tail_ptr >= queue->start_ptr && - queue->tail_ptr <= queue->end_ptr); + queue->tail_ptr <= queue->end_ptr); xge_assert(queue->head_ptr >= queue->start_ptr && - queue->head_ptr < queue->end_ptr); + queue->head_ptr < queue->end_ptr); elem->data_size = data_size; - elem->event_type = (xge_hal_event_e) event_type; + elem->event_type = (xge_hal_event_e) event_type; elem->is_critical = is_critical; if (is_critical) queue->has_critical_event = 1; @@ -267,12 +259,12 @@ try_again: */ xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial, - int pages_max, xge_queued_f queued, void *queued_data) + int pages_max, xge_queued_f queued, void *queued_data) { xge_queue_t *queue; - if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL) - return NULL; + if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL) + return NULL; queue->queued_func = queued; queue->queued_data = queued_data; @@ -282,12 +274,12 @@ xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial, queue->start_ptr = xge_os_malloc(pdev, queue->pages_current * XGE_QUEUE_BUF_SIZE); if (queue->start_ptr == NULL) { - xge_os_free(pdev, queue, sizeof(xge_queue_t)); - return NULL; + xge_os_free(pdev, queue, sizeof(xge_queue_t)); + return NULL; } queue->head_ptr = queue->tail_ptr = queue->start_ptr; queue->end_ptr = (char *)queue->start_ptr + - queue->pages_current * XGE_QUEUE_BUF_SIZE; + queue->pages_current * XGE_QUEUE_BUF_SIZE; xge_os_spin_lock_init_irq(&queue->lock, irqh); queue->pages_initial = pages_initial; queue->pages_max = pages_max; @@ -309,8 +301,8 @@ void xge_queue_destroy(xge_queue_h queueh) xge_queue_t *queue = (xge_queue_t *)queueh; 
xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh); if (!xge_list_is_empty(&queue->list_head)) { - xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x" - XGE_OS_LLXFMT, (u64)(ulong_t)queue); + xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x" + XGE_OS_LLXFMT, (u64)(ulong_t)queue); } xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current * XGE_QUEUE_BUF_SIZE); @@ -339,12 +331,12 @@ __io_queue_grow(xge_queue_h queueh) xge_queue_item_t *elem; xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing", - (u64)(ulong_t)queue, queue->pages_current); + (u64)(ulong_t)queue, queue->pages_current); newbuf = xge_os_malloc(queue->pdev, (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE); if (newbuf == NULL) - return XGE_QUEUE_OUT_OF_MEMORY; + return XGE_QUEUE_OUT_OF_MEMORY; xge_os_memcpy(newbuf, queue->start_ptr, queue->pages_current * XGE_QUEUE_BUF_SIZE); @@ -353,32 +345,32 @@ __io_queue_grow(xge_queue_h queueh) /* adjust queue sizes */ queue->start_ptr = newbuf; queue->end_ptr = (char *)newbuf + - (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE; + (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE; queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr - - (char *)oldbuf); + (char *)oldbuf); queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr - - (char *)oldbuf); + (char *)oldbuf); xge_assert(!xge_list_is_empty(&queue->list_head)); queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf + - ((char *)queue->list_head.next - (char *)oldbuf)); + ((char *)queue->list_head.next - (char *)oldbuf)); queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf + - ((char *)queue->list_head.prev - (char *)oldbuf)); + ((char *)queue->list_head.prev - (char *)oldbuf)); /* adjust queue list */ xge_list_for_each(item, &queue->list_head) { - elem = xge_container_of(item, xge_queue_item_t, item); - if (elem->item.next != &queue->list_head) { - elem->item.next = - (xge_list_t*)(void *)((char *)newbuf + - ((char *)elem->item.next - (char *)oldbuf)); - } - if (elem->item.prev != &queue->list_head) { - elem->item.prev = - (xge_list_t*) (void *)((char *)newbuf + - ((char *)elem->item.prev - (char *)oldbuf)); - } + elem = xge_container_of(item, xge_queue_item_t, item); + if (elem->item.next != &queue->list_head) { + elem->item.next = + (xge_list_t*)(void *)((char *)newbuf + + ((char *)elem->item.next - (char *)oldbuf)); + } + if (elem->item.prev != &queue->list_head) { + elem->item.prev = + (xge_list_t*) (void *)((char *)newbuf + + ((char *)elem->item.prev - (char *)oldbuf)); + } } xge_os_free(queue->pdev, oldbuf, - queue->pages_current * XGE_QUEUE_BUF_SIZE); + queue->pages_current * XGE_QUEUE_BUF_SIZE); queue->pages_current++; return XGE_QUEUE_OK; @@ -426,18 +418,18 @@ xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item) void xge_queue_flush(xge_queue_h queueh) { unsigned char item_buf[sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; + XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf; xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); + XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); /* flush queue by consuming all enqueued items */ while (xge_queue_consume(queueh, - XGE_DEFAULT_EVENT_MAX_DATA_SIZE, - item) != XGE_QUEUE_IS_EMPTY) { - /* do nothing */ - xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed", - item, item->event_type); + XGE_DEFAULT_EVENT_MAX_DATA_SIZE, + item) != XGE_QUEUE_IS_EMPTY) { + /* do nothing */ + xge_debug_queue(XGE_TRACE, 
"item "XGE_OS_LLXFMT"(%d) flushed", + item, item->event_type); } (void) __queue_get_reset_critical (queueh); } @@ -456,5 +448,5 @@ int __queue_get_reset_critical (xge_queue_h qh) { int c = queue->has_critical_event; queue->has_critical_event = 0; - return c; + return c; } diff --git a/sys/dev/nxge/xgehal/xgehal-channel-fp.c b/sys/dev/nxge/xgehal/xgehal-channel-fp.c index 0417ca0..98c9810 100644 --- a/sys/dev/nxge/xgehal/xgehal-channel-fp.c +++ b/sys/dev/nxge/xgehal/xgehal-channel-fp.c @@ -26,159 +26,154 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-channel-fp.c - * - * Description: HAL channel object functionality (fast path) - * - * Created: 10 June 2004 - */ - #ifdef XGE_DEBUG_FP #include <dev/nxge/include/xgehal-channel.h> #endif __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e -__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) +__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) { void **tmp_arr; - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; -#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) - unsigned long flags = 0; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; +#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) + unsigned long flags = 0; #endif + if (channel->terminating) { + return XGE_HAL_FAIL; + } - if (channel->reserve_length - channel->reserve_top > - channel->reserve_threshold) { + if (channel->reserve_length - channel->reserve_top > + channel->reserve_threshold) { _alloc_after_swap: - *dtrh = channel->reserve_arr[--channel->reserve_length]; + *dtrh = channel->reserve_arr[--channel->reserve_length]; - xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, " - "channel %d:%d:%d, reserve_idx %d", - (unsigned long long)(ulong_t)*dtrh, - channel->type, channel->post_qid, - channel->compl_qid, channel->reserve_length); + xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, " + "channel %d:%d:%d, reserve_idx %d", + (unsigned long long)(ulong_t)*dtrh, + channel->type, channel->post_qid, + channel->compl_qid, channel->reserve_length); - return XGE_HAL_OK; + return XGE_HAL_OK; } -#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) +#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) xge_os_spin_lock_irq(&channel->free_lock, flags); -#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) +#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) xge_os_spin_lock(&channel->free_lock); #endif - /* switch between empty and full arrays */ + /* switch between empty and full arrays */ - /* the idea behind such a design is that by having free and reserved - * arrays separated we basically separated irq and non-irq parts. - * i.e. no additional lock need to be done when we free a resource */ + /* the idea behind such a design is that by having free and reserved + * arrays separated we basically separated irq and non-irq parts. + * i.e. 
no additional lock need to be done when we free a resource */ - if (channel->reserve_initial - channel->free_length > - channel->reserve_threshold) { + if (channel->reserve_initial - channel->free_length > + channel->reserve_threshold) { - tmp_arr = channel->reserve_arr; - channel->reserve_arr = channel->free_arr; - channel->reserve_length = channel->reserve_initial; - channel->free_arr = tmp_arr; - channel->reserve_top = channel->free_length; - channel->free_length = channel->reserve_initial; + tmp_arr = channel->reserve_arr; + channel->reserve_arr = channel->free_arr; + channel->reserve_length = channel->reserve_initial; + channel->free_arr = tmp_arr; + channel->reserve_top = channel->free_length; + channel->free_length = channel->reserve_initial; - channel->stats.reserve_free_swaps_cnt++; + channel->stats.reserve_free_swaps_cnt++; - xge_debug_channel(XGE_TRACE, - "switch on channel %d:%d:%d, reserve_length %d, " - "free_length %d", channel->type, channel->post_qid, - channel->compl_qid, channel->reserve_length, - channel->free_length); + xge_debug_channel(XGE_TRACE, + "switch on channel %d:%d:%d, reserve_length %d, " + "free_length %d", channel->type, channel->post_qid, + channel->compl_qid, channel->reserve_length, + channel->free_length); -#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) - xge_os_spin_unlock_irq(&channel->free_lock, flags); -#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) - xge_os_spin_unlock(&channel->free_lock); +#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) + xge_os_spin_unlock_irq(&channel->free_lock, flags); +#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) + xge_os_spin_unlock(&channel->free_lock); #endif - goto _alloc_after_swap; + goto _alloc_after_swap; } -#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) - xge_os_spin_unlock_irq(&channel->free_lock, flags); -#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) +#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) + xge_os_spin_unlock_irq(&channel->free_lock, flags); +#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE) xge_os_spin_unlock(&channel->free_lock); #endif xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!", - channel->type, channel->post_qid, - channel->compl_qid); + channel->type, channel->post_qid, + channel->compl_qid); channel->stats.full_cnt++; - *dtrh = NULL; + *dtrh = NULL; return XGE_HAL_INF_OUT_OF_DESCRIPTORS; } __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void -__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int offset) +__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + int offset) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; - /* restore a previously allocated dtrh at current offset and update - * the available reserve length accordingly. If dtrh is null just + /* restore a previously allocated dtrh at current offset and update + * the available reserve length accordingly. 
If dtrh is null just * update the reserve length, only */ if (dtrh) { - channel->reserve_arr[channel->reserve_length + offset] = dtrh; - xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for " - "channel %d:%d:%d, offset %d at reserve index %d, ", - (unsigned long long)(ulong_t)dtrh, channel->type, - channel->post_qid, channel->compl_qid, offset, - channel->reserve_length + offset); + channel->reserve_arr[channel->reserve_length + offset] = dtrh; + xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for " + "channel %d:%d:%d, offset %d at reserve index %d, ", + (unsigned long long)(ulong_t)dtrh, channel->type, + channel->post_qid, channel->compl_qid, offset, + channel->reserve_length + offset); } else { - channel->reserve_length += offset; - xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored " - "for offset %d, new reserve_length %d, free length %d", - channel->type, channel->post_qid, channel->compl_qid, - offset, channel->reserve_length, channel->free_length); + channel->reserve_length += offset; + xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored " + "for offset %d, new reserve_length %d, free length %d", + channel->type, channel->post_qid, channel->compl_qid, + offset, channel->reserve_length, channel->free_length); } } __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh; xge_assert(channel->work_arr[channel->post_index] == NULL); channel->work_arr[channel->post_index++] = dtrh; - /* wrap-around */ - if (channel->post_index == channel->length) - channel->post_index = 0; + /* wrap-around */ + if (channel->post_index == channel->length) + channel->post_index = 0; } __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_assert(channel->work_arr); - xge_assert(channel->compl_index < channel->length); + xge_assert(channel->compl_index < channel->length); - *dtrh = channel->work_arr[channel->compl_index]; + *dtrh = channel->work_arr[channel->compl_index]; } __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_channel_dtr_complete(xge_hal_channel_h channelh) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; - channel->work_arr[channel->compl_index] = NULL; + channel->work_arr[channel->compl_index] = NULL; /* wrap-around */ if (++channel->compl_index == channel->length) - channel->compl_index = 0; + channel->compl_index = 0; channel->stats.total_compl_cnt++; } @@ -186,15 +181,15 @@ __hal_channel_dtr_complete(xge_hal_channel_h channelh) __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void __hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; - channel->free_arr[--channel->free_length] = dtrh; + channel->free_arr[--channel->free_length] = dtrh; - xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, " - "channel %d:%d:%d, new free_length %d", - (unsigned long long)(ulong_t)dtrh, - channel->type, channel->post_qid, - channel->compl_qid, channel->free_length); + xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, " + "channel %d:%d:%d, new free_length %d", 
+ (unsigned long long)(ulong_t)dtrh, + channel->type, channel->post_qid, + channel->compl_qid, channel->free_length); } /** @@ -210,88 +205,88 @@ xge_hal_channel_dtr_count(xge_hal_channel_h channelh) xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; return ((channel->reserve_length - channel->reserve_top) + - (channel->reserve_initial - channel->free_length) - - channel->reserve_threshold); + (channel->reserve_initial - channel->free_length) - + channel->reserve_threshold); } /** - * xge_hal_channel_userdata - Get user-specified channel context. + * xge_hal_channel_userdata - Get user-specified channel context. * @channelh: Channel handle. Obtained via xge_hal_channel_open(). * - * Returns: per-channel "user data", which can be any ULD-defined context. - * The %userdata "gets" into the channel at open time - * (see xge_hal_channel_open()). + * Returns: per-channel "user data", which can be any ULD-defined context. + * The %userdata "gets" into the channel at open time + * (see xge_hal_channel_open()). * * See also: xge_hal_channel_open(). */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void* xge_hal_channel_userdata(xge_hal_channel_h channelh) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; return channel->userdata; } /** - * xge_hal_channel_id - Get channel ID. + * xge_hal_channel_id - Get channel ID. * @channelh: Channel handle. Obtained via xge_hal_channel_open(). * - * Returns: channel ID. For link layer channel id is the number - * in the range from 0 to 7 that identifies hardware ring or fifo, - * depending on the channel type. + * Returns: channel ID. For link layer channel id is the number + * in the range from 0 to 7 that identifies hardware ring or fifo, + * depending on the channel type. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int xge_hal_channel_id(xge_hal_channel_h channelh) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; return channel->post_qid; } /** - * xge_hal_check_alignment - Check buffer alignment and calculate the - * "misaligned" portion. - * @dma_pointer: DMA address of the buffer. + * xge_hal_check_alignment - Check buffer alignment and calculate the + * "misaligned" portion. + * @dma_pointer: DMA address of the buffer. * @size: Buffer size, in bytes. - * @alignment: Alignment "granularity" (see below), in bytes. - * @copy_size: Maximum number of bytes to "extract" from the buffer - * (in order to spost it as a separate scatter-gather entry). See below. + * @alignment: Alignment "granularity" (see below), in bytes. + * @copy_size: Maximum number of bytes to "extract" from the buffer + * (in order to spost it as a separate scatter-gather entry). See below. * - * Check buffer alignment and calculate "misaligned" portion, if exists. - * The buffer is considered aligned if its address is multiple of - * the specified @alignment. If this is the case, + * Check buffer alignment and calculate "misaligned" portion, if exists. + * The buffer is considered aligned if its address is multiple of + * the specified @alignment. If this is the case, * xge_hal_check_alignment() returns zero. - * Otherwise, xge_hal_check_alignment() uses the last argument, + * Otherwise, xge_hal_check_alignment() uses the last argument, * @copy_size, - * to calculate the size to "extract" from the buffer. The @copy_size - * may or may not be equal @alignment. 
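 * A worked example with illustrative numbers: for @dma_pointer = 0x2003,
 * @alignment = 8, @copy_size = 64 and @size = 1514, the address is not a
 * multiple of 8, and since @size exceeds @copy_size the routine returns
 * 64 - (0x2003 & 63) = 61; posting those 61 bytes separately leaves the
 * remainder starting at 0x2040, which is 64-byte aligned.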
The difference between these two - * arguments is that the @alignment is used to make the decision: aligned - * or not aligned. While the @copy_size is used to calculate the portion - * of the buffer to "extract", i.e. to post as a separate entry in the - * transmit descriptor. For example, the combination - * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes. + * to calculate the size to "extract" from the buffer. The @copy_size + * may or may not be equal @alignment. The difference between these two + * arguments is that the @alignment is used to make the decision: aligned + * or not aligned. While the @copy_size is used to calculate the portion + * of the buffer to "extract", i.e. to post as a separate entry in the + * transmit descriptor. For example, the combination + * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes. * - * Note: @copy_size should be a multiple of @alignment. In many practical - * cases @copy_size and @alignment will probably be equal. + * Note: @copy_size should be a multiple of @alignment. In many practical + * cases @copy_size and @alignment will probably be equal. * * See also: xge_hal_fifo_dtr_buffer_set_aligned(). */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int -xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment, - int copy_size) +xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment, + int copy_size) { - int misaligned_size; + int misaligned_size; - misaligned_size = (int)(dma_pointer & (alignment - 1)); + misaligned_size = (int)(dma_pointer & (alignment - 1)); if (!misaligned_size) { - return 0; + return 0; } if (size > copy_size) { - misaligned_size = (int)(dma_pointer & (copy_size - 1)); - misaligned_size = copy_size - misaligned_size; + misaligned_size = (int)(dma_pointer & (copy_size - 1)); + misaligned_size = copy_size - misaligned_size; } else { - misaligned_size = size; + misaligned_size = size; } return misaligned_size; diff --git a/sys/dev/nxge/xgehal/xgehal-channel.c b/sys/dev/nxge/xgehal/xgehal-channel.c index dad39f2..c1ca17b 100644 --- a/sys/dev/nxge/xgehal/xgehal-channel.c +++ b/sys/dev/nxge/xgehal/xgehal-channel.c @@ -26,23 +26,11 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-channel.c - * - * Description: chipset channel abstraction - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xgehal-channel.h> #include <dev/nxge/include/xgehal-fifo.h> #include <dev/nxge/include/xgehal-ring.h> #include <dev/nxge/include/xgehal-device.h> #include <dev/nxge/include/xgehal-regs.h> -#ifdef XGEHAL_RNIC -#include <dev/nxge/include/xgehal-types.h> -#include "xgehal-iov.h" -#endif /* * __hal_channel_dtr_next_reservelist @@ -51,12 +39,12 @@ */ static xge_hal_status_e __hal_channel_dtr_next_reservelist(xge_hal_channel_h channelh, - xge_hal_dtr_h *dtrh) + xge_hal_dtr_h *dtrh) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; if (channel->reserve_top >= channel->reserve_length) { - return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS; + return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS; } *dtrh = channel->reserve_arr[channel->reserve_top++]; @@ -75,7 +63,7 @@ __hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; if (channel->reserve_initial == channel->free_length) { - return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS; + return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS; } *dtrh = channel->free_arr[channel->free_length++]; @@ -91,21 +79,16 @@ __hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, 
xge_hal_dtr_h *dtrh) */ static xge_hal_status_e __hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh, - xge_hal_dtr_h *dtrh) + xge_hal_dtr_h *dtrh) { -#ifndef XGEHAL_RNIC xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */ -#endif - __hal_channel_dtr_try_complete(channelh, dtrh); if (*dtrh == NULL) { - return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; + return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } -#ifndef XGEHAL_RNIC rxdp = (xge_hal_ring_rxd_1_t *)*dtrh; xge_assert(rxdp->host_control!=0); -#endif __hal_channel_dtr_complete(channelh); @@ -114,46 +97,26 @@ __hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh, xge_hal_channel_t* __hal_channel_allocate(xge_hal_device_h devh, int post_qid, -#ifdef XGEHAL_RNIC - u32 vp_id, -#endif - xge_hal_channel_type_e type) + xge_hal_channel_type_e type) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; xge_hal_channel_t *channel; int size = 0; switch(type) { - case XGE_HAL_CHANNEL_TYPE_FIFO: - xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM && - post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM); - size = sizeof(xge_hal_fifo_t); - break; - case XGE_HAL_CHANNEL_TYPE_RING: - xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM && - post_qid + 1 <= XGE_HAL_MAX_RING_NUM); - size = sizeof(xge_hal_ring_t); - break; -#ifdef XGEHAL_RNIC - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - size = sizeof(__hal_sq_t); - break; - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - size = sizeof(__hal_srq_t); - break; - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - size = sizeof(__hal_cqrq_t); - break; - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - size = sizeof(__hal_umq_t); - break; - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - size = sizeof(__hal_dmq_t); - break; -#endif - default : - xge_assert(size); - break; + case XGE_HAL_CHANNEL_TYPE_FIFO: + xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM && + post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM); + size = sizeof(xge_hal_fifo_t); + break; + case XGE_HAL_CHANNEL_TYPE_RING: + xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM && + post_qid + 1 <= XGE_HAL_MAX_RING_NUM); + size = sizeof(xge_hal_ring_t); + break; + default : + xge_assert(size); + break; } @@ -161,20 +124,17 @@ __hal_channel_allocate(xge_hal_device_h devh, int post_qid, /* allocate FIFO channel */ channel = (xge_hal_channel_t *) xge_os_malloc(hldev->pdev, size); if (channel == NULL) { - return NULL; + return NULL; } xge_os_memzero(channel, size); - channel->pdev = hldev->pdev; - channel->regh0 = hldev->regh0; - channel->regh1 = hldev->regh1; - channel->type = type; - channel->devh = devh; -#ifdef XGEHAL_RNIC - channel->vp_id = vp_id; -#endif - channel->post_qid = post_qid; - channel->compl_qid = 0; + channel->pdev = hldev->pdev; + channel->regh0 = hldev->regh0; + channel->regh1 = hldev->regh1; + channel->type = type; + channel->devh = devh; + channel->post_qid = post_qid; + channel->compl_qid = 0; return channel; } @@ -186,39 +146,21 @@ void __hal_channel_free(xge_hal_channel_t *channel) xge_assert(channel->pdev); switch(channel->type) { - case XGE_HAL_CHANNEL_TYPE_FIFO: - size = sizeof(xge_hal_fifo_t); - break; - case XGE_HAL_CHANNEL_TYPE_RING: - size = sizeof(xge_hal_ring_t); - break; -#ifdef XGEHAL_RNIC - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - size = sizeof(__hal_sq_t); - break; - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - size = sizeof(__hal_srq_t); - break; - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - size = sizeof(__hal_cqrq_t); - break; - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - size = sizeof(__hal_umq_t); - break; - case 
XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - size = sizeof(__hal_dmq_t); - break; -#else - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - xge_assert(size); - break; -#endif - default: - break; + case XGE_HAL_CHANNEL_TYPE_FIFO: + size = sizeof(xge_hal_fifo_t); + break; + case XGE_HAL_CHANNEL_TYPE_RING: + size = sizeof(xge_hal_ring_t); + break; + case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: + case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: + case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: + xge_assert(size); + break; + default: + break; } xge_os_free(channel->pdev, channel, size); @@ -226,8 +168,8 @@ void __hal_channel_free(xge_hal_channel_t *channel) xge_hal_status_e __hal_channel_initialize (xge_hal_channel_h channelh, - xge_hal_channel_attr_t *attr, void **reserve_arr, - int reserve_initial, int reserve_max, int reserve_threshold) + xge_hal_channel_attr_t *attr, void **reserve_arr, + int reserve_initial, int reserve_max, int reserve_threshold) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_hal_device_t *hldev; @@ -248,28 +190,28 @@ __hal_channel_initialize (xge_hal_channel_h channelh, channel->reserve_threshold = reserve_threshold; channel->reserve_top = 0; channel->saved_arr = (void **) xge_os_malloc(hldev->pdev, - sizeof(void*)*channel->reserve_max); + sizeof(void*)*channel->reserve_max); if (channel->saved_arr == NULL) { - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; } xge_os_memzero(channel->saved_arr, sizeof(void*)*channel->reserve_max); channel->free_arr = channel->saved_arr; channel->free_length = channel->reserve_initial; channel->work_arr = (void **) xge_os_malloc(hldev->pdev, - sizeof(void*)*channel->reserve_max); + sizeof(void*)*channel->reserve_max); if (channel->work_arr == NULL) { - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; } xge_os_memzero(channel->work_arr, - sizeof(void*)*channel->reserve_max); + sizeof(void*)*channel->reserve_max); channel->post_index = 0; channel->compl_index = 0; channel->length = channel->reserve_initial; channel->orig_arr = (void **) xge_os_malloc(hldev->pdev, - sizeof(void*)*channel->reserve_max); + sizeof(void*)*channel->reserve_max); if (channel->orig_arr == NULL) - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; xge_os_memzero(channel->orig_arr, sizeof(void*)*channel->reserve_max); @@ -292,21 +234,21 @@ void __hal_channel_terminate(xge_hal_channel_h channelh) xge_assert(channel->pdev); /* undo changes made at channel_initialize() */ if (channel->work_arr) { - xge_os_free(channel->pdev, channel->work_arr, - sizeof(void*)*channel->reserve_max); - channel->work_arr = NULL; + xge_os_free(channel->pdev, channel->work_arr, + sizeof(void*)*channel->reserve_max); + channel->work_arr = NULL; } if (channel->saved_arr) { - xge_os_free(channel->pdev, channel->saved_arr, - sizeof(void*)*channel->reserve_max); - channel->saved_arr = NULL; + xge_os_free(channel->pdev, channel->saved_arr, + sizeof(void*)*channel->reserve_max); + channel->saved_arr = NULL; } if (channel->orig_arr) { - xge_os_free(channel->pdev, channel->orig_arr, - sizeof(void*)*channel->reserve_max); - channel->orig_arr = NULL; + xge_os_free(channel->pdev, channel->orig_arr, + sizeof(void*)*channel->reserve_max); + channel->orig_arr = NULL; } #if 
defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) @@ -354,9 +296,9 @@ void __hal_channel_terminate(xge_hal_channel_h channelh) */ xge_hal_status_e xge_hal_channel_open(xge_hal_device_h devh, - xge_hal_channel_attr_t *attr, - xge_hal_channel_h *channelh, - xge_hal_channel_reopen_e reopen) + xge_hal_channel_attr_t *attr, + xge_hal_channel_h *channelh, + xge_hal_channel_reopen_e reopen) { xge_list_t *item; int i; @@ -369,169 +311,98 @@ xge_hal_channel_open(xge_hal_device_h devh, *channelh = NULL; -#ifdef XGEHAL_RNIC - if((attr->type == XGE_HAL_CHANNEL_TYPE_FIFO) || - (attr->type == XGE_HAL_CHANNEL_TYPE_RING)) { -#endif /* find channel */ - xge_list_for_each(item, &device->free_channels) { - xge_hal_channel_t *tmp; - - tmp = xge_container_of(item, xge_hal_channel_t, item); - if (tmp->type == attr->type && - tmp->post_qid == attr->post_qid && - tmp->compl_qid == attr->compl_qid) { - channel = tmp; - break; - } - } - - if (channel == NULL) { - return XGE_HAL_ERR_CHANNEL_NOT_FOUND; - } - -#ifdef XGEHAL_RNIC + xge_list_for_each(item, &device->free_channels) { + xge_hal_channel_t *tmp; + + tmp = xge_container_of(item, xge_hal_channel_t, item); + if (tmp->type == attr->type && + tmp->post_qid == attr->post_qid && + tmp->compl_qid == attr->compl_qid) { + channel = tmp; + break; + } } - else { - channel = __hal_channel_allocate(devh, attr->post_qid, -#ifdef XGEHAL_RNIC - attr->vp_id, -#endif - attr->type); - if (channel == NULL) { - xge_debug_device(XGE_ERR, - "__hal_channel_allocate failed"); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } + + if (channel == NULL) { + return XGE_HAL_ERR_CHANNEL_NOT_FOUND; } -#endif -#ifndef XGEHAL_RNIC xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) || - (channel->type == XGE_HAL_CHANNEL_TYPE_RING)); -#endif + (channel->type == XGE_HAL_CHANNEL_TYPE_RING)); -#ifdef XGEHAL_RNIC - if((reopen == XGE_HAL_CHANNEL_OC_NORMAL) || - ((channel->type != XGE_HAL_CHANNEL_TYPE_FIFO) && - (channel->type != XGE_HAL_CHANNEL_TYPE_RING))) { -#else if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) { -#endif - /* allocate memory, initialize pointers, etc */ - switch(channel->type) { - case XGE_HAL_CHANNEL_TYPE_FIFO: - status = __hal_fifo_open(channel, attr); - break; - case XGE_HAL_CHANNEL_TYPE_RING: - status = __hal_ring_open(channel, attr); - break; -#ifdef XGEHAL_RNIC - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - status = __hal_sq_open(channel, attr); - break; - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - status = __hal_srq_open(channel, attr); - break; - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - status = __hal_cqrq_open(channel, attr); - break; - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - status = __hal_umq_open(channel, attr); - break; - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - status = __hal_dmq_open(channel, attr); - break; -#else - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - status = XGE_HAL_FAIL; - break; -#endif - default: - break; - } - - if (status == XGE_HAL_OK) { - for (i = 0; i < channel->reserve_initial; i++) { - channel->orig_arr[i] = - channel->reserve_arr[i]; - } - } - else - return status; + /* allocate memory, initialize pointers, etc */ + switch(channel->type) { + case XGE_HAL_CHANNEL_TYPE_FIFO: + status = __hal_fifo_open(channel, attr); + break; + case XGE_HAL_CHANNEL_TYPE_RING: + status = __hal_ring_open(channel, attr); + break; + case 
XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: + case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: + case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: + status = XGE_HAL_FAIL; + break; + default: + break; + } + + if (status == XGE_HAL_OK) { + for (i = 0; i < channel->reserve_initial; i++) { + channel->orig_arr[i] = + channel->reserve_arr[i]; + } + } + else + return status; } else { xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY); - for (i = 0; i < channel->reserve_initial; i++) { - channel->reserve_arr[i] = channel->orig_arr[i]; - channel->free_arr[i] = NULL; - } - channel->free_length = channel->reserve_initial; - channel->reserve_length = channel->reserve_initial; - channel->reserve_top = 0; - channel->post_index = 0; - channel->compl_index = 0; - if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - status = __hal_ring_initial_replenish(channel, - reopen); - if (status != XGE_HAL_OK) - return status; - } + for (i = 0; i < channel->reserve_initial; i++) { + channel->reserve_arr[i] = channel->orig_arr[i]; + channel->free_arr[i] = NULL; + } + channel->free_length = channel->reserve_initial; + channel->reserve_length = channel->reserve_initial; + channel->reserve_top = 0; + channel->post_index = 0; + channel->compl_index = 0; + if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { + status = __hal_ring_initial_replenish(channel, + reopen); + if (status != XGE_HAL_OK) + return status; + } } /* move channel to the open state list */ switch(channel->type) { - case XGE_HAL_CHANNEL_TYPE_FIFO: - xge_list_remove(&channel->item); - xge_list_insert(&channel->item, &device->fifo_channels); - break; - case XGE_HAL_CHANNEL_TYPE_RING: - xge_list_remove(&channel->item); - xge_list_insert(&channel->item, &device->ring_channels); - break; -#ifdef XGEHAL_RNIC - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - xge_list_insert(&channel->item, - &device->virtual_paths[attr->vp_id].sq_channels); - device->virtual_paths[attr->vp_id].stats.no_sqs++; - break; - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - xge_list_insert(&channel->item, - &device->virtual_paths[attr->vp_id].srq_channels); - device->virtual_paths[attr->vp_id].stats.no_srqs++; - break; - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - xge_list_insert(&channel->item, - &device->virtual_paths[attr->vp_id].cqrq_channels); - device->virtual_paths[attr->vp_id].stats.no_cqrqs++; - break; - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - xge_list_init(&channel->item); - device->virtual_paths[attr->vp_id].umq_channelh = channel; - break; - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - xge_list_init(&channel->item); - device->virtual_paths[attr->vp_id].dmq_channelh = channel; - break; -#else - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO || - channel->type == XGE_HAL_CHANNEL_TYPE_RING); - break; -#endif - default: - break; + case XGE_HAL_CHANNEL_TYPE_FIFO: + xge_list_remove(&channel->item); + xge_list_insert(&channel->item, &device->fifo_channels); + break; + case XGE_HAL_CHANNEL_TYPE_RING: + xge_list_remove(&channel->item); + xge_list_insert(&channel->item, &device->ring_channels); + break; + case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: + case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: + case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: + case 
XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: + xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO || + channel->type == XGE_HAL_CHANNEL_TYPE_RING); + break; + default: + break; } channel->is_open = 1; + channel->terminating = 0; /* * The magic check the argument validity, has to be * removed before 03/01/2005. @@ -556,7 +427,7 @@ xge_hal_channel_open(xge_hal_device_h devh, * See also: xge_hal_channel_dtr_term_f{}. */ void xge_hal_channel_abort(xge_hal_channel_h channelh, - xge_hal_channel_reopen_e reopen) + xge_hal_channel_reopen_e reopen) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_hal_dtr_h dtr; @@ -567,25 +438,25 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh, int reserve_top_sav; if (channel->dtr_term == NULL) { - return; + return; } free_length_sav = channel->free_length; while (__hal_channel_dtr_next_freelist(channelh, &dtr) == XGE_HAL_OK) { #ifdef XGE_OS_MEMORY_CHECK #ifdef XGE_DEBUG_ASSERT - if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated); - } else { - if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated); - } - } + if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { + xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated); + } else { + if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { + xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated); + } + } #endif - check_cnt++; + check_cnt++; #endif - channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_FREED, - channel->userdata, reopen); + channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_FREED, + channel->userdata, reopen); } channel->free_length = free_length_sav; @@ -593,44 +464,44 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh, XGE_HAL_OK) { #ifdef XGE_OS_MEMORY_CHECK #ifdef XGE_DEBUG_ASSERT - if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - xge_assert(__hal_fifo_txdl_priv(dtr)->allocated); - } else { - if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - xge_assert(__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr) - ->allocated); - } - } + if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { + xge_assert(__hal_fifo_txdl_priv(dtr)->allocated); + } else { + if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { + xge_assert(__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr) + ->allocated); + } + } #endif - check_cnt++; + check_cnt++; #endif - channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED, - channel->userdata, reopen); + channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED, + channel->userdata, reopen); } reserve_top_sav = channel->reserve_top; while (__hal_channel_dtr_next_reservelist(channelh, &dtr) == - XGE_HAL_OK) { + XGE_HAL_OK) { #ifdef XGE_OS_MEMORY_CHECK #ifdef XGE_DEBUG_ASSERT - if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated); - } else { - if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated); - } - } + if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { + xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated); + } else { + if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { + xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated); + } + } #endif - check_cnt++; + check_cnt++; #endif - channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_AVAIL, - channel->userdata, reopen); + channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_AVAIL, + channel->userdata, reopen); } channel->reserve_top = 
reserve_top_sav; xge_assert(channel->reserve_length == - (channel->free_length + channel->reserve_top)); + (channel->free_length + channel->reserve_top)); #ifdef XGE_OS_MEMORY_CHECK xge_assert(check_cnt == channel->reserve_initial); @@ -649,14 +520,11 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh, * safe context. */ void xge_hal_channel_close(xge_hal_channel_h channelh, - xge_hal_channel_reopen_e reopen) + xge_hal_channel_reopen_e reopen) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_hal_device_t *hldev; xge_list_t *item; -#ifdef XGEHAL_RNIC - u32 vp_id; -#endif xge_assert(channel); xge_assert(channel->type < XGE_HAL_CHANNEL_TYPE_MAX); @@ -664,96 +532,53 @@ void xge_hal_channel_close(xge_hal_channel_h channelh, channel->is_open = 0; channel->magic = XGE_HAL_DEAD; -#ifdef XGEHAL_RNIC - vp_id = channel->vp_id; + /* sanity check: make sure channel is not in free list */ + xge_list_for_each(item, &hldev->free_channels) { + xge_hal_channel_t *tmp; - if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) || - (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) { -#endif - /* sanity check: make sure channel is not in free list */ - xge_list_for_each(item, &hldev->free_channels) { - xge_hal_channel_t *tmp; - - tmp = xge_container_of(item, xge_hal_channel_t, item); - xge_assert(!tmp->is_open); - if (channel == tmp) { - return; - } - } -#ifdef XGEHAL_RNIC + tmp = xge_container_of(item, xge_hal_channel_t, item); + xge_assert(!tmp->is_open); + if (channel == tmp) { + return; + } } -#endif xge_hal_channel_abort(channel, reopen); -#ifndef XGEHAL_RNIC xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) || - (channel->type == XGE_HAL_CHANNEL_TYPE_RING)); -#endif + (channel->type == XGE_HAL_CHANNEL_TYPE_RING)); if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) { - /* de-allocate */ - switch(channel->type) { - case XGE_HAL_CHANNEL_TYPE_FIFO: - __hal_fifo_close(channelh); - break; - case XGE_HAL_CHANNEL_TYPE_RING: - __hal_ring_close(channelh); - break; -#ifdef XGEHAL_RNIC - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - __hal_sq_close(channelh); - hldev->virtual_paths[vp_id].stats.no_sqs--; - break; - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - __hal_srq_close(channelh); - hldev->virtual_paths[vp_id].stats.no_srqs--; - break; - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - __hal_cqrq_close(channelh); - hldev->virtual_paths[vp_id].stats.no_cqrqs--; - break; - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - __hal_umq_close(channelh); - break; - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - __hal_dmq_close(channelh); - break; -#else - case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: - case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: - case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: - case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: - xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO || - channel->type == XGE_HAL_CHANNEL_TYPE_RING); - break; -#endif - default: - break; - } + /* de-allocate */ + switch(channel->type) { + case XGE_HAL_CHANNEL_TYPE_FIFO: + __hal_fifo_close(channelh); + break; + case XGE_HAL_CHANNEL_TYPE_RING: + __hal_ring_close(channelh); + break; + case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: + case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: + case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: + case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: + xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO || + channel->type == XGE_HAL_CHANNEL_TYPE_RING); + break; + default: + break; + } } else xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY); /* move 
channel back to free state list */ xge_list_remove(&channel->item); -#ifdef XGEHAL_RNIC - if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) || - (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) { -#endif - xge_list_insert(&channel->item, &hldev->free_channels); - - if (xge_list_is_empty(&hldev->fifo_channels) && - xge_list_is_empty(&hldev->ring_channels)) { - /* clear msix_idx in case of following HW reset */ - hldev->reset_needed_after_close = 1; - } -#ifdef XGEHAL_RNIC - } - else { - __hal_channel_free(channel); - } -#endif + xge_list_insert(&channel->item, &hldev->free_channels); + if (xge_list_is_empty(&hldev->fifo_channels) && + xge_list_is_empty(&hldev->ring_channels)) { + /* clear msix_idx in case of following HW reset */ + hldev->reset_needed_after_close = 1; + } } diff --git a/sys/dev/nxge/xgehal/xgehal-config.c b/sys/dev/nxge/xgehal/xgehal-config.c index 45a82e9..a937d53 100644 --- a/sys/dev/nxge/xgehal/xgehal-config.c +++ b/sys/dev/nxge/xgehal/xgehal-config.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-config.c - * - * Description: configuration functionality - * - * Created: 14 May 2004 - */ - #include <dev/nxge/include/xgehal-config.h> #include <dev/nxge/include/xge-debug.h> @@ -48,53 +40,53 @@ static xge_hal_status_e __hal_tti_config_check (xge_hal_tti_config_t *new_config) { if ((new_config->urange_a < XGE_HAL_MIN_TX_URANGE_A) || - (new_config->urange_a > XGE_HAL_MAX_TX_URANGE_A)) { - return XGE_HAL_BADCFG_TX_URANGE_A; + (new_config->urange_a > XGE_HAL_MAX_TX_URANGE_A)) { + return XGE_HAL_BADCFG_TX_URANGE_A; } if ((new_config->ufc_a < XGE_HAL_MIN_TX_UFC_A) || - (new_config->ufc_a > XGE_HAL_MAX_TX_UFC_A)) { - return XGE_HAL_BADCFG_TX_UFC_A; + (new_config->ufc_a > XGE_HAL_MAX_TX_UFC_A)) { + return XGE_HAL_BADCFG_TX_UFC_A; } if ((new_config->urange_b < XGE_HAL_MIN_TX_URANGE_B) || - (new_config->urange_b > XGE_HAL_MAX_TX_URANGE_B)) { - return XGE_HAL_BADCFG_TX_URANGE_B; + (new_config->urange_b > XGE_HAL_MAX_TX_URANGE_B)) { + return XGE_HAL_BADCFG_TX_URANGE_B; } if ((new_config->ufc_b < XGE_HAL_MIN_TX_UFC_B) || - (new_config->ufc_b > XGE_HAL_MAX_TX_UFC_B)) { - return XGE_HAL_BADCFG_TX_UFC_B; + (new_config->ufc_b > XGE_HAL_MAX_TX_UFC_B)) { + return XGE_HAL_BADCFG_TX_UFC_B; } if ((new_config->urange_c < XGE_HAL_MIN_TX_URANGE_C) || - (new_config->urange_c > XGE_HAL_MAX_TX_URANGE_C)) { - return XGE_HAL_BADCFG_TX_URANGE_C; + (new_config->urange_c > XGE_HAL_MAX_TX_URANGE_C)) { + return XGE_HAL_BADCFG_TX_URANGE_C; } if ((new_config->ufc_c < XGE_HAL_MIN_TX_UFC_C) || - (new_config->ufc_c > XGE_HAL_MAX_TX_UFC_C)) { - return XGE_HAL_BADCFG_TX_UFC_C; + (new_config->ufc_c > XGE_HAL_MAX_TX_UFC_C)) { + return XGE_HAL_BADCFG_TX_UFC_C; } if ((new_config->ufc_d < XGE_HAL_MIN_TX_UFC_D) || - (new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) { - return XGE_HAL_BADCFG_TX_UFC_D; + (new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) { + return XGE_HAL_BADCFG_TX_UFC_D; } if ((new_config->timer_val_us < XGE_HAL_MIN_TX_TIMER_VAL) || - (new_config->timer_val_us > XGE_HAL_MAX_TX_TIMER_VAL)) { - return XGE_HAL_BADCFG_TX_TIMER_VAL; + (new_config->timer_val_us > XGE_HAL_MAX_TX_TIMER_VAL)) { + return XGE_HAL_BADCFG_TX_TIMER_VAL; } if ((new_config->timer_ci_en < XGE_HAL_MIN_TX_TIMER_CI_EN) || - (new_config->timer_ci_en > XGE_HAL_MAX_TX_TIMER_CI_EN)) { - return XGE_HAL_BADCFG_TX_TIMER_CI_EN; + (new_config->timer_ci_en > XGE_HAL_MAX_TX_TIMER_CI_EN)) { + return XGE_HAL_BADCFG_TX_TIMER_CI_EN; } if ((new_config->timer_ac_en < XGE_HAL_MIN_TX_TIMER_AC_EN) || - (new_config->timer_ac_en > XGE_HAL_MAX_TX_TIMER_AC_EN)) { - 
return XGE_HAL_BADCFG_TX_TIMER_AC_EN; + (new_config->timer_ac_en > XGE_HAL_MAX_TX_TIMER_AC_EN)) { + return XGE_HAL_BADCFG_TX_TIMER_AC_EN; } return XGE_HAL_OK; @@ -111,48 +103,48 @@ static xge_hal_status_e __hal_rti_config_check (xge_hal_rti_config_t *new_config) { if ((new_config->urange_a < XGE_HAL_MIN_RX_URANGE_A) || - (new_config->urange_a > XGE_HAL_MAX_RX_URANGE_A)) { - return XGE_HAL_BADCFG_RX_URANGE_A; + (new_config->urange_a > XGE_HAL_MAX_RX_URANGE_A)) { + return XGE_HAL_BADCFG_RX_URANGE_A; } if ((new_config->ufc_a < XGE_HAL_MIN_RX_UFC_A) || - (new_config->ufc_a > XGE_HAL_MAX_RX_UFC_A)) { - return XGE_HAL_BADCFG_RX_UFC_A; + (new_config->ufc_a > XGE_HAL_MAX_RX_UFC_A)) { + return XGE_HAL_BADCFG_RX_UFC_A; } if ((new_config->urange_b < XGE_HAL_MIN_RX_URANGE_B) || - (new_config->urange_b > XGE_HAL_MAX_RX_URANGE_B)) { - return XGE_HAL_BADCFG_RX_URANGE_B; + (new_config->urange_b > XGE_HAL_MAX_RX_URANGE_B)) { + return XGE_HAL_BADCFG_RX_URANGE_B; } if ((new_config->ufc_b < XGE_HAL_MIN_RX_UFC_B) || - (new_config->ufc_b > XGE_HAL_MAX_RX_UFC_B)) { - return XGE_HAL_BADCFG_RX_UFC_B; + (new_config->ufc_b > XGE_HAL_MAX_RX_UFC_B)) { + return XGE_HAL_BADCFG_RX_UFC_B; } if ((new_config->urange_c < XGE_HAL_MIN_RX_URANGE_C) || - (new_config->urange_c > XGE_HAL_MAX_RX_URANGE_C)) { - return XGE_HAL_BADCFG_RX_URANGE_C; + (new_config->urange_c > XGE_HAL_MAX_RX_URANGE_C)) { + return XGE_HAL_BADCFG_RX_URANGE_C; } if ((new_config->ufc_c < XGE_HAL_MIN_RX_UFC_C) || - (new_config->ufc_c > XGE_HAL_MAX_RX_UFC_C)) { - return XGE_HAL_BADCFG_RX_UFC_C; + (new_config->ufc_c > XGE_HAL_MAX_RX_UFC_C)) { + return XGE_HAL_BADCFG_RX_UFC_C; } if ((new_config->ufc_d < XGE_HAL_MIN_RX_UFC_D) || - (new_config->ufc_d > XGE_HAL_MAX_RX_UFC_D)) { - return XGE_HAL_BADCFG_RX_UFC_D; + (new_config->ufc_d > XGE_HAL_MAX_RX_UFC_D)) { + return XGE_HAL_BADCFG_RX_UFC_D; } if ((new_config->timer_val_us < XGE_HAL_MIN_RX_TIMER_VAL) || - (new_config->timer_val_us > XGE_HAL_MAX_RX_TIMER_VAL)) { - return XGE_HAL_BADCFG_RX_TIMER_VAL; + (new_config->timer_val_us > XGE_HAL_MAX_RX_TIMER_VAL)) { + return XGE_HAL_BADCFG_RX_TIMER_VAL; } if ((new_config->timer_ac_en < XGE_HAL_MIN_RX_TIMER_AC_EN) || - (new_config->timer_ac_en > XGE_HAL_MAX_RX_TIMER_AC_EN)) { - return XGE_HAL_BADCFG_RX_TIMER_AC_EN; + (new_config->timer_ac_en > XGE_HAL_MAX_RX_TIMER_AC_EN)) { + return XGE_HAL_BADCFG_RX_TIMER_AC_EN; } return XGE_HAL_OK; @@ -168,13 +160,13 @@ __hal_rti_config_check (xge_hal_rti_config_t *new_config) */ static xge_hal_status_e __hal_fifo_queue_check (xge_hal_fifo_config_t *new_config, - xge_hal_fifo_queue_t *new_queue) + xge_hal_fifo_queue_t *new_queue) { int i; if ((new_queue->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) || - (new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) { - return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH; + (new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) { + return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH; } /* FIXME: queue "grow" feature is not supported. 
@@ -183,37 +175,37 @@ __hal_fifo_queue_check (xge_hal_fifo_config_t *new_config, new_queue->max = new_queue->initial; if ((new_queue->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) || - (new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) { - return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH; + (new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) { + return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH; } if (new_queue->max < new_config->reserve_threshold) { - return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD; + return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD; } if ((new_queue->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) || - (new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) { - return XGE_HAL_BADCFG_FIFO_QUEUE_INTR; + (new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) { + return XGE_HAL_BADCFG_FIFO_QUEUE_INTR; } if ((new_queue->intr_vector < XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR) || - (new_queue->intr_vector > XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR)) { - return XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR; + (new_queue->intr_vector > XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR)) { + return XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR; } for(i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++) { - /* - * Validate the tti configuration parameters only if - * the TTI feature is enabled. - */ - if (new_queue->tti[i].enabled) { - xge_hal_status_e status; - - if ((status = __hal_tti_config_check( - &new_queue->tti[i])) != XGE_HAL_OK) { - return status; - } - } + /* + * Validate the tti configuration parameters only if + * the TTI feature is enabled. + */ + if (new_queue->tti[i].enabled) { + xge_hal_status_e status; + + if ((status = __hal_tti_config_check( + &new_queue->tti[i])) != XGE_HAL_OK) { + return status; + } + } } return XGE_HAL_OK; @@ -231,8 +223,8 @@ __hal_ring_queue_check (xge_hal_ring_queue_t *new_config) { if ((new_config->initial < XGE_HAL_MIN_RING_QUEUE_BLOCKS) || - (new_config->initial > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) { - return XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS; + (new_config->initial > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) { + return XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS; } /* FIXME: queue "grow" feature is not supported. 
@@ -241,67 +233,67 @@ __hal_ring_queue_check (xge_hal_ring_queue_t *new_config) new_config->max = new_config->initial; if ((new_config->max < XGE_HAL_MIN_RING_QUEUE_BLOCKS) || - (new_config->max > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) { - return XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS; + (new_config->max > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) { + return XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS; } if ((new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) && - (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_3) && - (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)) { - return XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE; + (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_3) && + (new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)) { + return XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE; } - /* + /* * Herc has less DRAM; the check is done later inside * device_initialize() */ if (((new_config->dram_size_mb < XGE_HAL_MIN_RING_QUEUE_SIZE) || (new_config->dram_size_mb > XGE_HAL_MAX_RING_QUEUE_SIZE_XENA)) && new_config->dram_size_mb != XGE_HAL_DEFAULT_USE_HARDCODE) - return XGE_HAL_BADCFG_RING_QUEUE_SIZE; + return XGE_HAL_BADCFG_RING_QUEUE_SIZE; if ((new_config->backoff_interval_us < - XGE_HAL_MIN_BACKOFF_INTERVAL_US) || - (new_config->backoff_interval_us > - XGE_HAL_MAX_BACKOFF_INTERVAL_US)) { - return XGE_HAL_BADCFG_BACKOFF_INTERVAL_US; + XGE_HAL_MIN_BACKOFF_INTERVAL_US) || + (new_config->backoff_interval_us > + XGE_HAL_MAX_BACKOFF_INTERVAL_US)) { + return XGE_HAL_BADCFG_BACKOFF_INTERVAL_US; } if ((new_config->max_frm_len < XGE_HAL_MIN_MAX_FRM_LEN) || - (new_config->max_frm_len > XGE_HAL_MAX_MAX_FRM_LEN)) { - return XGE_HAL_BADCFG_MAX_FRM_LEN; + (new_config->max_frm_len > XGE_HAL_MAX_MAX_FRM_LEN)) { + return XGE_HAL_BADCFG_MAX_FRM_LEN; } if ((new_config->priority < XGE_HAL_MIN_RING_PRIORITY) || - (new_config->priority > XGE_HAL_MAX_RING_PRIORITY)) { - return XGE_HAL_BADCFG_RING_PRIORITY; + (new_config->priority > XGE_HAL_MAX_RING_PRIORITY)) { + return XGE_HAL_BADCFG_RING_PRIORITY; } if ((new_config->rth_en < XGE_HAL_MIN_RING_RTH_EN) || - (new_config->rth_en > XGE_HAL_MAX_RING_RTH_EN)) { - return XGE_HAL_BADCFG_RING_RTH_EN; + (new_config->rth_en > XGE_HAL_MAX_RING_RTH_EN)) { + return XGE_HAL_BADCFG_RING_RTH_EN; } if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_MAC_EN) || - (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_MAC_EN)) { - return XGE_HAL_BADCFG_RING_RTS_MAC_EN; + (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_MAC_EN)) { + return XGE_HAL_BADCFG_RING_RTS_MAC_EN; } if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_PORT_EN) || - (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) { - return XGE_HAL_BADCFG_RING_RTS_PORT_EN; + (new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) { + return XGE_HAL_BADCFG_RING_RTS_PORT_EN; } if ((new_config->intr_vector < XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR) || - (new_config->intr_vector > XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR)) { - return XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR; + (new_config->intr_vector > XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR)) { + return XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR; } if (new_config->indicate_max_pkts < XGE_HAL_MIN_RING_INDICATE_MAX_PKTS || new_config->indicate_max_pkts > XGE_HAL_MAX_RING_INDICATE_MAX_PKTS) { - return XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS; + return XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS; } return __hal_rti_config_check(&new_config->rti); @@ -318,52 +310,52 @@ static xge_hal_status_e __hal_mac_config_check (xge_hal_mac_config_t *new_config) { if ((new_config->tmac_util_period < 
XGE_HAL_MIN_TMAC_UTIL_PERIOD) || - (new_config->tmac_util_period > XGE_HAL_MAX_TMAC_UTIL_PERIOD)) { - return XGE_HAL_BADCFG_TMAC_UTIL_PERIOD; + (new_config->tmac_util_period > XGE_HAL_MAX_TMAC_UTIL_PERIOD)) { + return XGE_HAL_BADCFG_TMAC_UTIL_PERIOD; } if ((new_config->rmac_util_period < XGE_HAL_MIN_RMAC_UTIL_PERIOD) || - (new_config->rmac_util_period > XGE_HAL_MAX_RMAC_UTIL_PERIOD)) { - return XGE_HAL_BADCFG_RMAC_UTIL_PERIOD; + (new_config->rmac_util_period > XGE_HAL_MAX_RMAC_UTIL_PERIOD)) { + return XGE_HAL_BADCFG_RMAC_UTIL_PERIOD; } if ((new_config->rmac_bcast_en < XGE_HAL_MIN_RMAC_BCAST_EN) || - (new_config->rmac_bcast_en > XGE_HAL_MAX_RMAC_BCAST_EN)) { - return XGE_HAL_BADCFG_RMAC_BCAST_EN; + (new_config->rmac_bcast_en > XGE_HAL_MAX_RMAC_BCAST_EN)) { + return XGE_HAL_BADCFG_RMAC_BCAST_EN; } if ((new_config->rmac_pause_gen_en < XGE_HAL_MIN_RMAC_PAUSE_GEN_EN) || - (new_config->rmac_pause_gen_en>XGE_HAL_MAX_RMAC_PAUSE_GEN_EN)) { - return XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN; + (new_config->rmac_pause_gen_en>XGE_HAL_MAX_RMAC_PAUSE_GEN_EN)) { + return XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN; } if ((new_config->rmac_pause_rcv_en < XGE_HAL_MIN_RMAC_PAUSE_RCV_EN) || - (new_config->rmac_pause_rcv_en>XGE_HAL_MAX_RMAC_PAUSE_RCV_EN)) { - return XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN; + (new_config->rmac_pause_rcv_en>XGE_HAL_MAX_RMAC_PAUSE_RCV_EN)) { + return XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN; } if ((new_config->rmac_pause_time < XGE_HAL_MIN_RMAC_HIGH_PTIME) || - (new_config->rmac_pause_time > XGE_HAL_MAX_RMAC_HIGH_PTIME)) { - return XGE_HAL_BADCFG_RMAC_HIGH_PTIME; + (new_config->rmac_pause_time > XGE_HAL_MAX_RMAC_HIGH_PTIME)) { + return XGE_HAL_BADCFG_RMAC_HIGH_PTIME; } if ((new_config->media < XGE_HAL_MIN_MEDIA) || - (new_config->media > XGE_HAL_MAX_MEDIA)) { - return XGE_HAL_BADCFG_MEDIA; + (new_config->media > XGE_HAL_MAX_MEDIA)) { + return XGE_HAL_BADCFG_MEDIA; } if ((new_config->mc_pause_threshold_q0q3 < - XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3) || - (new_config->mc_pause_threshold_q0q3 > - XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3)) { - return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3; + XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3) || + (new_config->mc_pause_threshold_q0q3 > + XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3)) { + return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3; } if ((new_config->mc_pause_threshold_q4q7 < - XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7) || - (new_config->mc_pause_threshold_q4q7 > - XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7)) { - return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7; + XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7) || + (new_config->mc_pause_threshold_q4q7 > + XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7)) { + return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7; } return XGE_HAL_OK; @@ -389,38 +381,38 @@ __hal_fifo_config_check (xge_hal_fifo_config_t *new_config) new_config->max_frags = ((new_config->max_frags + 3) >> 2) << 2; if ((new_config->max_frags < XGE_HAL_MIN_FIFO_FRAGS) || - (new_config->max_frags > XGE_HAL_MAX_FIFO_FRAGS)) { - return XGE_HAL_BADCFG_FIFO_FRAGS; + (new_config->max_frags > XGE_HAL_MAX_FIFO_FRAGS)) { + return XGE_HAL_BADCFG_FIFO_FRAGS; } if ((new_config->reserve_threshold < - XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD) || - (new_config->reserve_threshold > - XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD)) { - return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD; + XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD) || + (new_config->reserve_threshold > + XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD)) { + return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD; } if ((new_config->memblock_size < XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE) || - (new_config->memblock_size > 
XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE)) { - return XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE; + (new_config->memblock_size > XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE)) { + return XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE; } for(i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { - xge_hal_status_e status; + xge_hal_status_e status; - if (!new_config->queue[i].configured) - continue; + if (!new_config->queue[i].configured) + continue; - if ((status = __hal_fifo_queue_check(new_config, - &new_config->queue[i])) != XGE_HAL_OK) { - return status; - } + if ((status = __hal_fifo_queue_check(new_config, + &new_config->queue[i])) != XGE_HAL_OK) { + return status; + } total_fifo_length += new_config->queue[i].max; } if(total_fifo_length > XGE_HAL_MAX_FIFO_QUEUE_LENGTH){ - return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH; + return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH; } return XGE_HAL_OK; @@ -439,20 +431,20 @@ __hal_ring_config_check (xge_hal_ring_config_t *new_config) int i; if ((new_config->memblock_size < XGE_HAL_MIN_RING_MEMBLOCK_SIZE) || - (new_config->memblock_size > XGE_HAL_MAX_RING_MEMBLOCK_SIZE)) { - return XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE; + (new_config->memblock_size > XGE_HAL_MAX_RING_MEMBLOCK_SIZE)) { + return XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE; } for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_status_e status; + xge_hal_status_e status; - if (!new_config->queue[i].configured) - continue; + if (!new_config->queue[i].configured) + continue; - if ((status = __hal_ring_queue_check(&new_config->queue[i])) - != XGE_HAL_OK) { - return status; - } + if ((status = __hal_ring_queue_check(&new_config->queue[i])) + != XGE_HAL_OK) { + return status; + } } return XGE_HAL_OK; @@ -477,92 +469,92 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config) xge_hal_status_e status; if ((new_config->mtu < XGE_HAL_MIN_MTU) || - (new_config->mtu > XGE_HAL_MAX_MTU)) { - return XGE_HAL_BADCFG_MAX_MTU; + (new_config->mtu > XGE_HAL_MAX_MTU)) { + return XGE_HAL_BADCFG_MAX_MTU; } if ((new_config->bimodal_interrupts < XGE_HAL_BIMODAL_INTR_MIN) || - (new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) { - return XGE_HAL_BADCFG_BIMODAL_INTR; + (new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) { + return XGE_HAL_BADCFG_BIMODAL_INTR; } if (new_config->bimodal_interrupts && ((new_config->bimodal_timer_lo_us < XGE_HAL_BIMODAL_TIMER_LO_US_MIN) || - (new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) { - return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US; + (new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) { + return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US; } if (new_config->bimodal_interrupts && ((new_config->bimodal_timer_hi_us < XGE_HAL_BIMODAL_TIMER_HI_US_MIN) || - (new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) { - return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US; + (new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) { + return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US; } if ((new_config->no_isr_events < XGE_HAL_NO_ISR_EVENTS_MIN) || - (new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) { - return XGE_HAL_BADCFG_NO_ISR_EVENTS; + (new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) { + return XGE_HAL_BADCFG_NO_ISR_EVENTS; } if ((new_config->isr_polling_cnt < XGE_HAL_MIN_ISR_POLLING_CNT) || - (new_config->isr_polling_cnt > XGE_HAL_MAX_ISR_POLLING_CNT)) { - return XGE_HAL_BADCFG_ISR_POLLING_CNT; + (new_config->isr_polling_cnt > XGE_HAL_MAX_ISR_POLLING_CNT)) { + return XGE_HAL_BADCFG_ISR_POLLING_CNT; } if (new_config->latency_timer && new_config->latency_timer != 
XGE_HAL_USE_BIOS_DEFAULT_LATENCY) { - if ((new_config->latency_timer < XGE_HAL_MIN_LATENCY_TIMER) || - (new_config->latency_timer > XGE_HAL_MAX_LATENCY_TIMER)) { - return XGE_HAL_BADCFG_LATENCY_TIMER; - } + if ((new_config->latency_timer < XGE_HAL_MIN_LATENCY_TIMER) || + (new_config->latency_timer > XGE_HAL_MAX_LATENCY_TIMER)) { + return XGE_HAL_BADCFG_LATENCY_TIMER; + } } if (new_config->max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) { - if ((new_config->max_splits_trans < - XGE_HAL_ONE_SPLIT_TRANSACTION) || - (new_config->max_splits_trans > - XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION)) - return XGE_HAL_BADCFG_MAX_SPLITS_TRANS; + if ((new_config->max_splits_trans < + XGE_HAL_ONE_SPLIT_TRANSACTION) || + (new_config->max_splits_trans > + XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION)) + return XGE_HAL_BADCFG_MAX_SPLITS_TRANS; } if (new_config->mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) { if ((new_config->mmrb_count < XGE_HAL_MIN_MMRB_COUNT) || - (new_config->mmrb_count > XGE_HAL_MAX_MMRB_COUNT)) { - return XGE_HAL_BADCFG_MMRB_COUNT; + (new_config->mmrb_count > XGE_HAL_MAX_MMRB_COUNT)) { + return XGE_HAL_BADCFG_MMRB_COUNT; } } if ((new_config->shared_splits < XGE_HAL_MIN_SHARED_SPLITS) || - (new_config->shared_splits > XGE_HAL_MAX_SHARED_SPLITS)) { - return XGE_HAL_BADCFG_SHARED_SPLITS; + (new_config->shared_splits > XGE_HAL_MAX_SHARED_SPLITS)) { + return XGE_HAL_BADCFG_SHARED_SPLITS; } if (new_config->stats_refresh_time_sec != XGE_HAL_STATS_REFRESH_DISABLE) { if ((new_config->stats_refresh_time_sec < - XGE_HAL_MIN_STATS_REFRESH_TIME) || + XGE_HAL_MIN_STATS_REFRESH_TIME) || (new_config->stats_refresh_time_sec > - XGE_HAL_MAX_STATS_REFRESH_TIME)) { - return XGE_HAL_BADCFG_STATS_REFRESH_TIME; + XGE_HAL_MAX_STATS_REFRESH_TIME)) { + return XGE_HAL_BADCFG_STATS_REFRESH_TIME; } } if ((new_config->intr_mode != XGE_HAL_INTR_MODE_IRQLINE) && - (new_config->intr_mode != XGE_HAL_INTR_MODE_MSI) && - (new_config->intr_mode != XGE_HAL_INTR_MODE_MSIX)) { - return XGE_HAL_BADCFG_INTR_MODE; + (new_config->intr_mode != XGE_HAL_INTR_MODE_MSI) && + (new_config->intr_mode != XGE_HAL_INTR_MODE_MSIX)) { + return XGE_HAL_BADCFG_INTR_MODE; } if ((new_config->sched_timer_us < XGE_HAL_SCHED_TIMER_MIN) || - (new_config->sched_timer_us > XGE_HAL_SCHED_TIMER_MAX)) { - return XGE_HAL_BADCFG_SCHED_TIMER_US; + (new_config->sched_timer_us > XGE_HAL_SCHED_TIMER_MAX)) { + return XGE_HAL_BADCFG_SCHED_TIMER_US; } if ((new_config->sched_timer_one_shot != - XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE) && - (new_config->sched_timer_one_shot != - XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE)) { - return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT; + XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE) && + (new_config->sched_timer_one_shot != + XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE)) { + return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT; } /* @@ -576,105 +568,105 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config) */ if (new_config->sched_timer_us && new_config->rxufca_hi_lim != new_config->rxufca_lo_lim) { - if ((new_config->rxufca_intr_thres < - XGE_HAL_RXUFCA_INTR_THRES_MIN) || - (new_config->rxufca_intr_thres > - XGE_HAL_RXUFCA_INTR_THRES_MAX)) { - return XGE_HAL_BADCFG_RXUFCA_INTR_THRES; - } - - if ((new_config->rxufca_hi_lim < XGE_HAL_RXUFCA_HI_LIM_MIN) || - (new_config->rxufca_hi_lim > XGE_HAL_RXUFCA_HI_LIM_MAX)) { - return XGE_HAL_BADCFG_RXUFCA_HI_LIM; - } - - if ((new_config->rxufca_lo_lim < XGE_HAL_RXUFCA_LO_LIM_MIN) || - (new_config->rxufca_lo_lim > XGE_HAL_RXUFCA_LO_LIM_MAX) || - (new_config->rxufca_lo_lim > new_config->rxufca_hi_lim)) { - return 
XGE_HAL_BADCFG_RXUFCA_LO_LIM; - } - - if ((new_config->rxufca_lbolt_period < - XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN) || - (new_config->rxufca_lbolt_period > - XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX)) { - return XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD; - } + if ((new_config->rxufca_intr_thres < + XGE_HAL_RXUFCA_INTR_THRES_MIN) || + (new_config->rxufca_intr_thres > + XGE_HAL_RXUFCA_INTR_THRES_MAX)) { + return XGE_HAL_BADCFG_RXUFCA_INTR_THRES; + } + + if ((new_config->rxufca_hi_lim < XGE_HAL_RXUFCA_HI_LIM_MIN) || + (new_config->rxufca_hi_lim > XGE_HAL_RXUFCA_HI_LIM_MAX)) { + return XGE_HAL_BADCFG_RXUFCA_HI_LIM; + } + + if ((new_config->rxufca_lo_lim < XGE_HAL_RXUFCA_LO_LIM_MIN) || + (new_config->rxufca_lo_lim > XGE_HAL_RXUFCA_LO_LIM_MAX) || + (new_config->rxufca_lo_lim > new_config->rxufca_hi_lim)) { + return XGE_HAL_BADCFG_RXUFCA_LO_LIM; + } + + if ((new_config->rxufca_lbolt_period < + XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN) || + (new_config->rxufca_lbolt_period > + XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX)) { + return XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD; + } } if ((new_config->link_valid_cnt < XGE_HAL_LINK_VALID_CNT_MIN) || - (new_config->link_valid_cnt > XGE_HAL_LINK_VALID_CNT_MAX)) { - return XGE_HAL_BADCFG_LINK_VALID_CNT; + (new_config->link_valid_cnt > XGE_HAL_LINK_VALID_CNT_MAX)) { + return XGE_HAL_BADCFG_LINK_VALID_CNT; } if ((new_config->link_retry_cnt < XGE_HAL_LINK_RETRY_CNT_MIN) || - (new_config->link_retry_cnt > XGE_HAL_LINK_RETRY_CNT_MAX)) { - return XGE_HAL_BADCFG_LINK_RETRY_CNT; + (new_config->link_retry_cnt > XGE_HAL_LINK_RETRY_CNT_MAX)) { + return XGE_HAL_BADCFG_LINK_RETRY_CNT; } if (new_config->link_valid_cnt > new_config->link_retry_cnt) - return XGE_HAL_BADCFG_LINK_VALID_CNT; + return XGE_HAL_BADCFG_LINK_VALID_CNT; if (new_config->link_stability_period != XGE_HAL_DEFAULT_USE_HARDCODE) { if ((new_config->link_stability_period < - XGE_HAL_MIN_LINK_STABILITY_PERIOD) || - (new_config->link_stability_period > - XGE_HAL_MAX_LINK_STABILITY_PERIOD)) { - return XGE_HAL_BADCFG_LINK_STABILITY_PERIOD; + XGE_HAL_MIN_LINK_STABILITY_PERIOD) || + (new_config->link_stability_period > + XGE_HAL_MAX_LINK_STABILITY_PERIOD)) { + return XGE_HAL_BADCFG_LINK_STABILITY_PERIOD; } } if (new_config->device_poll_millis != XGE_HAL_DEFAULT_USE_HARDCODE) { if ((new_config->device_poll_millis < - XGE_HAL_MIN_DEVICE_POLL_MILLIS) || - (new_config->device_poll_millis > - XGE_HAL_MAX_DEVICE_POLL_MILLIS)) { - return XGE_HAL_BADCFG_DEVICE_POLL_MILLIS; + XGE_HAL_MIN_DEVICE_POLL_MILLIS) || + (new_config->device_poll_millis > + XGE_HAL_MAX_DEVICE_POLL_MILLIS)) { + return XGE_HAL_BADCFG_DEVICE_POLL_MILLIS; } - } + } if ((new_config->rts_port_en < XGE_HAL_MIN_RING_RTS_PORT_EN) || - (new_config->rts_port_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) { - return XGE_HAL_BADCFG_RTS_PORT_EN; + (new_config->rts_port_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) { + return XGE_HAL_BADCFG_RTS_PORT_EN; } if ((new_config->rts_qos_en < XGE_HAL_RTS_QOS_DISABLE) || - (new_config->rts_qos_en > XGE_HAL_RTS_QOS_ENABLE)) { - return XGE_HAL_BADCFG_RTS_QOS_EN; + (new_config->rts_qos_en > XGE_HAL_RTS_QOS_ENABLE)) { + return XGE_HAL_BADCFG_RTS_QOS_EN; } #if defined(XGE_HAL_CONFIG_LRO) if (new_config->lro_sg_size != - XGE_HAL_DEFAULT_USE_HARDCODE) { - if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) || - (new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) { - return XGE_HAL_BADCFG_LRO_SG_SIZE; - } + XGE_HAL_DEFAULT_USE_HARDCODE) { + if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) || + (new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) { + return 
XGE_HAL_BADCFG_LRO_SG_SIZE; + } } if (new_config->lro_frm_len != - XGE_HAL_DEFAULT_USE_HARDCODE) { - if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) || - (new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) { - return XGE_HAL_BADCFG_LRO_FRM_LEN; - } + XGE_HAL_DEFAULT_USE_HARDCODE) { + if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) || + (new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) { + return XGE_HAL_BADCFG_LRO_FRM_LEN; + } } #endif if ((status = __hal_ring_config_check(&new_config->ring)) - != XGE_HAL_OK) { - return status; + != XGE_HAL_OK) { + return status; } if ((status = __hal_mac_config_check(&new_config->mac)) != XGE_HAL_OK) { - return status; + return status; } if ((status = __hal_fifo_config_check(&new_config->fifo)) != XGE_HAL_OK) { - return status; + return status; } return XGE_HAL_OK; @@ -695,12 +687,12 @@ xge_hal_status_e __hal_device_config_check_xena (xge_hal_device_config_t *new_config) { if ((new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_33) && - (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) && - (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) && - (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) && - (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) && - (new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) { - return XGE_HAL_BADCFG_PCI_FREQ_MHERZ; + (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) && + (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) && + (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) && + (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) && + (new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) { + return XGE_HAL_BADCFG_PCI_FREQ_MHERZ; } return XGE_HAL_OK; @@ -735,25 +727,25 @@ xge_hal_status_e __hal_driver_config_check (xge_hal_driver_config_t *new_config) { if ((new_config->queue_size_initial < - XGE_HAL_MIN_QUEUE_SIZE_INITIAL) || + XGE_HAL_MIN_QUEUE_SIZE_INITIAL) || (new_config->queue_size_initial > - XGE_HAL_MAX_QUEUE_SIZE_INITIAL)) { - return XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL; + XGE_HAL_MAX_QUEUE_SIZE_INITIAL)) { + return XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL; } if ((new_config->queue_size_max < XGE_HAL_MIN_QUEUE_SIZE_MAX) || - (new_config->queue_size_max > XGE_HAL_MAX_QUEUE_SIZE_MAX)) { - return XGE_HAL_BADCFG_QUEUE_SIZE_MAX; + (new_config->queue_size_max > XGE_HAL_MAX_QUEUE_SIZE_MAX)) { + return XGE_HAL_BADCFG_QUEUE_SIZE_MAX; } #ifdef XGE_TRACE_INTO_CIRCULAR_ARR if ((new_config->tracebuf_size < XGE_HAL_MIN_CIRCULAR_ARR) || - (new_config->tracebuf_size > XGE_HAL_MAX_CIRCULAR_ARR)) { - return XGE_HAL_BADCFG_TRACEBUF_SIZE; + (new_config->tracebuf_size > XGE_HAL_MAX_CIRCULAR_ARR)) { + return XGE_HAL_BADCFG_TRACEBUF_SIZE; } if ((new_config->tracebuf_timestamp_en < XGE_HAL_MIN_TIMESTAMP_EN) || - (new_config->tracebuf_timestamp_en > XGE_HAL_MAX_TIMESTAMP_EN)) { - return XGE_HAL_BADCFG_TRACEBUF_SIZE; + (new_config->tracebuf_timestamp_en > XGE_HAL_MAX_TIMESTAMP_EN)) { + return XGE_HAL_BADCFG_TRACEBUF_SIZE; } #endif diff --git a/sys/dev/nxge/xgehal/xgehal-device-fp.c b/sys/dev/nxge/xgehal/xgehal-device-fp.c index 5e2faf1..2788e51 100644 --- a/sys/dev/nxge/xgehal/xgehal-device-fp.c +++ b/sys/dev/nxge/xgehal/xgehal-device-fp.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-device-fp.c - * - * Description: HAL device object functionality (fast path) - * - * Created: 10 June 2004 - */ - #ifdef XGE_DEBUG_FP #include <dev/nxge/include/xgehal-device.h> #endif @@ -45,22 +37,22 @@ * xge_hal_device_bar0 - Get BAR0 mapped address. 
* @hldev: HAL device handle. * - * Returns: BAR0 address of the specified device. + * Returns: BAR0 address of the specified device. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * xge_hal_device_bar0(xge_hal_device_t *hldev) { return hldev->bar0; } /** - * xge_hal_device_isrbar0 - Get BAR0 mapped address. + * xge_hal_device_isrbar0 - Get BAR0 mapped address. * @hldev: HAL device handle. * - * Returns: BAR0 address of the specified device. + * Returns: BAR0 address of the specified device. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * -xge_hal_device_isrbar0(xge_hal_device_t *hldev) +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * +xge_hal_device_isrbar0(xge_hal_device_t *hldev) { return hldev->isrbar0; } @@ -69,9 +61,9 @@ xge_hal_device_isrbar0(xge_hal_device_t *hldev) * xge_hal_device_bar1 - Get BAR1 mapped address. * @hldev: HAL device handle. * - * Returns: BAR1 address of the specified device. + * Returns: BAR1 address of the specified device. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char * xge_hal_device_bar1(xge_hal_device_t *hldev) { return hldev->bar1; @@ -81,23 +73,23 @@ xge_hal_device_bar1(xge_hal_device_t *hldev) * xge_hal_device_bar0_set - Set BAR0 mapped address. * @hldev: HAL device handle. * @bar0: BAR0 mapped address. - * * Set BAR0 address in the HAL device object. + * * Set BAR0 address in the HAL device object. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0) { xge_assert(bar0); - hldev->bar0 = bar0; + hldev->bar0 = bar0; } /** - * xge_hal_device_isrbar0_set - Set BAR0 mapped address. + * xge_hal_device_isrbar0_set - Set BAR0 mapped address. * @hldev: HAL device handle. * @isrbar0: BAR0 mapped address. - * * Set BAR0 address in the HAL device object. + * * Set BAR0 address in the HAL device object. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void -xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0) +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0) { xge_assert(isrbar0); hldev->isrbar0 = isrbar0; @@ -109,186 +101,186 @@ xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0) * @channelh: Channel handle. * @bar1: BAR1 mapped address. * - * Set BAR1 address for the given channel. + * Set BAR1 address for the given channel. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh, - char *bar1) + char *bar1) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_assert(bar1); xge_assert(fifo); - /* Initializing the BAR1 address as the start of - * the FIFO queue pointer and as a location of FIFO control + /* Initializing the BAR1 address as the start of + * the FIFO queue pointer and as a location of FIFO control * word. */ fifo->hw_pair = - (xge_hal_fifo_hw_pair_t *) (bar1 + - (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET)); - hldev->bar1 = bar1; + (xge_hal_fifo_hw_pair_t *) (bar1 + + (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET)); + hldev->bar1 = bar1; } /** - * xge_hal_device_rev - Get Device revision number. + * xge_hal_device_rev - Get Device revision number. * @hldev: HAL device handle. 
* - * Returns: Device revision number + * Returns: Device revision number */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int -xge_hal_device_rev(xge_hal_device_t *hldev) +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int +xge_hal_device_rev(xge_hal_device_t *hldev) { - return hldev->revision; + return hldev->revision; } /** - * xge_hal_device_begin_irq - Begin IRQ processing. + * xge_hal_device_begin_irq - Begin IRQ processing. * @hldev: HAL device handle. - * @reason: "Reason" for the interrupt, the value of Xframe's - * general_int_status register. + * @reason: "Reason" for the interrupt, the value of Xframe's + * general_int_status register. * - * The function performs two actions, It first checks whether (shared IRQ) the - * interrupt was raised by the device. Next, it masks the device interrupts. + * The function performs two actions, It first checks whether (shared IRQ) the + * interrupt was raised by the device. Next, it masks the device interrupts. * * Note: * xge_hal_device_begin_irq() does not flush MMIO writes through the * bridge. Therefore, two back-to-back interrupts are potentially possible. - * It is the responsibility of the ULD to make sure that only one + * It is the responsibility of the ULD to make sure that only one * xge_hal_device_continue_irq() runs at a time. * - * Returns: 0, if the interrupt is not "ours" (note that in this case the + * Returns: 0, if the interrupt is not "ours" (note that in this case the * device remain enabled). * Otherwise, xge_hal_device_begin_irq() returns 64bit general adapter * status. * See also: xge_hal_device_handle_irq() */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason) { - u64 val64; - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + u64 val64; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; hldev->stats.sw_dev_info_stats.total_intr_cnt++; - val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &isrbar0->general_int_status); + val64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &isrbar0->general_int_status); if (xge_os_unlikely(!val64)) { - /* not Xframe interrupt */ - hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++; - *reason = 0; - return XGE_HAL_ERR_WRONG_IRQ; + /* not Xframe interrupt */ + hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++; + *reason = 0; + return XGE_HAL_ERR_WRONG_IRQ; } if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) { - u64 adapter_status = - xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->adapter_status); - if (adapter_status == XGE_HAL_ALL_FOXES) { - (void) xge_queue_produce(hldev->queueh, - XGE_HAL_EVENT_SLOT_FREEZE, - hldev, - 1, /* critical: slot freeze */ - sizeof(u64), - (void*)&adapter_status); - *reason = 0; - return XGE_HAL_ERR_CRITICAL; - } + u64 adapter_status = + xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->adapter_status); + if (adapter_status == XGE_HAL_ALL_FOXES) { + (void) xge_queue_produce(hldev->queueh, + XGE_HAL_EVENT_SLOT_FREEZE, + hldev, + 1, /* critical: slot freeze */ + sizeof(u64), + (void*)&adapter_status); + *reason = 0; + return XGE_HAL_ERR_CRITICAL; + } } - *reason = val64; + *reason = val64; - /* separate fast path, i.e. no errors */ - if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) { - hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++; - return XGE_HAL_OK; + /* separate fast path, i.e. 
no errors */ + if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) { + hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++; + return XGE_HAL_OK; } - if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) { - hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++; - return XGE_HAL_OK; + if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) { + hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++; + return XGE_HAL_OK; } hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.txpic_intr_cnt++; - status = __hal_device_handle_txpic(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.txpic_intr_cnt++; + status = __hal_device_handle_txpic(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.txdma_intr_cnt++; - status = __hal_device_handle_txdma(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.txdma_intr_cnt++; + status = __hal_device_handle_txdma(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.txmac_intr_cnt++; - status = __hal_device_handle_txmac(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.txmac_intr_cnt++; + status = __hal_device_handle_txmac(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++; - status = __hal_device_handle_txxgxs(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++; + status = __hal_device_handle_txxgxs(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++; - status = __hal_device_handle_rxpic(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++; + status = __hal_device_handle_rxpic(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++; - status = __hal_device_handle_rxdma(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++; + status = __hal_device_handle_rxdma(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++; - status = __hal_device_handle_rxmac(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if 
(xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++; + status = __hal_device_handle_rxmac(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++; - status = __hal_device_handle_rxxgxs(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++; + status = __hal_device_handle_rxxgxs(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } - if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) { - xge_hal_status_e status; - hldev->stats.sw_dev_info_stats.mc_intr_cnt++; - status = __hal_device_handle_mc(hldev, val64); - if (status != XGE_HAL_OK) { - return status; - } + if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) { + xge_hal_status_e status; + hldev->stats.sw_dev_info_stats.mc_intr_cnt++; + status = __hal_device_handle_mc(hldev, val64); + if (status != XGE_HAL_OK) { + return status; + } } return XGE_HAL_OK; @@ -296,60 +288,60 @@ xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason) /** * xge_hal_device_clear_rx - Acknowledge (that is, clear) the - * condition that has caused the RX interrupt. + * condition that has caused the RX interrupt. * @hldev: HAL device handle. * - * Acknowledge (that is, clear) the condition that has caused + * Acknowledge (that is, clear) the condition that has caused * the Rx interrupt. * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(), * xge_hal_device_clear_tx(), xge_hal_device_mask_rx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_clear_rx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xFFFFFFFFFFFFFFFFULL, - &isrbar0->rx_traffic_int); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0xFFFFFFFFFFFFFFFFULL, + &isrbar0->rx_traffic_int); } /** * xge_hal_device_clear_tx - Acknowledge (that is, clear) the - * condition that has caused the TX interrupt. + * condition that has caused the TX interrupt. * @hldev: HAL device handle. * - * Acknowledge (that is, clear) the condition that has caused + * Acknowledge (that is, clear) the condition that has caused * the Tx interrupt. * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(), * xge_hal_device_clear_rx(), xge_hal_device_mask_tx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_clear_tx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xFFFFFFFFFFFFFFFFULL, - &isrbar0->tx_traffic_int); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0xFFFFFFFFFFFFFFFFULL, + &isrbar0->tx_traffic_int); } /** - * xge_hal_device_poll_rx_channel - Poll Rx channel for completed + * xge_hal_device_poll_rx_channel - Poll Rx channel for completed * descriptors and process the same. * @channel: HAL channel. 
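/*
 * Minimal sketch (assuming a hypothetical ULD interrupt routine that is
 * handed the HAL device) of the calling sequence xge_hal_device_begin_irq()
 * expects from a line ISR.  xge_hal_device_handle_irq(), further below,
 * packages essentially this sequence; the function name here and the
 * unconditional unmask-on-error policy are illustrative only.
 */
static void
xge_example_isr(void *arg)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
    u64 reason;

    xge_hal_device_mask_all(hldev);
    if (xge_hal_device_begin_irq(hldev, &reason) != XGE_HAL_OK) {
        /* shared IRQ raised by another device, or a critical event */
        xge_hal_device_unmask_all(hldev);
        return;
    }
    if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC)
        xge_hal_device_clear_rx(hldev);
    if (reason & XGE_HAL_GEN_INTR_TXTRAFFIC)
        xge_hal_device_clear_tx(hldev);
    (void) xge_hal_device_continue_irq(hldev);
    xge_hal_device_unmask_all(hldev);
}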
* @got_rx: Buffer to return the flag set if receive interrupt is occured * - * The function polls the Rx channel for the completed descriptors and calls - * the upper-layer driver (ULD) via supplied completion callback. + * The function polls the Rx channel for the completed descriptors and calls + * the upper-layer driver (ULD) via supplied completion callback. * - * Returns: XGE_HAL_OK, if the polling is completed successful. - * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed + * Returns: XGE_HAL_OK, if the polling is completed successful. + * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. * * See also: xge_hal_device_poll_tx_channel() */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx) { xge_hal_status_e ret = XGE_HAL_OK; @@ -363,21 +355,21 @@ xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx) ((xge_hal_ring_t *)channel)->cmpl_cnt = 0; channel->poll_bytes = 0; if ((ret = xge_hal_ring_dtr_next_completed (channel, &first_dtrh, - &t_code)) == XGE_HAL_OK) { - if (channel->callback(channel, first_dtrh, - t_code, channel->userdata) != XGE_HAL_OK) { - (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1; - got_bytes += channel->poll_bytes + 1; - ret = XGE_HAL_COMPLETIONS_REMAIN; - } else { - (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1; - got_bytes += channel->poll_bytes + 1; - } + &t_code)) == XGE_HAL_OK) { + if (channel->callback(channel, first_dtrh, + t_code, channel->userdata) != XGE_HAL_OK) { + (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1; + got_bytes += channel->poll_bytes + 1; + ret = XGE_HAL_COMPLETIONS_REMAIN; + } else { + (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1; + got_bytes += channel->poll_bytes + 1; + } } if (*got_rx) { - hldev->irq_workload_rxd[channel->post_qid] += *got_rx; - hldev->irq_workload_rxcnt[channel->post_qid] ++; + hldev->irq_workload_rxd[channel->post_qid] += *got_rx; + hldev->irq_workload_rxcnt[channel->post_qid] ++; } hldev->irq_workload_rxlen[channel->post_qid] += got_bytes; @@ -385,21 +377,21 @@ xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx) } /** - * xge_hal_device_poll_tx_channel - Poll Tx channel for completed + * xge_hal_device_poll_tx_channel - Poll Tx channel for completed * descriptors and process the same. * @channel: HAL channel. * @got_tx: Buffer to return the flag set if transmit interrupt is occured * - * The function polls the Tx channel for the completed descriptors and calls - * the upper-layer driver (ULD) via supplied completion callback. + * The function polls the Tx channel for the completed descriptors and calls + * the upper-layer driver (ULD) via supplied completion callback. * - * Returns: XGE_HAL_OK, if the polling is completed successful. - * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed + * Returns: XGE_HAL_OK, if the polling is completed successful. + * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. * * See also: xge_hal_device_poll_rx_channel(). 
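/*
 * Sketch of the shape of a ULD Rx completion callback as invoked by
 * xge_hal_device_poll_rx_channel() above (the callback is registered at
 * xge_hal_channel_open() time); returning anything other than XGE_HAL_OK
 * makes the poll routine report XGE_HAL_COMPLETIONS_REMAIN.  The exact
 * prototype is the xge_hal_channel_callback_f type from the HAL headers;
 * a u8 transfer code and the function name are assumptions here.
 */
static xge_hal_status_e
xge_example_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
    u8 t_code, void *userdata)
{
    do {
        /* hand the buffer attached to 'dtr' to the stack and
         * recycle or re-post the descriptor here */
    } while (xge_hal_ring_dtr_next_completed(channelh, &dtr,
        &t_code) == XGE_HAL_OK);

    return XGE_HAL_OK;
}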
*/ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx) { xge_hal_dtr_h first_dtrh; @@ -411,20 +403,20 @@ xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx) got_bytes = *got_tx = 0; channel->poll_bytes = 0; if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh, - &t_code) == XGE_HAL_OK) { - if (channel->callback(channel, first_dtrh, - t_code, channel->userdata) != XGE_HAL_OK) { - (*got_tx)++; - got_bytes += channel->poll_bytes + 1; - return XGE_HAL_COMPLETIONS_REMAIN; - } - (*got_tx)++; - got_bytes += channel->poll_bytes + 1; + &t_code) == XGE_HAL_OK) { + if (channel->callback(channel, first_dtrh, + t_code, channel->userdata) != XGE_HAL_OK) { + (*got_tx)++; + got_bytes += channel->poll_bytes + 1; + return XGE_HAL_COMPLETIONS_REMAIN; + } + (*got_tx)++; + got_bytes += channel->poll_bytes + 1; } if (*got_tx) { - hldev->irq_workload_txd[channel->post_qid] += *got_tx; - hldev->irq_workload_txcnt[channel->post_qid] ++; + hldev->irq_workload_txd[channel->post_qid] += *got_tx; + hldev->irq_workload_txcnt[channel->post_qid] ++; } hldev->irq_workload_txlen[channel->post_qid] += got_bytes; @@ -437,27 +429,27 @@ xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx) * @hldev: HAL device handle. * @got_rx: Buffer to return flag set if receive is ready * - * The function polls the Rx channels for the completed descriptors and calls - * the upper-layer driver (ULD) via supplied completion callback. + * The function polls the Rx channels for the completed descriptors and calls + * the upper-layer driver (ULD) via supplied completion callback. * - * Returns: XGE_HAL_OK, if the polling is completed successful. - * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed + * Returns: XGE_HAL_OK, if the polling is completed successful. + * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. * - * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq(). + * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx) { xge_list_t *item; xge_hal_channel_t *channel; /* for each opened rx channel */ - xge_list_for_each(item, &hldev->ring_channels) { - if (hldev->terminating) - return XGE_HAL_OK; - channel = xge_container_of(item, xge_hal_channel_t, item); - (void) xge_hal_device_poll_rx_channel(channel, got_rx); + xge_list_for_each(item, &hldev->ring_channels) { + if (hldev->terminating) + return XGE_HAL_OK; + channel = xge_container_of(item, xge_hal_channel_t, item); + (void) xge_hal_device_poll_rx_channel(channel, got_rx); } return XGE_HAL_OK; @@ -469,122 +461,122 @@ xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx) * @hldev: HAL device handle. * @got_tx: Buffer to return flag set if transmit is ready * - * The function polls the Tx channels for the completed descriptors and calls - * the upper-layer driver (ULD) via supplied completion callback. + * The function polls the Tx channels for the completed descriptors and calls + * the upper-layer driver (ULD) via supplied completion callback. * - * Returns: XGE_HAL_OK, if the polling is completed successful. 
- * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed + * Returns: XGE_HAL_OK, if the polling is completed successful. + * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. * - * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq(). + * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx) { xge_list_t *item; xge_hal_channel_t *channel; /* for each opened tx channel */ - xge_list_for_each(item, &hldev->fifo_channels) { - if (hldev->terminating) - return XGE_HAL_OK; - channel = xge_container_of(item, xge_hal_channel_t, item); - (void) xge_hal_device_poll_tx_channel(channel, got_tx); + xge_list_for_each(item, &hldev->fifo_channels) { + if (hldev->terminating) + return XGE_HAL_OK; + channel = xge_container_of(item, xge_hal_channel_t, item); + (void) xge_hal_device_poll_tx_channel(channel, got_tx); } return XGE_HAL_OK; } /** - * xge_hal_device_mask_tx - Mask Tx interrupts. + * xge_hal_device_mask_tx - Mask Tx interrupts. * @hldev: HAL device handle. * - * Mask Tx device interrupts. + * Mask Tx device interrupts. * * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(), * xge_hal_device_clear_tx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void -xge_hal_device_mask_tx(xge_hal_device_t *hldev) +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +xge_hal_device_mask_tx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xFFFFFFFFFFFFFFFFULL, - &isrbar0->tx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0xFFFFFFFFFFFFFFFFULL, + &isrbar0->tx_traffic_mask); } /** - * xge_hal_device_mask_rx - Mask Rx interrupts. + * xge_hal_device_mask_rx - Mask Rx interrupts. * @hldev: HAL device handle. * - * Mask Rx device interrupts. + * Mask Rx device interrupts. * * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(), * xge_hal_device_clear_rx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void -xge_hal_device_mask_rx(xge_hal_device_t *hldev) +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +xge_hal_device_mask_rx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xFFFFFFFFFFFFFFFFULL, - &isrbar0->rx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0xFFFFFFFFFFFFFFFFULL, + &isrbar0->rx_traffic_mask); } /** * xge_hal_device_mask_all - Mask all device interrupts. * @hldev: HAL device handle. * - * Mask all device interrupts. + * Mask all device interrupts. 
* * See also: xge_hal_device_unmask_all() */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_mask_all(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xFFFFFFFFFFFFFFFFULL, - &isrbar0->general_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0xFFFFFFFFFFFFFFFFULL, + &isrbar0->general_int_mask); } /** - * xge_hal_device_unmask_tx - Unmask Tx interrupts. + * xge_hal_device_unmask_tx - Unmask Tx interrupts. * @hldev: HAL device handle. * - * Unmask Tx device interrupts. + * Unmask Tx device interrupts. * * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_unmask_tx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0ULL, - &isrbar0->tx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0ULL, + &isrbar0->tx_traffic_mask); } /** - * xge_hal_device_unmask_rx - Unmask Rx interrupts. + * xge_hal_device_unmask_rx - Unmask Rx interrupts. * @hldev: HAL device handle. * - * Unmask Rx device interrupts. + * Unmask Rx device interrupts. * * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx(). */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_unmask_rx(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0ULL, - &isrbar0->rx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0ULL, + &isrbar0->rx_traffic_mask); } /** @@ -595,63 +587,63 @@ xge_hal_device_unmask_rx(xge_hal_device_t *hldev) * * See also: xge_hal_device_mask_all() */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void xge_hal_device_unmask_all(xge_hal_device_t *hldev) { - xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; + xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0ULL, - &isrbar0->general_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0ULL, + &isrbar0->general_int_mask); } /** - * xge_hal_device_continue_irq - Continue handling IRQ: process all + * xge_hal_device_continue_irq - Continue handling IRQ: process all * completed descriptors. * @hldev: HAL device handle. * - * Process completed descriptors and unmask the device interrupts. + * Process completed descriptors and unmask the device interrupts. * - * The xge_hal_device_continue_irq() walks all open channels - * and calls upper-layer driver (ULD) via supplied completion - * callback. Note that the completion callback is specified at channel open + * The xge_hal_device_continue_irq() walks all open channels + * and calls upper-layer driver (ULD) via supplied completion + * callback. Note that the completion callback is specified at channel open * time, see xge_hal_channel_open(). * - * Note that the xge_hal_device_continue_irq is part of the _fast_ path. 
- * To optimize the processing, the function does _not_ check for + * Note that the xge_hal_device_continue_irq is part of the _fast_ path. + * To optimize the processing, the function does _not_ check for * errors and alarms. * - * The latter is done in a polling fashion, via xge_hal_device_poll(). + * The latter is done in a polling fashion, via xge_hal_device_poll(). * - * Returns: XGE_HAL_OK. + * Returns: XGE_HAL_OK. * * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(), * xge_hal_ring_dtr_next_completed(), * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}. */ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_continue_irq(xge_hal_device_t *hldev) { - int got_rx = 1, got_tx = 1; - int isr_polling_cnt = hldev->config.isr_polling_cnt; - int count = 0; + int got_rx = 1, got_tx = 1; + int isr_polling_cnt = hldev->config.isr_polling_cnt; + int count = 0; do { - if (got_rx) - (void) xge_hal_device_poll_rx_channels(hldev, &got_rx); - if (got_tx && hldev->tti_enabled) - (void) xge_hal_device_poll_tx_channels(hldev, &got_tx); + if (got_rx) + (void) xge_hal_device_poll_rx_channels(hldev, &got_rx); + if (got_tx && hldev->tti_enabled) + (void) xge_hal_device_poll_tx_channels(hldev, &got_tx); - if (!got_rx && !got_tx) - break; + if (!got_rx && !got_tx) + break; - count += (got_rx + got_tx); + count += (got_rx + got_tx); }while (isr_polling_cnt--); if (!count) - hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; + hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; return XGE_HAL_OK; } @@ -660,34 +652,34 @@ xge_hal_device_continue_irq(xge_hal_device_t *hldev) * xge_hal_device_handle_irq - Handle device IRQ. * @hldev: HAL device handle. * - * Perform the complete handling of the line interrupt. The function - * performs two calls. - * First it uses xge_hal_device_begin_irq() to check the reason for + * Perform the complete handling of the line interrupt. The function + * performs two calls. + * First it uses xge_hal_device_begin_irq() to check the reason for * the interrupt and mask the device interrupts. - * Second, it calls xge_hal_device_continue_irq() to process all + * Second, it calls xge_hal_device_continue_irq() to process all * completed descriptors and re-enable the interrupts. * - * Returns: XGE_HAL_OK - success; - * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device. + * Returns: XGE_HAL_OK - success; + * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device. * * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(). 
*/ -__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e +__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e xge_hal_device_handle_irq(xge_hal_device_t *hldev) { - u64 reason; + u64 reason; xge_hal_status_e status; xge_hal_device_mask_all(hldev); status = xge_hal_device_begin_irq(hldev, &reason); if (status != XGE_HAL_OK) { - xge_hal_device_unmask_all(hldev); - return status; + xge_hal_device_unmask_all(hldev); + return status; } if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) { - xge_hal_device_clear_rx(hldev); + xge_hal_device_clear_rx(hldev); } status = xge_hal_device_continue_irq(hldev); @@ -699,266 +691,266 @@ xge_hal_device_handle_irq(xge_hal_device_t *hldev) return status; } -#if defined(XGE_HAL_CONFIG_LRO) +#if defined(XGE_HAL_CONFIG_LRO) __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int -__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip) +__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip) { - /* Match Source address field */ - if ((lro->ip_hdr->saddr != ip->saddr)) - return XGE_HAL_FAIL; + /* Match Source address field */ + if ((lro->ip_hdr->saddr != ip->saddr)) + return XGE_HAL_FAIL; /* Match Destination address field */ - if ((lro->ip_hdr->daddr != ip->daddr)) - return XGE_HAL_FAIL; + if ((lro->ip_hdr->daddr != ip->daddr)) + return XGE_HAL_FAIL; - /* Match Source Port field */ + /* Match Source Port field */ if ((lro->tcp_hdr->source != tcp->source)) - return XGE_HAL_FAIL; + return XGE_HAL_FAIL; - /* Match Destination Port field */ - if ((lro->tcp_hdr->dest != tcp->dest)) - return XGE_HAL_FAIL; - + /* Match Destination Port field */ + if ((lro->tcp_hdr->dest != tcp->dest)) + return XGE_HAL_FAIL; + return XGE_HAL_OK; } /* * __hal_tcp_seg_len: Find the tcp seg len. - * @ip: ip header. + * @ip: ip header. * @tcp: tcp header. - * returns: Tcp seg length. + * returns: Tcp seg length. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16 -__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp) +__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp) { - u16 ret; + u16 ret; - ret = (xge_os_ntohs(ip->tot_len) - - ((ip->version_ihl & 0x0F)<<2) - - ((tcp->doff_res)>>2)); + ret = (xge_os_ntohs(ip->tot_len) - + ((ip->version_ihl & 0x0F)<<2) - + ((tcp->doff_res)>>2)); return (ret); } /* * __hal_ip_lro_capable: Finds whether ip is lro capable. - * @ip: ip header. + * @ip: ip header. * @ext_info: descriptor info. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_ip_lro_capable(iplro_t *ip, - xge_hal_dtr_info_t *ext_info) + xge_hal_dtr_info_t *ext_info) { #ifdef XGE_LL_DEBUG_DUMP_PKT - { - u16 i; - u8 ch, *iph = (u8 *)ip; - - xge_debug_ring(XGE_TRACE, "Dump Ip:" ); - for (i =0; i < 40; i++) { - ch = ntohs(*((u8 *)(iph + i)) ); - printf("i:%d %02x, ",i,ch); - } - } + { + u16 i; + u8 ch, *iph = (u8 *)ip; + + xge_debug_ring(XGE_TRACE, "Dump Ip:" ); + for (i =0; i < 40; i++) { + ch = ntohs(*((u8 *)(iph + i)) ); + printf("i:%d %02x, ",i,ch); + } + } #endif - if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) { - xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl); - return XGE_HAL_FAIL; + if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) { + xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl); + return XGE_HAL_FAIL; } - if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) { - xge_debug_ring(XGE_ERR, "IP fragmented"); - return XGE_HAL_FAIL; + if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) { + xge_debug_ring(XGE_ERR, "IP fragmented"); + return XGE_HAL_FAIL; } return XGE_HAL_OK; } /* - * __hal_tcp_lro_capable: Finds whether tcp is lro capable. 
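/*
 * Self-contained illustration of the arithmetic in __hal_tcp_seg_len()
 * above: the TCP payload length is the IP total length (host order here)
 * minus the IP and TCP header lengths.  version_ihl keeps the IHL in its
 * low nibble (in 32-bit words) and doff_res keeps the TCP data offset in
 * its high nibble, so ">> 2" yields the header size in bytes when the
 * reserved bits are zero.  The numbers below are only an example.
 */
#include <stdint.h>
#include <assert.h>

static uint16_t
tcp_payload_len(uint16_t ip_tot_len, uint8_t version_ihl, uint8_t doff_res)
{
    return ip_tot_len - ((version_ihl & 0x0F) << 2) - (doff_res >> 2);
}

static void
tcp_payload_len_example(void)
{
    /* 1500-byte IP datagram, 20-byte IP header (IHL 5), 32-byte TCP
     * header (data offset 8, i.e. timestamps present) => 1448 bytes */
    assert(tcp_payload_len(1500, 0x45, 0x80) == 1448);
}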
- * @ip: ip header. + * __hal_tcp_lro_capable: Finds whether tcp is lro capable. + * @ip: ip header. * @tcp: tcp header. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e -__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off) +__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off) { #ifdef XGE_LL_DEBUG_DUMP_PKT - { - u8 ch; - u16 i; - - xge_debug_ring(XGE_TRACE, "Dump Tcp:" ); - for (i =0; i < 20; i++) { - ch = ntohs(*((u8 *)((u8 *)tcp + i)) ); - xge_os_printf("i:%d %02x, ",i,ch); - } - } + { + u8 ch; + u16 i; + + xge_debug_ring(XGE_TRACE, "Dump Tcp:" ); + for (i =0; i < 20; i++) { + ch = ntohs(*((u8 *)((u8 *)tcp + i)) ); + xge_os_printf("i:%d %02x, ",i,ch); + } + } #endif - if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) && - (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl)) - goto _exit_fail; - - *ts_off = -1; - if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) { - u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */ - u16 off = 20; /* Start of tcp options */ - int i, diff; - - /* Does Packet can contain time stamp */ - if (tcp_hdr_len < 32) { - /* - * If the session is not opened, we can consider - * this packet for LRO - */ - if (lro == NULL) - return XGE_HAL_OK; - - goto _exit_fail; - } - - /* Ignore No-operation 0x1 */ - while (((u8 *)tcp)[off] == 0x1) - off++; - - /* Next option == Timestamp */ - if (((u8 *)tcp)[off] != 0x8) { - /* - * If the session ie not opened, we can consider - * this packet for LRO - */ - if (lro == NULL) - return XGE_HAL_OK; - - goto _exit_fail; - } - - *ts_off = off; - if (lro == NULL) - return XGE_HAL_OK; - - /* - * Now the session is opened. If the LRO frame doesn't - * have time stamp, we cannot consider current packet for - * LRO. - */ - if (lro->ts_off == -1) { - xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x", tcp->doff_res, tcp->ctrl); - return XGE_HAL_FAIL; - } - - /* - * If the difference is greater than three, then there are - * more options possible. - * else, there are two cases: - * case 1: remaining are padding bytes. - * case 2: remaining can contain options or padding - */ - off += ((u8 *)tcp)[off+1]; - diff = tcp_hdr_len - off; - if (diff > 3) { - /* - * Probably contains more options. 
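/*
 * Layout assumed by the timestamp handling in __hal_tcp_lro_capable()
 * above (RFC 1323 TCP Timestamps option):
 *
 *   byte 0: kind   = 8
 *   byte 1: length = 10
 *   bytes 2..5: TSval
 *   bytes 6..9: TSecr
 *
 * which is why the code skips kind-1 NOP padding, insists on kind 8 next,
 * and later copies 8 bytes starting at ts_off + 2 (TSval + TSecr) into the
 * stored LRO header.  A simplified standalone version of that walk
 * (bounds checks omitted for brevity, helper name hypothetical):
 */
#include <stdint.h>

static int
find_tcp_timestamp_off(const uint8_t *tcp, uint16_t tcp_hdr_len)
{
    uint16_t off = 20;          /* options start after the fixed header */

    if (tcp_hdr_len < 32)       /* no room for a timestamp option */
        return -1;
    while (tcp[off] == 0x1)     /* skip No-Operation padding */
        off++;
    return (tcp[off] == 0x8) ? (int)off : -1;
}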
- */ - xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl); - return XGE_HAL_FAIL; - } - - for (i = 0; i < diff; i++) { - u8 byte = ((u8 *)tcp)[off+i]; - - /* Ignore No-operation 0x1 */ - if ((byte == 0x0) || (byte == 0x1)) - continue; - xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl); - return XGE_HAL_FAIL; - } + if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) && + (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl)) + goto _exit_fail; + + *ts_off = -1; + if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) { + u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */ + u16 off = 20; /* Start of tcp options */ + int i, diff; + + /* Does Packet can contain time stamp */ + if (tcp_hdr_len < 32) { + /* + * If the session is not opened, we can consider + * this packet for LRO + */ + if (lro == NULL) + return XGE_HAL_OK; + + goto _exit_fail; + } + + /* Ignore No-operation 0x1 */ + while (((u8 *)tcp)[off] == 0x1) + off++; + + /* Next option == Timestamp */ + if (((u8 *)tcp)[off] != 0x8) { + /* + * If the session ie not opened, we can consider + * this packet for LRO + */ + if (lro == NULL) + return XGE_HAL_OK; + + goto _exit_fail; + } + + *ts_off = off; + if (lro == NULL) + return XGE_HAL_OK; + + /* + * Now the session is opened. If the LRO frame doesn't + * have time stamp, we cannot consider current packet for + * LRO. + */ + if (lro->ts_off == -1) { + xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x", tcp->doff_res, tcp->ctrl); + return XGE_HAL_FAIL; + } + + /* + * If the difference is greater than three, then there are + * more options possible. + * else, there are two cases: + * case 1: remaining are padding bytes. + * case 2: remaining can contain options or padding + */ + off += ((u8 *)tcp)[off+1]; + diff = tcp_hdr_len - off; + if (diff > 3) { + /* + * Probably contains more options. + */ + xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl); + return XGE_HAL_FAIL; + } + + for (i = 0; i < diff; i++) { + u8 byte = ((u8 *)tcp)[off+i]; + + /* Ignore No-operation 0x1 */ + if ((byte == 0x0) || (byte == 0x1)) + continue; + xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl); + return XGE_HAL_FAIL; + } - /* - * Update the time stamp of LRO frame. - */ - xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2), - (char *)((char *)tcp + (*ts_off) + 2), 8); + /* + * Update the time stamp of LRO frame. + */ + xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2), + (char *)((char *)tcp + (*ts_off) + 2), 8); } return XGE_HAL_OK; _exit_fail: - xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x", tcp->doff_res, tcp->ctrl); + xge_debug_ring(XGE_TRACE, "tcphdr not fastpth %02x %02x", tcp->doff_res, tcp->ctrl); return XGE_HAL_FAIL; } /* - * __hal_lro_capable: Finds whether frame is lro capable. - * @buffer: Ethernet frame. - * @ip: ip frame. + * __hal_lro_capable: Finds whether frame is lro capable. + * @buffer: Ethernet frame. + * @ip: ip frame. * @tcp: tcp frame. * @ext_info: Descriptor info. 
*/ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_lro_capable( u8 *buffer, - iplro_t **ip, - tcplro_t **tcp, - xge_hal_dtr_info_t *ext_info) + iplro_t **ip, + tcplro_t **tcp, + xge_hal_dtr_info_t *ext_info) { u8 ip_off, ip_length; - if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) { - xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto); - return XGE_HAL_FAIL; + if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) { + xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto); + return XGE_HAL_FAIL; } if ( !*ip ) { #ifdef XGE_LL_DEBUG_DUMP_PKT - { - u8 ch; - u16 i; - - xge_os_printf("Dump Eth:" ); - for (i =0; i < 60; i++) { - ch = ntohs(*((u8 *)(buffer + i)) ); - xge_os_printf("i:%d %02x, ",i,ch); - } - } + { + u8 ch; + u16 i; + + xge_os_printf("Dump Eth:" ); + for (i =0; i < 60; i++) { + ch = ntohs(*((u8 *)(buffer + i)) ); + xge_os_printf("i:%d %02x, ",i,ch); + } + } #endif - switch (ext_info->frame) { - case XGE_HAL_FRAME_TYPE_DIX: - ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE; - break; - case XGE_HAL_FRAME_TYPE_LLC: - ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + - XGE_HAL_HEADER_802_2_SIZE); - break; - case XGE_HAL_FRAME_TYPE_SNAP: - ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + - XGE_HAL_HEADER_SNAP_SIZE); - break; - default: // XGE_HAL_FRAME_TYPE_IPX, etc. - return XGE_HAL_FAIL; - } - - - if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) { - ip_off += XGE_HAL_HEADER_VLAN_SIZE; - } - - /* Grab ip, tcp headers */ - *ip = (iplro_t *)((char*)buffer + ip_off); + switch (ext_info->frame) { + case XGE_HAL_FRAME_TYPE_DIX: + ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE; + break; + case XGE_HAL_FRAME_TYPE_LLC: + ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + + XGE_HAL_HEADER_802_2_SIZE); + break; + case XGE_HAL_FRAME_TYPE_SNAP: + ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + + XGE_HAL_HEADER_SNAP_SIZE); + break; + default: // XGE_HAL_FRAME_TYPE_IPX, etc. + return XGE_HAL_FAIL; + } + + + if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) { + ip_off += XGE_HAL_HEADER_VLAN_SIZE; + } + + /* Grab ip, tcp headers */ + *ip = (iplro_t *)((char*)buffer + ip_off); } /* !*ip */ - ip_length = (u8)((*ip)->version_ihl & 0x0F); - ip_length = ip_length <<2; + ip_length = (u8)((*ip)->version_ihl & 0x0F); + ip_length = ip_length <<2; *tcp = (tcplro_t *)((char *)*ip + ip_length); - xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT - " tcp:"XGE_OS_LLXFMT"", (int)ip_length, - (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp); + xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT + " tcp:"XGE_OS_LLXFMT"", (int)ip_length, + (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp); return XGE_HAL_OK; @@ -966,38 +958,38 @@ __hal_lro_capable( u8 *buffer, /* - * __hal_open_lro_session: Open a new LRO session. - * @buffer: Ethernet frame. - * @ip: ip header. + * __hal_open_lro_session: Open a new LRO session. + * @buffer: Ethernet frame. + * @ip: ip header. * @tcp: tcp header. * @lro: lro pointer * @ext_info: Descriptor info. * @hldev: Hal context. * @ring_lro: LRO descriptor per rx ring. * @slot: Bucket no. - * @tcp_seg_len: Length of tcp segment. - * @ts_off: time stamp offset in the packet. + * @tcp_seg_len: Length of tcp segment. + * @ts_off: time stamp offset in the packet. 
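/*
 * The L2-to-L3 offset computation embedded in __hal_lro_capable() above,
 * pulled out as a standalone helper for readability.  The frame-type and
 * header-size constants are the ones defined in the HAL headers; the
 * helper itself is only an illustration, not part of this change.
 */
static int
l3_offset(u8 frame_type, int vlan_tagged)
{
    int ip_off;

    switch (frame_type) {
    case XGE_HAL_FRAME_TYPE_DIX:
        ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
        break;
    case XGE_HAL_FRAME_TYPE_LLC:
        ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
            XGE_HAL_HEADER_802_2_SIZE;
        break;
    case XGE_HAL_FRAME_TYPE_SNAP:
        ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
            XGE_HAL_HEADER_SNAP_SIZE;
        break;
    default:        /* XGE_HAL_FRAME_TYPE_IPX, etc.: not LRO-able */
        return -1;
    }
    if (vlan_tagged)
        ip_off += XGE_HAL_HEADER_VLAN_SIZE;
    return ip_off;
}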
*/ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void -__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro, - xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, int slot, - u32 tcp_seg_len, int ts_off) +__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro, + xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, int slot, + u32 tcp_seg_len, int ts_off) { lro_t *lro_new = &ring_lro->lro_pool[slot]; - lro_new->in_use = 1; - lro_new->ll_hdr = buffer; - lro_new->ip_hdr = ip; - lro_new->tcp_hdr = tcp; - lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl( - tcp->seq); - lro_new->tcp_seq_num = tcp->seq; - lro_new->tcp_ack_num = tcp->ack_seq; - lro_new->sg_num = 1; - lro_new->total_length = xge_os_ntohs(ip->tot_len); - lro_new->frags_len = 0; - lro_new->ts_off = ts_off; + lro_new->in_use = 1; + lro_new->ll_hdr = buffer; + lro_new->ip_hdr = ip; + lro_new->tcp_hdr = tcp; + lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl( + tcp->seq); + lro_new->tcp_seq_num = tcp->seq; + lro_new->tcp_ack_num = tcp->ack_seq; + lro_new->sg_num = 1; + lro_new->total_length = xge_os_ntohs(ip->tot_len); + lro_new->frags_len = 0; + lro_new->ts_off = ts_off; hldev->stats.sw_dev_info_stats.tot_frms_lroised++; hldev->stats.sw_dev_info_stats.tot_lro_sessions++; @@ -1006,27 +998,27 @@ __hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro, return; } /* - * __hal_lro_get_free_slot: Get a free LRO bucket. + * __hal_lro_get_free_slot: Get a free LRO bucket. * @ring_lro: LRO descriptor per ring. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int -__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro) +__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro) { - int i; + int i; - for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { - lro_t *lro_temp = &ring_lro->lro_pool[i]; + for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { + lro_t *lro_temp = &ring_lro->lro_pool[i]; - if (!lro_temp->in_use) - return i; + if (!lro_temp->in_use) + return i; } - return -1; + return -1; } /* - * __hal_get_lro_session: Gets matching LRO session or creates one. - * @eth_hdr: Ethernet header. - * @ip: ip header. + * __hal_get_lro_session: Gets matching LRO session or creates one. + * @eth_hdr: Ethernet header. + * @ip: ip header. * @tcp: tcp header. * @lro: lro pointer * @ext_info: Descriptor info. @@ -1035,198 +1027,198 @@ __hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro) */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_get_lro_session (u8 *eth_hdr, - iplro_t *ip, - tcplro_t *tcp, - lro_t **lro, - xge_hal_dtr_info_t *ext_info, - xge_hal_device_t *hldev, - xge_hal_lro_desc_t *ring_lro, - lro_t **lro_end3 /* Valid only when ret=END_3 */) + iplro_t *ip, + tcplro_t *tcp, + lro_t **lro, + xge_hal_dtr_info_t *ext_info, + xge_hal_device_t *hldev, + xge_hal_lro_desc_t *ring_lro, + lro_t **lro_end3 /* Valid only when ret=END_3 */) { lro_t *lro_match; - int i, free_slot = -1; - u32 tcp_seg_len; - int ts_off = -1; + int i, free_slot = -1; + u32 tcp_seg_len; + int ts_off = -1; *lro = lro_match = NULL; /* - * Compare the incoming frame with the lro session left from the - * previous call. There is a good chance that this incoming frame + * Compare the incoming frame with the lro session left from the + * previous call. There is a good chance that this incoming frame * matches the lro session. 
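/*
 * Worked example of the bookkeeping __hal_open_lro_session() sets up
 * above: tcp_next_seq_num records the sequence number the next in-order
 * segment of this flow must start at.  Values are illustrative only.
 */
#include <stdint.h>
#include <assert.h>

static void
lro_next_seq_example(void)
{
    uint32_t seq = 1000;           /* ntohl(tcp->seq) of the first frame */
    uint32_t tcp_seg_len = 1448;   /* its TCP payload length */
    uint32_t tcp_next_seq_num = tcp_seg_len + seq;

    /* only a frame starting exactly here can be appended later */
    assert(tcp_next_seq_num == 2448);
}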
*/ - if (ring_lro->lro_recent && ring_lro->lro_recent->in_use) { - if (__hal_lro_check_for_session_match(ring_lro->lro_recent, - tcp, ip) - == XGE_HAL_OK) - lro_match = ring_lro->lro_recent; + if (ring_lro->lro_recent && ring_lro->lro_recent->in_use) { + if (__hal_lro_check_for_session_match(ring_lro->lro_recent, + tcp, ip) + == XGE_HAL_OK) + lro_match = ring_lro->lro_recent; } - if (!lro_match) { - /* - * Search in the pool of LROs for the session that matches - * the incoming frame. - */ - for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { - lro_t *lro_temp = &ring_lro->lro_pool[i]; - - if (!lro_temp->in_use) { - if (free_slot == -1) - free_slot = i; - continue; - } - - if (__hal_lro_check_for_session_match(lro_temp, tcp, - ip) == XGE_HAL_OK) { - lro_match = lro_temp; - break; - } - } + if (!lro_match) { + /* + * Search in the pool of LROs for the session that matches + * the incoming frame. + */ + for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { + lro_t *lro_temp = &ring_lro->lro_pool[i]; + + if (!lro_temp->in_use) { + if (free_slot == -1) + free_slot = i; + continue; + } + + if (__hal_lro_check_for_session_match(lro_temp, tcp, + ip) == XGE_HAL_OK) { + lro_match = lro_temp; + break; + } + } } if (lro_match) { - /* - * Matching LRO Session found - */ - *lro = lro_match; - - if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) { - xge_debug_ring(XGE_ERR, "**retransmit **" - "found***"); - hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++; - return XGE_HAL_INF_LRO_END_2; - } - - if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info)) - { - return XGE_HAL_INF_LRO_END_2; - } - - if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match, - &ts_off)) { - /* - * Close the current session and open a new - * LRO session with this packet, - * provided it has tcp payload - */ - tcp_seg_len = __hal_tcp_seg_len(ip, tcp); - if (tcp_seg_len == 0) - { - return XGE_HAL_INF_LRO_END_2; - } - - /* Get a free bucket */ - free_slot = __hal_lro_get_free_slot(ring_lro); - if (free_slot == -1) - { - return XGE_HAL_INF_LRO_END_2; - } - - /* - * Open a new LRO session - */ - __hal_open_lro_session (eth_hdr, ip, tcp, lro_end3, - hldev, ring_lro, free_slot, tcp_seg_len, - ts_off); - - return XGE_HAL_INF_LRO_END_3; - } - - /* - * The frame is good, in-sequence, can be LRO-ed; - * take its (latest) ACK - unless it is a dupack. - * Note: to be exact need to check window size as well.. 
- */ - if (lro_match->tcp_ack_num == tcp->ack_seq && - lro_match->tcp_seq_num == tcp->seq) { - hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++; - return XGE_HAL_INF_LRO_END_2; - } - - lro_match->tcp_seq_num = tcp->seq; - lro_match->tcp_ack_num = tcp->ack_seq; - lro_match->frags_len += __hal_tcp_seg_len(ip, tcp); - - ring_lro->lro_recent = lro_match; + /* + * Matching LRO Session found + */ + *lro = lro_match; + + if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) { + xge_debug_ring(XGE_ERR, "**retransmit **" + "found***"); + hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++; + return XGE_HAL_INF_LRO_END_2; + } + + if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info)) + { + return XGE_HAL_INF_LRO_END_2; + } + + if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match, + &ts_off)) { + /* + * Close the current session and open a new + * LRO session with this packet, + * provided it has tcp payload + */ + tcp_seg_len = __hal_tcp_seg_len(ip, tcp); + if (tcp_seg_len == 0) + { + return XGE_HAL_INF_LRO_END_2; + } + + /* Get a free bucket */ + free_slot = __hal_lro_get_free_slot(ring_lro); + if (free_slot == -1) + { + return XGE_HAL_INF_LRO_END_2; + } + + /* + * Open a new LRO session + */ + __hal_open_lro_session (eth_hdr, ip, tcp, lro_end3, + hldev, ring_lro, free_slot, tcp_seg_len, + ts_off); + + return XGE_HAL_INF_LRO_END_3; + } + + /* + * The frame is good, in-sequence, can be LRO-ed; + * take its (latest) ACK - unless it is a dupack. + * Note: to be exact need to check window size as well.. + */ + if (lro_match->tcp_ack_num == tcp->ack_seq && + lro_match->tcp_seq_num == tcp->seq) { + hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++; + return XGE_HAL_INF_LRO_END_2; + } + + lro_match->tcp_seq_num = tcp->seq; + lro_match->tcp_ack_num = tcp->ack_seq; + lro_match->frags_len += __hal_tcp_seg_len(ip, tcp); + + ring_lro->lro_recent = lro_match; - return XGE_HAL_INF_LRO_CONT; + return XGE_HAL_INF_LRO_CONT; } /* ********** New Session ***************/ if (free_slot == -1) - return XGE_HAL_INF_LRO_UNCAPABLE; + return XGE_HAL_INF_LRO_UNCAPABLE; - if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info)) - return XGE_HAL_INF_LRO_UNCAPABLE; + if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info)) + return XGE_HAL_INF_LRO_UNCAPABLE; - if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off)) - return XGE_HAL_INF_LRO_UNCAPABLE; - - xge_debug_ring(XGE_TRACE, "Creating lro session."); + if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off)) + return XGE_HAL_INF_LRO_UNCAPABLE; + + xge_debug_ring(XGE_TRACE, "Creating lro session."); /* - * Open a LRO session, provided the packet contains payload. + * Open a LRO session, provided the packet contains payload. */ - tcp_seg_len = __hal_tcp_seg_len(ip, tcp); - if (tcp_seg_len == 0) - return XGE_HAL_INF_LRO_UNCAPABLE; + tcp_seg_len = __hal_tcp_seg_len(ip, tcp); + if (tcp_seg_len == 0) + return XGE_HAL_INF_LRO_UNCAPABLE; - __hal_open_lro_session (eth_hdr, ip, tcp, lro, hldev, ring_lro, free_slot, - tcp_seg_len, ts_off); + __hal_open_lro_session (eth_hdr, ip, tcp, lro, hldev, ring_lro, free_slot, + tcp_seg_len, ts_off); return XGE_HAL_INF_LRO_BEGIN; } /* * __hal_lro_under_optimal_thresh: Finds whether combined session is optimal. - * @ip: ip header. + * @ip: ip header. * @tcp: tcp header. * @lro: lro pointer * @hldev: Hal context. 
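/*
 * The duplicate-ACK test used in the match path above, isolated for
 * clarity: if neither the sequence number nor the ACK number advanced,
 * the frame carries nothing new for the aggregate and the session is
 * flushed (XGE_HAL_INF_LRO_END_2).  The helper name is hypothetical; the
 * fields compared are the network-order values cached in lro_t.
 */
static int
lro_is_dup_ack(const lro_t *lro, const tcplro_t *tcp)
{
    return (lro->tcp_ack_num == tcp->ack_seq &&
        lro->tcp_seq_num == tcp->seq);
}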
*/ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e -__hal_lro_under_optimal_thresh (iplro_t *ip, - tcplro_t *tcp, - lro_t *lro, - xge_hal_device_t *hldev) +__hal_lro_under_optimal_thresh (iplro_t *ip, + tcplro_t *tcp, + lro_t *lro, + xge_hal_device_t *hldev) { if (!lro) return XGE_HAL_FAIL; - if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) > - hldev->config.lro_frm_len) { - xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:" - "max length %d ", hldev->config.lro_frm_len); - hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++; - return XGE_HAL_FAIL; + if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) > + hldev->config.lro_frm_len) { + xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:" + "max length %d ", hldev->config.lro_frm_len); + hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++; + return XGE_HAL_FAIL; } - if (lro->sg_num == hldev->config.lro_sg_size) { - xge_debug_ring(XGE_TRACE, "Max sg count exceeded:" - "max sg %d ", hldev->config.lro_sg_size); - hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++; - return XGE_HAL_FAIL; + if (lro->sg_num == hldev->config.lro_sg_size) { + xge_debug_ring(XGE_TRACE, "Max sg count exceeded:" + "max sg %d ", hldev->config.lro_sg_size); + hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++; + return XGE_HAL_FAIL; } return XGE_HAL_OK; } /* - * __hal_collapse_ip_hdr: Collapses ip header. - * @ip: ip header. + * __hal_collapse_ip_hdr: Collapses ip header. + * @ip: ip header. * @tcp: tcp header. * @lro: lro pointer * @hldev: Hal context. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e -__hal_collapse_ip_hdr ( iplro_t *ip, - tcplro_t *tcp, - lro_t *lro, - xge_hal_device_t *hldev) +__hal_collapse_ip_hdr ( iplro_t *ip, + tcplro_t *tcp, + lro_t *lro, + xge_hal_device_t *hldev) { lro->total_length += __hal_tcp_seg_len(ip, tcp); - /* May be we have to handle time stamps or more options */ + /* May be we have to handle time stamps or more options */ return XGE_HAL_OK; @@ -1234,16 +1226,16 @@ __hal_collapse_ip_hdr ( iplro_t *ip, /* * __hal_collapse_tcp_hdr: Collapses tcp header. - * @ip: ip header. + * @ip: ip header. * @tcp: tcp header. * @lro: lro pointer * @hldev: Hal context. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_collapse_tcp_hdr ( iplro_t *ip, - tcplro_t *tcp, - lro_t *lro, - xge_hal_device_t *hldev) + tcplro_t *tcp, + lro_t *lro, + xge_hal_device_t *hldev) { lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp); return XGE_HAL_OK; @@ -1252,122 +1244,122 @@ __hal_collapse_tcp_hdr ( iplro_t *ip, /* * __hal_append_lro: Appends new frame to existing LRO session. - * @ip: ip header. - * @tcp: IN tcp header, OUT tcp payload. + * @ip: ip header. + * @tcp: IN tcp header, OUT tcp payload. * @seg_len: tcp payload length. * @lro: lro pointer * @hldev: Hal context. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e __hal_append_lro(iplro_t *ip, - tcplro_t **tcp, - u32 *seg_len, - lro_t *lro, - xge_hal_device_t *hldev) + tcplro_t **tcp, + u32 *seg_len, + lro_t *lro, + xge_hal_device_t *hldev) { - (void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev); + (void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev); (void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev); - // Update mbuf chain will be done in ll driver. + // Update mbuf chain will be done in ll driver. // xge_hal_accumulate_large_rx on success of appending new frame to - // lro will return to ll driver tcpdata pointer, and tcp payload length. - // along with return code lro frame appended. 
+ // lro will return to ll driver tcpdata pointer, and tcp payload length. + // along with return code lro frame appended. lro->sg_num++; *seg_len = __hal_tcp_seg_len(ip, *tcp); - *tcp = (tcplro_t *)((char *)*tcp + (((*tcp)->doff_res)>>2)); + *tcp = (tcplro_t *)((char *)*tcp + (((*tcp)->doff_res)>>2)); return XGE_HAL_OK; } /** - * __xge_hal_accumulate_large_rx: LRO a given frame + * __xge_hal_accumulate_large_rx: LRO a given frame * frames * @ring: rx ring number * @eth_hdr: ethernet header. * @ip_hdr: ip header (optional) * @tcp: tcp header. - * @seglen: packet length. + * @seglen: packet length. * @p_lro: lro pointer. * @ext_info: descriptor info, see xge_hal_dtr_info_t{}. * @hldev: HAL device. * @lro_end3: for lro_end3 output * - * LRO the newly received frame, i.e. attach it (if possible) to the + * LRO the newly received frame, i.e. attach it (if possible) to the * already accumulated (i.e., already LRO-ed) received frames (if any), - * to form one super-sized frame for the subsequent processing + * to form one super-sized frame for the subsequent processing * by the stack. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp, - u32 *seglen, lro_t **p_lro, - xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, - lro_t **lro_end3) + u32 *seglen, lro_t **p_lro, + xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev, + lro_t **lro_end3) { - iplro_t *ip = (iplro_t *)ip_hdr; + iplro_t *ip = (iplro_t *)ip_hdr; xge_hal_status_e ret; lro_t *lro; xge_debug_ring(XGE_TRACE, "Entered accumu lro. "); if (XGE_HAL_OK != __hal_lro_capable(eth_hdr, &ip, (tcplro_t **)tcp, - ext_info)) - return XGE_HAL_INF_LRO_UNCAPABLE; + ext_info)) + return XGE_HAL_INF_LRO_UNCAPABLE; /* - * This function shall get matching LRO or else + * This function shall get matching LRO or else * create one and return it */ ret = __hal_get_lro_session(eth_hdr, ip, (tcplro_t *)*tcp, - p_lro, ext_info, hldev, &hldev->lro_desc[ring], - lro_end3); + p_lro, ext_info, hldev, &hldev->lro_desc[ring], + lro_end3); xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ",ret); lro = *p_lro; if (XGE_HAL_INF_LRO_CONT == ret) { - if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip, - (tcplro_t *)*tcp, lro, hldev)) { - (void) __hal_append_lro(ip,(tcplro_t **) tcp, seglen, - lro, hldev); - hldev->stats.sw_dev_info_stats.tot_frms_lroised++; - - if (lro->sg_num >= hldev->config.lro_sg_size) { - hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++; - ret = XGE_HAL_INF_LRO_END_1; - } - - } else ret = XGE_HAL_INF_LRO_END_2; + if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip, + (tcplro_t *)*tcp, lro, hldev)) { + (void) __hal_append_lro(ip,(tcplro_t **) tcp, seglen, + lro, hldev); + hldev->stats.sw_dev_info_stats.tot_frms_lroised++; + + if (lro->sg_num >= hldev->config.lro_sg_size) { + hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++; + ret = XGE_HAL_INF_LRO_END_1; + } + + } else ret = XGE_HAL_INF_LRO_END_2; } /* * Since its time to flush, - * update ip header so that it can be sent up + * update ip header so that it can be sent up */ if ((ret == XGE_HAL_INF_LRO_END_1) || - (ret == XGE_HAL_INF_LRO_END_2) || - (ret == XGE_HAL_INF_LRO_END_3)) { - lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length); - lro->ip_hdr->check = xge_os_htons(0); - lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)), - (lro->ip_hdr->version_ihl & 0x0F)); - lro->tcp_hdr->ack_seq = lro->tcp_ack_num; + (ret == XGE_HAL_INF_LRO_END_2) || + (ret == XGE_HAL_INF_LRO_END_3)) { + 
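/*
 * The flush path below rewrites ip->tot_len for the aggregated frame,
 * zeroes the checksum field, and recomputes it via XGE_LL_IP_FAST_CSUM().
 * As an illustration of what that macro is assumed to compute, here is
 * the standard ones'-complement checksum over the IP header ('ihl' 32-bit
 * words, checksum field already zeroed); this stand-in is not the HAL's
 * actual implementation.
 */
#include <stdint.h>

static uint16_t
ipv4_header_checksum(const void *hdr, unsigned int ihl)
{
    const uint16_t *p = (const uint16_t *)hdr;
    uint32_t sum = 0;
    unsigned int i;

    for (i = 0; i < ihl * 2; i++)   /* ihl 32-bit words = 2*ihl 16-bit chunks */
        sum += p[i];
    while (sum >> 16)               /* fold the carries back in */
        sum = (sum & 0xFFFF) + (sum >> 16);
    return (uint16_t)~sum;
}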
lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length); + lro->ip_hdr->check = xge_os_htons(0); + lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)), + (lro->ip_hdr->version_ihl & 0x0F)); + lro->tcp_hdr->ack_seq = lro->tcp_ack_num; } return (ret); } /** - * xge_hal_accumulate_large_rx: LRO a given frame + * xge_hal_accumulate_large_rx: LRO a given frame * frames - * @buffer: Ethernet frame. + * @buffer: Ethernet frame. * @tcp: tcp header. - * @seglen: packet length. + * @seglen: packet length. * @p_lro: lro pointer. * @ext_info: descriptor info, see xge_hal_dtr_info_t{}. * @hldev: HAL device. * @lro_end3: for lro_end3 output * - * LRO the newly received frame, i.e. attach it (if possible) to the + * LRO the newly received frame, i.e. attach it (if possible) to the * already accumulated (i.e., already LRO-ed) received frames (if any), - * to form one super-sized frame for the subsequent processing + * to form one super-sized frame for the subsequent processing * by the stack. */ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e @@ -1377,7 +1369,7 @@ lro_t **lro_end3) { int ring = 0; return xge_hal_lro_process_rx(ring, buffer, NULL, tcp, seglen, p_lro, - ext_info, hldev, lro_end3); + ext_info, hldev, lro_end3); } /** @@ -1393,32 +1385,32 @@ xge_hal_lro_close_session (lro_t *lro) /** * xge_hal_lro_next_session: Returns next LRO session in the list or NULL - * if none exists. + * if none exists. * @hldev: HAL Context. * @ring: rx ring number. */ -__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * +__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring) { xge_hal_lro_desc_t *ring_lro = &hldev->lro_desc[ring]; - int i; - int start_idx = ring_lro->lro_next_idx; + int i; + int start_idx = ring_lro->lro_next_idx; - for(i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { - lro_t *lro = &ring_lro->lro_pool[i]; + for(i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) { + lro_t *lro = &ring_lro->lro_pool[i]; - if (!lro->in_use) - continue; + if (!lro->in_use) + continue; - lro->ip_hdr->tot_len = xge_os_htons(lro->total_length); - lro->ip_hdr->check = xge_os_htons(0); - lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)), - (lro->ip_hdr->version_ihl & 0x0F)); - ring_lro->lro_next_idx = i + 1; - return lro; + lro->ip_hdr->tot_len = xge_os_htons(lro->total_length); + lro->ip_hdr->check = xge_os_htons(0); + lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)), + (lro->ip_hdr->version_ihl & 0x0F)); + ring_lro->lro_next_idx = i + 1; + return lro; } - ring_lro->lro_next_idx = 0; + ring_lro->lro_next_idx = 0; return NULL; } diff --git a/sys/dev/nxge/xgehal/xgehal-device.c b/sys/dev/nxge/xgehal/xgehal-device.c index 0ba7562..882aaf7 100644 --- a/sys/dev/nxge/xgehal/xgehal-device.c +++ b/sys/dev/nxge/xgehal/xgehal-device.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-device.c - * - * Description: HAL device object functionality - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xgehal-device.h> #include <dev/nxge/include/xgehal-channel.h> #include <dev/nxge/include/xgehal-fifo.h> @@ -41,8 +33,8 @@ #include <dev/nxge/include/xgehal-driver.h> #include <dev/nxge/include/xgehal-mgmt.h> -#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL -#define END_SIGN 0x0 +#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL +#define END_SIGN 0x0 #ifdef XGE_HAL_HERC_EMULATION #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR @@ -80,7 +72,7 @@ __hal_device_event_queued(void *data, int event_type) { 
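/*
 * Sketch (not the driver's actual Rx path) of how a ULD acts on the
 * classification returned by xge_hal_lro_process_rx() above, as suggested
 * by the paths in __hal_get_lro_session(); the mbuf handling is only
 * indicated by comments and the function name is hypothetical.
 */
static void
xge_example_lro_dispatch(xge_hal_status_e ret, lro_t *lro, lro_t *lro_end3)
{
    switch (ret) {
    case XGE_HAL_INF_LRO_BEGIN:
    case XGE_HAL_INF_LRO_CONT:
        /* frame absorbed into 'lro'; hold it, nothing goes upstream yet */
        break;
    case XGE_HAL_INF_LRO_END_1:
        /* frame was appended and the session hit its limit: flush the
         * aggregate in 'lro', then xge_hal_lro_close_session(lro) */
        break;
    case XGE_HAL_INF_LRO_END_2:
        /* frame could not be appended: flush 'lro', close the session,
         * then pass the current frame up on its own */
        break;
    case XGE_HAL_INF_LRO_END_3:
        /* flush and close 'lro'; continue accumulating in 'lro_end3' */
        break;
    case XGE_HAL_INF_LRO_UNCAPABLE:
    default:
        /* pass the frame up unmodified */
        break;
    }
}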
xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC); if (g_xge_hal_driver->uld_callbacks.event_queued) { - g_xge_hal_driver->uld_callbacks.event_queued(data, event_type); + g_xge_hal_driver->uld_callbacks.event_queued(data, event_type); } } @@ -110,11 +102,11 @@ __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) */ void __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, - void *addr) + void *addr) { #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) xge_os_pio_mem_write32(pdev, regh, val, - (void *) ((char *)addr + 4)); + (void *) ((char *)addr + 4)); #else xge_os_pio_mem_write32(pdev, regh, val, addr); #endif @@ -133,7 +125,7 @@ __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, */ xge_hal_status_e __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, - int op, u64 mask, int max_millis) + int op, u64 mask, int max_millis) { u64 val64; int i = 0; @@ -142,21 +134,21 @@ __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, xge_os_udelay(10); do { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); - if (op == 0 && !(val64 & mask)) - return XGE_HAL_OK; - else if (op == 1 && (val64 & mask) == mask) - return XGE_HAL_OK; - xge_os_udelay(100); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); + if (op == 0 && !(val64 & mask)) + return XGE_HAL_OK; + else if (op == 1 && (val64 & mask) == mask) + return XGE_HAL_OK; + xge_os_udelay(100); } while (++i <= 9); do { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); - if (op == 0 && !(val64 & mask)) - return XGE_HAL_OK; - else if (op == 1 && (val64 & mask) == mask) - return XGE_HAL_OK; - xge_os_udelay(1000); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); + if (op == 0 && !(val64 & mask)) + return XGE_HAL_OK; + else if (op == 1 && (val64 & mask) == mask) + return XGE_HAL_OK; + xge_os_udelay(1000); } while (++i < max_millis); return ret; @@ -177,27 +169,27 @@ __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status) /* poll and wait first */ #ifdef XGE_HAL_HERC_EMULATION (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, - (XGE_HAL_ADAPTER_STATUS_TDMA_READY | - XGE_HAL_ADAPTER_STATUS_RDMA_READY | - XGE_HAL_ADAPTER_STATUS_PFC_READY | - XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | - XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | - XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | - XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | - XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK), - XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); + (XGE_HAL_ADAPTER_STATUS_TDMA_READY | + XGE_HAL_ADAPTER_STATUS_RDMA_READY | + XGE_HAL_ADAPTER_STATUS_PFC_READY | + XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | + XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | + XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | + XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | + XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK), + XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); #else (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, - (XGE_HAL_ADAPTER_STATUS_TDMA_READY | - XGE_HAL_ADAPTER_STATUS_RDMA_READY | - XGE_HAL_ADAPTER_STATUS_PFC_READY | - XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | - XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | - XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | - XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | - XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK | - XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK), - XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); + (XGE_HAL_ADAPTER_STATUS_TDMA_READY | + XGE_HAL_ADAPTER_STATUS_RDMA_READY | + XGE_HAL_ADAPTER_STATUS_PFC_READY | + XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | + 
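/*
 * Usage sketch for __hal_device_register_poll() above: op == 1 waits for
 * every bit in 'mask' to be set, op == 0 waits for them all to clear,
 * giving up after roughly 'max_millis' milliseconds.  The example mirrors
 * the quiescence wait below and is illustrative only.
 */
static xge_hal_status_e
example_wait_tdma_ready(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

    return __hal_device_register_poll(hldev, &bar0->adapter_status,
        1, XGE_HAL_ADAPTER_STATUS_TDMA_READY,
        XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
}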
XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | + XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | + XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | + XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK | + XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK), + XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); #endif return xge_hal_device_status(hldev, hw_status); @@ -219,19 +211,19 @@ xge_hal_device_is_slot_freeze(xge_hal_device_h devh) xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u16 device_id; u64 adapter_status = - xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_status); + xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_status); xge_os_pci_read16(hldev->pdev,hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, device_id), - &device_id); + xge_offsetof(xge_hal_pci_config_le_t, device_id), + &device_id); #ifdef TX_DEBUG if (adapter_status == XGE_HAL_ALL_FOXES) { - u64 dummy; - dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->pcc_enable); - printf(">>> Slot is frozen!\n"); - brkpoint(0); + u64 dummy; + dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->pcc_enable); + printf(">>> Slot is frozen!\n"); + brkpoint(0); } #endif return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff)); @@ -252,7 +244,7 @@ __hal_device_led_actifity_fix(xge_hal_device_t *hldev) u64 val64; xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid); + xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid); /* * In the case of Herc, there is a new register named beacon control @@ -273,14 +265,14 @@ __hal_device_led_actifity_fix(xge_hal_device_t *hldev) * since it represents the gpio control register in Xena. */ if ((subid & 0xFF) >= 0x07) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->beacon_control); - val64 |= 0x0000800000000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->beacon_control); - val64 = 0x0411040400000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - (void *) ((u8 *)bar0 + 0x2700)); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->beacon_control); + val64 |= 0x0000800000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->beacon_control); + val64 = 0x0411040400000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + (void *) ((u8 *)bar0 + 0x2700)); } } @@ -336,9 +328,9 @@ __hal_device_xena_fix_mac(xge_hal_device_t *hldev) * register. In the case of Herc, there is no change required. */ while (xena_fix_mac[i] != END_SIGN) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - xena_fix_mac[i++], &bar0->beacon_control); - xge_os_mdelay(1); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + xena_fix_mac[i++], &bar0->beacon_control); + xge_os_mdelay(1); } } @@ -359,17 +351,17 @@ xge_hal_device_bcast_enable(xge_hal_device_h devh) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); - val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE; + val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); - __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), &bar0->mac_cfg); + __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, + (u32)(val64 >> 32), &bar0->mac_cfg); xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", - (unsigned long long)val64, - hldev->config.mac.rmac_bcast_en ? 
"enabled" : "disabled"); + (unsigned long long)val64, + hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); } /* @@ -392,14 +384,14 @@ xge_hal_device_bcast_disable(xge_hal_device_h devh) val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); - __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), &bar0->mac_cfg); + __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, + (u32)(val64 >> 32), &bar0->mac_cfg); xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", - (unsigned long long)val64, - hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); + (unsigned long long)val64, + hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); } /* @@ -420,7 +412,7 @@ __hal_device_shared_splits_configure(xge_hal_device_t *hldev) val64 |= XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->pic_control); + &bar0->pic_control); xge_debug_device(XGE_TRACE, "%s", "shared splits configured"); } @@ -438,7 +430,7 @@ __hal_device_rmac_padding_configure(xge_hal_device_t *hldev) u64 val64; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE ); @@ -450,20 +442,20 @@ __hal_device_rmac_padding_configure(xge_hal_device_t *hldev) */ if (!hldev->config.rth_en || !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { - val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; + &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { + val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; } val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD ); val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM; __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), (char*)&bar0->mac_cfg); + (u32)(val64 >> 32), (char*)&bar0->mac_cfg); xge_os_mdelay(1); xge_debug_device(XGE_TRACE, - "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured", - (unsigned long long)val64); + "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured", + (unsigned long long)val64); } /* @@ -484,60 +476,60 @@ __hal_device_pause_frames_configure(xge_hal_device_t *hldev) u64 val64; switch (hldev->config.mac.media) { - case XGE_HAL_MEDIA_SR: - case XGE_HAL_MEDIA_SW: - val64=0xfffbfffbfffbfffbULL; - break; - case XGE_HAL_MEDIA_LR: - case XGE_HAL_MEDIA_LW: - val64=0xffbbffbbffbbffbbULL; - break; - case XGE_HAL_MEDIA_ER: - case XGE_HAL_MEDIA_EW: - default: - val64=0xffbbffbbffbbffbbULL; - break; + case XGE_HAL_MEDIA_SR: + case XGE_HAL_MEDIA_SW: + val64=0xfffbfffbfffbfffbULL; + break; + case XGE_HAL_MEDIA_LR: + case XGE_HAL_MEDIA_LW: + val64=0xffbbffbbffbbffbbULL; + break; + case XGE_HAL_MEDIA_ER: + case XGE_HAL_MEDIA_EW: + default: + val64=0xffbbffbbffbbffbbULL; + break; } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->mc_pause_thresh_q0q3); + val64, &bar0->mc_pause_thresh_q0q3); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->mc_pause_thresh_q4q7); + val64, &bar0->mc_pause_thresh_q4q7); /* Set the time value to be inserted in the pause frame generated * by Xframe */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rmac_pause_cfg); if (hldev->config.mac.rmac_pause_gen_en) - val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; + val64 |= 
XGE_HAL_RMAC_PAUSE_GEN_EN; else - val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN); + val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN); if (hldev->config.mac.rmac_pause_rcv_en) - val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; + val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; else - val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN); + val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN); val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff)); val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rmac_pause_cfg); + &bar0->rmac_pause_cfg); val64 = 0; for (i = 0; i<4; i++) { - val64 |= - (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3) - <<(i*2*8)); + val64 |= + (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3) + <<(i*2*8)); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_pause_thresh_q0q3); + &bar0->mc_pause_thresh_q0q3); val64 = 0; for (i = 0; i<4; i++) { - val64 |= - (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7) - <<(i*2*8)); + val64 |= + (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7) + <<(i*2*8)); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_pause_thresh_q4q7); + &bar0->mc_pause_thresh_q4q7); xge_debug_device(XGE_TRACE, "%s", "pause frames configured"); } @@ -545,16 +537,16 @@ __hal_device_pause_frames_configure(xge_hal_device_t *hldev) * Herc's clock rate doubled, unless the slot is 33MHz. */ unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev, - unsigned int time_ival) + unsigned int time_ival) { if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) - return time_ival; + return time_ival; xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC); if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN && hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ) - time_ival *= 2; + time_ival *= 2; return time_ival; } @@ -573,10 +565,10 @@ __hal_device_bus_master_disable (xge_hal_device_t *hldev) u16 bus_master = 4; xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); cmd &= ~bus_master; xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), cmd); } /* @@ -592,15 +584,15 @@ __hal_device_bus_master_enable (xge_hal_device_t *hldev) u16 bus_master = 4; xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); /* already enabled? 
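/*
 * Sketch of the threshold packing used in the loops above (assuming the
 * configured threshold fits in one byte): each of the four 16-bit fields
 * in mc_pause_thresh_q0q3/q4q7 becomes 0xFF00 | threshold, so a threshold
 * of 0xBB reproduces the 0xffbbffbbffbbffbb media default written earlier.
 */
static u64
xge_example_pack_pause_thresh(u8 threshold)
{
	u64 val64 = 0;
	int i;

	for (i = 0; i < 4; i++)
		val64 |= ((u64)0xFF00 | threshold) << (i * 2 * 8);

	return val64;
}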
do nothing */ if (cmd & bus_master) - return; + return; cmd |= bus_master; xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), cmd); } /* * __hal_device_intr_mgmt @@ -620,286 +612,286 @@ __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag) u64 gim, gim_saved; gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->general_int_mask); + hldev->regh0, &bar0->general_int_mask); /* Top level interrupt classification */ /* PIC Interrupts */ if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) { - /* Enable PIC Intrs in the general intr mask register */ - val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/; - if (flag) { - gim &= ~((u64) val64); - temp64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->pic_int_mask); - - temp64 &= ~XGE_HAL_PIC_INT_TX; + /* Enable PIC Intrs in the general intr mask register */ + val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/; + if (flag) { + gim &= ~((u64) val64); + temp64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->pic_int_mask); + + temp64 &= ~XGE_HAL_PIC_INT_TX; #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == - XGE_HAL_CARD_HERC) { - temp64 &= ~XGE_HAL_PIC_INT_MISC; - } + if (xge_hal_device_check_id(hldev) == + XGE_HAL_CARD_HERC) { + temp64 &= ~XGE_HAL_PIC_INT_MISC; + } #endif - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - temp64, &bar0->pic_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + temp64, &bar0->pic_int_mask); #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == - XGE_HAL_CARD_HERC) { - /* - * Unmask only Link Up interrupt - */ - temp64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->misc_int_mask); - temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, temp64, - &bar0->misc_int_mask); - xge_debug_device(XGE_TRACE, - "unmask link up flag "XGE_OS_LLXFMT, - (unsigned long long)temp64); - } + if (xge_hal_device_check_id(hldev) == + XGE_HAL_CARD_HERC) { + /* + * Unmask only Link Up interrupt + */ + temp64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->misc_int_mask); + temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, temp64, + &bar0->misc_int_mask); + xge_debug_device(XGE_TRACE, + "unmask link up flag "XGE_OS_LLXFMT, + (unsigned long long)temp64); + } #endif - } else { /* flag == 0 */ + } else { /* flag == 0 */ #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == - XGE_HAL_CARD_HERC) { - /* - * Mask both Link Up and Down interrupts - */ - temp64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->misc_int_mask); - temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; - temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, temp64, - &bar0->misc_int_mask); - xge_debug_device(XGE_TRACE, - "mask link up/down flag "XGE_OS_LLXFMT, - (unsigned long long)temp64); - } + if (xge_hal_device_check_id(hldev) == + XGE_HAL_CARD_HERC) { + /* + * Mask both Link Up and Down interrupts + */ + temp64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->misc_int_mask); + temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; + temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, temp64, + &bar0->misc_int_mask); + xge_debug_device(XGE_TRACE, + "mask link up/down flag "XGE_OS_LLXFMT, + 
(unsigned long long)temp64); + } #endif - /* Disable PIC Intrs in the general intr mask - * register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->pic_int_mask); - gim |= val64; - } + /* Disable PIC Intrs in the general intr mask + * register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->pic_int_mask); + gim |= val64; + } } /* DMA Interrupts */ /* Enabling/Disabling Tx DMA interrupts */ if (mask & XGE_HAL_TX_DMA_INTR) { - /* Enable TxDMA Intrs in the general intr mask register */ - val64 = XGE_HAL_TXDMA_INT_M; - if (flag) { - gim &= ~((u64) val64); - /* Enable all TxDMA interrupts */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->txdma_int_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->pfc_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->tda_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->pcc_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->tti_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->lso_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->tpa_err_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0, &bar0->sm_err_mask); - - } else { /* flag == 0 */ - - /* Disable TxDMA Intrs in the general intr mask - * register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->txdma_int_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->pfc_err_mask); - - gim |= val64; - } + /* Enable TxDMA Intrs in the general intr mask register */ + val64 = XGE_HAL_TXDMA_INT_M; + if (flag) { + gim &= ~((u64) val64); + /* Enable all TxDMA interrupts */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->txdma_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->pfc_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->tda_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->pcc_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->tti_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->lso_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->tpa_err_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0, &bar0->sm_err_mask); + + } else { /* flag == 0 */ + + /* Disable TxDMA Intrs in the general intr mask + * register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->txdma_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->pfc_err_mask); + + gim |= val64; + } } /* Enabling/Disabling Rx DMA interrupts */ if (mask & XGE_HAL_RX_DMA_INTR) { - /* Enable RxDMA Intrs in the general intr mask register */ - val64 = XGE_HAL_RXDMA_INT_M; - if (flag) { + /* Enable RxDMA Intrs in the general intr mask register */ + val64 = XGE_HAL_RXDMA_INT_M; + if (flag) { - gim &= ~((u64) val64); - /* All RxDMA block interrupts are disabled for now - * TODO */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->rxdma_int_mask); + gim &= ~((u64) val64); + /* All RxDMA block interrupts are disabled for now + * TODO */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->rxdma_int_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable RxDMA Intrs in the 
general intr mask - * register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->rxdma_int_mask); + /* Disable RxDMA Intrs in the general intr mask + * register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->rxdma_int_mask); - gim |= val64; - } + gim |= val64; + } } /* MAC Interrupts */ /* Enabling/Disabling MAC interrupts */ if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) { - val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M; - if (flag) { + val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M; + if (flag) { - gim &= ~((u64) val64); + gim &= ~((u64) val64); - /* All MAC block error inter. are disabled for now. */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); + /* All MAC block error inter. are disabled for now. */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable MAC Intrs in the general intr mask - * register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); + /* Disable MAC Intrs in the general intr mask + * register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); - gim |= val64; - } + gim |= val64; + } } /* XGXS Interrupts */ if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) { - val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M; - if (flag) { + val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M; + if (flag) { - gim &= ~((u64) val64); - /* All XGXS block error interrupts are disabled for now - * TODO */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); + gim &= ~((u64) val64); + /* All XGXS block error interrupts are disabled for now + * TODO */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable MC Intrs in the general intr mask register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); + /* Disable MC Intrs in the general intr mask register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); - gim |= val64; - } + gim |= val64; + } } /* Memory Controller(MC) interrupts */ if (mask & XGE_HAL_MC_INTR) { - val64 = XGE_HAL_MC_INT_M; - if (flag) { + val64 = XGE_HAL_MC_INT_M; + if (flag) { - gim &= ~((u64) val64); + gim &= ~((u64) val64); - /* Enable all MC blocks error interrupts */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0x0ULL, &bar0->mc_int_mask); + /* Enable all MC blocks error interrupts */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0x0ULL, &bar0->mc_int_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable MC Intrs in the general intr mask - * register */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask); + /* Disable MC Intrs in the general intr mask + * 
register */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask); - gim |= val64; - } + gim |= val64; + } } /* Tx traffic interrupts */ if (mask & XGE_HAL_TX_TRAFFIC_INTR) { - val64 = XGE_HAL_TXTRAFFIC_INT_M; - if (flag) { + val64 = XGE_HAL_TXTRAFFIC_INT_M; + if (flag) { - gim &= ~((u64) val64); + gim &= ~((u64) val64); - /* Enable all the Tx side interrupts */ - /* '0' Enables all 64 TX interrupt levels. */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, - &bar0->tx_traffic_mask); + /* Enable all the Tx side interrupts */ + /* '0' Enables all 64 TX interrupt levels. */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, + &bar0->tx_traffic_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable Tx Traffic Intrs in the general intr mask - * register. */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->tx_traffic_mask); - gim |= val64; - } + /* Disable Tx Traffic Intrs in the general intr mask + * register. */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->tx_traffic_mask); + gim |= val64; + } } /* Rx traffic interrupts */ if (mask & XGE_HAL_RX_TRAFFIC_INTR) { - val64 = XGE_HAL_RXTRAFFIC_INT_M; - if (flag) { - gim &= ~((u64) val64); - /* '0' Enables all 8 RX interrupt levels. */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, - &bar0->rx_traffic_mask); + val64 = XGE_HAL_RXTRAFFIC_INT_M; + if (flag) { + gim &= ~((u64) val64); + /* '0' Enables all 8 RX interrupt levels. */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, + &bar0->rx_traffic_mask); - } else { /* flag == 0 */ + } else { /* flag == 0 */ - /* Disable Rx Traffic Intrs in the general intr mask - * register. - */ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_ALL_INTRS_DIS, - &bar0->rx_traffic_mask); + /* Disable Rx Traffic Intrs in the general intr mask + * register. 
+ */ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_ALL_INTRS_DIS, + &bar0->rx_traffic_mask); - gim |= val64; - } + gim |= val64; + } } /* Sched Timer interrupt */ if (mask & XGE_HAL_SCHED_INTR) { - if (flag) { - temp64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->txpic_int_mask); - temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - temp64, &bar0->txpic_int_mask); - - xge_hal_device_sched_timer(hldev, - hldev->config.sched_timer_us, - hldev->config.sched_timer_one_shot); - } else { - temp64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->txpic_int_mask); - temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR; - - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - temp64, &bar0->txpic_int_mask); - - xge_hal_device_sched_timer(hldev, - XGE_HAL_SCHED_TIMER_DISABLED, - XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE); - } + if (flag) { + temp64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->txpic_int_mask); + temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + temp64, &bar0->txpic_int_mask); + + xge_hal_device_sched_timer(hldev, + hldev->config.sched_timer_us, + hldev->config.sched_timer_one_shot); + } else { + temp64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->txpic_int_mask); + temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR; + + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + temp64, &bar0->txpic_int_mask); + + xge_hal_device_sched_timer(hldev, + XGE_HAL_SCHED_TIMER_DISABLED, + XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE); + } } if (gim != gim_saved) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim, - &bar0->general_int_mask); - xge_debug_device(XGE_TRACE, "general_int_mask updated " - XGE_OS_LLXFMT" => "XGE_OS_LLXFMT, - (unsigned long long)gim_saved, (unsigned long long)gim); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim, + &bar0->general_int_mask); + xge_debug_device(XGE_TRACE, "general_int_mask updated " + XGE_OS_LLXFMT" => "XGE_OS_LLXFMT, + (unsigned long long)gim_saved, (unsigned long long)gim); } } @@ -915,35 +907,35 @@ __hal_device_bimodal_configure(xge_hal_device_t *hldev) int i; for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_tti_config_t *tti; - xge_hal_rti_config_t *rti; - - if (!hldev->config.ring.queue[i].configured) - continue; - rti = &hldev->config.ring.queue[i].rti; - tti = &hldev->bimodal_tti[i]; - - tti->enabled = 1; - tti->urange_a = hldev->bimodal_urange_a_en * 10; - tti->urange_b = 20; - tti->urange_c = 30; - tti->ufc_a = hldev->bimodal_urange_a_en * 8; - tti->ufc_b = 16; - tti->ufc_c = 32; - tti->ufc_d = 64; - tti->timer_val_us = hldev->bimodal_timer_val_us; - tti->timer_ac_en = 1; - tti->timer_ci_en = 0; - - rti->urange_a = 10; - rti->urange_b = 20; - rti->urange_c = 30; - rti->ufc_a = 1; /* <= for netpipe type of tests */ - rti->ufc_b = 4; - rti->ufc_c = 4; - rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ - rti->timer_ac_en = 1; - rti->timer_val_us = 5; /* for optimal bus efficiency usage */ + xge_hal_tti_config_t *tti; + xge_hal_rti_config_t *rti; + + if (!hldev->config.ring.queue[i].configured) + continue; + rti = &hldev->config.ring.queue[i].rti; + tti = &hldev->bimodal_tti[i]; + + tti->enabled = 1; + tti->urange_a = hldev->bimodal_urange_a_en * 10; + tti->urange_b = 20; + tti->urange_c = 30; + tti->ufc_a = hldev->bimodal_urange_a_en * 8; + tti->ufc_b = 16; + tti->ufc_c = 32; + tti->ufc_d = 64; + tti->timer_val_us = hldev->bimodal_timer_val_us; + tti->timer_ac_en = 1; + tti->timer_ci_en = 0; + + rti->urange_a 
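/*
 * Sketch of a hypothetical caller: unmask only the Tx and Rx traffic
 * interrupts handled by __hal_device_intr_mgmt() above.  flag == 1 clears
 * the corresponding bits in general_int_mask; flag == 0 sets them again.
 */
static void
xge_example_enable_traffic_intrs(xge_hal_device_t *hldev)
{
	u64 mask = XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR;

	__hal_device_intr_mgmt(hldev, mask, 1);
}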
= 10; + rti->urange_b = 20; + rti->urange_c = 30; + rti->ufc_a = 1; /* <= for netpipe type of tests */ + rti->ufc_b = 4; + rti->ufc_c = 4; + rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ + rti->timer_ac_en = 1; + rti->timer_val_us = 5; /* for optimal bus efficiency usage */ } } @@ -955,41 +947,41 @@ __hal_device_bimodal_configure(xge_hal_device_t *hldev) */ static xge_hal_status_e __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti, - int num, int runtime) + int num, int runtime) { u64 val64, data1 = 0, data2 = 0; xge_hal_pci_bar0_t *bar0; if (runtime) - bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; + bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; else - bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; if (tti->timer_val_us) { - unsigned int tx_interval; - - if (hldev->config.pci_freq_mherz) { - tx_interval = hldev->config.pci_freq_mherz * - tti->timer_val_us / 64; - tx_interval = - __hal_fix_time_ival_herc(hldev, - tx_interval); - } else { - tx_interval = tti->timer_val_us; - } - data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); - if (tti->timer_ac_en) { - data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; - } - if (tti->timer_ci_en) { - data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; - } - - if (!runtime) { - xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s", - num, tx_interval, tti->timer_ci_en ? - "enabled": "disabled"); - } + unsigned int tx_interval; + + if (hldev->config.pci_freq_mherz) { + tx_interval = hldev->config.pci_freq_mherz * + tti->timer_val_us / 64; + tx_interval = + __hal_fix_time_ival_herc(hldev, + tx_interval); + } else { + tx_interval = tti->timer_val_us; + } + data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); + if (tti->timer_ac_en) { + data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; + } + if (tti->timer_ci_en) { + data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; + } + + if (!runtime) { + xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s", + num, tx_interval, tti->timer_ci_en ? 
+ "enabled": "disabled"); + } } if (tti->urange_a || @@ -999,43 +991,43 @@ __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti, tti->ufc_b || tti->ufc_c || tti->ufc_d ) { - data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | - XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | - XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); + data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | + XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | + XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); - data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | - XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | - XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | - XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); + data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | + XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | + XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | + XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, - &bar0->tti_data1_mem); + &bar0->tti_data1_mem); (void)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->tti_data1_mem); + hldev->regh0, &bar0->tti_data1_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, - &bar0->tti_data2_mem); + &bar0->tti_data2_mem); (void)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->tti_data2_mem); + hldev->regh0, &bar0->tti_data2_mem); xge_os_wmb(); val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD | XGE_HAL_TTI_CMD_MEM_OFFSET(num); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->tti_command_mem); + &bar0->tti_command_mem); if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem, - 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } if (!runtime) { - xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x" - XGE_OS_LLXFMT, num, - (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->tti_data1_mem)); + xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x" + XGE_OS_LLXFMT, num, + (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->tti_data1_mem)); } return XGE_HAL_OK; @@ -1054,42 +1046,42 @@ __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime) int i; for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { - int j; + int j; - if (!hldev->config.fifo.queue[i].configured) - continue; + if (!hldev->config.fifo.queue[i].configured) + continue; - for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { - xge_hal_status_e status; + for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { + xge_hal_status_e status; - if (!hldev->config.fifo.queue[i].tti[j].enabled) - continue; + if (!hldev->config.fifo.queue[i].tti[j].enabled) + continue; - /* at least some TTI enabled. Record it. */ - hldev->tti_enabled = 1; + /* at least some TTI enabled. Record it. 
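/*
 * Sketch of the TTI interval math applied above (example values assumed):
 * with pci_freq_mherz = 133 and timer_val_us = 64 the programmed value is
 * 133 * 64 / 64 = 133, and __hal_fix_time_ival_herc() doubles it to 266 on
 * a Herc card whose bus frequency is known and is not 33MHz.
 */
static unsigned int
xge_example_tx_timer_val(xge_hal_device_t *hldev, unsigned int timer_val_us)
{
	unsigned int tx_interval;

	if (hldev->config.pci_freq_mherz)
		tx_interval = hldev->config.pci_freq_mherz * timer_val_us / 64;
	else
		tx_interval = timer_val_us;

	return __hal_fix_time_ival_herc(hldev, tx_interval);
}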
*/ + hldev->tti_enabled = 1; - status = __hal_device_tti_apply(hldev, - &hldev->config.fifo.queue[i].tti[j], - i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime); - if (status != XGE_HAL_OK) - return status; - } + status = __hal_device_tti_apply(hldev, + &hldev->config.fifo.queue[i].tti[j], + i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime); + if (status != XGE_HAL_OK) + return status; + } } /* processing bimodal TTIs */ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_status_e status; + xge_hal_status_e status; - if (!hldev->bimodal_tti[i].enabled) - continue; + if (!hldev->bimodal_tti[i].enabled) + continue; - /* at least some bimodal TTI enabled. Record it. */ - hldev->tti_enabled = 1; + /* at least some bimodal TTI enabled. Record it. */ + hldev->tti_enabled = 1; - status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i], - XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime); - if (status != XGE_HAL_OK) - return status; + status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i], + XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime); + if (status != XGE_HAL_OK) + return status; } @@ -1111,92 +1103,92 @@ __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime) int i; if (runtime) { - /* - * we don't want to re-configure RTI in case when - * bimodal interrupts are in use. Instead reconfigure TTI - * with new RTI values. - */ - if (hldev->config.bimodal_interrupts) { - __hal_device_bimodal_configure(hldev); - return __hal_device_tti_configure(hldev, 1); - } - bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; + /* + * we don't want to re-configure RTI in case when + * bimodal interrupts are in use. Instead reconfigure TTI + * with new RTI values. + */ + if (hldev->config.bimodal_interrupts) { + __hal_device_bimodal_configure(hldev); + return __hal_device_tti_configure(hldev, 1); + } + bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; } else - bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti; - - if (!hldev->config.ring.queue[i].configured) - continue; - - if (rti->timer_val_us) { - unsigned int rx_interval; - - if (hldev->config.pci_freq_mherz) { - rx_interval = hldev->config.pci_freq_mherz * - rti->timer_val_us / 8; - rx_interval = - __hal_fix_time_ival_herc(hldev, - rx_interval); - } else { - rx_interval = rti->timer_val_us; - } - data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval); - if (rti->timer_ac_en) { - data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN; - } - data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN; - } - - if (rti->urange_a || - rti->urange_b || - rti->urange_c || - rti->ufc_a || - rti->ufc_b || - rti->ufc_c || - rti->ufc_d) { - data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) | - XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) | - XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c); - - data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) | - XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) | - XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) | - XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d); - } - - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, - &bar0->rti_data1_mem); - (void)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->rti_data1_mem); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, - &bar0->rti_data2_mem); - (void)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->rti_data2_mem); - xge_os_wmb(); - - val64 = XGE_HAL_RTI_CMD_MEM_WE | - XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD; - val64 |= 
XGE_HAL_RTI_CMD_MEM_OFFSET(i); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rti_command_mem); - - if (!runtime && __hal_device_register_poll(hldev, - &bar0->rti_command_mem, 0, - XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; - } - - if (!runtime) { - xge_debug_device(XGE_TRACE, - "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT, - i, - (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->rti_data1_mem)); - } + xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti; + + if (!hldev->config.ring.queue[i].configured) + continue; + + if (rti->timer_val_us) { + unsigned int rx_interval; + + if (hldev->config.pci_freq_mherz) { + rx_interval = hldev->config.pci_freq_mherz * + rti->timer_val_us / 8; + rx_interval = + __hal_fix_time_ival_herc(hldev, + rx_interval); + } else { + rx_interval = rti->timer_val_us; + } + data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval); + if (rti->timer_ac_en) { + data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN; + } + data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN; + } + + if (rti->urange_a || + rti->urange_b || + rti->urange_c || + rti->ufc_a || + rti->ufc_b || + rti->ufc_c || + rti->ufc_d) { + data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) | + XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) | + XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c); + + data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) | + XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) | + XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) | + XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d); + } + + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, + &bar0->rti_data1_mem); + (void)xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->rti_data1_mem); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, + &bar0->rti_data2_mem); + (void)xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->rti_data2_mem); + xge_os_wmb(); + + val64 = XGE_HAL_RTI_CMD_MEM_WE | + XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD; + val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rti_command_mem); + + if (!runtime && __hal_device_register_poll(hldev, + &bar0->rti_command_mem, 0, + XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } + + if (!runtime) { + xge_debug_device(XGE_TRACE, + "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT, + i, + (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->rti_data1_mem)); + } } return XGE_HAL_OK; @@ -1256,18 +1248,18 @@ static u64 default_herc_dtx_cfg[] = { */ static u64 default_herc_dtx_cfg[] = { - 0x8000051536750000ULL, 0x80000515367500E0ULL, - 0x8000051536750004ULL, 0x80000515367500E4ULL, + 0x8000051536750000ULL, 0x80000515367500E0ULL, + 0x8000051536750004ULL, 0x80000515367500E4ULL, - 0x80010515003F0000ULL, 0x80010515003F00E0ULL, - 0x80010515003F0004ULL, 0x80010515003F00E4ULL, + 0x80010515003F0000ULL, 0x80010515003F00E0ULL, + 0x80010515003F0004ULL, 0x80010515003F00E4ULL, - 0x801205150D440000ULL, 0x801205150D4400E0ULL, - 0x801205150D440004ULL, 0x801205150D4400E4ULL, + 0x801205150D440000ULL, 0x801205150D4400E0ULL, + 0x801205150D440004ULL, 0x801205150D4400E4ULL, - 0x80020515F2100000ULL, 0x80020515F21000E0ULL, - 0x80020515F2100004ULL, 0x80020515F21000E4ULL, - END_SIGN + 
0x80020515F2100000ULL, 0x80020515F21000E0ULL, + 0x80020515F2100004ULL, 0x80020515F21000E4ULL, + END_SIGN }; @@ -1275,10 +1267,10 @@ void __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg) { __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(value>>32), reg); + (u32)(value>>32), reg); xge_os_wmb(); __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, - (u32)value, reg); + (u32)value, reg); xge_os_wmb(); xge_os_mdelay(1); } @@ -1287,7 +1279,7 @@ u64 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg) { u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - reg); + reg); xge_os_mdelay(1); return val64; } @@ -1317,39 +1309,39 @@ __hal_device_xaui_configure(xge_hal_device_t *hldev) u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - default_dtx_cfg = default_xena_dtx_cfg; - default_mdio_cfg = default_xena_mdio_cfg; + default_dtx_cfg = default_xena_dtx_cfg; + default_mdio_cfg = default_xena_mdio_cfg; } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - default_dtx_cfg = default_herc_dtx_cfg; - default_mdio_cfg = default_herc_mdio_cfg; + default_dtx_cfg = default_herc_dtx_cfg; + default_mdio_cfg = default_herc_mdio_cfg; } else { - xge_assert(default_dtx_cfg); - return; + xge_assert(default_dtx_cfg); + return; } do { dtx_cfg: - while (default_dtx_cfg[dtx_cnt] != END_SIGN) { - if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { - dtx_cnt++; - goto mdio_cfg; - } - __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt], - &bar0->dtx_control); - dtx_cnt++; - } + while (default_dtx_cfg[dtx_cnt] != END_SIGN) { + if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { + dtx_cnt++; + goto mdio_cfg; + } + __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt], + &bar0->dtx_control); + dtx_cnt++; + } mdio_cfg: - while (default_mdio_cfg[mdio_cnt] != END_SIGN) { - if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { - mdio_cnt++; - goto dtx_cfg; - } - __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt], - &bar0->mdio_control); - mdio_cnt++; - } + while (default_mdio_cfg[mdio_cnt] != END_SIGN) { + if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { + mdio_cnt++; + goto dtx_cfg; + } + __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt], + &bar0->mdio_control); + mdio_cnt++; + } } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) && - (default_mdio_cfg[mdio_cnt] == END_SIGN)) ); + (default_mdio_cfg[mdio_cnt] == END_SIGN)) ); xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured"); } @@ -1367,13 +1359,13 @@ __hal_device_mac_link_util_set(xge_hal_device_t *hldev) u64 val64; val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL( - hldev->config.mac.tmac_util_period) | - XGE_HAL_MAC_RX_LINK_UTIL_VAL( - hldev->config.mac.rmac_util_period); + hldev->config.mac.tmac_util_period) | + XGE_HAL_MAC_RX_LINK_UTIL_VAL( + hldev->config.mac.rmac_util_period); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mac_link_util); xge_debug_device(XGE_TRACE, "%s", - "bandwidth link utilization configured"); + "bandwidth link utilization configured"); } /* @@ -1410,16 +1402,16 @@ __hal_device_set_swapper(xge_hal_device_t *hldev) #if defined(XGE_HAL_CUSTOM_HW_SWAPPER) xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xffffffffffffffffULL, &bar0->swapper_ctrl); + 0xffffffffffffffffULL, &bar0->swapper_ctrl); val64 = XGE_HAL_CUSTOM_HW_SWAPPER; xge_os_wmb(); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->swapper_ctrl); + &bar0->swapper_ctrl); xge_debug_device(XGE_TRACE, "using custom HW 
swapper 0x"XGE_OS_LLXFMT, - (unsigned long long)val64); + (unsigned long long)val64); #elif !defined(XGE_OS_HOST_BIG_ENDIAN) @@ -1432,32 +1424,32 @@ __hal_device_set_swapper(xge_hal_device_t *hldev) * Use only for little endian platforms. */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0xffffffffffffffffULL, &bar0->swapper_ctrl); + 0xffffffffffffffffULL, &bar0->swapper_ctrl); xge_os_wmb(); val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE | - XGE_HAL_SWAPPER_CTRL_PIF_R_SE | - XGE_HAL_SWAPPER_CTRL_PIF_W_FE | - XGE_HAL_SWAPPER_CTRL_PIF_W_SE | - XGE_HAL_SWAPPER_CTRL_RTH_FE | - XGE_HAL_SWAPPER_CTRL_RTH_SE | - XGE_HAL_SWAPPER_CTRL_TXP_FE | - XGE_HAL_SWAPPER_CTRL_TXP_SE | - XGE_HAL_SWAPPER_CTRL_TXD_R_FE | - XGE_HAL_SWAPPER_CTRL_TXD_R_SE | - XGE_HAL_SWAPPER_CTRL_TXD_W_FE | - XGE_HAL_SWAPPER_CTRL_TXD_W_SE | - XGE_HAL_SWAPPER_CTRL_TXF_R_FE | - XGE_HAL_SWAPPER_CTRL_RXD_R_FE | - XGE_HAL_SWAPPER_CTRL_RXD_R_SE | - XGE_HAL_SWAPPER_CTRL_RXD_W_FE | - XGE_HAL_SWAPPER_CTRL_RXD_W_SE | - XGE_HAL_SWAPPER_CTRL_RXF_W_FE | - XGE_HAL_SWAPPER_CTRL_XMSI_FE | - XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE); + XGE_HAL_SWAPPER_CTRL_PIF_R_SE | + XGE_HAL_SWAPPER_CTRL_PIF_W_FE | + XGE_HAL_SWAPPER_CTRL_PIF_W_SE | + XGE_HAL_SWAPPER_CTRL_RTH_FE | + XGE_HAL_SWAPPER_CTRL_RTH_SE | + XGE_HAL_SWAPPER_CTRL_TXP_FE | + XGE_HAL_SWAPPER_CTRL_TXP_SE | + XGE_HAL_SWAPPER_CTRL_TXD_R_FE | + XGE_HAL_SWAPPER_CTRL_TXD_R_SE | + XGE_HAL_SWAPPER_CTRL_TXD_W_FE | + XGE_HAL_SWAPPER_CTRL_TXD_W_SE | + XGE_HAL_SWAPPER_CTRL_TXF_R_FE | + XGE_HAL_SWAPPER_CTRL_RXD_R_FE | + XGE_HAL_SWAPPER_CTRL_RXD_R_SE | + XGE_HAL_SWAPPER_CTRL_RXD_W_FE | + XGE_HAL_SWAPPER_CTRL_RXD_W_SE | + XGE_HAL_SWAPPER_CTRL_RXF_W_FE | + XGE_HAL_SWAPPER_CTRL_XMSI_FE | + XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE); /* if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE; + val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE; } */ __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64, &bar0->swapper_ctrl); @@ -1475,9 +1467,9 @@ __hal_device_set_swapper(xge_hal_device_t *hldev) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->pif_rd_swapper_fb); if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) { - xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT, - (unsigned long long) val64); - return XGE_HAL_ERR_SWAPPER_CTRL; + xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT, + (unsigned long long) val64); + return XGE_HAL_ERR_SWAPPER_CTRL; } xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled"); @@ -1498,7 +1490,7 @@ __hal_device_rts_mac_configure(xge_hal_device_t *hldev) u64 val64; if (!hldev->config.rts_mac_en) { - return XGE_HAL_OK; + return XGE_HAL_OK; } /* @@ -1506,10 +1498,10 @@ __hal_device_rts_mac_configure(xge_hal_device_t *hldev) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_ctrl); + &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); return XGE_HAL_OK; } @@ -1527,7 +1519,7 @@ __hal_device_rts_port_configure(xge_hal_device_t *hldev) int rnum; if (!hldev->config.rts_port_en) { - return XGE_HAL_OK; + return XGE_HAL_OK; } /* @@ -1535,67 +1527,67 @@ __hal_device_rts_port_configure(xge_hal_device_t *hldev) * to enhanced. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_ctrl); + &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); /* * Initiate port steering according to per-ring configuration */ for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { - int pnum; - xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; - - if (!queue->configured || queue->rts_port_en) - continue; - - for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { - xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; - - /* - * Skip and clear empty ports - */ - if (!port->num) { - /* - * Clear CAM memory - */ - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, 0ULL, - &bar0->rts_pn_cam_data); - - val64 = BIT(7) | BIT(15); - } else { - /* - * Assign new Port values according - * to configuration - */ - val64 = vBIT(port->num,8,16) | - vBIT(rnum,37,3) | BIT(63); - if (port->src) - val64 = BIT(47); - if (!port->udp) - val64 = BIT(7); - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, val64, - &bar0->rts_pn_cam_data); - - val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8); - } - - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_pn_cam_ctrl); - - /* poll until done */ - if (__hal_device_register_poll(hldev, - &bar0->rts_pn_cam_ctrl, 0, - XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != - XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; - } - } + int pnum; + xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; + + if (!queue->configured || queue->rts_port_en) + continue; + + for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { + xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; + + /* + * Skip and clear empty ports + */ + if (!port->num) { + /* + * Clear CAM memory + */ + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, 0ULL, + &bar0->rts_pn_cam_data); + + val64 = BIT(7) | BIT(15); + } else { + /* + * Assign new Port values according + * to configuration + */ + val64 = vBIT(port->num,8,16) | + vBIT(rnum,37,3) | BIT(63); + if (port->src) + val64 = BIT(47); + if (!port->udp) + val64 = BIT(7); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, val64, + &bar0->rts_pn_cam_data); + + val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8); + } + + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->rts_pn_cam_ctrl); + + /* poll until done */ + if (__hal_device_register_poll(hldev, + &bar0->rts_pn_cam_ctrl, 0, + XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != + XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } + } } return XGE_HAL_OK; } @@ -1614,128 +1606,128 @@ __hal_device_rts_qos_configure(xge_hal_device_t *hldev) int j, rx_ring_num; if (!hldev->config.rts_qos_en) { - return XGE_HAL_OK; + return XGE_HAL_OK; } /* First clear the RTS_DS_MEM_DATA */ val64 = 0; for (j = 0; j < 64; j++ ) { - /* First clear the value */ - val64 = XGE_HAL_RTS_DS_MEM_DATA(0); + /* First clear the value */ + val64 = XGE_HAL_RTS_DS_MEM_DATA(0); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_ds_mem_data); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_ds_mem_data); - val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | - XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | - XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); + val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | + 
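/*
 * Sketch of a hypothetical configuration, with field semantics assumed from
 * the CAM-programming loop above: describe one steerable L4 port on ring 2
 * so that __hal_device_rts_port_configure() writes it into the port CAM.
 * Entries whose num field is 0 are cleared instead.
 */
static void
xge_example_fill_rts_port(xge_hal_device_t *hldev)
{
	xge_hal_rts_port_t *port = &hldev->config.ring.queue[2].rts_ports[0];

	port->num = 80;	/* L4 port number to match */
	port->udp = 0;	/* TCP entry */
	port->src = 0;	/* match as a destination port (assumed meaning) */
}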
XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | + XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_ds_mem_ctrl); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_ds_mem_ctrl); - /* poll until done */ - if (__hal_device_register_poll(hldev, - &bar0->rts_ds_mem_ctrl, 0, - XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; - } + /* poll until done */ + if (__hal_device_register_poll(hldev, + &bar0->rts_ds_mem_ctrl, 0, + XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } } rx_ring_num = 0; for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) { - if (hldev->config.ring.queue[j].configured) - rx_ring_num++; + if (hldev->config.ring.queue[j].configured) + rx_ring_num++; } switch (rx_ring_num) { case 1: - val64 = 0x0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 2: - val64 = 0x0001000100010001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0001000100000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001000100010001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001000100000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 3: - val64 = 0x0001020001020001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - val64 = 0x0200010200010200ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - val64 = 0x0102000102000102ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - val64 = 0x0001020001020001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0200010200000000ULL; - 
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020001020001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + val64 = 0x0200010200010200ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + val64 = 0x0102000102000102ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + val64 = 0x0001020001020001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0200010200000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 4: - val64 = 0x0001020300010203ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0001020300000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020300010203ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 5: - val64 = 0x0001020304000102ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - val64 = 0x0304000102030400ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - val64 = 0x0102030400010203ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - val64 = 0x0400010203040001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0203040000000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020304000102ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + val64 = 0x0304000102030400ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + val64 = 0x0102030400010203ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + val64 = 0x0400010203040001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0203040000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 6: - val64 = 0x0001020304050001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - val64 = 0x0203040500010203ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - val64 = 0x0405000102030405ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - val64 = 0x0001020304050001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0203040500000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
&bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020304050001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + val64 = 0x0203040500010203ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + val64 = 0x0405000102030405ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + val64 = 0x0001020304050001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0203040500000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 7: - val64 = 0x0001020304050600ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - val64 = 0x0102030405060001ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - val64 = 0x0203040506000102ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - val64 = 0x0304050600010203ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0405060000000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020304050600ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + val64 = 0x0102030405060001ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + val64 = 0x0203040506000102ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + val64 = 0x0304050600010203ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0405060000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; case 8: - val64 = 0x0001020304050607ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); - val64 = 0x0001020300000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); - break; + val64 = 0x0001020304050607ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); + break; } return XGE_HAL_OK; @@ -1754,7 +1746,7 @@ __hal_device_rts_qos_configure(xge_hal_device_t *hldev) * NOTE: * 1. ULD has to call this function with the index value which * statisfies the following condition: - * ring_num = (index % 8) + * ring_num = (index % 8) * 2.ULD also needs to make sure that the index is not * occupied by any MAC address. If that index has any MAC address * it will be overwritten and HAL will not check for it. 
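/*
 * Sketch, illustrative only: the rx_w_round_robin_* values in the switch
 * above pack one ring number per byte, so N configured rings are serviced
 * 0, 1, ..., N-1, 0, ... in strict rotation.  Building one 8-byte register
 * value by hand:
 */
static u64
xge_example_rr_word(int nrings, int start_slot)
{
	u64 word = 0;
	int byte;

	for (byte = 0; byte < 8; byte++) {
		u64 ring = (u64)((start_slot + byte) % nrings);
		word |= ring << (56 - byte * 8);
	}
	/* nrings == 3, start_slot == 0 yields 0x0001020001020001, matching
	 * rx_w_round_robin_0 in the 3-ring case above */
	return word;
}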
@@ -1769,19 +1761,19 @@ xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macadd xge_hal_device_t *hldev = (xge_hal_device_t *)devh; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; + max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; if ( index >= max_addr ) - return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; + return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; /* * Set the MAC address at the given location marked by index. */ status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "%s", - "Not able to set the mac addr"); - return status; + xge_debug_device(XGE_ERR, "%s", + "Not able to set the mac addr"); + return status; } return xge_hal_device_rts_section_enable(hldev, index); @@ -1808,19 +1800,19 @@ xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index) xge_debug_ll(XGE_TRACE, "the index value is %d ", index); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; + max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; if ( index >= max_addr ) - return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; + return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; /* * Disable MAC address @ given index location */ status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "%s", - "Not able to set the mac addr"); - return status; + xge_debug_device(XGE_ERR, "%s", + "Not able to set the mac addr"); + return status; } return XGE_HAL_OK; @@ -1845,7 +1837,7 @@ __hal_device_rth_it_configure(xge_hal_device_t *hldev) int bucket; if (!hldev->config.rth_en) { - return XGE_HAL_OK; + return XGE_HAL_OK; } /* @@ -1853,18 +1845,18 @@ __hal_device_rth_it_configure(xge_hal_device_t *hldev) * to enhanced. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_ctrl); + &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); buckets_num = (1 << hldev->config.rth_bucket_size); rmax=0; for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { - if (hldev->config.ring.queue[rnum].configured && - hldev->config.ring.queue[rnum].rth_en) - rings[rmax++] = rnum; + if (hldev->config.ring.queue[rnum].configured && + hldev->config.ring.queue[rnum].rth_en) + rings[rmax++] = rnum; } rnum = 0; @@ -1872,43 +1864,43 @@ __hal_device_rth_it_configure(xge_hal_device_t *hldev) for (bucket = 0; bucket < buckets_num; bucket++) { if (rnum == rmax) - rnum = 0; + rnum = 0; - /* write data */ - val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | - XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_map_mem_data); + /* write data */ + val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | + XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_rth_map_mem_data); - /* execute */ - val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_map_mem_ctrl); + /* execute */ + val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_rth_map_mem_ctrl); - /* poll until done */ - if (__hal_device_register_poll(hldev, - &bar0->rts_rth_map_mem_ctrl, 0, - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; - } + /* poll until done */ + if (__hal_device_register_poll(hldev, + &bar0->rts_rth_map_mem_ctrl, 0, + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } - rnum++; + rnum++; } val64 = XGE_HAL_RTS_RTH_EN; val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size); val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN | - XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN | - XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN; + XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN | + XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d", - hldev->config.rth_bucket_size); + hldev->config.rth_bucket_size); return XGE_HAL_OK; } @@ -1928,8 +1920,8 @@ __hal_device_rth_it_configure(xge_hal_device_t *hldev) */ static xge_hal_status_e __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip, - xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp, - u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry) + xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp, + u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; @@ -1941,11 +1933,11 @@ __hal_spdm_entry_add(xge_hal_device_t 
*hldev, xge_hal_ipaddr_t *src_ip, */ val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rxpic_int_reg); + &bar0->rxpic_int_reg); xge_debug_device(XGE_TRACE, - "L4 SP %x:DP %x: hash %x tgt_queue %d ", - l4_sp, l4_dp, jhash_value, tgt_queue); + "L4 SP %x:DP %x: hash %x tgt_queue %d ", + l4_sp, l4_dp, jhash_value, tgt_queue); xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr)); @@ -1953,44 +1945,44 @@ __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip, * Construct the SPDM entry. */ spdm_line_arr[0] = vBIT(l4_sp,0,16) | - vBIT(l4_dp,16,32) | - vBIT(tgt_queue,53,3) | - vBIT(is_tcp,59,1) | - vBIT(is_ipv4,63,1); + vBIT(l4_dp,16,32) | + vBIT(tgt_queue,53,3) | + vBIT(is_tcp,59,1) | + vBIT(is_ipv4,63,1); if (is_ipv4) { - spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) | - vBIT(dst_ip->ipv4.addr,32,32); + spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) | + vBIT(dst_ip->ipv4.addr,32,32); } else { - xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8); - xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8); - xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8); - xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8); + xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8); + xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8); + xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8); + xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8); } spdm_line_arr[7] = vBIT(jhash_value,0,32) | - BIT(63); /* entry enable bit */ + BIT(63); /* entry enable bit */ /* * Add the entry to the SPDM table */ for(line_no = 0; line_no < 8; line_no++) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - spdm_line_arr[line_no], - (void *)((char *)hldev->spdm_mem_base + - (spdm_entry * 64) + - (line_no * 8))); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + spdm_line_arr[line_no], + (void *)((char *)hldev->spdm_mem_base + + (spdm_entry * 64) + + (line_no * 8))); } /* * Wait for the operation to be completed. */ if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, - XGE_HAL_RX_PIC_INT_REG_SPDM_READY, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + XGE_HAL_RX_PIC_INT_REG_SPDM_READY, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } /* @@ -2001,9 +1993,9 @@ __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip, */ hldev->spdm_table[spdm_entry]->in_use = 1; xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip, - sizeof(xge_hal_ipaddr_t)); + sizeof(xge_hal_ipaddr_t)); xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip, - sizeof(xge_hal_ipaddr_t)); + sizeof(xge_hal_ipaddr_t)); hldev->spdm_table[spdm_entry]->l4_sp = l4_sp; hldev->spdm_table[spdm_entry]->l4_dp = l4_dp; hldev->spdm_table[spdm_entry]->is_tcp = is_tcp; @@ -2032,17 +2024,17 @@ __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) int i; if (!hldev->config.rth_spdm_en) { - return XGE_HAL_OK; + return XGE_HAL_OK; } /* * Retrieve the base address of SPDM Table. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->spdm_bir_offset); + hldev->regh0, &bar0->spdm_bir_offset); - spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64); - spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64); + spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64); + spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64); /* @@ -2051,92 +2043,92 @@ __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) * of the SPDM memory with in the bar num memory space. */ switch (spdm_bar_num) { - case 0: - { - hldev->spdm_mem_base = (char *)bar0 + - (spdm_bar_offset * 8); - break; - } - case 1: - { - char *bar1 = (char *)hldev->bar1; - hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8); - break; - } - default: - xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1))); + case 0: + { + hldev->spdm_mem_base = (char *)bar0 + + (spdm_bar_offset * 8); + break; + } + case 1: + { + char *bar1 = (char *)hldev->bar1; + hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8); + break; + } + default: + xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1))); } /* * Retrieve the size of SPDM table(number of entries). */ val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->spdm_structure); + hldev->regh0, &bar0->spdm_structure); hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64); spdm_table_size = hldev->spdm_max_entries * - sizeof(xge_hal_spdm_entry_t); + sizeof(xge_hal_spdm_entry_t); if (hldev->spdm_table == NULL) { - void *mem; - - /* - * Allocate memory to hold the copy of SPDM table. - */ - if ((hldev->spdm_table = (xge_hal_spdm_entry_t **) - xge_os_malloc( - hldev->pdev, - (sizeof(xge_hal_spdm_entry_t *) * - hldev->spdm_max_entries))) == NULL) { - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) - { - xge_os_free(hldev->pdev, hldev->spdm_table, - (sizeof(xge_hal_spdm_entry_t *) * - hldev->spdm_max_entries)); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - xge_os_memzero(mem, spdm_table_size); - for (i = 0; i < hldev->spdm_max_entries; i++) { - hldev->spdm_table[i] = (xge_hal_spdm_entry_t *) - ((char *)mem + - i * sizeof(xge_hal_spdm_entry_t)); - } - xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev); + void *mem; + + /* + * Allocate memory to hold the copy of SPDM table. + */ + if ((hldev->spdm_table = (xge_hal_spdm_entry_t **) + xge_os_malloc( + hldev->pdev, + (sizeof(xge_hal_spdm_entry_t *) * + hldev->spdm_max_entries))) == NULL) { + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + + if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) + { + xge_os_free(hldev->pdev, hldev->spdm_table, + (sizeof(xge_hal_spdm_entry_t *) * + hldev->spdm_max_entries)); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + + xge_os_memzero(mem, spdm_table_size); + for (i = 0; i < hldev->spdm_max_entries; i++) { + hldev->spdm_table[i] = (xge_hal_spdm_entry_t *) + ((char *)mem + + i * sizeof(xge_hal_spdm_entry_t)); + } + xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev); } else { - /* - * We are here because the host driver tries to - * do a soft reset on the device. - * Since the device soft reset clears the SPDM table, copy - * the entries from the local SPDM table to the actual one. 
- */ - xge_os_spin_lock(&hldev->spdm_lock); - for (i = 0; i < hldev->spdm_max_entries; i++) { - xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i]; - - if (spdm_entry->in_use) { - if (__hal_spdm_entry_add(hldev, - &spdm_entry->src_ip, - &spdm_entry->dst_ip, - spdm_entry->l4_sp, - spdm_entry->l4_dp, - spdm_entry->is_tcp, - spdm_entry->is_ipv4, - spdm_entry->tgt_queue, - spdm_entry->jhash_value, - spdm_entry->spdm_entry) - != XGE_HAL_OK) { - /* Log an warning */ - xge_debug_device(XGE_ERR, - "SPDM table update from local" - " memory failed"); - } - } - } - xge_os_spin_unlock(&hldev->spdm_lock); + /* + * We are here because the host driver tries to + * do a soft reset on the device. + * Since the device soft reset clears the SPDM table, copy + * the entries from the local SPDM table to the actual one. + */ + xge_os_spin_lock(&hldev->spdm_lock); + for (i = 0; i < hldev->spdm_max_entries; i++) { + xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i]; + + if (spdm_entry->in_use) { + if (__hal_spdm_entry_add(hldev, + &spdm_entry->src_ip, + &spdm_entry->dst_ip, + spdm_entry->l4_sp, + spdm_entry->l4_dp, + spdm_entry->is_tcp, + spdm_entry->is_ipv4, + spdm_entry->tgt_queue, + spdm_entry->jhash_value, + spdm_entry->spdm_entry) + != XGE_HAL_OK) { + /* Log an warning */ + xge_debug_device(XGE_ERR, + "SPDM table update from local" + " memory failed"); + } + } + } + xge_os_spin_unlock(&hldev->spdm_lock); } /* @@ -2144,10 +2136,10 @@ __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->rts_ctrl); + hldev->regh0, &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); /* * We may not need to configure rts_rth_jhash_cfg register as the @@ -2158,23 +2150,23 @@ __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) * As of now, set all the rth mask registers to zero. TODO. 
*/ for(i = 0; i < 5; i++) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0, &bar0->rts_rth_hash_mask[i]); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + 0, &bar0->rts_rth_hash_mask[i]); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - 0, &bar0->rts_rth_hash_mask_5); + 0, &bar0->rts_rth_hash_mask_5); if (hldev->config.rth_spdm_use_l4) { - val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_rth_status); + val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->rts_rth_status); } val64 = XGE_HAL_RTS_RTH_EN; val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); return XGE_HAL_OK; @@ -2197,115 +2189,94 @@ __hal_device_pci_init(xge_hal_device_t *hldev) /* Store PCI device ID and revision for future references where in we * decide Xena revision using PCI sub system ID */ xge_os_pci_read16(hldev->pdev,hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, device_id), - &hldev->device_id); + xge_offsetof(xge_hal_pci_config_le_t, device_id), + &hldev->device_id); xge_os_pci_read8(hldev->pdev,hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, revision), - &hldev->revision); + xge_offsetof(xge_hal_pci_config_le_t, revision), + &hldev->revision); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - pcisize = XGE_HAL_PCISIZE_HERC; + pcisize = XGE_HAL_PCISIZE_HERC; else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) - pcisize = XGE_HAL_PCISIZE_XENA; + pcisize = XGE_HAL_PCISIZE_XENA; /* save original PCI config space to restore it on device_terminate() */ for (i = 0; i < pcisize; i++) { - xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, - (u32*)&hldev->pci_config_space_bios + i); + xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, + (u32*)&hldev->pci_config_space_bios + i); } /* Set the PErr Repconse bit and SERR in PCI command register. */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); cmd |= 0x140; xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, command), cmd); + xge_offsetof(xge_hal_pci_config_le_t, command), cmd); /* Set user spcecified value for the PCI Latency Timer */ if (hldev->config.latency_timer && hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) { - xge_os_pci_write8(hldev->pdev, hldev->cfgh, + xge_os_pci_write8(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, latency_timer), - (u8)hldev->config.latency_timer); + (u8)hldev->config.latency_timer); } /* Read back latency timer to reflect it into user level */ xge_os_pci_read8(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val); + xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val); hldev->config.latency_timer = val; /* Enable Data Parity Error Recovery in PCI-X command register. */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); cmd |= 1; xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); /* Set MMRB count in PCI-X command register. 
*/ if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) { - cmd &= 0xFFF3; - cmd |= hldev->config.mmrb_count << 2; - xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), - cmd); + cmd &= 0xFFF3; + cmd |= hldev->config.mmrb_count << 2; + xge_os_pci_write16(hldev->pdev, hldev->cfgh, + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), + cmd); } /* Read back MMRB count to reflect it into user level */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), - &cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), + &cmd); cmd &= 0x000C; hldev->config.mmrb_count = cmd>>2; /* Setting Maximum outstanding splits based on system type. */ if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) { - xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), - &cmd); - cmd &= 0xFF8F; - cmd |= hldev->config.max_splits_trans << 4; - xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), - cmd); + xge_os_pci_read16(hldev->pdev, hldev->cfgh, + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), + &cmd); + cmd &= 0xFF8F; + cmd |= hldev->config.max_splits_trans << 4; + xge_os_pci_write16(hldev->pdev, hldev->cfgh, + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), + cmd); } /* Read back max split trans to reflect it into user level */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); cmd &= 0x0070; hldev->config.max_splits_trans = cmd>>4; /* Forcibly disabling relaxed ordering capability of the card. */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); cmd &= 0xFFFD; xge_os_pci_write16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); + xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); /* save PCI config space for future resets */ for (i = 0; i < pcisize; i++) { - xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, - (u32*)&hldev->pci_config_space + i); - } - - if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || - hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - /* Upper limit of the MSI number enabled by the system */ - xge_os_pci_read32(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, msi_control), - &hldev->msi_mask); - hldev->msi_mask &= 0x70; - if (!hldev->msi_mask) - return; - hldev->msi_mask >>= 4; - /* - * This number's power of 2 is the number - * of MSIs enabled. - */ - hldev->msi_mask = (0x1 << hldev->msi_mask); - /* - * If 32 MSIs are enabled, then MSI numbers range from 0 - 31. - */ - hldev->msi_mask -= 1; + xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, + (u32*)&hldev->pci_config_space + i); } } @@ -2313,149 +2284,149 @@ __hal_device_pci_init(xge_hal_device_t *hldev) * __hal_device_pci_info_get - Get PCI bus informations such as width, frequency * and mode. * @devh: HAL device handle. - * @pci_mode: pointer to a variable of enumerated type - * xge_hal_pci_mode_e{}. - * @bus_frequency: pointer to a variable of enumerated type - * xge_hal_pci_bus_frequency_e{}. - * @bus_width: pointer to a variable of enumerated type - * xge_hal_pci_bus_width_e{}. + * @pci_mode: pointer to a variable of enumerated type + * xge_hal_pci_mode_e{}. 
+ * @bus_frequency: pointer to a variable of enumerated type + * xge_hal_pci_bus_frequency_e{}. + * @bus_width: pointer to a variable of enumerated type + * xge_hal_pci_bus_width_e{}. * * Get pci mode, frequency, and PCI bus width. * * Returns: one of the xge_hal_status_e{} enumerated types. - * XGE_HAL_OK - for success. + * XGE_HAL_OK - for success. * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card. - * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card. + * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card. * * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. */ static xge_hal_status_e __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, - xge_hal_pci_bus_frequency_e *bus_frequency, - xge_hal_pci_bus_width_e *bus_width) + xge_hal_pci_bus_frequency_e *bus_frequency, + xge_hal_pci_bus_width_e *bus_width) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_hal_status_e rc_status = XGE_HAL_OK; - xge_hal_card_e card_id = xge_hal_device_check_id (devh); + xge_hal_card_e card_id = xge_hal_device_check_id (devh); #ifdef XGE_HAL_HERC_EMULATION hldev->config.pci_freq_mherz = - XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; + XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; *pci_mode = XGE_HAL_PCI_66MHZ_MODE; #else if (card_id == XGE_HAL_CARD_HERC) { - xge_hal_pci_bar0_t *bar0 = - (xge_hal_pci_bar0_t *)(void *)hldev->bar0; - u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->pci_info); - if (XGE_HAL_PCI_32_BIT & pci_info) - *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT; - else - *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; - switch((pci_info & XGE_HAL_PCI_INFO)>>60) - { - case XGE_HAL_PCI_33MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_33MHZ; - *pci_mode = XGE_HAL_PCI_33MHZ_MODE; - break; - case XGE_HAL_PCI_66MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; - *pci_mode = XGE_HAL_PCI_66MHZ_MODE; - break; - case XGE_HAL_PCIX_M1_66MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; - *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE; - break; - case XGE_HAL_PCIX_M1_100MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_100MHZ; - *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE; - break; - case XGE_HAL_PCIX_M1_133MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; - *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE; - break; - case XGE_HAL_PCIX_M2_66MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; - *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE; - break; - case XGE_HAL_PCIX_M2_100MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_200MHZ; - *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE; - break; - case XGE_HAL_PCIX_M2_133MHZ_MODE: - *bus_frequency = - XGE_HAL_PCI_BUS_FREQUENCY_266MHZ; - *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE; - break; - case XGE_HAL_PCIX_M1_RESERVED: - case XGE_HAL_PCIX_M1_66MHZ_NS: - case XGE_HAL_PCIX_M1_100MHZ_NS: - case XGE_HAL_PCIX_M1_133MHZ_NS: - case XGE_HAL_PCIX_M2_RESERVED: - case XGE_HAL_PCIX_533_RESERVED: - default: - rc_status = XGE_HAL_ERR_INVALID_PCI_INFO; - xge_debug_device(XGE_ERR, - "invalid pci info "XGE_OS_LLXFMT, - (unsigned long long)pci_info); - break; - } - if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO) - xge_debug_device(XGE_TRACE, "PCI info: mode %d width " - "%d frequency %d", *pci_mode, *bus_width, - *bus_frequency); - if (hldev->config.pci_freq_mherz == - XGE_HAL_DEFAULT_USE_HARDCODE) { - hldev->config.pci_freq_mherz = *bus_frequency; - } + xge_hal_pci_bar0_t *bar0 = 
+ (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->pci_info); + if (XGE_HAL_PCI_32_BIT & pci_info) + *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT; + else + *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; + switch((pci_info & XGE_HAL_PCI_INFO)>>60) + { + case XGE_HAL_PCI_33MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_33MHZ; + *pci_mode = XGE_HAL_PCI_33MHZ_MODE; + break; + case XGE_HAL_PCI_66MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; + *pci_mode = XGE_HAL_PCI_66MHZ_MODE; + break; + case XGE_HAL_PCIX_M1_66MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; + *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE; + break; + case XGE_HAL_PCIX_M1_100MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_100MHZ; + *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE; + break; + case XGE_HAL_PCIX_M1_133MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; + *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE; + break; + case XGE_HAL_PCIX_M2_66MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; + *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE; + break; + case XGE_HAL_PCIX_M2_100MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_200MHZ; + *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE; + break; + case XGE_HAL_PCIX_M2_133MHZ_MODE: + *bus_frequency = + XGE_HAL_PCI_BUS_FREQUENCY_266MHZ; + *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE; + break; + case XGE_HAL_PCIX_M1_RESERVED: + case XGE_HAL_PCIX_M1_66MHZ_NS: + case XGE_HAL_PCIX_M1_100MHZ_NS: + case XGE_HAL_PCIX_M1_133MHZ_NS: + case XGE_HAL_PCIX_M2_RESERVED: + case XGE_HAL_PCIX_533_RESERVED: + default: + rc_status = XGE_HAL_ERR_INVALID_PCI_INFO; + xge_debug_device(XGE_ERR, + "invalid pci info "XGE_OS_LLXFMT, + (unsigned long long)pci_info); + break; + } + if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO) + xge_debug_device(XGE_TRACE, "PCI info: mode %d width " + "%d frequency %d", *pci_mode, *bus_width, + *bus_frequency); + if (hldev->config.pci_freq_mherz == + XGE_HAL_DEFAULT_USE_HARDCODE) { + hldev->config.pci_freq_mherz = *bus_frequency; + } } /* for XENA, we report PCI mode, only. PCI bus frequency, and bus width * are set to unknown */ else if (card_id == XGE_HAL_CARD_XENA) { - u32 pcix_status; - u8 dev_num, bus_num; - /* initialize defaults for XENA */ - *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; - *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; - xge_os_pci_read32(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, pcix_status), - &pcix_status); - dev_num = (u8)((pcix_status & 0xF8) >> 3); - bus_num = (u8)((pcix_status & 0xFF00) >> 8); - if (dev_num == 0 && bus_num == 0) - *pci_mode = XGE_HAL_PCI_BASIC_MODE; - else - *pci_mode = XGE_HAL_PCIX_BASIC_MODE; - xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode); - if (hldev->config.pci_freq_mherz == - XGE_HAL_DEFAULT_USE_HARDCODE) { - /* - * There is no way to detect BUS frequency on Xena, - * so, in case of automatic configuration we hopelessly - * assume 133MHZ. 
- */ - hldev->config.pci_freq_mherz = - XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; - } + u32 pcix_status; + u8 dev_num, bus_num; + /* initialize defaults for XENA */ + *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; + *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; + xge_os_pci_read32(hldev->pdev, hldev->cfgh, + xge_offsetof(xge_hal_pci_config_le_t, pcix_status), + &pcix_status); + dev_num = (u8)((pcix_status & 0xF8) >> 3); + bus_num = (u8)((pcix_status & 0xFF00) >> 8); + if (dev_num == 0 && bus_num == 0) + *pci_mode = XGE_HAL_PCI_BASIC_MODE; + else + *pci_mode = XGE_HAL_PCIX_BASIC_MODE; + xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode); + if (hldev->config.pci_freq_mherz == + XGE_HAL_DEFAULT_USE_HARDCODE) { + /* + * There is no way to detect BUS frequency on Xena, + * so, in case of automatic configuration we hopelessly + * assume 133MHZ. + */ + hldev->config.pci_freq_mherz = + XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; + } } else if (card_id == XGE_HAL_CARD_TITAN) { - *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; - *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ; - if (hldev->config.pci_freq_mherz == - XGE_HAL_DEFAULT_USE_HARDCODE) { - hldev->config.pci_freq_mherz = *bus_frequency; - } + *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; + *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ; + if (hldev->config.pci_freq_mherz == + XGE_HAL_DEFAULT_USE_HARDCODE) { + hldev->config.pci_freq_mherz = *bus_frequency; + } } else{ - rc_status = XGE_HAL_ERR_BAD_DEVICE_ID; - xge_debug_device(XGE_ERR, "invalid device id %d", card_id); + rc_status = XGE_HAL_ERR_BAD_DEVICE_ID; + xge_debug_device(XGE_ERR, "invalid device id %d", card_id); } #endif @@ -2480,86 +2451,86 @@ __hal_device_handle_link_up_ind(xge_hal_device_t *hldev) */ if (hldev->link_state == XGE_HAL_LINK_UP) { #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ - val64 = xge_os_pio_mem_read64( - hldev->pdev, hldev->regh0, - &bar0->misc_int_mask); - val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; - val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_int_mask); - } + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ + val64 = xge_os_pio_mem_read64( + hldev->pdev, hldev->regh0, + &bar0->misc_int_mask); + val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; + val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->misc_int_mask); + } #endif - xge_debug_device(XGE_TRACE, - "link up indication while link is up, ignoring.."); - return 0; + xge_debug_device(XGE_TRACE, + "link up indication while link is up, ignoring.."); + return 0; } /* Now re-enable it as due to noise, hardware turned it off */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); val64 |= XGE_HAL_ADAPTER_CNTL_EN; val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); + &bar0->adapter_control); /* Turn on the Laser */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON | - XGE_HAL_ADAPTER_LED_ON); + XGE_HAL_ADAPTER_LED_ON); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); + &bar0->adapter_control); #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - 
&bar0->adapter_status); + &bar0->adapter_status); if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) { - xge_debug_device(XGE_TRACE, "%s", - "fail to transition link to up..."); - return 0; + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) { + xge_debug_device(XGE_TRACE, "%s", + "fail to transition link to up..."); + return 0; } else { - /* - * Mask the Link Up interrupt and unmask the Link Down - * interrupt. - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_int_mask); - val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; - val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->misc_int_mask); - xge_debug_device(XGE_TRACE, "calling link up.."); - hldev->link_state = XGE_HAL_LINK_UP; - - /* notify ULD */ - if (g_xge_hal_driver->uld_callbacks.link_up) { - g_xge_hal_driver->uld_callbacks.link_up( - hldev->upper_layer_info); - } - return 1; + /* + * Mask the Link Up interrupt and unmask the Link Down + * interrupt. + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->misc_int_mask); + val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; + val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->misc_int_mask); + xge_debug_device(XGE_TRACE, "calling link up.."); + hldev->link_state = XGE_HAL_LINK_UP; + + /* notify ULD */ + if (g_xge_hal_driver->uld_callbacks.link_up) { + g_xge_hal_driver->uld_callbacks.link_up( + hldev->upper_layer_info); + } + return 1; } - } + } #endif xge_os_mdelay(1); if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, - (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), - XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { - - /* notify ULD */ - (void) xge_queue_produce_context(hldev->queueh, - XGE_HAL_EVENT_LINK_IS_UP, - hldev); - /* link is up after been enabled */ - return 1; + (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), + XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { + + /* notify ULD */ + (void) xge_queue_produce_context(hldev->queueh, + XGE_HAL_EVENT_LINK_IS_UP, + hldev); + /* link is up after been enabled */ + return 1; } else { - xge_debug_device(XGE_TRACE, "%s", - "fail to transition link to up..."); - return 0; + xge_debug_device(XGE_TRACE, "%s", + "fail to transition link to up..."); + return 0; } } @@ -2580,74 +2551,74 @@ __hal_device_handle_link_down_ind(xge_hal_device_t *hldev) * If the previous link state is not up, return. 
*/ if (hldev->link_state == XGE_HAL_LINK_DOWN) { -#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ - val64 = xge_os_pio_mem_read64( - hldev->pdev, hldev->regh0, - &bar0->misc_int_mask); - val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; - val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_int_mask); - } +#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ + val64 = xge_os_pio_mem_read64( + hldev->pdev, hldev->regh0, + &bar0->misc_int_mask); + val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; + val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->misc_int_mask); + } #endif - xge_debug_device(XGE_TRACE, - "link down indication while link is down, ignoring.."); - return 0; + xge_debug_device(XGE_TRACE, + "link down indication while link is down, ignoring.."); + return 0; } xge_os_mdelay(1); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); /* try to debounce the link only if the adapter is enabled. */ if (val64 & XGE_HAL_ADAPTER_CNTL_EN) { - if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, - (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), - XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { - xge_debug_device(XGE_TRACE, - "link is actually up (possible noisy link?), ignoring."); - return(0); - } + if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, + (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), + XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { + xge_debug_device(XGE_TRACE, + "link is actually up (possible noisy link?), ignoring."); + return(0); + } } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); /* turn off LED */ val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); + &bar0->adapter_control); #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* - * Mask the Link Down interrupt and unmask the Link up - * interrupt - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_int_mask); - val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; - val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->misc_int_mask); - - /* link is down */ - xge_debug_device(XGE_TRACE, "calling link down.."); - hldev->link_state = XGE_HAL_LINK_DOWN; - - /* notify ULD */ - if (g_xge_hal_driver->uld_callbacks.link_down) { - g_xge_hal_driver->uld_callbacks.link_down( - hldev->upper_layer_info); - } - return 1; + /* + * Mask the Link Down interrupt and unmask the Link up + * interrupt + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->misc_int_mask); + val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; + val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->misc_int_mask); + + /* link is down */ + xge_debug_device(XGE_TRACE, "calling link down.."); + hldev->link_state = XGE_HAL_LINK_DOWN; + + /* notify ULD */ + if (g_xge_hal_driver->uld_callbacks.link_down) { + g_xge_hal_driver->uld_callbacks.link_down( + hldev->upper_layer_info); + } + return 1; } #endif /* notify ULD */ (void) 
xge_queue_produce_context(hldev->queueh, - XGE_HAL_EVENT_LINK_IS_DOWN, - hldev); + XGE_HAL_EVENT_LINK_IS_DOWN, + hldev); /* link is down */ return 1; } @@ -2673,39 +2644,39 @@ __hal_device_handle_link_state_change(xge_hal_device_t *hldev) int i = 0; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); /* If the adapter is not enabled but the hal thinks we are in the up * state then transition to the down state. */ if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) && (hldev->link_state == XGE_HAL_LINK_UP) ) { - return(__hal_device_handle_link_down_ind(hldev)); + return(__hal_device_handle_link_down_ind(hldev)); } do { - xge_os_mdelay(1); - (void) xge_hal_device_status(hldev, &hw_status); - hw_link_state = (hw_status & - (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ? - XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP; - - /* check if the current link state is still considered - * to be changed. This way we will make sure that this is - * not a noise which needs to be filtered out */ - if (hldev->link_state == hw_link_state) - break; + xge_os_mdelay(1); + (void) xge_hal_device_status(hldev, &hw_status); + hw_link_state = (hw_status & + (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ? + XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP; + + /* check if the current link state is still considered + * to be changed. This way we will make sure that this is + * not a noise which needs to be filtered out */ + if (hldev->link_state == hw_link_state) + break; } while (i++ < hldev->config.link_valid_cnt); /* If the current link state is same as previous, just return */ if (hldev->link_state == hw_link_state) - retcode = 0; + retcode = 0; /* detected state change */ else if (hw_link_state == XGE_HAL_LINK_UP) - retcode = __hal_device_handle_link_up_ind(hldev); + retcode = __hal_device_handle_link_up_ind(hldev); else - retcode = __hal_device_handle_link_down_ind(hldev); + retcode = __hal_device_handle_link_down_ind(hldev); return retcode; } @@ -2718,15 +2689,15 @@ __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value) hldev->stats.sw_dev_err_stats.serr_cnt++; if (hldev->config.dump_on_serr) { #ifdef XGE_HAL_USE_MGMT_AUX - (void) xge_hal_aux_device_dump(hldev); + (void) xge_hal_aux_device_dump(hldev); #endif } (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev, - 1, sizeof(u64), (void *)&value); + 1, sizeof(u64), (void *)&value); xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, - (unsigned long long) value); + (unsigned long long) value); } /* @@ -2737,19 +2708,19 @@ __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value) { if (hldev->config.dump_on_eccerr) { #ifdef XGE_HAL_USE_MGMT_AUX - (void) xge_hal_aux_device_dump(hldev); + (void) xge_hal_aux_device_dump(hldev); #endif } /* Herc smart enough to recover on its own! 
*/ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - (void) xge_queue_produce(hldev->queueh, - XGE_HAL_EVENT_ECCERR, hldev, - 1, sizeof(u64), (void *)&value); + (void) xge_queue_produce(hldev->queueh, + XGE_HAL_EVENT_ECCERR, hldev, + 1, sizeof(u64), (void *)&value); } - xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, - (unsigned long long) value); + xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, + (unsigned long long) value); } /* @@ -2760,14 +2731,14 @@ __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value) { if (hldev->config.dump_on_parityerr) { #ifdef XGE_HAL_USE_MGMT_AUX - (void) xge_hal_aux_device_dump(hldev); + (void) xge_hal_aux_device_dump(hldev); #endif } (void) xge_queue_produce_context(hldev->queueh, - XGE_HAL_EVENT_PARITYERR, hldev); + XGE_HAL_EVENT_PARITYERR, hldev); - xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, - (unsigned long long) value); + xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, + (unsigned long long) value); } /* @@ -2777,7 +2748,7 @@ static void __hal_device_handle_targetabort(xge_hal_device_t *hldev) { (void) xge_queue_produce_context(hldev->queueh, - XGE_HAL_EVENT_TARGETABORT, hldev); + XGE_HAL_EVENT_TARGETABORT, hldev); } @@ -2798,100 +2769,100 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev) * Feed-back register. */ status = __hal_device_set_swapper(hldev); if (status != XGE_HAL_OK) { - return status; + return status; } /* update the pci mode, frequency, and width */ if (__hal_device_pci_info_get(hldev, &hldev->pci_mode, - &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){ - hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE; - hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; - hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; - /* - * FIXME: this cannot happen. - * But if it happens we cannot continue just like that - */ - xge_debug_device(XGE_ERR, "unable to get pci info"); + &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){ + hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE; + hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; + hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; + /* + * FIXME: this cannot happen. + * But if it happens we cannot continue just like that + */ + xge_debug_device(XGE_ERR, "unable to get pci info"); } if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) || - (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || - (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { - /* PCI optimization: set TxReqTimeOut - * register (0x800+0x120) to 0x1ff or - * something close to this. - * Note: not to be used for PCI-X! */ + (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || + (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { + /* PCI optimization: set TxReqTimeOut + * register (0x800+0x120) to 0x1ff or + * something close to this. + * Note: not to be used for PCI-X! 
*/ - val64 = XGE_HAL_TXREQTO_VAL(0x1FF); - val64 |= XGE_HAL_TXREQTO_EN; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->txreqtimeout); + val64 = XGE_HAL_TXREQTO_VAL(0x1FF); + val64 |= XGE_HAL_TXREQTO_EN; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->txreqtimeout); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, - &bar0->read_retry_delay); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, + &bar0->read_retry_delay); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, - &bar0->write_retry_delay); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, + &bar0->write_retry_delay); - xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); + xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); } if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ || hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) { - /* Optimizing for PCI-X 266/250 */ + /* Optimizing for PCI-X 266/250 */ - val64 = XGE_HAL_TXREQTO_VAL(0x7F); - val64 |= XGE_HAL_TXREQTO_EN; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->txreqtimeout); + val64 = XGE_HAL_TXREQTO_VAL(0x7F); + val64 |= XGE_HAL_TXREQTO_EN; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->txreqtimeout); - xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes"); + xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes"); } if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, - &bar0->read_retry_delay); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, + &bar0->read_retry_delay); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, - &bar0->write_retry_delay); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, + &bar0->write_retry_delay); } /* added this to set the no of bytes used to update lso_bytes_sent returned TxD0 */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->pic_control_2); + &bar0->pic_control_2); val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2); val64 |= XGE_HAL_TXD_WRITE_BC(0x4); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->pic_control_2); + &bar0->pic_control_2); /* added this to clear the EOI_RESET field while leaving XGXS_RESET * in reset, then a 1-second delay */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); + XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); xge_os_mdelay(1000); /* Clear the XGXS_RESET field of the SW_RESET register in order to * release the XGXS from reset. Its reset value is 0xA5; write 0x00 * to activate the XGXS. 
The core requires a minimum 500 us reset.*/ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->sw_reset); + &bar0->sw_reset); xge_os_mdelay(1); /* read registers in all blocks */ (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mac_int_mask); + &bar0->mac_int_mask); (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mc_int_mask); + &bar0->mc_int_mask); (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xgxs_int_mask); + &bar0->xgxs_int_mask); /* set default MTU and steer based on length*/ __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Alway set 22 bytes extra for steering to work if (hldev->config.mac.rmac_bcast_en) { - xge_hal_device_bcast_enable(hldev); + xge_hal_device_bcast_enable(hldev); } else { xge_hal_device_bcast_disable(hldev); } @@ -2908,21 +2879,21 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev) * transaction up to the end of the transaction */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_control); + &bar0->misc_control); val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_control); + val64, &bar0->misc_control); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_control); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->misc_control); - val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT; + val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_control); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->misc_control); } /* @@ -2933,61 +2904,61 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev) * distrupt bimodal behavior. */ if (hldev->config.bimodal_interrupts) { - int i; - - /* force polling_cnt to be "0", otherwise - * IRQ workload statistics will be screwed. This could - * be worked out in TXPIC handler later. */ - hldev->config.isr_polling_cnt = 0; - hldev->config.sched_timer_us = 10000; - - /* disable all TTI < 56 */ - for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { - int j; - if (!hldev->config.fifo.queue[i].configured) - continue; - for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { - if (hldev->config.fifo.queue[i].tti[j].enabled) - hldev->config.fifo.queue[i].tti[j].enabled = 0; - } - } + int i; + + /* force polling_cnt to be "0", otherwise + * IRQ workload statistics will be screwed. This could + * be worked out in TXPIC handler later. 
*/ + hldev->config.isr_polling_cnt = 0; + hldev->config.sched_timer_us = 10000; + + /* disable all TTI < 56 */ + for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { + int j; + if (!hldev->config.fifo.queue[i].configured) + continue; + for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { + if (hldev->config.fifo.queue[i].tti[j].enabled) + hldev->config.fifo.queue[i].tti[j].enabled = 0; + } + } - /* now configure bimodal interrupts */ - __hal_device_bimodal_configure(hldev); + /* now configure bimodal interrupts */ + __hal_device_bimodal_configure(hldev); } status = __hal_device_tti_configure(hldev, 0); if (status != XGE_HAL_OK) - return status; + return status; status = __hal_device_rti_configure(hldev, 0); if (status != XGE_HAL_OK) - return status; + return status; status = __hal_device_rth_it_configure(hldev); if (status != XGE_HAL_OK) - return status; + return status; status = __hal_device_rth_spdm_configure(hldev); if (status != XGE_HAL_OK) - return status; + return status; status = __hal_device_rts_mac_configure(hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed "); - return status; + xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed "); + return status; } status = __hal_device_rts_port_configure(hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed "); - return status; + xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed "); + return status; } status = __hal_device_rts_qos_configure(hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed "); - return status; + xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed "); + return status; } __hal_device_pause_frames_configure(hldev); @@ -3000,38 +2971,47 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev) /* SXE-008 Transmit DMA arbitration issue */ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && hldev->revision < 4) { - xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, - XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, - &bar0->pcc_enable); + xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, + XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, + &bar0->pcc_enable); + } +#if 0 // Removing temporarily as FreeBSD is seeing lower performance + // attributable to this fix. 
+ /* SXE-2-010 */ + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + /* Turn off the ECC error reporting for RLDRAM interface */ + if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK) + return status; } +#endif __hal_fifo_hw_initialize(hldev); __hal_ring_hw_initialize(hldev); if (__hal_device_wait_quiescent(hldev, &val64)) { - return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, - XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, - XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); - return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, + XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); + return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent", - (unsigned long long)(ulong_t)hldev); + (unsigned long long)(ulong_t)hldev); if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX || hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) { - /* - * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL - * is disabled. - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->pic_control); - val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->pic_control); + /* + * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL + * is disabled. + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->pic_control); + val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->pic_control); } hldev->hw_is_initialized = 1; @@ -3055,121 +3035,121 @@ __hal_device_reset(xge_hal_device_t *hldev) u64 val64, rawval = 0ULL; if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - if ( hldev->bar2 ) { - u64 *msix_vetor_table = (u64 *)hldev->bar2; - - // 2 64bit words for each entry - for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; - i++) { - hldev->msix_vector_table[i] = - xge_os_pio_mem_read64(hldev->pdev, - hldev->regh2, &msix_vetor_table[i]); - } - } - } + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + if ( hldev->bar2 ) { + u64 *msix_vetor_table = (u64 *)hldev->bar2; + + // 2 64bit words for each entry + for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; + i++) { + hldev->msix_vector_table[i] = + xge_os_pio_mem_read64(hldev->pdev, + hldev->regh2, &msix_vetor_table[i]); + } + } + } } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->pif_rd_swapper_fb); swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB); if (swap_done) { - __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset); + __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, + (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset); } else { - u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32); + u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32); #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN) - /* swap it */ - val = (((val & (u32)0x000000ffUL) << 24) | - ((val & (u32)0x0000ff00UL) << 8) | - ((val & (u32)0x00ff0000UL) >> 8) | - ((val & (u32)0xff000000UL) >> 24)); + /* swap it */ + val = (((val & (u32)0x000000ffUL) << 24) | + ((val & (u32)0x0000ff00UL) << 8) | + ((val & (u32)0x00ff0000UL) >> 8) | + ((val & 
(u32)0xff000000UL) >> 24)); #endif - xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val, - &bar0->sw_reset); + xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val, + &bar0->sw_reset); } pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? - XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; + XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; xge_os_mdelay(20); /* Wait for 20 ms after reset */ { - /* Poll for no more than 1 second */ - for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++) - { - for (j = 0; j < pcisize; j++) { - xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, - *((u32*)&hldev->pci_config_space + j)); - } + /* Poll for no more than 1 second */ + for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++) + { + for (j = 0; j < pcisize; j++) { + xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, + *((u32*)&hldev->pci_config_space + j)); + } - xge_os_pci_read16(hldev->pdev,hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, device_id), - &hldev->device_id); + xge_os_pci_read16(hldev->pdev,hldev->cfgh, + xge_offsetof(xge_hal_pci_config_le_t, device_id), + &hldev->device_id); - if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN) - break; - xge_os_mdelay(20); - } + if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN) + break; + xge_os_mdelay(20); + } } if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN) { - xge_debug_device(XGE_ERR, "device reset failed"); - return XGE_HAL_ERR_RESET_FAILED; + xge_debug_device(XGE_ERR, "device reset failed"); + return XGE_HAL_ERR_RESET_FAILED; } if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - int cnt = 0; - - rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC; - pcisize = XGE_HAL_PCISIZE_HERC; - xge_os_mdelay(1); - do { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->sw_reset); - if (val64 != rawval) { - break; - } - cnt++; - xge_os_mdelay(1); /* Wait for 1ms before retry */ - } while(cnt < 20); + int cnt = 0; + + rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC; + pcisize = XGE_HAL_PCISIZE_HERC; + xge_os_mdelay(1); + do { + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->sw_reset); + if (val64 != rawval) { + break; + } + cnt++; + xge_os_mdelay(1); /* Wait for 1ms before retry */ + } while(cnt < 20); } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA; - pcisize = XGE_HAL_PCISIZE_XENA; - xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS); + rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA; + pcisize = XGE_HAL_PCISIZE_XENA; + xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS); } /* Restore MSI-X vector table */ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - if ( hldev->bar2 ) { - /* - * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 ) - * 98: PBATable 00000404 ( BIR:4 Offset:0x400 ) - */ - u64 *msix_vetor_table = (u64 *)hldev->bar2; - - /* 2 64bit words for each entry */ - for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; - i++) { - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh2, - hldev->msix_vector_table[i], - &msix_vetor_table[i]); - } - } - } + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + if ( hldev->bar2 ) { + /* + * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 ) + * 98: PBATable 00000404 ( BIR:4 Offset:0x400 ) + */ + u64 *msix_vetor_table = (u64 *)hldev->bar2; + + /* 2 64bit words for each entry */ + for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; + i++) { + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh2, + hldev->msix_vector_table[i], + 
&msix_vetor_table[i]); + } + } + } } hldev->link_state = XGE_HAL_LINK_DOWN; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->sw_reset); + &bar0->sw_reset); if (val64 != rawval) { - xge_debug_device(XGE_ERR, "device has not been reset " - "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT, - (unsigned long long)val64, (unsigned long long)rawval); + xge_debug_device(XGE_ERR, "device has not been reset " + "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT, + (unsigned long long)val64, (unsigned long long)rawval); return XGE_HAL_ERR_RESET_FAILED; } @@ -3182,7 +3162,7 @@ __hal_device_reset(xge_hal_device_t *hldev) * @hldev: HAL device handle. * * Returns: one of the xge_hal_status_e{} enumerated types. - * XGE_HAL_OK - for success. + * XGE_HAL_OK - for success. * XGE_HAL_ERR_CRITICAL - when encounters critical error. */ static xge_hal_status_e @@ -3195,19 +3175,19 @@ __hal_device_poll(xge_hal_device_t *hldev) /* Handling SERR errors by forcing a H/W reset. */ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->serr_source); + &bar0->serr_source); if (err_reg & XGE_HAL_SERR_SOURCE_ANY) { - __hal_device_handle_serr(hldev, "serr_source", err_reg); - return XGE_HAL_ERR_CRITICAL; + __hal_device_handle_serr(hldev, "serr_source", err_reg); + return XGE_HAL_ERR_CRITICAL; } err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_int_reg); + &bar0->misc_int_reg); if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) { - hldev->stats.sw_dev_err_stats.parity_err_cnt++; - __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); - return XGE_HAL_ERR_CRITICAL; + hldev->stats.sw_dev_err_stats.parity_err_cnt++; + __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); + return XGE_HAL_ERR_CRITICAL; } #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR @@ -3215,54 +3195,54 @@ __hal_device_poll(xge_hal_device_t *hldev) #endif { - /* Handling link status change error Intr */ - err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mac_rmac_err_reg); - if (__hal_device_handle_link_state_change(hldev)) - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err_reg, &bar0->mac_rmac_err_reg); + /* Handling link status change error Intr */ + err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->mac_rmac_err_reg); + if (__hal_device_handle_link_state_change(hldev)) + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err_reg, &bar0->mac_rmac_err_reg); } if (hldev->inject_serr != 0) { - err_reg = hldev->inject_serr; - hldev->inject_serr = 0; - __hal_device_handle_serr(hldev, "inject_serr", err_reg); - return XGE_HAL_ERR_CRITICAL; - } - - if (hldev->inject_ecc != 0) { - err_reg = hldev->inject_ecc; - hldev->inject_ecc = 0; - hldev->stats.sw_dev_err_stats.ecc_err_cnt++; - __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); - return XGE_HAL_ERR_CRITICAL; - } + err_reg = hldev->inject_serr; + hldev->inject_serr = 0; + __hal_device_handle_serr(hldev, "inject_serr", err_reg); + return XGE_HAL_ERR_CRITICAL; + } + + if (hldev->inject_ecc != 0) { + err_reg = hldev->inject_ecc; + hldev->inject_ecc = 0; + hldev->stats.sw_dev_err_stats.ecc_err_cnt++; + __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); + return XGE_HAL_ERR_CRITICAL; + } if (hldev->inject_bad_tcode != 0) { - u8 t_code = hldev->inject_bad_tcode; - xge_hal_channel_t channel; - xge_hal_fifo_txd_t txd; - xge_hal_ring_rxd_1_t rxd; + u8 t_code = hldev->inject_bad_tcode; + xge_hal_channel_t channel; + xge_hal_fifo_txd_t txd; + xge_hal_ring_rxd_1_t rxd; - channel.devh = hldev; + channel.devh = 
hldev; - if (hldev->inject_bad_tcode_for_chan_type == - XGE_HAL_CHANNEL_TYPE_FIFO) { - channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; + if (hldev->inject_bad_tcode_for_chan_type == + XGE_HAL_CHANNEL_TYPE_FIFO) { + channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; - } else { - channel.type = XGE_HAL_CHANNEL_TYPE_RING; - } + } else { + channel.type = XGE_HAL_CHANNEL_TYPE_RING; + } - hldev->inject_bad_tcode = 0; + hldev->inject_bad_tcode = 0; - if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) - return xge_hal_device_handle_tcode(&channel, &txd, - t_code); - else - return xge_hal_device_handle_tcode(&channel, &rxd, - t_code); - } + if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) + return xge_hal_device_handle_tcode(&channel, &txd, + t_code); + else + return xge_hal_device_handle_tcode(&channel, &rxd, + t_code); + } return XGE_HAL_OK; } @@ -3278,23 +3258,23 @@ __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status) { if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && hldev->revision < 4) { - /* - * For Xena 1,2,3 we enable only 4 PCCs Due to - * SXE-008 (Transmit DMA arbitration issue) - */ - if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) - != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { - xge_debug_device(XGE_TRACE, "%s", - "PCC is not IDLE after adapter enabled!"); - return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; - } + /* + * For Xena 1,2,3 we enable only 4 PCCs Due to + * SXE-008 (Transmit DMA arbitration issue) + */ + if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) + != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { + xge_debug_device(XGE_TRACE, "%s", + "PCC is not IDLE after adapter enabled!"); + return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + } } else { - if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != - XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { - xge_debug_device(XGE_TRACE, "%s", - "PCC is not IDLE after adapter enabled!"); - return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; - } + if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != + XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { + xge_debug_device(XGE_TRACE, "%s", + "PCC is not IDLE after adapter enabled!"); + return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + } } return XGE_HAL_OK; } @@ -3306,9 +3286,9 @@ __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg; int iwl_cnt, i; -#define _HIST_SIZE 50 /* 0.5 sec history */ -#define _HIST_ADJ_TIMER 1 -#define _STEP 2 +#define _HIST_SIZE 50 /* 0.5 sec history */ +#define _HIST_ADJ_TIMER 1 +#define _STEP 2 static int bytes_avg_history[_HIST_SIZE] = {0}; static int d_avg_history[_HIST_SIZE] = {0}; @@ -3326,20 +3306,20 @@ __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) * bimodal timer tick. */ d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt - - hldev->bimodal_intr_cnt; + hldev->bimodal_intr_cnt; /* advance bimodal interrupt counter */ hldev->bimodal_intr_cnt = - hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; + hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; /* * iwl_cnt - how many interrupts we've got since last * bimodal timer tick. */ iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ? - hldev->irq_workload_rxcnt[ring_no] : 1); + hldev->irq_workload_rxcnt[ring_no] : 1); iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ? 
- hldev->irq_workload_txcnt[ring_no] : 1); + hldev->irq_workload_txcnt[ring_no] : 1); iwl_cnt = iwl_rxcnt + iwl_txcnt; iwl_cnt = iwl_cnt; /* just to remove the lint warning */ @@ -3366,28 +3346,28 @@ __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) * last bimodal timer tick. i.e. avarage frame size. */ len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] / - (hldev->irq_workload_rxd[ring_no] ? - hldev->irq_workload_rxd[ring_no] : 1); + (hldev->irq_workload_rxd[ring_no] ? + hldev->irq_workload_rxd[ring_no] : 1); len_txavg = 1 + hldev->irq_workload_txlen[ring_no] / - (hldev->irq_workload_txd[ring_no] ? - hldev->irq_workload_txd[ring_no] : 1); + (hldev->irq_workload_txd[ring_no] ? + hldev->irq_workload_txd[ring_no] : 1); len_avg = len_rxavg + len_txavg; if (len_avg < 60) - len_avg = 60; + len_avg = 60; /* align on low boundary */ if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us) - tval = hldev->config.bimodal_timer_lo_us; + tval = hldev->config.bimodal_timer_lo_us; /* reset faster */ if (iwl_avg == 1) { - tval = hldev->config.bimodal_timer_lo_us; - /* reset history */ - for (i = 0; i < _HIST_SIZE; i++) - bytes_avg_history[i] = d_avg_history[i] = 0; - history_idx = 0; - pstep = 1; - hist_adj_timer = 0; + tval = hldev->config.bimodal_timer_lo_us; + /* reset history */ + for (i = 0; i < _HIST_SIZE; i++) + bytes_avg_history[i] = d_avg_history[i] = 0; + history_idx = 0; + pstep = 1; + hist_adj_timer = 0; } /* always try to ajust timer to the best throughput value */ @@ -3398,40 +3378,40 @@ __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) history_idx++; d_hist = bytes_hist = 0; for (i = 0; i < _HIST_SIZE; i++) { - /* do not re-configure until history is gathered */ - if (!bytes_avg_history[i]) { - tval = hldev->config.bimodal_timer_lo_us; - goto _end; - } - bytes_hist += bytes_avg_history[i]; - d_hist += d_avg_history[i]; + /* do not re-configure until history is gathered */ + if (!bytes_avg_history[i]) { + tval = hldev->config.bimodal_timer_lo_us; + goto _end; + } + bytes_hist += bytes_avg_history[i]; + d_hist += d_avg_history[i]; } bytes_hist /= _HIST_SIZE; d_hist /= _HIST_SIZE; -// xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d", -// d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg, -// d_hist*bytes_hist, pstep); +// xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d", +// d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg, +// d_hist*bytes_hist, pstep); /* make an adaptive step */ if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) { - pstep = !pstep; - hist_adj_timer = 0; + pstep = !pstep; + hist_adj_timer = 0; } if (pstep && (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) { - tval += _STEP; - hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++; + tval += _STEP; + hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++; } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) { - tval -= _STEP; - hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++; + tval -= _STEP; + hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++; } /* enable TTI range A for better latencies */ hldev->bimodal_urange_a_en = 0; if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2) - hldev->bimodal_urange_a_en = 1; + hldev->bimodal_urange_a_en = 1; _end: /* reset workload statistics counters */ @@ -3457,29 +3437,29 @@ __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no) /* urange_a adaptive coalescing */ if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) { - 
if (ic > hldev->rxufca_intr_thres) { - if (ufc < hldev->config.rxufca_hi_lim) { - ufc += 1; - for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) - hldev->config.ring.queue[i].rti.ufc_a = ufc; - (void) __hal_device_rti_configure(hldev, 1); - hldev->stats.sw_dev_info_stats. - rxufca_hi_adjust_cnt++; - } - hldev->rxufca_intr_thres = ic + - hldev->config.rxufca_intr_thres; /* def: 30 */ - } else { - if (ufc > hldev->config.rxufca_lo_lim) { - ufc -= 1; - for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) - hldev->config.ring.queue[i].rti.ufc_a = ufc; - (void) __hal_device_rti_configure(hldev, 1); - hldev->stats.sw_dev_info_stats. - rxufca_lo_adjust_cnt++; - } - } - hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + - hldev->config.rxufca_lbolt_period; + if (ic > hldev->rxufca_intr_thres) { + if (ufc < hldev->config.rxufca_hi_lim) { + ufc += 1; + for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) + hldev->config.ring.queue[i].rti.ufc_a = ufc; + (void) __hal_device_rti_configure(hldev, 1); + hldev->stats.sw_dev_info_stats. + rxufca_hi_adjust_cnt++; + } + hldev->rxufca_intr_thres = ic + + hldev->config.rxufca_intr_thres; /* def: 30 */ + } else { + if (ufc > hldev->config.rxufca_lo_lim) { + ufc -= 1; + for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) + hldev->config.ring.queue[i].rti.ufc_a = ufc; + (void) __hal_device_rti_configure(hldev, 1); + hldev->stats.sw_dev_info_stats. + rxufca_lo_adjust_cnt++; + } + } + hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + + hldev->config.rxufca_lbolt_period; } hldev->rxufca_lbolt++; } @@ -3497,14 +3477,14 @@ __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason) u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mc_int_status); + &isrbar0->mc_int_status); if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT)) - return XGE_HAL_OK; + return XGE_HAL_OK; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mc_err_reg); + &isrbar0->mc_err_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->mc_err_reg); + val64, &isrbar0->mc_err_reg); if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L || val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U || @@ -3515,8 +3495,8 @@ __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason) val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U || val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L || val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) { - hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++; - hldev->stats.sw_dev_err_stats.ecc_err_cnt++; + hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++; + hldev->stats.sw_dev_err_stats.ecc_err_cnt++; } if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L || @@ -3528,19 +3508,19 @@ __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason) val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U || val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L || val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) { - hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++; - hldev->stats.sw_dev_err_stats.ecc_err_cnt++; + hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++; + hldev->stats.sw_dev_err_stats.ecc_err_cnt++; } if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) { - hldev->stats.sw_dev_err_stats.sm_err_cnt++; + hldev->stats.sw_dev_err_stats.sm_err_cnt++; } /* those two should result in device reset */ if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 || val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) { - __hal_device_handle_eccerr(hldev, "mc_err_reg", val64); - return XGE_HAL_ERR_CRITICAL; + __hal_device_handle_eccerr(hldev, "mc_err_reg", val64); + return XGE_HAL_ERR_CRITICAL; } return XGE_HAL_OK; @@ -3559,66 +3539,66 @@ 
__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason) u64 val64; if (reason & XGE_HAL_PIC_INT_FLSH) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->flsh_int_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->flsh_int_reg); - /* FIXME: handle register */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->flsh_int_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &isrbar0->flsh_int_reg); + /* FIXME: handle register */ } if (reason & XGE_HAL_PIC_INT_MDIO) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mdio_int_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->mdio_int_reg); - /* FIXME: handle register */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->mdio_int_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &isrbar0->mdio_int_reg); + /* FIXME: handle register */ } if (reason & XGE_HAL_PIC_INT_IIC) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->iic_int_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->iic_int_reg); - /* FIXME: handle register */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->iic_int_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &isrbar0->iic_int_reg); + /* FIXME: handle register */ } if (reason & XGE_HAL_PIC_INT_MISC) { - val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &isrbar0->misc_int_reg); + val64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &isrbar0->misc_int_reg); #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* Check for Link interrupts. If both Link Up/Down - * bits are set, clear both and check adapter status - */ - if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) && - (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) { - u64 temp64; - - xge_debug_device(XGE_TRACE, - "both link up and link down detected "XGE_OS_LLXFMT, - (unsigned long long)val64); - - temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT | - XGE_HAL_MISC_INT_REG_LINK_UP_INT); - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, temp64, - &isrbar0->misc_int_reg); - } - else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) { - xge_debug_device(XGE_TRACE, - "link up call request, misc_int "XGE_OS_LLXFMT, - (unsigned long long)val64); - __hal_device_handle_link_up_ind(hldev); - } - else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){ - xge_debug_device(XGE_TRACE, - "link down request, misc_int "XGE_OS_LLXFMT, - (unsigned long long)val64); - __hal_device_handle_link_down_ind(hldev); - } - } else + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + /* Check for Link interrupts. 
If both Link Up/Down + * bits are set, clear both and check adapter status + */ + if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) && + (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) { + u64 temp64; + + xge_debug_device(XGE_TRACE, + "both link up and link down detected "XGE_OS_LLXFMT, + (unsigned long long)val64); + + temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT | + XGE_HAL_MISC_INT_REG_LINK_UP_INT); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, temp64, + &isrbar0->misc_int_reg); + } + else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) { + xge_debug_device(XGE_TRACE, + "link up call request, misc_int "XGE_OS_LLXFMT, + (unsigned long long)val64); + __hal_device_handle_link_up_ind(hldev); + } + else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){ + xge_debug_device(XGE_TRACE, + "link down request, misc_int "XGE_OS_LLXFMT, + (unsigned long long)val64); + __hal_device_handle_link_down_ind(hldev); + } + } else #endif - { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->misc_int_reg); - } + { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &isrbar0->misc_int_reg); + } } return XGE_HAL_OK; @@ -3638,68 +3618,68 @@ __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason) volatile u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->pic_int_status); + &isrbar0->pic_int_status); if ( val64 & (XGE_HAL_PIC_INT_FLSH | - XGE_HAL_PIC_INT_MDIO | - XGE_HAL_PIC_INT_IIC | - XGE_HAL_PIC_INT_MISC) ) { - status = __hal_device_handle_pic(hldev, val64); - xge_os_wmb(); + XGE_HAL_PIC_INT_MDIO | + XGE_HAL_PIC_INT_IIC | + XGE_HAL_PIC_INT_MISC) ) { + status = __hal_device_handle_pic(hldev, val64); + xge_os_wmb(); } if (!(val64 & XGE_HAL_PIC_INT_TX)) - return status; + return status; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->txpic_int_reg); + &isrbar0->txpic_int_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->txpic_int_reg); + val64, &isrbar0->txpic_int_reg); xge_os_wmb(); if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) { - int i; - - if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL) - g_xge_hal_driver->uld_callbacks.sched_timer( - hldev, hldev->upper_layer_info); - /* - * This feature implements adaptive receive interrupt - * coalecing. It is disabled by default. To enable it - * set hldev->config.rxufca_lo_lim to be not equal to - * hldev->config.rxufca_hi_lim. - * - * We are using HW timer for this feature, so - * use needs to configure hldev->config.rxufca_lbolt_period - * which is essentially a time slice of timer. - * - * For those who familiar with Linux, lbolt means jiffies - * of this timer. I.e. timer tick. - */ - if (hldev->config.rxufca_lo_lim != - hldev->config.rxufca_hi_lim && - hldev->config.rxufca_lo_lim != 0) { - for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!hldev->config.ring.queue[i].configured) - continue; - if (hldev->config.ring.queue[i].rti.urange_a) - __hal_update_rxufca(hldev, i); - } - } - - /* - * This feature implements adaptive TTI timer re-calculation - * based on host utilization, number of interrupt processed, - * number of RXD per tick and avarage length of packets per - * tick. 
- */ - if (hldev->config.bimodal_interrupts) { - for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!hldev->config.ring.queue[i].configured) - continue; - if (hldev->bimodal_tti[i].enabled) - __hal_update_bimodal(hldev, i); - } - } + int i; + + if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL) + g_xge_hal_driver->uld_callbacks.sched_timer( + hldev, hldev->upper_layer_info); + /* + * This feature implements adaptive receive interrupt + * coalecing. It is disabled by default. To enable it + * set hldev->config.rxufca_lo_lim to be not equal to + * hldev->config.rxufca_hi_lim. + * + * We are using HW timer for this feature, so + * use needs to configure hldev->config.rxufca_lbolt_period + * which is essentially a time slice of timer. + * + * For those who familiar with Linux, lbolt means jiffies + * of this timer. I.e. timer tick. + */ + if (hldev->config.rxufca_lo_lim != + hldev->config.rxufca_hi_lim && + hldev->config.rxufca_lo_lim != 0) { + for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { + if (!hldev->config.ring.queue[i].configured) + continue; + if (hldev->config.ring.queue[i].rti.urange_a) + __hal_update_rxufca(hldev, i); + } + } + + /* + * This feature implements adaptive TTI timer re-calculation + * based on host utilization, number of interrupt processed, + * number of RXD per tick and avarage length of packets per + * tick. + */ + if (hldev->config.bimodal_interrupts) { + for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { + if (!hldev->config.ring.queue[i].configured) + continue; + if (hldev->bimodal_tti[i].enabled) + __hal_update_bimodal(hldev, i); + } + } } return XGE_HAL_OK; @@ -3718,84 +3698,84 @@ __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason) u64 val64, temp64, err; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->txdma_int_status); + &isrbar0->txdma_int_status); if (val64 & XGE_HAL_TXDMA_PFC_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->pfc_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->pfc_err_reg); - hldev->stats.sw_dev_info_stats.pfc_err_cnt++; - temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM - |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR - |XGE_HAL_PFC_PCIX_ERR; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->pfc_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->pfc_err_reg); + hldev->stats.sw_dev_info_stats.pfc_err_cnt++; + temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM + |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR + |XGE_HAL_PFC_PCIX_ERR; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_TDA_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->tda_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->tda_err_reg); - hldev->stats.sw_dev_info_stats.tda_err_cnt++; - temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM - |XGE_HAL_TDA_SM1_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->tda_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->tda_err_reg); + hldev->stats.sw_dev_info_stats.tda_err_cnt++; + temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM + |XGE_HAL_TDA_SM1_ERR_ALARM; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_PCC_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->pcc_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, 
&isrbar0->pcc_err_reg); - hldev->stats.sw_dev_info_stats.pcc_err_cnt++; - temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR - |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM - |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR - |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR - |XGE_HAL_PCC_7_LSO_OV_ERR; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->pcc_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->pcc_err_reg); + hldev->stats.sw_dev_info_stats.pcc_err_cnt++; + temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR + |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM + |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR + |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR + |XGE_HAL_PCC_7_LSO_OV_ERR; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_TTI_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->tti_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->tti_err_reg); - hldev->stats.sw_dev_info_stats.tti_err_cnt++; - temp64 = XGE_HAL_TTI_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->tti_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->tti_err_reg); + hldev->stats.sw_dev_info_stats.tti_err_cnt++; + temp64 = XGE_HAL_TTI_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_LSO_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->lso_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->lso_err_reg); - hldev->stats.sw_dev_info_stats.lso_err_cnt++; - temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT - |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->lso_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->lso_err_reg); + hldev->stats.sw_dev_info_stats.lso_err_cnt++; + temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT + |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_TPA_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->tpa_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->tpa_err_reg); - hldev->stats.sw_dev_info_stats.tpa_err_cnt++; - temp64 = XGE_HAL_TPA_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->tpa_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->tpa_err_reg); + hldev->stats.sw_dev_info_stats.tpa_err_cnt++; + temp64 = XGE_HAL_TPA_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_TXDMA_SM_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->sm_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->sm_err_reg); - hldev->stats.sw_dev_info_stats.sm_err_cnt++; - temp64 = XGE_HAL_SM_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->sm_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->sm_err_reg); + hldev->stats.sw_dev_info_stats.sm_err_cnt++; + temp64 = XGE_HAL_SM_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } return XGE_HAL_OK; @@ -3819,20 +3799,20 @@ __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason) u64 val64, 
temp64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mac_int_status); + &isrbar0->mac_int_status); if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT)) - return XGE_HAL_OK; + return XGE_HAL_OK; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mac_tmac_err_reg); + &isrbar0->mac_tmac_err_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->mac_tmac_err_reg); + val64, &isrbar0->mac_tmac_err_reg); hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++; temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR; if (val64 & temp64) { - xge_hal_device_reset(hldev); - xge_hal_device_enable(hldev); - xge_hal_device_intr_enable(hldev); + xge_hal_device_reset(hldev); + xge_hal_device_enable(hldev); + xge_hal_device_intr_enable(hldev); } return XGE_HAL_OK; @@ -3851,20 +3831,20 @@ __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason) u64 val64, temp64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->xgxs_int_status); + &isrbar0->xgxs_int_status); if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS)) - return XGE_HAL_OK; + return XGE_HAL_OK; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->xgxs_txgxs_err_reg); + &isrbar0->xgxs_txgxs_err_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->xgxs_txgxs_err_reg); + val64, &isrbar0->xgxs_txgxs_err_reg); hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++; temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR; if (val64 & temp64) { - xge_hal_device_reset(hldev); - xge_hal_device_enable(hldev); - xge_hal_device_intr_enable(hldev); + xge_hal_device_reset(hldev); + xge_hal_device_enable(hldev); + xge_hal_device_intr_enable(hldev); } return XGE_HAL_OK; @@ -3896,51 +3876,51 @@ __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason) u64 val64, err, temp64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->rxdma_int_status); + &isrbar0->rxdma_int_status); if (val64 & XGE_HAL_RXDMA_RC_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->rc_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->rc_err_reg); - hldev->stats.sw_dev_info_stats.rc_err_cnt++; - temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR - |XGE_HAL_RC_PRCn_SM_ERR_ALARM - |XGE_HAL_RC_FTC_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->rc_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->rc_err_reg); + hldev->stats.sw_dev_info_stats.rc_err_cnt++; + temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR + |XGE_HAL_RC_PRCn_SM_ERR_ALARM + |XGE_HAL_RC_FTC_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_RXDMA_RPA_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->rpa_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->rpa_err_reg); - hldev->stats.sw_dev_info_stats.rpa_err_cnt++; - temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->rpa_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->rpa_err_reg); + hldev->stats.sw_dev_info_stats.rpa_err_cnt++; + temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_RXDMA_RDA_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->rda_err_reg); - 
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->rda_err_reg); - hldev->stats.sw_dev_info_stats.rda_err_cnt++; - temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR - |XGE_HAL_RDA_FRM_ECC_DB_N_AERR - |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM - |XGE_HAL_RDA_RXD_ECC_DB_SERR; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->rda_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->rda_err_reg); + hldev->stats.sw_dev_info_stats.rda_err_cnt++; + temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR + |XGE_HAL_RDA_FRM_ECC_DB_N_AERR + |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM + |XGE_HAL_RDA_RXD_ECC_DB_SERR; + if (val64 & temp64) + goto reset; } if (val64 & XGE_HAL_RXDMA_RTI_INT) { - err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->rti_err_reg); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - err, &isrbar0->rti_err_reg); - hldev->stats.sw_dev_info_stats.rti_err_cnt++; - temp64 = XGE_HAL_RTI_SM_ERR_ALARM; - if (val64 & temp64) - goto reset; + err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &isrbar0->rti_err_reg); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + err, &isrbar0->rti_err_reg); + hldev->stats.sw_dev_info_stats.rti_err_cnt++; + temp64 = XGE_HAL_RTI_SM_ERR_ALARM; + if (val64 & temp64) + goto reset; } return XGE_HAL_OK; @@ -3964,20 +3944,20 @@ __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason) u64 val64, temp64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mac_int_status); + &isrbar0->mac_int_status); if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT)) - return XGE_HAL_OK; + return XGE_HAL_OK; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->mac_rmac_err_reg); + &isrbar0->mac_rmac_err_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->mac_rmac_err_reg); + val64, &isrbar0->mac_rmac_err_reg); hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++; temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR; if (val64 & temp64) { - xge_hal_device_reset(hldev); - xge_hal_device_enable(hldev); - xge_hal_device_intr_enable(hldev); + xge_hal_device_reset(hldev); + xge_hal_device_enable(hldev); + xge_hal_device_intr_enable(hldev); } return XGE_HAL_OK; @@ -3996,20 +3976,20 @@ __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason) u64 val64, temp64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->xgxs_int_status); + &isrbar0->xgxs_int_status); if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS)) - return XGE_HAL_OK; + return XGE_HAL_OK; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &isrbar0->xgxs_rxgxs_err_reg); + &isrbar0->xgxs_rxgxs_err_reg); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &isrbar0->xgxs_rxgxs_err_reg); + val64, &isrbar0->xgxs_rxgxs_err_reg); hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++; temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR; if (val64 & temp64) { - xge_hal_device_reset(hldev); - xge_hal_device_enable(hldev); - xge_hal_device_intr_enable(hldev); + xge_hal_device_reset(hldev); + xge_hal_device_enable(hldev); + xge_hal_device_intr_enable(hldev); } return XGE_HAL_OK; @@ -4037,12 +4017,12 @@ xge_hal_device_enable(xge_hal_device_t *hldev) int i, j; if (!hldev->hw_is_initialized) { - xge_hal_status_e status; + xge_hal_status_e status; - status = __hal_device_hw_initialize(hldev); - if (status != XGE_HAL_OK) { - return status; - } + status = __hal_device_hw_initialize(hldev); + if (status != XGE_HAL_OK) { + return 
status; + } } /* @@ -4056,58 +4036,58 @@ xge_hal_device_enable(xge_hal_device_t *hldev) __hal_device_bus_master_enable(hldev); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* - * Configure the link stability period. - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_control); - if (hldev->config.link_stability_period != - XGE_HAL_DEFAULT_USE_HARDCODE) { - - val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( - hldev->config.link_stability_period); - } else { - /* - * Use the link stability period 1 ms as default - */ - val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( - XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD); - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_control); - - /* - * Clearing any possible Link up/down interrupts that - * could have popped up just before Enabling the card. - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->misc_int_reg); - if (val64) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->misc_int_reg); - xge_debug_device(XGE_TRACE, "%s","link state cleared"); - } + /* + * Configure the link stability period. + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->misc_control); + if (hldev->config.link_stability_period != + XGE_HAL_DEFAULT_USE_HARDCODE) { + + val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( + hldev->config.link_stability_period); + } else { + /* + * Use the link stability period 1 ms as default + */ + val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( + XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD); + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->misc_control); + + /* + * Clearing any possible Link up/down interrupts that + * could have popped up just before Enabling the card. + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->misc_int_reg); + if (val64) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->misc_int_reg); + xge_debug_device(XGE_TRACE, "%s","link state cleared"); + } } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - /* - * Clearing any possible Link state change interrupts that - * could have popped up just before Enabling the card. - */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mac_rmac_err_reg); - if (val64) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->mac_rmac_err_reg); - xge_debug_device(XGE_TRACE, "%s", "link state cleared"); - } + /* + * Clearing any possible Link state change interrupts that + * could have popped up just before Enabling the card. + */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->mac_rmac_err_reg); + if (val64) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->mac_rmac_err_reg); + xge_debug_device(XGE_TRACE, "%s", "link state cleared"); + } } if (__hal_device_wait_quiescent(hldev, &val64)) { - return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } /* Enabling Laser. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); val64 |= XGE_HAL_ADAPTER_EOI_TX_ON; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); @@ -4122,21 +4102,21 @@ xge_hal_device_enable(xge_hal_device_t *hldev) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_status); if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) { + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); } else { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON | - XGE_HAL_ADAPTER_LED_ON ); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON | + XGE_HAL_ADAPTER_LED_ON ); } val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */ val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); + &bar0->adapter_control); /* We spin here waiting for the Link to come up. * This is the fix for the Link being unstable after the reset. */ @@ -4144,73 +4124,73 @@ xge_hal_device_enable(xge_hal_device_t *hldev) j = 0; do { - adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_status); - - /* Read the adapter control register for Adapter_enable bit */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) && - (val64 & XGE_HAL_ADAPTER_CNTL_EN)) { - j++; - if (j >= hldev->config.link_valid_cnt) { - if (xge_hal_device_status(hldev, &adp_status) == - XGE_HAL_OK) { - if (__hal_verify_pcc_idle(hldev, - adp_status) != XGE_HAL_OK) { - return - XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; - } - xge_debug_device(XGE_TRACE, - "adp_status: "XGE_OS_LLXFMT - ", link is up on " - "adapter enable!", - (unsigned long long)adp_status); - val64 = xge_os_pio_mem_read64( - hldev->pdev, - hldev->regh0, - &bar0->adapter_control); - val64 = val64| - (XGE_HAL_ADAPTER_EOI_TX_ON | - XGE_HAL_ADAPTER_LED_ON ); - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, val64, - &bar0->adapter_control); - xge_os_mdelay(1); - - val64 = xge_os_pio_mem_read64( - hldev->pdev, - hldev->regh0, - &bar0->adapter_control); - break; /* out of for loop */ - } else { - return - XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; - } - } - } else { - j = 0; /* Reset the count */ - /* Turn on the Laser */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON; - xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, - val64, &bar0->adapter_control); - - xge_os_mdelay(1); - - /* Now re-enable it as due to noise, hardware - * turned it off */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - val64 |= XGE_HAL_ADAPTER_CNTL_EN; - val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); - } - xge_os_mdelay(1); /* Sleep for 1 msec */ - i++; + adp_status = xge_os_pio_mem_read64(hldev->pdev, 
hldev->regh0, + &bar0->adapter_status); + + /* Read the adapter control register for Adapter_enable bit */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) && + (val64 & XGE_HAL_ADAPTER_CNTL_EN)) { + j++; + if (j >= hldev->config.link_valid_cnt) { + if (xge_hal_device_status(hldev, &adp_status) == + XGE_HAL_OK) { + if (__hal_verify_pcc_idle(hldev, + adp_status) != XGE_HAL_OK) { + return + XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + } + xge_debug_device(XGE_TRACE, + "adp_status: "XGE_OS_LLXFMT + ", link is up on " + "adapter enable!", + (unsigned long long)adp_status); + val64 = xge_os_pio_mem_read64( + hldev->pdev, + hldev->regh0, + &bar0->adapter_control); + val64 = val64| + (XGE_HAL_ADAPTER_EOI_TX_ON | + XGE_HAL_ADAPTER_LED_ON ); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, val64, + &bar0->adapter_control); + xge_os_mdelay(1); + + val64 = xge_os_pio_mem_read64( + hldev->pdev, + hldev->regh0, + &bar0->adapter_control); + break; /* out of for loop */ + } else { + return + XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + } + } + } else { + j = 0; /* Reset the count */ + /* Turn on the Laser */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON; + xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, + val64, &bar0->adapter_control); + + xge_os_mdelay(1); + + /* Now re-enable it as due to noise, hardware + * turned it off */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + val64 |= XGE_HAL_ADAPTER_CNTL_EN; + val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->adapter_control); + } + xge_os_mdelay(1); /* Sleep for 1 msec */ + i++; } while (i < hldev->config.link_retry_cnt); __hal_device_led_actifity_fix(hldev); @@ -4221,15 +4201,15 @@ xge_hal_device_enable(xge_hal_device_t *hldev) * poll notificatoin after adapter is enabled */ __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL, - &bar0->dtx_control); + &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL, - &bar0->dtx_control); + &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL, - &bar0->dtx_control); + &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); xge_os_mdelay(100); /* Sleep for 500 msec */ @@ -4237,21 +4217,21 @@ xge_hal_device_enable(xge_hal_device_t *hldev) if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) #endif { - /* - * With some switches the link state change interrupt does not - * occur even though the xgxs reset is done as per SPN-006. So, - * poll the adapter status register and check if the link state - * is ok. - */ - adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_status); - if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | - XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT))) - { - xge_debug_device(XGE_TRACE, "%s", - "enable device causing link state change ind.."); - (void) __hal_device_handle_link_state_change(hldev); - } + /* + * With some switches the link state change interrupt does not + * occur even though the xgxs reset is done as per SPN-006. So, + * poll the adapter status register and check if the link state + * is ok. 
+ */ + adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_status); + if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | + XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT))) + { + xge_debug_device(XGE_TRACE, "%s", + "enable device causing link state change ind.."); + (void) __hal_device_handle_link_state_change(hldev); + } } if (hldev->config.stats_refresh_time_sec != @@ -4267,19 +4247,19 @@ xge_hal_device_enable(xge_hal_device_t *hldev) * * Disable this device. To gracefully reset the adapter, the host should: * - * - call xge_hal_device_disable(); + * - call xge_hal_device_disable(); * - * - call xge_hal_device_intr_disable(); + * - call xge_hal_device_intr_disable(); * - * - close all opened channels and clean up outstanding resources; + * - close all opened channels and clean up outstanding resources; * - * - do some work (error recovery, change mtu, reset, etc); + * - do some work (error recovery, change mtu, reset, etc); * - * - call xge_hal_device_enable(); + * - call xge_hal_device_enable(); * - * - open channels, replenish RxDs, etc. + * - open channels, replenish RxDs, etc. * - * - call xge_hal_device_intr_enable(). + * - call xge_hal_device_intr_enable(). * * Note: Disabling the device does _not_ include disabling of interrupts. * After disabling the device stops receiving new frames but those frames @@ -4307,21 +4287,21 @@ xge_hal_device_disable(xge_hal_device_t *hldev) &bar0->adapter_control); if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) { - status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, - XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, - XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); - status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; + XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, + XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); + status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } if (hldev->config.stats_refresh_time_sec != XGE_HAL_STATS_REFRESH_DISABLE) - __hal_stats_disable(&hldev->stats); + __hal_stats_disable(&hldev->stats); #ifdef XGE_DEBUG_ASSERT - else + else xge_assert(!hldev->stats.is_enabled); #endif @@ -4357,7 +4337,7 @@ xge_hal_device_reset(xge_hal_device_t *hldev) xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt); if (!hldev->is_initialized) - return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED; + return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED; /* actual "soft" reset of the adapter */ status = __hal_device_reset(hldev); @@ -4371,7 +4351,7 @@ xge_hal_device_reset(xge_hal_device_t *hldev) /* re-initialize rxufca_intr_thres */ hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; - hldev->reset_needed_after_close = 0; + hldev->reset_needed_after_close = 0; return status; } @@ -4404,36 +4384,36 @@ xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status) *hw_status = tmp64; if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) { - xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) { - xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) { - 
xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) { - xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) { - xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) { - xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) { - xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!"); + return XGE_HAL_FAIL; } if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) { - xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!"); + return XGE_HAL_FAIL; } #ifndef XGE_HAL_HERC_EMULATION /* @@ -4444,8 +4424,8 @@ xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status) if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) && xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC && hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) { - xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!"); - return XGE_HAL_FAIL; + xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!"); + return XGE_HAL_FAIL; } #endif @@ -4459,34 +4439,34 @@ __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag) xge_os_pci_read16(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, - msi_control), &msi_control_reg); + msi_control), &msi_control_reg); if (flag) - msi_control_reg |= 0x1; + msi_control_reg |= 0x1; else - msi_control_reg &= ~0x1; + msi_control_reg &= ~0x1; xge_os_pci_write16(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, - msi_control), msi_control_reg); + msi_control), msi_control_reg); } void __hal_device_msix_intr_endis(xge_hal_device_t *hldev, - xge_hal_channel_t *channel, int flag) + xge_hal_channel_t *channel, int flag) { u64 val64; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_mask_reg); + &bar0->xmsi_mask_reg); if (flag) - val64 &= ~(1LL << ( 63 - channel->msix_idx )); + val64 &= ~(1LL << ( 63 - channel->msix_idx )); else - val64 |= (1LL << ( 63 - channel->msix_idx )); + val64 |= (1LL << ( 63 - channel->msix_idx )); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->xmsi_mask_reg); + &bar0->xmsi_mask_reg); } /** @@ -4508,39 +4488,39 @@ xge_hal_device_intr_enable(xge_hal_device_t *hldev) /* PRC initialization and configuration */ xge_list_for_each(item, &hldev->ring_channels) { - xge_hal_channel_h channel; - channel = xge_container_of(item, xge_hal_channel_t, item); - __hal_ring_prc_enable(channel); + xge_hal_channel_h channel; + channel = xge_container_of(item, xge_hal_channel_t, item); + __hal_ring_prc_enable(channel); } /* enable traffic only interrupts */ if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) { - /* - * make sure all interrupts going to be disabled if MSI - * is enabled. 
- */ - __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); + /* + * make sure all interrupts going to be disabled if MSI + * is enabled. + */ + __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); } else { - /* - * Enable the Tx traffic interrupts only if the TTI feature is - * enabled. - */ - val64 = 0; - if (hldev->tti_enabled) - val64 = XGE_HAL_TX_TRAFFIC_INTR; + /* + * Enable the Tx traffic interrupts only if the TTI feature is + * enabled. + */ + val64 = 0; + if (hldev->tti_enabled) + val64 = XGE_HAL_TX_TRAFFIC_INTR; - if (!hldev->config.bimodal_interrupts) - val64 |= XGE_HAL_RX_TRAFFIC_INTR; + if (!hldev->config.bimodal_interrupts) + val64 |= XGE_HAL_RX_TRAFFIC_INTR; - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) - val64 |= XGE_HAL_RX_TRAFFIC_INTR; + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) + val64 |= XGE_HAL_RX_TRAFFIC_INTR; - val64 |=XGE_HAL_TX_PIC_INTR | - XGE_HAL_MC_INTR | - XGE_HAL_TX_DMA_INTR | - (hldev->config.sched_timer_us != - XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0); - __hal_device_intr_mgmt(hldev, val64, 1); + val64 |=XGE_HAL_TX_PIC_INTR | + XGE_HAL_MC_INTR | + XGE_HAL_TX_DMA_INTR | + (hldev->config.sched_timer_us != + XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0); + __hal_device_intr_mgmt(hldev, val64, 1); } /* @@ -4548,41 +4528,41 @@ xge_hal_device_intr_enable(xge_hal_device_t *hldev) */ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* - * To enable MSI-X, MSI also needs to be enabled, - * due to a bug in the herc NIC. - */ - __hal_device_msi_intr_endis(hldev, 1); - } + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + /* + * To enable MSI-X, MSI also needs to be enabled, + * due to a bug in the herc NIC. + */ + __hal_device_msi_intr_endis(hldev, 1); + } - /* Enable the MSI-X interrupt for each configured channel */ - xge_list_for_each(item, &hldev->fifo_channels) { - xge_hal_channel_t *channel; + /* Enable the MSI-X interrupt for each configured channel */ + xge_list_for_each(item, &hldev->fifo_channels) { + xge_hal_channel_t *channel; - channel = xge_container_of(item, - xge_hal_channel_t, item); + channel = xge_container_of(item, + xge_hal_channel_t, item); - /* 0 vector is reserved for alarms */ - if (!channel->msix_idx) - continue; + /* 0 vector is reserved for alarms */ + if (!channel->msix_idx) + continue; - __hal_device_msix_intr_endis(hldev, channel, 1); - } + __hal_device_msix_intr_endis(hldev, channel, 1); + } - xge_list_for_each(item, &hldev->ring_channels) { - xge_hal_channel_t *channel; + xge_list_for_each(item, &hldev->ring_channels) { + xge_hal_channel_t *channel; - channel = xge_container_of(item, - xge_hal_channel_t, item); + channel = xge_container_of(item, + xge_hal_channel_t, item); - /* 0 vector is reserved for alarms */ - if (!channel->msix_idx) - continue; + /* 0 vector is reserved for alarms */ + if (!channel->msix_idx) + continue; - __hal_device_msix_intr_endis(hldev, channel, 1); - } + __hal_device_msix_intr_endis(hldev, channel, 1); + } } xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled"); @@ -4608,49 +4588,49 @@ xge_hal_device_intr_disable(xge_hal_device_t *hldev) if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* - * To disable MSI-X, MSI also needs to be disabled, - * due to a bug in the herc NIC. 
- */ - __hal_device_msi_intr_endis(hldev, 0); - } + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { + /* + * To disable MSI-X, MSI also needs to be disabled, + * due to a bug in the herc NIC. + */ + __hal_device_msi_intr_endis(hldev, 0); + } - /* Disable the MSI-X interrupt for each configured channel */ - xge_list_for_each(item, &hldev->fifo_channels) { - xge_hal_channel_t *channel; + /* Disable the MSI-X interrupt for each configured channel */ + xge_list_for_each(item, &hldev->fifo_channels) { + xge_hal_channel_t *channel; - channel = xge_container_of(item, - xge_hal_channel_t, item); + channel = xge_container_of(item, + xge_hal_channel_t, item); - /* 0 vector is reserved for alarms */ - if (!channel->msix_idx) - continue; + /* 0 vector is reserved for alarms */ + if (!channel->msix_idx) + continue; - __hal_device_msix_intr_endis(hldev, channel, 0); + __hal_device_msix_intr_endis(hldev, channel, 0); - } + } - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, - &bar0->tx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, + &bar0->tx_traffic_mask); - xge_list_for_each(item, &hldev->ring_channels) { - xge_hal_channel_t *channel; + xge_list_for_each(item, &hldev->ring_channels) { + xge_hal_channel_t *channel; - channel = xge_container_of(item, - xge_hal_channel_t, item); + channel = xge_container_of(item, + xge_hal_channel_t, item); - /* 0 vector is reserved for alarms */ - if (!channel->msix_idx) - continue; + /* 0 vector is reserved for alarms */ + if (!channel->msix_idx) + continue; - __hal_device_msix_intr_endis(hldev, channel, 0); - } + __hal_device_msix_intr_endis(hldev, channel, 0); + } - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, - &bar0->rx_traffic_mask); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, + &bar0->rx_traffic_mask); } /* @@ -4660,25 +4640,25 @@ xge_hal_device_intr_disable(xge_hal_device_t *hldev) */ val64 = 0; if (hldev->tti_enabled) - val64 = XGE_HAL_TX_TRAFFIC_INTR; + val64 = XGE_HAL_TX_TRAFFIC_INTR; val64 |= XGE_HAL_RX_TRAFFIC_INTR | - XGE_HAL_TX_PIC_INTR | - XGE_HAL_MC_INTR | - (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ? - XGE_HAL_SCHED_INTR : 0); + XGE_HAL_TX_PIC_INTR | + XGE_HAL_MC_INTR | + (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ? 
+ XGE_HAL_SCHED_INTR : 0); __hal_device_intr_mgmt(hldev, val64, 0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, - &bar0->general_int_mask); + &bar0->general_int_mask); /* disable all configured PRCs */ xge_list_for_each(item, &hldev->ring_channels) { - xge_hal_channel_h channel; - channel = xge_container_of(item, xge_hal_channel_t, item); - __hal_ring_prc_disable(channel); + xge_hal_channel_h channel; + channel = xge_container_of(item, xge_hal_channel_t, item); + __hal_ring_prc_disable(channel); } xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled"); @@ -4704,13 +4684,13 @@ xge_hal_device_mcast_enable(xge_hal_device_t *hldev) int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET; if (hldev == NULL) - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; if (hldev->mcast_refcnt) - return XGE_HAL_OK; + return XGE_HAL_OK; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; + mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; hldev->mcast_refcnt = 1; @@ -4724,17 +4704,17 @@ xge_hal_device_mcast_enable(xge_hal_device_t *hldev) XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL), &bar0->rmac_addr_data1_mem); val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rmac_addr_cmd_mem); + &bar0->rmac_addr_cmd_mem); if (__hal_device_register_poll(hldev, - &bar0->rmac_addr_cmd_mem, 0, - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + &bar0->rmac_addr_cmd_mem, 0, + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } return XGE_HAL_OK; @@ -4759,13 +4739,13 @@ xge_hal_device_mcast_disable(xge_hal_device_t *hldev) int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET; if (hldev == NULL) - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; if (hldev->mcast_refcnt == 0) - return XGE_HAL_OK; + return XGE_HAL_OK; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; + mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; hldev->mcast_refcnt = 0; @@ -4774,23 +4754,23 @@ xge_hal_device_mcast_disable(xge_hal_device_t *hldev) /* Disable all Multicast addresses */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL), - &bar0->rmac_addr_data0_mem); + &bar0->rmac_addr_data0_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0), - &bar0->rmac_addr_data1_mem); + &bar0->rmac_addr_data1_mem); val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rmac_addr_cmd_mem); + &bar0->rmac_addr_cmd_mem); if (__hal_device_register_poll(hldev, - &bar0->rmac_addr_cmd_mem, 0, - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ 
- return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + &bar0->rmac_addr_cmd_mem, 0, + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } return XGE_HAL_OK; @@ -4815,23 +4795,23 @@ xge_hal_device_promisc_enable(xge_hal_device_t *hldev) bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; if (!hldev->is_promisc) { - /* Put the NIC into promiscuous mode */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mac_cfg); - val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; + /* Put the NIC into promiscuous mode */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->mac_cfg); + val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_CFG_KEY(0x4C0D), - &bar0->rmac_cfg_key); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_RMAC_CFG_KEY(0x4C0D), + &bar0->rmac_cfg_key); - __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), - &bar0->mac_cfg); + __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, + (u32)(val64 >> 32), + &bar0->mac_cfg); - hldev->is_promisc = 1; - xge_debug_device(XGE_TRACE, - "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled", - (unsigned long long)val64); + hldev->is_promisc = 1; + xge_debug_device(XGE_TRACE, + "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled", + (unsigned long long)val64); } } @@ -4854,23 +4834,23 @@ xge_hal_device_promisc_disable(xge_hal_device_t *hldev) bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; if (hldev->is_promisc) { - /* Remove the NIC from promiscuous mode */ - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mac_cfg); - val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; + /* Remove the NIC from promiscuous mode */ + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->mac_cfg); + val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_CFG_KEY(0x4C0D), - &bar0->rmac_cfg_key); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_RMAC_CFG_KEY(0x4C0D), + &bar0->rmac_cfg_key); - __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), - &bar0->mac_cfg); + __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, + (u32)(val64 >> 32), + &bar0->mac_cfg); - hldev->is_promisc = 0; - xge_debug_device(XGE_TRACE, - "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled", - (unsigned long long)val64); + hldev->is_promisc = 0; + xge_debug_device(XGE_TRACE, + "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled", + (unsigned long long)val64); } } @@ -4895,19 +4875,20 @@ xge_hal_device_promisc_disable(xge_hal_device_t *hldev) */ xge_hal_status_e xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index, - macaddr_t *macaddr) + macaddr_t *macaddr) { - xge_hal_pci_bar0_t *bar0 = - (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + xge_hal_pci_bar0_t *bar0; u64 val64; int i; if (hldev == NULL) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } + bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) { - return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; + return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; } #ifdef XGE_HAL_HERC_EMULATION @@ -4915,42 +4896,42 @@ xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index, &bar0->rmac_addr_data0_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000, &bar0->rmac_addr_data1_mem); - val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD | - 
XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)); + val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD | + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rmac_addr_cmd_mem); - /* poll until done */ + /* poll until done */ __hal_device_register_poll(hldev, - &bar0->rmac_addr_cmd_mem, 0, - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS); + &bar0->rmac_addr_cmd_mem, 0, + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS); #endif val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD | - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rmac_addr_cmd_mem); if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0, - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rmac_addr_data0_mem); for (i=0; i < XGE_HAL_ETH_ALEN; i++) { - (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8))); + (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8))); } #ifdef XGE_HAL_HERC_EMULATION for (i=0; i < XGE_HAL_ETH_ALEN; i++) { - (*macaddr)[i] = (u8)0; + (*macaddr)[i] = (u8)0; } (*macaddr)[1] = (u8)1; @@ -4979,43 +4960,43 @@ xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index, */ xge_hal_status_e xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index, - macaddr_t macaddr) + macaddr_t macaddr) { xge_hal_pci_bar0_t *bar0 = - (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64, temp64; int i; if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) - return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; + return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; temp64 = 0; for (i=0; i < XGE_HAL_ETH_ALEN; i++) { - temp64 |= macaddr[i]; - temp64 <<= 8; + temp64 |= macaddr[i]; + temp64 <<= 8; } temp64 >>= 8; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64), - &bar0->rmac_addr_data0_mem); + &bar0->rmac_addr_data0_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL), - &bar0->rmac_addr_data1_mem); + &bar0->rmac_addr_data1_mem); val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE | - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rmac_addr_cmd_mem); if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0, - XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } return XGE_HAL_OK; @@ -5044,9 +5025,9 @@ 
xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index) status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, "%s", - "Not able to set the mac addr"); - return status; + xge_debug_device(XGE_ERR, "%s", + "Not able to set the mac addr"); + return status; } return XGE_HAL_OK; @@ -5065,15 +5046,15 @@ xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted) int i; if (hldev == NULL) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) { - macaddr_t macaddr; - (void) xge_hal_device_macaddr_get(hldev, i, &macaddr); - if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) { - return i; - } + macaddr_t macaddr; + (void) xge_hal_device_macaddr_get(hldev, i, &macaddr); + if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) { + return i; + } } return -1; @@ -5106,23 +5087,23 @@ xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu) * 2b) device is being upped for first time. */ if (hldev->config.mtu != new_mtu) { - if (hldev->reset_needed_after_close || - !hldev->mtu_first_time_set) { - status = xge_hal_device_reset(hldev); - if (status != XGE_HAL_OK) { - xge_debug_device(XGE_TRACE, "%s", - "fatal: can not reset the device"); - return status; - } - } - /* store the new MTU in device, reset will use it */ - hldev->config.mtu = new_mtu; - xge_debug_device(XGE_TRACE, "new MTU %d applied", - new_mtu); + if (hldev->reset_needed_after_close || + !hldev->mtu_first_time_set) { + status = xge_hal_device_reset(hldev); + if (status != XGE_HAL_OK) { + xge_debug_device(XGE_TRACE, "%s", + "fatal: can not reset the device"); + return status; + } + } + /* store the new MTU in device, reset will use it */ + hldev->config.mtu = new_mtu; + xge_debug_device(XGE_TRACE, "new MTU %d applied", + new_mtu); } if (!hldev->mtu_first_time_set) - hldev->mtu_first_time_set = 1; + hldev->mtu_first_time_set = 1; return XGE_HAL_OK; } @@ -5160,7 +5141,7 @@ xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu) */ xge_hal_status_e xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, - xge_hal_device_config_t *device_config) + xge_hal_device_config_t *device_config) { int i; xge_hal_status_e status; @@ -5171,12 +5152,12 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, int total_dram_size_max = 0; xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing", - (unsigned long long)(ulong_t)hldev); + (unsigned long long)(ulong_t)hldev); /* sanity check */ if (g_xge_hal_driver == NULL || !g_xge_hal_driver->is_initialized) { - return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; + return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; } xge_os_memzero(hldev, sizeof(xge_hal_device_t)); @@ -5187,15 +5168,15 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, */ status = __hal_device_config_check_common(device_config); if (status != XGE_HAL_OK) - return status; + return status; /* apply config */ xge_os_memcpy(&hldev->config, device_config, - sizeof(xge_hal_device_config_t)); + sizeof(xge_hal_device_config_t)); /* save original attr */ xge_os_memcpy(&hldev->orig_attr, attr, - sizeof(xge_hal_device_attr_t)); + sizeof(xge_hal_device_attr_t)); /* initialize rxufca_intr_thres */ hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; @@ -5214,11 +5195,11 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us; 
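The RMAC address helpers reindented above (macaddr_get/set/clear) all drive the same command-memory handshake: program rmac_addr_cmd_mem with a read or write command, poll until the strobe bit clears, then access the data registers. A minimal sketch of the read side, assuming the HAL headers are in scope and reusing only the calls and macros visible in these hunks (the helper name itself is illustrative, not part of the patch):

static xge_hal_status_e
xge_macaddr_read_sketch(xge_hal_device_t *hldev, int index, macaddr_t *macaddr)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    int i;

    /* issue a read command for the requested RMAC address slot */
    val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
        XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(index);
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
        &bar0->rmac_addr_cmd_mem);

    /* wait for the strobe to complete; the caller may retry on timeout */
    if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK)
        return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;

    /* the six address bytes sit in the top 48 bits of data0 */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->rmac_addr_data0_mem);
    for (i = 0; i < XGE_HAL_ETH_ALEN; i++)
        (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));

    return XGE_HAL_OK;
}

The write path differs only in loading rmac_addr_data0_mem/data1_mem first and using XGE_HAL_RMAC_ADDR_CMD_MEM_WE instead of the read command, which is why macaddr_clear can be expressed as a macaddr_set of the all-FF pattern.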
hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, - g_xge_hal_driver->config.queue_size_initial, - g_xge_hal_driver->config.queue_size_max, - __hal_device_event_queued, hldev); + g_xge_hal_driver->config.queue_size_initial, + g_xge_hal_driver->config.queue_size_max, + __hal_device_event_queued, hldev); if (hldev->queueh == NULL) - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; hldev->magic = XGE_HAL_MAGIC; @@ -5242,90 +5223,57 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, xge_list_init(&hldev->ring_channels); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - /* fixups for xena */ - hldev->config.rth_en = 0; - hldev->config.rth_spdm_en = 0; - hldev->config.rts_mac_en = 0; - total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; - - status = __hal_device_config_check_xena(device_config); - if (status != XGE_HAL_OK) { - xge_hal_device_terminate(hldev); - return status; - } - if (hldev->config.bimodal_interrupts == 1) { - xge_hal_device_terminate(hldev); - return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; - } else if (hldev->config.bimodal_interrupts == - XGE_HAL_DEFAULT_USE_HARDCODE) - hldev->config.bimodal_interrupts = 0; + /* fixups for xena */ + hldev->config.rth_en = 0; + hldev->config.rth_spdm_en = 0; + hldev->config.rts_mac_en = 0; + total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; + + status = __hal_device_config_check_xena(device_config); + if (status != XGE_HAL_OK) { + xge_hal_device_terminate(hldev); + return status; + } + if (hldev->config.bimodal_interrupts == 1) { + xge_hal_device_terminate(hldev); + return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; + } else if (hldev->config.bimodal_interrupts == + XGE_HAL_DEFAULT_USE_HARDCODE) + hldev->config.bimodal_interrupts = 0; } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - /* fixups for herc */ - total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; - status = __hal_device_config_check_herc(device_config); - if (status != XGE_HAL_OK) { - xge_hal_device_terminate(hldev); - return status; - } - if (hldev->config.bimodal_interrupts == - XGE_HAL_DEFAULT_USE_HARDCODE) - hldev->config.bimodal_interrupts = 1; + /* fixups for herc */ + total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; + status = __hal_device_config_check_herc(device_config); + if (status != XGE_HAL_OK) { + xge_hal_device_terminate(hldev); + return status; + } + if (hldev->config.bimodal_interrupts == + XGE_HAL_DEFAULT_USE_HARDCODE) + hldev->config.bimodal_interrupts = 1; } else { - xge_debug_device(XGE_ERR, - "detected unknown device_id 0x%x", hldev->device_id); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_BAD_DEVICE_ID; - } - -#ifdef XGEHAL_RNIC - - if(__hal_blockpool_create(hldev,&hldev->block_pool, - XGE_HAL_BLOCKPOOL_SIZE) != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "block pool: __hal_blockpool_create failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; + xge_debug_device(XGE_ERR, + "detected unknown device_id 0x%x", hldev->device_id); + xge_hal_device_terminate(hldev); + return XGE_HAL_ERR_BAD_DEVICE_ID; } - if(__hal_regpool_create(hldev,&hldev->reg_pool, - XGE_HAL_REGPOOL_SIZE) != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "reg pool: __hal_regpool_create failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - for(i = 0; i < XGE_HAL_MAX_VIRTUAL_PATHS; i++) { - if(__hal_vp_initialize(hldev, i, &device_config->vp_config[i]) - != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "virtual Paths: 
__hal_vp_initialize failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - } - -#endif - /* allocate and initialize FIFO types of channels according to * configuration */ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { - if (!device_config->fifo.queue[i].configured) - continue; + if (!device_config->fifo.queue[i].configured) + continue; - channel = __hal_channel_allocate(hldev, i, -#ifdef XGEHAL_RNIC - 0, -#endif - XGE_HAL_CHANNEL_TYPE_FIFO); - if (channel == NULL) { - xge_debug_device(XGE_ERR, - "fifo: __hal_channel_allocate failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - /* add new channel to the device */ - xge_list_insert(&channel->item, &hldev->free_channels); + channel = __hal_channel_allocate(hldev, i, + XGE_HAL_CHANNEL_TYPE_FIFO); + if (channel == NULL) { + xge_debug_device(XGE_ERR, + "fifo: __hal_channel_allocate failed"); + xge_hal_device_terminate(hldev); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + /* add new channel to the device */ + xge_list_insert(&channel->item, &hldev->free_channels); } /* @@ -5334,65 +5282,62 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, total_dram_size = 0; ring_auto_dram_cfg = 0; for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!device_config->ring.queue[i].configured) - continue; - if (device_config->ring.queue[i].dram_size_mb == - XGE_HAL_DEFAULT_USE_HARDCODE) { - ring_auto_dram_cfg++; - continue; - } - total_dram_size += device_config->ring.queue[i].dram_size_mb; + if (!device_config->ring.queue[i].configured) + continue; + if (device_config->ring.queue[i].dram_size_mb == + XGE_HAL_DEFAULT_USE_HARDCODE) { + ring_auto_dram_cfg++; + continue; + } + total_dram_size += device_config->ring.queue[i].dram_size_mb; } left_dram_size = total_dram_size_max - total_dram_size; if (left_dram_size < 0 || (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { - xge_debug_device(XGE_ERR, - "ring config: exceeded DRAM size %d MB", - total_dram_size_max); - xge_hal_device_terminate(hldev); - return XGE_HAL_BADCFG_RING_QUEUE_SIZE; - } + xge_debug_device(XGE_ERR, + "ring config: exceeded DRAM size %d MB", + total_dram_size_max); + xge_hal_device_terminate(hldev); + return XGE_HAL_BADCFG_RING_QUEUE_SIZE; + } /* * allocate and initialize RING types of channels according to * configuration */ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!device_config->ring.queue[i].configured) - continue; - - if (device_config->ring.queue[i].dram_size_mb == - XGE_HAL_DEFAULT_USE_HARDCODE) { - hldev->config.ring.queue[i].dram_size_mb = - device_config->ring.queue[i].dram_size_mb = - left_dram_size / ring_auto_dram_cfg; - } - - channel = __hal_channel_allocate(hldev, i, -#ifdef XGEHAL_RNIC - 0, -#endif - XGE_HAL_CHANNEL_TYPE_RING); - if (channel == NULL) { - xge_debug_device(XGE_ERR, - "ring: __hal_channel_allocate failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - /* add new channel to the device */ - xge_list_insert(&channel->item, &hldev->free_channels); + if (!device_config->ring.queue[i].configured) + continue; + + if (device_config->ring.queue[i].dram_size_mb == + XGE_HAL_DEFAULT_USE_HARDCODE) { + hldev->config.ring.queue[i].dram_size_mb = + device_config->ring.queue[i].dram_size_mb = + left_dram_size / ring_auto_dram_cfg; + } + + channel = __hal_channel_allocate(hldev, i, + XGE_HAL_CHANNEL_TYPE_RING); + if (channel == NULL) { + xge_debug_device(XGE_ERR, + "ring: __hal_channel_allocate failed"); + 
xge_hal_device_terminate(hldev); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + /* add new channel to the device */ + xge_list_insert(&channel->item, &hldev->free_channels); } /* get subsystem IDs */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), - &subsys_device); + xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), + &subsys_device); xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), - &subsys_vendor); + xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), + &subsys_vendor); xge_debug_device(XGE_TRACE, - "subsystem_id %04x:%04x", - subsys_vendor, subsys_device); + "subsystem_id %04x:%04x", + subsys_vendor, subsys_device); /* reset device initially */ (void) __hal_device_reset(hldev); @@ -5400,26 +5345,26 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, /* set host endian before, to assure proper action */ status = __hal_device_set_swapper(hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "__hal_device_set_swapper failed"); - xge_hal_device_terminate(hldev); - (void) __hal_device_reset(hldev); - return status; + xge_debug_device(XGE_ERR, + "__hal_device_set_swapper failed"); + xge_hal_device_terminate(hldev); + (void) __hal_device_reset(hldev); + return status; } #ifndef XGE_HAL_HERC_EMULATION if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) - __hal_device_xena_fix_mac(hldev); + __hal_device_xena_fix_mac(hldev); #endif /* MAC address initialization. * For now only one mac address will be read and used. */ status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "xge_hal_device_macaddr_get failed"); - xge_hal_device_terminate(hldev); - return status; + xge_debug_device(XGE_ERR, + "xge_hal_device_macaddr_get failed"); + xge_hal_device_terminate(hldev); + return status; } if (hldev->macaddr[0][0] == 0xFF && @@ -5428,39 +5373,39 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, hldev->macaddr[0][3] == 0xFF && hldev->macaddr[0][4] == 0xFF && hldev->macaddr[0][5] == 0xFF) { - xge_debug_device(XGE_ERR, - "xge_hal_device_macaddr_get returns all FFs"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_INVALID_MAC_ADDRESS; + xge_debug_device(XGE_ERR, + "xge_hal_device_macaddr_get returns all FFs"); + xge_hal_device_terminate(hldev); + return XGE_HAL_ERR_INVALID_MAC_ADDRESS; } xge_debug_device(XGE_TRACE, - "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", - hldev->macaddr[0][0], hldev->macaddr[0][1], - hldev->macaddr[0][2], hldev->macaddr[0][3], - hldev->macaddr[0][4], hldev->macaddr[0][5]); + "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", + hldev->macaddr[0][0], hldev->macaddr[0][1], + hldev->macaddr[0][2], hldev->macaddr[0][3], + hldev->macaddr[0][4], hldev->macaddr[0][5]); status = __hal_stats_initialize(&hldev->stats, hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "__hal_stats_initialize failed"); - xge_hal_device_terminate(hldev); - return status; + xge_debug_device(XGE_ERR, + "__hal_stats_initialize failed"); + xge_hal_device_terminate(hldev); + return status; } status = __hal_device_hw_initialize(hldev); if (status != XGE_HAL_OK) { - xge_debug_device(XGE_ERR, - "__hal_device_hw_initialize failed"); - xge_hal_device_terminate(hldev); - return status; + xge_debug_device(XGE_ERR, + "__hal_device_hw_initialize failed"); + xge_hal_device_terminate(hldev); + return status; } 
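The two ring loops in xge_hal_device_initialize above implement a simple DRAM budget: rings with an explicit dram_size_mb are summed against the per-card maximum, and any ring left at XGE_HAL_DEFAULT_USE_HARDCODE later receives an equal share of the remainder. A condensed sketch of that arithmetic, assuming the config fields shown in the hunks (the helper name is illustrative, not part of the patch):

static xge_hal_status_e
xge_ring_dram_budget_sketch(xge_hal_device_config_t *cfg, int total_dram_size_max)
{
    int i, total = 0, autocfg = 0, left;

    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!cfg->ring.queue[i].configured)
            continue;
        if (cfg->ring.queue[i].dram_size_mb == XGE_HAL_DEFAULT_USE_HARDCODE) {
            autocfg++;          /* sized from the leftover below */
            continue;
        }
        total += cfg->ring.queue[i].dram_size_mb;
    }

    left = total_dram_size_max - total;
    if (left < 0 || (autocfg && left / autocfg == 0))
        return XGE_HAL_BADCFG_RING_QUEUE_SIZE;  /* DRAM over-committed */

    /* every auto-configured ring receives an equal share */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (cfg->ring.queue[i].configured &&
            cfg->ring.queue[i].dram_size_mb == XGE_HAL_DEFAULT_USE_HARDCODE)
            cfg->ring.queue[i].dram_size_mb = left / autocfg;
    }
    return XGE_HAL_OK;
}

The per-card maximum is XGE_HAL_MAX_RING_QUEUE_SIZE_HERC or XGE_HAL_MAX_RING_QUEUE_SIZE_XENA as selected earlier in the function; a configuration that leaves the auto-sized rings with zero megabytes is rejected up front as XGE_HAL_BADCFG_RING_QUEUE_SIZE rather than discovered at ring open time.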
hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE); if (hldev->dump_buf == NULL) { - xge_debug_device(XGE_ERR, - "__hal_device_hw_initialize failed"); - xge_hal_device_terminate(hldev); - return XGE_HAL_ERR_OUT_OF_MEMORY; + xge_debug_device(XGE_ERR, + "__hal_device_hw_initialize failed"); + xge_hal_device_terminate(hldev); + return XGE_HAL_ERR_OUT_OF_MEMORY; } @@ -5471,7 +5416,7 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh); #endif /* Getting VPD data */ - __hal_device_get_vpd_data(hldev); + __hal_device_get_vpd_data(hldev); hldev->is_initialized = 1; @@ -5491,6 +5436,33 @@ void xge_hal_device_terminating(xge_hal_device_h devh) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; + xge_list_t *item; + xge_hal_channel_t *channel; +#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) + unsigned long flags=0; +#endif + + /* + * go through each opened tx channel and aquire + * lock, so it will serialize with HAL termination flag + */ + xge_list_for_each(item, &hldev->fifo_channels) { + channel = xge_container_of(item, xge_hal_channel_t, item); +#if defined(XGE_HAL_TX_MULTI_RESERVE) + xge_os_spin_lock(&channel->reserve_lock); +#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) + xge_os_spin_lock_irq(&channel->reserve_lock, flags); +#endif + + channel->terminating = 1; + +#if defined(XGE_HAL_TX_MULTI_RESERVE) + xge_os_spin_unlock(&channel->reserve_lock); +#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) + xge_os_spin_unlock_irq(&channel->reserve_lock, flags); +#endif + } + hldev->terminating = 1; } @@ -5505,9 +5477,6 @@ xge_hal_device_terminating(xge_hal_device_h devh) void xge_hal_device_terminate(xge_hal_device_t *hldev) { -#ifdef XGEHAL_RNIC - int i; -#endif xge_assert(g_xge_hal_driver != NULL); xge_assert(hldev != NULL); xge_assert(hldev->magic == XGE_HAL_MAGIC); @@ -5516,7 +5485,7 @@ xge_hal_device_terminate(xge_hal_device_t *hldev) hldev->terminating = 1; hldev->is_initialized = 0; - hldev->in_poll = 0; + hldev->in_poll = 0; hldev->magic = XGE_HAL_DEAD; #if defined(XGE_HAL_TX_MULTI_POST) @@ -5526,69 +5495,58 @@ xge_hal_device_terminate(xge_hal_device_t *hldev) #endif xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating", - (unsigned long long)(ulong_t)hldev); + (unsigned long long)(ulong_t)hldev); xge_assert(xge_list_is_empty(&hldev->fifo_channels)); xge_assert(xge_list_is_empty(&hldev->ring_channels)); if (hldev->stats.is_initialized) { - __hal_stats_terminate(&hldev->stats); + __hal_stats_terminate(&hldev->stats); } /* close if open and free all channels */ while (!xge_list_is_empty(&hldev->free_channels)) { - xge_hal_channel_t *channel = (xge_hal_channel_t*) - hldev->free_channels.next; + xge_hal_channel_t *channel = (xge_hal_channel_t*) + hldev->free_channels.next; - xge_assert(!channel->is_open); - xge_list_remove(&channel->item); - __hal_channel_free(channel); + xge_assert(!channel->is_open); + xge_list_remove(&channel->item); + __hal_channel_free(channel); } if (hldev->queueh) { - xge_queue_destroy(hldev->queueh); + xge_queue_destroy(hldev->queueh); } if (hldev->spdm_table) { - xge_os_free(hldev->pdev, - hldev->spdm_table[0], - (sizeof(xge_hal_spdm_entry_t) * - hldev->spdm_max_entries)); - xge_os_free(hldev->pdev, - hldev->spdm_table, - (sizeof(xge_hal_spdm_entry_t *) * - hldev->spdm_max_entries)); - xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev); - hldev->spdm_table = NULL; + xge_os_free(hldev->pdev, + hldev->spdm_table[0], + (sizeof(xge_hal_spdm_entry_t) * + 
hldev->spdm_max_entries)); + xge_os_free(hldev->pdev, + hldev->spdm_table, + (sizeof(xge_hal_spdm_entry_t *) * + hldev->spdm_max_entries)); + xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev); + hldev->spdm_table = NULL; } if (hldev->dump_buf) { xge_os_free(hldev->pdev, hldev->dump_buf, - XGE_HAL_DUMP_BUF_SIZE); - hldev->dump_buf = NULL; + XGE_HAL_DUMP_BUF_SIZE); + hldev->dump_buf = NULL; } if (hldev->device_id != 0) { - int j, pcisize; - - pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? - XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; - for (j = 0; j < pcisize; j++) { - xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, - *((u32*)&hldev->pci_config_space_bios + j)); - } - } -#ifdef XGEHAL_RNIC + int j, pcisize; - for(i = 0; i < XGE_HAL_MAX_VIRTUAL_PATHS; i++) { - __hal_vp_terminate(hldev, i); + pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? + XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; + for (j = 0; j < pcisize; j++) { + xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, + *((u32*)&hldev->pci_config_space_bios + j)); + } } - - __hal_blockpool_destroy(&hldev->block_pool); - - __hal_regpool_destroy(&hldev->reg_pool); -#endif - } /** * __hal_device_get_vpd_data - Getting vpd_data. @@ -5601,67 +5559,67 @@ xge_hal_device_terminate(xge_hal_device_t *hldev) void __hal_device_get_vpd_data(xge_hal_device_t *hldev) { - u8 * vpd_data; - u8 data; - int index = 0, count, fail = 0; - u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR; - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR; - - xge_os_strcpy((char *) hldev->vpd_data.product_name, - "10 Gigabit Ethernet Adapter"); - xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available"); - - vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE); - if ( vpd_data == 0 ) - return; - - for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) { - xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index); - xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data); - xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0); - for (count = 0; count < 5; count++ ) { - xge_os_mdelay(2); - xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data); - if (data == XGE_HAL_VPD_READ_COMPLETE) - break; - } - - if (count >= 5) { - xge_os_printf("ERR, Reading VPD data failed"); - fail = 1; - break; - } - - xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4), - (u32 *)&vpd_data[index]); - } - - if(!fail) { - - /* read serial number of adapter */ - for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) { - if ((vpd_data[count] == 'S') && - (vpd_data[count + 1] == 'N') && - (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) { - memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH); - memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3], - vpd_data[count + 2]); - break; - } - } - - if (vpd_data[1] < XGE_HAL_VPD_LENGTH) { - memset(hldev->vpd_data.product_name, 0, vpd_data[1]); - memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]); - } - - } - - xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE); + u8 * vpd_data; + u8 data; + int index = 0, count, fail = 0; + u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR; + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) + vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR; + + xge_os_strcpy((char *) hldev->vpd_data.product_name, + "10 Gigabit Ethernet Adapter"); + xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available"); + + vpd_data = ( u8*) xge_os_malloc(hldev->pdev, 
XGE_HAL_VPD_BUFFER_SIZE + 16); + if ( vpd_data == 0 ) + return; + + for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) { + xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index); + xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data); + xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0); + for (count = 0; count < 5; count++ ) { + xge_os_mdelay(2); + xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data); + if (data == XGE_HAL_VPD_READ_COMPLETE) + break; + } + + if (count >= 5) { + xge_os_printf("ERR, Reading VPD data failed"); + fail = 1; + break; + } + + xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4), + (u32 *)&vpd_data[index]); + } + + if(!fail) { + + /* read serial number of adapter */ + for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) { + if ((vpd_data[count] == 'S') && + (vpd_data[count + 1] == 'N') && + (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) { + memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH); + memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3], + vpd_data[count + 2]); + break; + } + } + + if (vpd_data[1] < XGE_HAL_VPD_LENGTH) { + memset(hldev->vpd_data.product_name, 0, vpd_data[1]); + memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]); + } + + } + + xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16); } - + /** * xge_hal_device_handle_tcode - Handle transfer code. * @channelh: Channel handle. @@ -5675,86 +5633,86 @@ __hal_device_get_vpd_data(xge_hal_device_t *hldev) * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h. * * Returns: one of the xge_hal_status_e{} enumerated types. - * XGE_HAL_OK - for success. + * XGE_HAL_OK - for success. * XGE_HAL_ERR_CRITICAL - when encounters critical error. */ xge_hal_status_e xge_hal_device_handle_tcode (xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, u8 t_code) + xge_hal_dtr_h dtrh, u8 t_code) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh; if (t_code > 15) { - xge_os_printf("invalid t_code %d", t_code); - return XGE_HAL_OK; + xge_os_printf("invalid t_code %d", t_code); + return XGE_HAL_OK; } if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++; #if defined(XGE_HAL_DEBUG_BAD_TCODE) - xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; - xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":" - XGE_OS_LLXFMT":"XGE_OS_LLXFMT, - txdp->control_1, txdp->control_2, txdp->buffer_pointer, - txdp->host_control); + xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; + xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":" + XGE_OS_LLXFMT":"XGE_OS_LLXFMT, + txdp->control_1, txdp->control_2, txdp->buffer_pointer, + txdp->host_control); #endif - /* handle link "down" immediately without going through - * xge_hal_device_poll() routine. 
*/ - if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) { - /* link is down */ - if (hldev->link_state != XGE_HAL_LINK_DOWN) { - xge_hal_pci_bar0_t *bar0 = - (xge_hal_pci_bar0_t *)(void *)hldev->bar0; - u64 val64; - - hldev->link_state = XGE_HAL_LINK_DOWN; - - val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->adapter_control); - - /* turn off LED */ - val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); - xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, val64, - &bar0->adapter_control); - - g_xge_hal_driver->uld_callbacks.link_down( - hldev->upper_layer_info); - } - } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER || - t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) { - __hal_device_handle_targetabort(hldev); - return XGE_HAL_ERR_CRITICAL; - } - return XGE_HAL_ERR_PKT_DROP; + /* handle link "down" immediately without going through + * xge_hal_device_poll() routine. */ + if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) { + /* link is down */ + if (hldev->link_state != XGE_HAL_LINK_DOWN) { + xge_hal_pci_bar0_t *bar0 = + (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + u64 val64; + + hldev->link_state = XGE_HAL_LINK_DOWN; + + val64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->adapter_control); + + /* turn off LED */ + val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); + xge_os_pio_mem_write64(hldev->pdev, + hldev->regh0, val64, + &bar0->adapter_control); + + g_xge_hal_driver->uld_callbacks.link_down( + hldev->upper_layer_info); + } + } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER || + t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) { + __hal_device_handle_targetabort(hldev); + return XGE_HAL_ERR_CRITICAL; + } + return XGE_HAL_ERR_PKT_DROP; } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++; #if defined(XGE_HAL_DEBUG_BAD_TCODE) - xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; - xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT - ":"XGE_OS_LLXFMT, rxdp->control_1, - rxdp->control_2, rxdp->buffer0_ptr, - rxdp->host_control); + xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; + xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT + ":"XGE_OS_LLXFMT, rxdp->control_1, + rxdp->control_2, rxdp->buffer0_ptr, + rxdp->host_control); #endif - if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) { - hldev->stats.sw_dev_err_stats.ecc_err_cnt++; - __hal_device_handle_eccerr(hldev, "rxd_t_code", - (u64)t_code); - return XGE_HAL_ERR_CRITICAL; - } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY || - t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) { - hldev->stats.sw_dev_err_stats.parity_err_cnt++; - __hal_device_handle_parityerr(hldev, "rxd_t_code", - (u64)t_code); - return XGE_HAL_ERR_CRITICAL; - /* do not drop if detected unknown IPv6 extension */ - } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) { - return XGE_HAL_ERR_PKT_DROP; - } + if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) { + hldev->stats.sw_dev_err_stats.ecc_err_cnt++; + __hal_device_handle_eccerr(hldev, "rxd_t_code", + (u64)t_code); + return XGE_HAL_ERR_CRITICAL; + } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY || + t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) { + hldev->stats.sw_dev_err_stats.parity_err_cnt++; + __hal_device_handle_parityerr(hldev, "rxd_t_code", + (u64)t_code); + return XGE_HAL_ERR_CRITICAL; + /* do not drop if detected unknown IPv6 extension */ + } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) { + return XGE_HAL_ERR_PKT_DROP; + } } return XGE_HAL_OK; } @@ -5769,7 +5727,7 @@ xge_hal_device_handle_tcode (xge_hal_channel_h channelh, * 
See also: xge_hal_device_link_state_e{}. */ xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh, - xge_hal_device_link_state_e *ls) + xge_hal_device_link_state_e *ls) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; @@ -5795,35 +5753,35 @@ xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh, * See also: xge_hal_device_config_t{}. */ void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, - int one_shot) + int one_shot) { u64 val64; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_hal_pci_bar0_t *bar0 = - (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + (xge_hal_pci_bar0_t *)(void *)hldev->bar0; unsigned int interval = hldev->config.pci_freq_mherz * interval_us; interval = __hal_fix_time_ival_herc(hldev, interval); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->scheduled_int_ctrl); + &bar0->scheduled_int_ctrl); if (interval) { - val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; - val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); - if (one_shot) { - val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; - } - val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; + val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; + val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); + if (one_shot) { + val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; + } + val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; } else { - val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; + val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->scheduled_int_ctrl); + val64, &bar0->scheduled_int_ctrl); xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s", - (unsigned long long)val64, - interval ? "enabled" : "disabled"); + (unsigned long long)val64, + interval ? "enabled" : "disabled"); } /** @@ -5841,56 +5799,56 @@ xge_hal_device_check_id(xge_hal_device_h devh) switch (hldev->device_id) { case XGE_PCI_DEVICE_ID_XENA_1: case XGE_PCI_DEVICE_ID_XENA_2: - return XGE_HAL_CARD_XENA; + return XGE_HAL_CARD_XENA; case XGE_PCI_DEVICE_ID_HERC_1: case XGE_PCI_DEVICE_ID_HERC_2: - return XGE_HAL_CARD_HERC; + return XGE_HAL_CARD_HERC; case XGE_PCI_DEVICE_ID_TITAN_1: case XGE_PCI_DEVICE_ID_TITAN_2: - return XGE_HAL_CARD_TITAN; + return XGE_HAL_CARD_TITAN; default: - return XGE_HAL_CARD_UNKNOWN; + return XGE_HAL_CARD_UNKNOWN; } } /** * xge_hal_device_pci_info_get - Get PCI bus informations such as width, - * frequency, and mode from previously stored values. - * @devh: HAL device handle. - * @pci_mode: pointer to a variable of enumerated type - * xge_hal_pci_mode_e{}. - * @bus_frequency: pointer to a variable of enumerated type - * xge_hal_pci_bus_frequency_e{}. - * @bus_width: pointer to a variable of enumerated type - * xge_hal_pci_bus_width_e{}. + * frequency, and mode from previously stored values. + * @devh: HAL device handle. + * @pci_mode: pointer to a variable of enumerated type + * xge_hal_pci_mode_e{}. + * @bus_frequency: pointer to a variable of enumerated type + * xge_hal_pci_bus_frequency_e{}. + * @bus_width: pointer to a variable of enumerated type + * xge_hal_pci_bus_width_e{}. * * Get pci mode, frequency, and PCI bus width. * Returns: one of the xge_hal_status_e{} enumerated types. - * XGE_HAL_OK - for success. - * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. + * XGE_HAL_OK - for success. + * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
*/ xge_hal_status_e xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, - xge_hal_pci_bus_frequency_e *bus_frequency, - xge_hal_pci_bus_width_e *bus_width) + xge_hal_pci_bus_frequency_e *bus_frequency, + xge_hal_pci_bus_width_e *bus_width) { xge_hal_status_e rc_status; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { - rc_status = XGE_HAL_ERR_INVALID_DEVICE; - xge_debug_device(XGE_ERR, - "xge_hal_device_pci_info_get error, rc %d for device %p", - rc_status, hldev); + rc_status = XGE_HAL_ERR_INVALID_DEVICE; + xge_debug_device(XGE_ERR, + "xge_hal_device_pci_info_get error, rc %d for device %p", + rc_status, hldev); - return rc_status; + return rc_status; } - *pci_mode = hldev->pci_mode; - *bus_frequency = hldev->bus_frequency; - *bus_width = hldev->bus_width; - rc_status = XGE_HAL_OK; + *pci_mode = hldev->pci_mode; + *bus_frequency = hldev->bus_frequency; + *bus_width = hldev->bus_width; + rc_status = XGE_HAL_OK; return rc_status; } @@ -5906,9 +5864,9 @@ int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) { (void) xge_hal_device_reset(hldev); if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) { - xge_hal_device_terminate(hldev); - (void) __hal_device_reset(hldev); - return 1; + xge_hal_device_terminate(hldev); + (void) __hal_device_reset(hldev); + return 1; } return 0; } @@ -5929,29 +5887,29 @@ int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) */ xge_hal_status_e __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, - u16 spdm_entry, u64 *spdm_line_val) + u16 spdm_entry, u64 *spdm_line_val) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE | - XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | - XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); + XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | + XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_spdm_mem_ctrl); + &bar0->rts_rth_spdm_mem_ctrl); /* poll until done */ if (__hal_device_register_poll(hldev, - &bar0->rts_rth_spdm_mem_ctrl, 0, - XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + &bar0->rts_rth_spdm_mem_ctrl, 0, + XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->rts_rth_spdm_mem_data); + hldev->regh0, &bar0->rts_rth_spdm_mem_data); return XGE_HAL_OK; } @@ -5975,13 +5933,13 @@ __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) */ *spdm_entry = 0; for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) { - if (hldev->spdm_table[*spdm_entry]->in_use) { - break; - } + if (hldev->spdm_table[*spdm_entry]->in_use) { + break; + } } if (*spdm_entry >= hldev->spdm_max_entries) { - return XGE_HAL_ERR_SPDM_TABLE_FULL; + return XGE_HAL_ERR_SPDM_TABLE_FULL; } /* @@ -5991,19 +5949,19 @@ __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) * whether the entry is free or not. 
*/ if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry, - &spdm_line_val)) != XGE_HAL_OK) { - return status; + &spdm_line_val)) != XGE_HAL_OK) { + return status; } /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */ if ((spdm_line_val & BIT(63))) { - /* - * Log a warning - */ - xge_debug_device(XGE_ERR, "Local SPDM table is not " - "consistent with the actual one for the spdm " - "entry %d", *spdm_entry); - return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; + /* + * Log a warning + */ + xge_debug_device(XGE_ERR, "Local SPDM table is not " + "consistent with the actual one for the spdm " + "entry %d", *spdm_entry); + return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; } return XGE_HAL_OK; @@ -6038,44 +5996,44 @@ __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) /* handle most of the key */ while (len >= 12) { - a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) - + ((u32)msg[3]<<24)); - b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) - + ((u32)msg[7]<<24)); - c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) - + ((u32)msg[11]<<24)); - mix(a,b,c); - msg += 12; len -= 12; + a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) + + ((u32)msg[3]<<24)); + b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) + + ((u32)msg[7]<<24)); + c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) + + ((u32)msg[11]<<24)); + mix(a,b,c); + msg += 12; len -= 12; } /* handle the last 11 bytes */ c += length; switch(len) /* all the case statements fall through */ { - case 11: c+= ((u32)msg[10]<<24); - break; - case 10: c+= ((u32)msg[9]<<16); - break; - case 9 : c+= ((u32)msg[8]<<8); - break; - /* the first byte of c is reserved for the length */ - case 8 : b+= ((u32)msg[7]<<24); - break; - case 7 : b+= ((u32)msg[6]<<16); - break; - case 6 : b+= ((u32)msg[5]<<8); - break; - case 5 : b+= msg[4]; - break; - case 4 : a+= ((u32)msg[3]<<24); - break; - case 3 : a+= ((u32)msg[2]<<16); - break; - case 2 : a+= ((u32)msg[1]<<8); - break; - case 1 : a+= msg[0]; - break; - /* case 0: nothing left to add */ + case 11: c+= ((u32)msg[10]<<24); + break; + case 10: c+= ((u32)msg[9]<<16); + break; + case 9 : c+= ((u32)msg[8]<<8); + break; + /* the first byte of c is reserved for the length */ + case 8 : b+= ((u32)msg[7]<<24); + break; + case 7 : b+= ((u32)msg[6]<<16); + break; + case 6 : b+= ((u32)msg[5]<<8); + break; + case 5 : b+= msg[4]; + break; + case 4 : a+= ((u32)msg[3]<<24); + break; + case 3 : a+= ((u32)msg[2]<<16); + break; + case 2 : a+= ((u32)msg[1]<<8); + break; + case 1 : a+= msg[0]; + break; + /* case 0: nothing left to add */ } mix(a,b,c); @@ -6093,9 +6051,9 @@ __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) * @l4_sp: L4 source port. * @l4_dp: L4 destination port. * @is_tcp: Set to 1, if the protocol is TCP. - * 0, if the protocol is UDP. + * 0, if the protocol is UDP. * @is_ipv4: Set to 1, if the protocol is IPv4. - * 0, if the protocol is IPv6. + * 0, if the protocol is IPv6. * @tgt_queue: Target queue to route the receive packet. * * This function add a new entry to the SPDM table. @@ -6103,7 +6061,7 @@ __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) * Returns: XGE_HAL_OK - success. * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in - * the time(timeout). + * the time(timeout). * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry. 
* @@ -6111,8 +6069,8 @@ __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) */ xge_hal_status_e xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, - xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, - u8 is_tcp, u8 is_ipv4, u8 tgt_queue) + xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, + u8 is_tcp, u8 is_ipv4, u8 tgt_queue) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; @@ -6129,12 +6087,12 @@ xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, if (!hldev->config.rth_spdm_en) { - return XGE_HAL_ERR_SPDM_NOT_ENABLED; + return XGE_HAL_ERR_SPDM_NOT_ENABLED; } if ((tgt_queue < XGE_HAL_MIN_RING_NUM) || - (tgt_queue > XGE_HAL_MAX_RING_NUM)) { - return XGE_HAL_ERR_SPDM_INVALID_ENTRY; + (tgt_queue > XGE_HAL_MAX_RING_NUM)) { + return XGE_HAL_ERR_SPDM_INVALID_ENTRY; } @@ -6148,9 +6106,9 @@ xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, */ if (is_ipv4) { - ipaddr_len = 4; // In bytes + ipaddr_len = 4; // In bytes } else { - ipaddr_len = 16; + ipaddr_len = 16; } /* @@ -6160,48 +6118,48 @@ xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, * byte starting from MSB and store it in the key. */ if (is_ipv4) { - for (off = 0; off < ipaddr_len; off++) { - u32 mask = vBIT32(0xff,(off*8),8); - int shift = 32-(off+1)*8; - msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift); - msg[off+ipaddr_len] = - (u8)((dst_ip->ipv4.addr & mask) >> shift); - } + for (off = 0; off < ipaddr_len; off++) { + u32 mask = vBIT32(0xff,(off*8),8); + int shift = 32-(off+1)*8; + msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift); + msg[off+ipaddr_len] = + (u8)((dst_ip->ipv4.addr & mask) >> shift); + } } else { - for (off = 0; off < ipaddr_len; off++) { - int loc = off % 8; - u64 mask = vBIT(0xff,(loc*8),8); - int shift = 64-(loc+1)*8; + for (off = 0; off < ipaddr_len; off++) { + int loc = off % 8; + u64 mask = vBIT(0xff,(loc*8),8); + int shift = 64-(loc+1)*8; - msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask) - >> shift); - msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8] - & mask) >> shift); - } + msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask) + >> shift); + msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8] + & mask) >> shift); + } } off = (2*ipaddr_len); if (hldev->config.rth_spdm_use_l4) { - msg[off] = (u8)((l4_sp & 0xff00) >> 8); - msg[off + 1] = (u8)(l4_sp & 0xff); - msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8); - msg[off + 3] = (u8)(l4_dp & 0xff); - off += 4; + msg[off] = (u8)((l4_sp & 0xff00) >> 8); + msg[off + 1] = (u8)(l4_sp & 0xff); + msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8); + msg[off + 3] = (u8)(l4_dp & 0xff); + off += 4; } /* * Calculate jenkins hash for this configuration */ val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, - &bar0->rts_rth_jhash_cfg); + hldev->regh0, + &bar0->rts_rth_jhash_cfg); jhash_golden_ratio = (u32)(val64 >> 32); jhash_init_val = (u32)(val64 & 0xffffffff); jhash_value = __hal_calc_jhash(msg, off, - jhash_golden_ratio, - jhash_init_val); + jhash_golden_ratio, + jhash_init_val); xge_os_spin_lock(&hldev->spdm_lock); @@ -6212,18 +6170,18 @@ xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, * the free entry is performed in the local table. 
*/ if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry)) - != XGE_HAL_OK) { - xge_os_spin_unlock(&hldev->spdm_lock); - return status; + != XGE_HAL_OK) { + xge_os_spin_unlock(&hldev->spdm_lock); + return status; } /* * Add this entry to the SPDM table */ status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp, - is_tcp, is_ipv4, tgt_queue, - jhash_value, /* calculated jhash */ - spdm_entry); + is_tcp, is_ipv4, tgt_queue, + jhash_value, /* calculated jhash */ + spdm_entry); xge_os_spin_unlock(&hldev->spdm_lock); @@ -6238,25 +6196,25 @@ xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, * @l4_sp: L4 source port. * @l4_dp: L4 destination port. * @is_tcp: Set to 1, if the protocol is TCP. - * 0, if the protocol os UDP. + * 0, if the protocol os UDP. * @is_ipv4: Set to 1, if the protocol is IPv4. - * 0, if the protocol is IPv6. + * 0, if the protocol is IPv6. * * This function remove an entry from the SPDM table. * * Returns: XGE_HAL_OK - success. * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in - * the time(timeout). + * the time(timeout). * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM - * table. + * table. * * See also: xge_hal_spdm_entry_add{}. */ xge_hal_status_e xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, - xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, - u8 is_tcp, u8 is_ipv4) + xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, + u8 is_tcp, u8 is_ipv4) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; @@ -6272,7 +6230,7 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, u16 spdm_l4_dp; if (!hldev->config.rth_spdm_en) { - return XGE_HAL_ERR_SPDM_NOT_ENABLED; + return XGE_HAL_ERR_SPDM_NOT_ENABLED; } xge_os_spin_lock(&hldev->spdm_lock); @@ -6282,22 +6240,22 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, * timeout happens. */ if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, - XGE_HAL_RX_PIC_INT_REG_SPDM_READY, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + XGE_HAL_RX_PIC_INT_REG_SPDM_READY, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - xge_os_spin_unlock(&hldev->spdm_lock); - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + /* upper layer may require to repeat */ + xge_os_spin_unlock(&hldev->spdm_lock); + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } /* * Clear the SPDM READY bit. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rxpic_int_reg); + &bar0->rxpic_int_reg); val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rxpic_int_reg); + &bar0->rxpic_int_reg); /* * Search in the local SPDM table to get the index of the @@ -6305,42 +6263,42 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, */ spdm_entry = 0; for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) { - if ((!hldev->spdm_table[spdm_entry]->in_use) || - (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) || - (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) || - (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) || - (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) { - continue; - } - - /* - * Compare the src/dst IP addresses of source and target - */ - if (is_ipv4) { - if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr - != src_ip->ipv4.addr) || - (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr - != dst_ip->ipv4.addr)) { - continue; - } - } else { - if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0] - != src_ip->ipv6.addr[0]) || - (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1] - != src_ip->ipv6.addr[1]) || - (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0] - != dst_ip->ipv6.addr[0]) || - (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1] - != dst_ip->ipv6.addr[1])) { - continue; - } - } - break; + if ((!hldev->spdm_table[spdm_entry]->in_use) || + (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) || + (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) || + (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) || + (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) { + continue; + } + + /* + * Compare the src/dst IP addresses of source and target + */ + if (is_ipv4) { + if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr + != src_ip->ipv4.addr) || + (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr + != dst_ip->ipv4.addr)) { + continue; + } + } else { + if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0] + != src_ip->ipv6.addr[0]) || + (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1] + != src_ip->ipv6.addr[1]) || + (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0] + != dst_ip->ipv6.addr[0]) || + (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1] + != dst_ip->ipv6.addr[1])) { + continue; + } + } + break; } if (spdm_entry >= hldev->spdm_max_entries) { - xge_os_spin_unlock(&hldev->spdm_lock); - return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND; + xge_os_spin_unlock(&hldev->spdm_lock); + return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND; } /* @@ -6349,27 +6307,27 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, */ for(line_no = 0; line_no < 8; line_no++) { - /* - * SPDM line 2,3,4 are valid only for IPv6 entry. - * SPDM line 5 & 6 are reserved. We don't have to - * read these entries in the above cases. - */ - if (((is_ipv4) && - ((line_no == 2)||(line_no == 3)||(line_no == 4))) || - (line_no == 5) || - (line_no == 6)) { - continue; - } - - if ((status = __hal_read_spdm_entry_line( - hldev, - line_no, - spdm_entry, - &spdm_line_arr[line_no])) - != XGE_HAL_OK) { - xge_os_spin_unlock(&hldev->spdm_lock); - return status; - } + /* + * SPDM line 2,3,4 are valid only for IPv6 entry. + * SPDM line 5 & 6 are reserved. We don't have to + * read these entries in the above cases. 
+ */ + if (((is_ipv4) && + ((line_no == 2)||(line_no == 3)||(line_no == 4))) || + (line_no == 5) || + (line_no == 6)) { + continue; + } + + if ((status = __hal_read_spdm_entry_line( + hldev, + line_no, + spdm_entry, + &spdm_line_arr[line_no])) + != XGE_HAL_OK) { + xge_os_spin_unlock(&hldev->spdm_lock); + return status; + } } /* @@ -6380,13 +6338,13 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, * bit. */ if (!(spdm_line_arr[7] & BIT(63))) { - /* - * Log a warning - */ - xge_debug_device(XGE_ERR, "Local SPDM table is not " - "consistent with the actual one for the spdm " - "entry %d ", spdm_entry); - goto err_exit; + /* + * Log a warning + */ + xge_debug_device(XGE_ERR, "Local SPDM table is not " + "consistent with the actual one for the spdm " + "entry %d ", spdm_entry); + goto err_exit; } /* @@ -6403,49 +6361,49 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, (spdm_is_ipv4 != is_ipv4) || (spdm_l4_sp != l4_sp) || (spdm_l4_dp != l4_dp)) { - /* - * Log a warning - */ - xge_debug_device(XGE_ERR, "Local SPDM table is not " - "consistent with the actual one for the spdm " - "entry %d ", spdm_entry); - goto err_exit; + /* + * Log a warning + */ + xge_debug_device(XGE_ERR, "Local SPDM table is not " + "consistent with the actual one for the spdm " + "entry %d ", spdm_entry); + goto err_exit; } if (is_ipv4) { - /* Upper 32 bits of spdm_line(64 bit) contains the - * src IPv4 address. Lower 32 bits of spdm_line - * contains the destination IPv4 address. - */ - u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32); - u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff); - - if ((temp_src_ip != src_ip->ipv4.addr) || - (temp_dst_ip != dst_ip->ipv4.addr)) { - xge_debug_device(XGE_ERR, "Local SPDM table is not " - "consistent with the actual one for the spdm " - "entry %d ", spdm_entry); - goto err_exit; - } + /* Upper 32 bits of spdm_line(64 bit) contains the + * src IPv4 address. Lower 32 bits of spdm_line + * contains the destination IPv4 address. + */ + u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32); + u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff); + + if ((temp_src_ip != src_ip->ipv4.addr) || + (temp_dst_ip != dst_ip->ipv4.addr)) { + xge_debug_device(XGE_ERR, "Local SPDM table is not " + "consistent with the actual one for the spdm " + "entry %d ", spdm_entry); + goto err_exit; + } } else { - /* - * SPDM line 1 & 2 contains the src IPv6 address. - * SPDM line 3 & 4 contains the dst IPv6 address. - */ - if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) || - (spdm_line_arr[2] != src_ip->ipv6.addr[1]) || - (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) || - (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) { - - /* - * Log a warning - */ - xge_debug_device(XGE_ERR, "Local SPDM table is not " - "consistent with the actual one for the spdm " - "entry %d ", spdm_entry); - goto err_exit; - } + /* + * SPDM line 1 & 2 contains the src IPv6 address. + * SPDM line 3 & 4 contains the dst IPv6 address. 
+ */ + if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) || + (spdm_line_arr[2] != src_ip->ipv6.addr[1]) || + (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) || + (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) { + + /* + * Log a warning + */ + xge_debug_device(XGE_ERR, "Local SPDM table is not " + "consistent with the actual one for the spdm " + "entry %d ", spdm_entry); + goto err_exit; + } } /* @@ -6454,19 +6412,19 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, spdm_line_arr[7] &= ~BIT(63); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - spdm_line_arr[7], - (void *)((char *)hldev->spdm_mem_base + - (spdm_entry * 64) + (7 * 8))); + spdm_line_arr[7], + (void *)((char *)hldev->spdm_mem_base + + (spdm_entry * 64) + (7 * 8))); /* * Wait for the operation to be completed. */ if (__hal_device_register_poll(hldev, - &bar0->rxpic_int_reg, 1, - XGE_HAL_RX_PIC_INT_REG_SPDM_READY, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - xge_os_spin_unlock(&hldev->spdm_lock); - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + &bar0->rxpic_int_reg, 1, + XGE_HAL_RX_PIC_INT_REG_SPDM_READY, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + xge_os_spin_unlock(&hldev->spdm_lock); + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } /* @@ -6499,14 +6457,14 @@ static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel) if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) - channel->rti = (u8)ring_qid; + channel->rti = (u8)ring_qid; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rx_traffic_mask); + &bar0->rx_traffic_mask); val64 &= ~BIT(ring_qid); xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, val64, - &bar0->rx_traffic_mask); + hldev->regh0, val64, + &bar0->rx_traffic_mask); } /* @@ -6525,14 +6483,14 @@ static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel) if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) - channel->tti = (u8)fifo_qid; + channel->tti = (u8)fifo_qid; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->tx_traffic_mask); + &bar0->tx_traffic_mask); val64 &= ~BIT(fifo_qid); xge_os_pio_mem_write64(hldev->pdev, - hldev->regh0, val64, - &bar0->tx_traffic_mask); + hldev->regh0, val64, + &bar0->tx_traffic_mask); } /** @@ -6556,25 +6514,25 @@ xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg) channel->msi_msg = msi_msg; if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - int ring = channel->post_qid; - xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d," - " MSI: %d", channel->msi_msg, ring, msi); - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rx_mat); - val64 |= XGE_HAL_SET_RX_MAT(ring, msi); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rx_mat); - __hal_device_rti_set(ring, channel); + int ring = channel->post_qid; + xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d," + " MSI: %d", channel->msi_msg, ring, msi); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->rx_mat); + val64 |= XGE_HAL_SET_RX_MAT(ring, msi); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rx_mat); + __hal_device_rti_set(ring, channel); } else { - int fifo = channel->post_qid; - xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d," - " MSI: %d", channel->msi_msg, fifo, msi); - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->tx_mat[0]); - val64 |= XGE_HAL_SET_TX_MAT(fifo, msi); - 
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->tx_mat[0]); - __hal_device_tti_set(fifo, channel); + int fifo = channel->post_qid; + xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d," + " MSI: %d", channel->msi_msg, fifo, msi); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->tx_mat[0]); + val64 |= XGE_HAL_SET_TX_MAT(fifo, msi); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->tx_mat[0]); + __hal_device_tti_set(fifo, channel); } return XGE_HAL_OK; @@ -6654,9 +6612,9 @@ xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id) * address/value cobination to the specified msi number. */ static void __hal_set_msix_vals (xge_hal_device_h devh, - u32 *msix_value, - u64 *msix_addr, - int msix_idx) + u32 *msix_value, + u64 *msix_addr, + int msix_idx) { int cnt = 0; @@ -6666,21 +6624,21 @@ static void __hal_set_msix_vals (xge_hal_device_h devh, val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE; __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), &bar0->xmsi_access); + (u32)(val64 >> 32), &bar0->xmsi_access); __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, - (u32)(val64), &bar0->xmsi_access); + (u32)(val64), &bar0->xmsi_access); do { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_access); - if (val64 & XGE_HAL_XMSI_STROBE) - break; - cnt++; - xge_os_mdelay(20); + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->xmsi_access); + if (val64 & XGE_HAL_XMSI_STROBE) + break; + cnt++; + xge_os_mdelay(20); } while(cnt < 5); *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_data)); + &bar0->xmsi_data)); *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_address); + &bar0->xmsi_address); } /** @@ -6702,31 +6660,31 @@ xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx) u64 val64; if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { - /* Currently Ring and RTI is one on one. */ - int ring = channel->post_qid; - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rx_mat); - val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rx_mat); - __hal_device_rti_set(ring, channel); - hldev->config.fifo.queue[channel->post_qid].intr_vector = - msix_idx; + /* Currently Ring and RTI is one on one. 
*/ + int ring = channel->post_qid; + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->rx_mat); + val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rx_mat); + __hal_device_rti_set(ring, channel); + hldev->config.fifo.queue[channel->post_qid].intr_vector = + msix_idx; } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - int fifo = channel->post_qid; - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->tx_mat[0]); - val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->tx_mat[0]); - __hal_device_tti_set(fifo, channel); - hldev->config.ring.queue[channel->post_qid].intr_vector = - msix_idx; + int fifo = channel->post_qid; + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->tx_mat[0]); + val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->tx_mat[0]); + __hal_device_tti_set(fifo, channel); + hldev->config.ring.queue[channel->post_qid].intr_vector = + msix_idx; } channel->msix_idx = msix_idx; __hal_set_msix_vals(hldev, &channel->msix_data, - &channel->msix_address, - channel->msix_idx); + &channel->msix_address, + channel->msix_idx); return XGE_HAL_OK; } @@ -6759,18 +6717,18 @@ xge_hal_lro_init(u32 lro_scale, int i; if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE) - hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; + hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE) - hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; + hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; for (i=0; i < XGE_HAL_MAX_RING_NUM; i++) { - xge_os_memzero(hldev->lro_desc[i].lro_pool, - sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); + xge_os_memzero(hldev->lro_desc[i].lro_pool, + sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); - hldev->lro_desc[i].lro_next_idx = 0; - hldev->lro_desc[i].lro_recent = NULL; + hldev->lro_desc[i].lro_next_idx = 0; + hldev->lro_desc[i].lro_recent = NULL; } return XGE_HAL_OK; @@ -6809,7 +6767,7 @@ void xge_hal_device_poll(xge_hal_device_h devh) { unsigned char item_buf[sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; + XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf; xge_queue_status_e qstatus; xge_hal_status_e hstatus; @@ -6818,110 +6776,110 @@ xge_hal_device_poll(xge_hal_device_h devh) xge_hal_device_t *hldev = (xge_hal_device_t*)devh; xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) + - XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); + XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); _again: if (!hldev->is_initialized || hldev->terminating || hldev->magic != XGE_HAL_MAGIC) - return; + return; if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000) { - /* - * Wait for an Hour - */ - hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++; + /* + * Wait for an Hour + */ + hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++; } else { - /* - * Logging Error messages in the excess temperature, - * Bias current, laser ouput for three cycle - */ - __hal_updt_stats_xpak(hldev); - hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0; + /* + * Logging Error messages in the excess temperature, + * Bias current, laser ouput for three cycle + */ + __hal_updt_stats_xpak(hldev); + hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0; } if (!queue_has_critical_event) queue_has_critical_event = - 
__queue_get_reset_critical(hldev->queueh); + __queue_get_reset_critical(hldev->queueh); hldev->in_poll = 1; while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) { - qstatus = xge_queue_consume(hldev->queueh, - XGE_DEFAULT_EVENT_MAX_DATA_SIZE, - item); - if (qstatus == XGE_QUEUE_IS_EMPTY) - break; - - xge_debug_queue(XGE_TRACE, - "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x" - XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type, - (u64)(ulong_t)item->context); - - if (!hldev->is_initialized || - hldev->magic != XGE_HAL_MAGIC) { - hldev->in_poll = 0; - return; - } - - switch (item->event_type) { - case XGE_HAL_EVENT_LINK_IS_UP: { - if (!queue_has_critical_event && - g_xge_hal_driver->uld_callbacks.link_up) { - g_xge_hal_driver->uld_callbacks.link_up( - hldev->upper_layer_info); - hldev->link_state = XGE_HAL_LINK_UP; - } - } break; - case XGE_HAL_EVENT_LINK_IS_DOWN: { - if (!queue_has_critical_event && - g_xge_hal_driver->uld_callbacks.link_down) { - g_xge_hal_driver->uld_callbacks.link_down( - hldev->upper_layer_info); - hldev->link_state = XGE_HAL_LINK_DOWN; - } - } break; - case XGE_HAL_EVENT_SERR: - case XGE_HAL_EVENT_ECCERR: - case XGE_HAL_EVENT_PARITYERR: - case XGE_HAL_EVENT_TARGETABORT: - case XGE_HAL_EVENT_SLOT_FREEZE: { - void *item_data = xge_queue_item_data(item); - xge_hal_event_e event_type = item->event_type; - u64 val64 = *((u64*)item_data); - - if (event_type != XGE_HAL_EVENT_SLOT_FREEZE) - if (xge_hal_device_is_slot_freeze(hldev)) - event_type = XGE_HAL_EVENT_SLOT_FREEZE; - if (g_xge_hal_driver->uld_callbacks.crit_err) { - g_xge_hal_driver->uld_callbacks.crit_err( - hldev->upper_layer_info, - event_type, - val64); - /* handle one critical event per poll cycle */ - hldev->in_poll = 0; - return; - } - } break; - default: { - xge_debug_queue(XGE_TRACE, - "got non-HAL event %d", - item->event_type); - } break; - } - - /* broadcast this event */ - if (g_xge_hal_driver->uld_callbacks.event) - g_xge_hal_driver->uld_callbacks.event(item); + qstatus = xge_queue_consume(hldev->queueh, + XGE_DEFAULT_EVENT_MAX_DATA_SIZE, + item); + if (qstatus == XGE_QUEUE_IS_EMPTY) + break; + + xge_debug_queue(XGE_TRACE, + "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x" + XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type, + (u64)(ulong_t)item->context); + + if (!hldev->is_initialized || + hldev->magic != XGE_HAL_MAGIC) { + hldev->in_poll = 0; + return; + } + + switch (item->event_type) { + case XGE_HAL_EVENT_LINK_IS_UP: { + if (!queue_has_critical_event && + g_xge_hal_driver->uld_callbacks.link_up) { + g_xge_hal_driver->uld_callbacks.link_up( + hldev->upper_layer_info); + hldev->link_state = XGE_HAL_LINK_UP; + } + } break; + case XGE_HAL_EVENT_LINK_IS_DOWN: { + if (!queue_has_critical_event && + g_xge_hal_driver->uld_callbacks.link_down) { + g_xge_hal_driver->uld_callbacks.link_down( + hldev->upper_layer_info); + hldev->link_state = XGE_HAL_LINK_DOWN; + } + } break; + case XGE_HAL_EVENT_SERR: + case XGE_HAL_EVENT_ECCERR: + case XGE_HAL_EVENT_PARITYERR: + case XGE_HAL_EVENT_TARGETABORT: + case XGE_HAL_EVENT_SLOT_FREEZE: { + void *item_data = xge_queue_item_data(item); + xge_hal_event_e event_type = item->event_type; + u64 val64 = *((u64*)item_data); + + if (event_type != XGE_HAL_EVENT_SLOT_FREEZE) + if (xge_hal_device_is_slot_freeze(hldev)) + event_type = XGE_HAL_EVENT_SLOT_FREEZE; + if (g_xge_hal_driver->uld_callbacks.crit_err) { + g_xge_hal_driver->uld_callbacks.crit_err( + hldev->upper_layer_info, + event_type, + val64); + /* handle one critical 
event per poll cycle */ + hldev->in_poll = 0; + return; + } + } break; + default: { + xge_debug_queue(XGE_TRACE, + "got non-HAL event %d", + item->event_type); + } break; + } + + /* broadcast this event */ + if (g_xge_hal_driver->uld_callbacks.event) + g_xge_hal_driver->uld_callbacks.event(item); } if (g_xge_hal_driver->uld_callbacks.before_device_poll) { - if (g_xge_hal_driver->uld_callbacks.before_device_poll( - hldev) != 0) { - hldev->in_poll = 0; - return; - } + if (g_xge_hal_driver->uld_callbacks.before_device_poll( + hldev) != 0) { + hldev->in_poll = 0; + return; + } } hstatus = __hal_device_poll(hldev); @@ -6936,7 +6894,7 @@ _again: */ if (hstatus == XGE_HAL_ERR_CRITICAL) { queue_has_critical_event = 1; - goto _again; + goto _again; } hldev->in_poll = 0; @@ -6961,10 +6919,10 @@ xge_hal_rts_rth_init(xge_hal_device_t *hldev) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_ctrl); + &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); } /** @@ -6988,13 +6946,13 @@ xge_hal_rts_rth_clr(xge_hal_device_t *hldev) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_ctrl); + &bar0->rts_ctrl); val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_ctrl); + val64, &bar0->rts_ctrl); val64 = 0; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); } /** @@ -7013,21 +6971,21 @@ xge_hal_rts_rth_clr(xge_hal_device_t *hldev) */ void xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type, - u16 bucket_size) + u16 bucket_size) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; val64 = XGE_HAL_RTS_DEFAULT_Q(def_q); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_default_q); + &bar0->rts_default_q); val64 = hash_type; val64 |= XGE_HAL_RTS_RTH_EN; val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size); val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); } /** @@ -7046,10 +7004,10 @@ xge_hal_rts_rth_start(xge_hal_device_t *hldev) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); val64 |= XGE_HAL_RTS_RTH_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); } /** @@ -7067,10 +7025,10 @@ xge_hal_rts_rth_stop(xge_hal_device_t *hldev) u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); val64 &= ~XGE_HAL_RTS_RTH_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_cfg); + &bar0->rts_rth_cfg); } /** @@ -7088,32 +7046,32 @@ xge_hal_rts_rth_stop(xge_hal_device_t *hldev) xge_hal_status_e xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size) { - xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; + xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; u32 idx; for (idx = 0; idx < itable_size; idx++) { - val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | - XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]); + val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | + XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_map_mem_data); + xge_os_pio_mem_write64(hldev->pdev, 
hldev->regh0, val64, + &bar0->rts_rth_map_mem_data); - /* execute */ - val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx)); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_map_mem_ctrl); + /* execute */ + val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx)); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_rth_map_mem_ctrl); - /* poll until done */ - if (__hal_device_register_poll(hldev, - &bar0->rts_rth_map_mem_ctrl, 0, - XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, - XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { - /* upper layer may require to repeat */ - return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; - } + /* poll until done */ + if (__hal_device_register_poll(hldev, + &bar0->rts_rth_map_mem_ctrl, 0, + XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { + /* upper layer may require to repeat */ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } } return XGE_HAL_OK; @@ -7142,26 +7100,26 @@ xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key) nreg = 0; while( KeySize ) { - val64 = 0; - for ( i = 0; i < 8 ; i++) { - /* Prepare 64-bit word for 'nreg' containing 8 keys. */ - if (i) - val64 <<= 8; - val64 |= Key[entry++]; - } + val64 = 0; + for ( i = 0; i < 8 ; i++) { + /* Prepare 64-bit word for 'nreg' containing 8 keys. */ + if (i) + val64 <<= 8; + val64 |= Key[entry++]; + } - KeySize--; + KeySize--; - /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/ - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_hash_mask[nreg++]); + /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/ + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_rth_hash_mask[nreg++]); } while( nreg < 5 ) { - /* Clear the rest if key is less than 40 bytes */ - val64 = 0; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_rth_hash_mask[nreg++]); + /* Clear the rest if key is less than 40 bytes */ + val64 = 0; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_rth_hash_mask[nreg++]); } } @@ -7178,7 +7136,7 @@ xge_hal_device_is_closed(xge_hal_device_h devh) if (xge_list_is_empty(&hldev->fifo_channels) && xge_list_is_empty(&hldev->ring_channels)) - return 1; + return 1; return 0; } @@ -7194,54 +7152,116 @@ xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index) xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; + max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; if ( index >= max_addr ) - return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; + return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; /* * Calculate the section value */ section = index / 32; - xge_debug_device(XGE_TRACE, "the Section value is %d ", section); + xge_debug_device(XGE_TRACE, "the Section value is %d ", section); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_mac_cfg); + &bar0->rts_mac_cfg); switch(section) { - case 0: - val64 |= XGE_HAL_RTS_MAC_SECT0_EN; - break; - case 1: - val64 |= XGE_HAL_RTS_MAC_SECT1_EN; - break; - case 2: - val64 |= XGE_HAL_RTS_MAC_SECT2_EN; - break; - case 3: - val64 |= XGE_HAL_RTS_MAC_SECT3_EN; - break; - case 4: - val64 |= XGE_HAL_RTS_MAC_SECT4_EN; - break; - case 5: - val64 |= XGE_HAL_RTS_MAC_SECT5_EN; - break; - case 6: - val64 |= 
XGE_HAL_RTS_MAC_SECT6_EN; - break; - case 7: - val64 |= XGE_HAL_RTS_MAC_SECT7_EN; - break; - default: - xge_debug_device(XGE_ERR, "Invalid Section value %d " - , section); - } + case 0: + val64 |= XGE_HAL_RTS_MAC_SECT0_EN; + break; + case 1: + val64 |= XGE_HAL_RTS_MAC_SECT1_EN; + break; + case 2: + val64 |= XGE_HAL_RTS_MAC_SECT2_EN; + break; + case 3: + val64 |= XGE_HAL_RTS_MAC_SECT3_EN; + break; + case 4: + val64 |= XGE_HAL_RTS_MAC_SECT4_EN; + break; + case 5: + val64 |= XGE_HAL_RTS_MAC_SECT5_EN; + break; + case 6: + val64 |= XGE_HAL_RTS_MAC_SECT6_EN; + break; + case 7: + val64 |= XGE_HAL_RTS_MAC_SECT7_EN; + break; + default: + xge_debug_device(XGE_ERR, "Invalid Section value %d " + , section); + } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rts_mac_cfg); + val64, &bar0->rts_mac_cfg); return XGE_HAL_OK; } +/** + * xge_hal_fix_rldram_ecc_error + * @hldev: private member of the device structure. + * + * SXE-02-010. This function will turn OFF the ECC error reporting for the + * interface bet'n external Micron RLDRAM II device and memory controller. + * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U + * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the + * qualification of Herc. + */ +xge_hal_status_e +xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev) +{ + xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; + u64 val64; + + // Enter Test Mode. + val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + // Enable fg/bg tests. + val64 = 0x0100000000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_driver); + + // Enable RLDRAM configuration. + val64 = 0x0000000000017B00ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_mrs); + + // Enable RLDRAM queues. + val64 = 0x0000000001017B00ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_mrs); + + // Setup test ranges + val64 = 0x00000000001E0100ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_add); + + val64 = 0x00000100001F0100ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_add_bkg); + // Start Reads. 
+ val64 = 0x0001000000010000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1, + XGE_HAL_MC_RLDRAM_TEST_DONE, + XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){ + return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; + } + + // Exit test mode + val64 = 0x0000000000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + return XGE_HAL_OK; +} diff --git a/sys/dev/nxge/xgehal/xgehal-driver.c b/sys/dev/nxge/xgehal/xgehal-driver.c index c8d1989..430931c 100644 --- a/sys/dev/nxge/xgehal/xgehal-driver.c +++ b/sys/dev/nxge/xgehal/xgehal-driver.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-driver.c - * - * Description: HAL driver object functionality - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xgehal-driver.h> #include <dev/nxge/include/xgehal-device.h> @@ -70,22 +62,22 @@ xge_hal_driver_tracebuf_dump(void) int off = 0; if (g_xge_os_tracebuf == NULL) { - return; + return; } xge_os_printf("################ Trace dump Begin ###############"); if (g_xge_os_tracebuf->wrapped_once) { - for (i = 0; i < g_xge_os_tracebuf->size - - g_xge_os_tracebuf->offset; i += off) { - if (*(dmesg_start + i)) - xge_os_printf(dmesg_start + i); - off = xge_os_strlen(dmesg_start + i) + 1; - } + for (i = 0; i < g_xge_os_tracebuf->size - + g_xge_os_tracebuf->offset; i += off) { + if (*(dmesg_start + i)) + xge_os_printf(dmesg_start + i); + off = xge_os_strlen(dmesg_start + i) + 1; + } } for (i = 0; i < g_xge_os_tracebuf->offset; i += off) { - if (*(dmesg + i)) - xge_os_printf(dmesg + i); - off = xge_os_strlen(dmesg + i) + 1; + if (*(dmesg + i)) + xge_os_printf(dmesg + i); + off = xge_os_strlen(dmesg + i) + 1; } xge_os_printf("################ Trace dump End ###############"); } @@ -100,29 +92,29 @@ xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize) *retbuf = 0; if (g_xge_os_tracebuf == NULL) { - return XGE_HAL_FAIL; + return XGE_HAL_FAIL; } if (g_xge_os_tracebuf->wrapped_once) { - for (i = 0; i < g_xge_os_tracebuf->size - - g_xge_os_tracebuf->offset; i += off) { - if (*(dmesg_start + i)) { - xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg_start + i); - retbuf_off += xge_os_strlen(dmesg_start + i) + 1; - if (retbuf_off > bufsize) - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - off = xge_os_strlen(dmesg_start + i) + 1; - } + for (i = 0; i < g_xge_os_tracebuf->size - + g_xge_os_tracebuf->offset; i += off) { + if (*(dmesg_start + i)) { + xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg_start + i); + retbuf_off += xge_os_strlen(dmesg_start + i) + 1; + if (retbuf_off > bufsize) + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + off = xge_os_strlen(dmesg_start + i) + 1; + } } for (i = 0; i < g_xge_os_tracebuf->offset; i += off) { - if (*(dmesg + i)) { - xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg + i); - retbuf_off += xge_os_strlen(dmesg + i) + 1; - if (retbuf_off > bufsize) - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - off = xge_os_strlen(dmesg + i) + 1; + if (*(dmesg + i)) { + xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg + i); + retbuf_off += xge_os_strlen(dmesg + i) + 1; + if (retbuf_off > bufsize) + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + off = xge_os_strlen(dmesg + i) + 1; } *retsize = retbuf_off; @@ -138,37 +130,37 @@ void xge_hal_driver_bar0_offset_check(void) { xge_assert(xge_offsetof(xge_hal_pci_bar0_t, adapter_status) == - 0x108); + 0x108); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_traffic_int) == - 0x08E0); + 
0x08E0); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, dtx_control) == - 0x09E8); + 0x09E8); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_fifo_partition_0) == - 0x1108); + 0x1108); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_enable) == - 0x1170); + 0x1170); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, prc_rxd0_n[0]) == - 0x1930); + 0x1930); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rti_command_mem) == - 0x19B8); + 0x19B8); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_cfg) == - 0x2100); + 0x2100); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rmac_addr_cmd_mem) == - 0x2128); + 0x2128); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_link_util) == - 0x2170); + 0x2170); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_pause_thresh_q0q3) == - 0x2918); + 0x2918); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_err_reg) == - 0x1040); + 0x1040); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rxdma_int_status) == - 0x1800); + 0x1800); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_tmac_err_reg) == - 0x2010); + 0x2010); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_err_reg) == - 0x2810); + 0x2810); xge_assert(xge_offsetof(xge_hal_pci_bar0_t, xgxs_int_status) == - 0x3000); + 0x3000); } #endif @@ -188,7 +180,7 @@ xge_hal_driver_bar0_offset_check(void) */ xge_hal_status_e xge_hal_driver_initialize(xge_hal_driver_config_t *config, - xge_hal_uld_cbs_t *uld_callbacks) + xge_hal_uld_cbs_t *uld_callbacks) { xge_hal_status_e status; @@ -203,29 +195,29 @@ xge_hal_driver_initialize(xge_hal_driver_config_t *config, #ifdef XGE_TRACE_INTO_CIRCULAR_ARR if (config->tracebuf_size == 0) - /* - * Trace buffer implementation is not lock protected. - * The only harm to expect is memcpy() to go beyond of - * allowed boundaries. To make it safe (driver-wise), - * we pre-allocate needed number of extra bytes. - */ - config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR + - XGE_OS_TRACE_MSGBUF_MAX; + /* + * Trace buffer implementation is not lock protected. + * The only harm to expect is memcpy() to go beyond of + * allowed boundaries. To make it safe (driver-wise), + * we pre-allocate needed number of extra bytes. 
+ */ + config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR + + XGE_OS_TRACE_MSGBUF_MAX; #endif status = __hal_driver_config_check(config); if (status != XGE_HAL_OK) - return status; + return status; xge_os_memzero(g_xge_hal_driver, sizeof(xge_hal_driver_t)); /* apply config */ xge_os_memcpy(&g_xge_hal_driver->config, config, - sizeof(xge_hal_driver_config_t)); + sizeof(xge_hal_driver_config_t)); /* apply ULD callbacks */ xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks, - sizeof(xge_hal_uld_cbs_t)); + sizeof(xge_hal_uld_cbs_t)); g_xge_hal_driver->is_initialized = 1; @@ -233,17 +225,17 @@ xge_hal_driver_initialize(xge_hal_driver_config_t *config, g_tracebuf.size = config->tracebuf_size; g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size); if (g_tracebuf.data == NULL) { - xge_os_printf("cannot allocate trace buffer!"); - return XGE_HAL_ERR_OUT_OF_MEMORY; + xge_os_printf("cannot allocate trace buffer!"); + return XGE_HAL_ERR_OUT_OF_MEMORY; } /* timestamps disabled by default */ g_tracebuf.timestamp = config->tracebuf_timestamp_en; if (g_tracebuf.timestamp) { - xge_os_timestamp(g_tracebuf.msg); - g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX - - xge_os_strlen(g_tracebuf.msg); + xge_os_timestamp(g_tracebuf.msg); + g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX - + xge_os_strlen(g_tracebuf.msg); } else - g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX; + g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX; g_tracebuf.offset = 0; *g_tracebuf.msg = 0; xge_os_memzero(g_tracebuf.data, g_tracebuf.size); @@ -268,7 +260,7 @@ xge_hal_driver_terminate(void) #ifdef XGE_TRACE_INTO_CIRCULAR_ARR if (g_tracebuf.size) { - xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size); + xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size); } #endif @@ -276,25 +268,25 @@ xge_hal_driver_terminate(void) #ifdef XGE_OS_MEMORY_CHECK { - int i, leaks=0; - xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt); - for (i=0; i<g_malloc_cnt; i++) { - if (g_malloc_arr[i].ptr != NULL) { - xge_os_printf("OSPAL: memory leak detected at " - "%s:%d:"XGE_OS_LLXFMT":%d", - g_malloc_arr[i].file, - g_malloc_arr[i].line, - (unsigned long long)(ulong_t) - g_malloc_arr[i].ptr, - g_malloc_arr[i].size); - leaks++; - } - } - if (leaks) { - xge_os_printf("OSPAL: %d memory leaks detected", leaks); - } else { - xge_os_printf("OSPAL: no memory leaks detected"); - } + int i, leaks=0; + xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt); + for (i=0; i<g_malloc_cnt; i++) { + if (g_malloc_arr[i].ptr != NULL) { + xge_os_printf("OSPAL: memory leak detected at " + "%s:%d:"XGE_OS_LLXFMT":%d", + g_malloc_arr[i].file, + g_malloc_arr[i].line, + (unsigned long long)(ulong_t) + g_malloc_arr[i].ptr, + g_malloc_arr[i].size); + leaks++; + } + } + if (leaks) { + xge_os_printf("OSPAL: %d memory leaks detected", leaks); + } else { + xge_os_printf("OSPAL: no memory leaks detected"); + } } #endif } diff --git a/sys/dev/nxge/xgehal/xgehal-fifo-fp.c b/sys/dev/nxge/xgehal/xgehal-fifo-fp.c index 4f59674..efc4d37 100644 --- a/sys/dev/nxge/xgehal/xgehal-fifo-fp.c +++ b/sys/dev/nxge/xgehal/xgehal-fifo-fp.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-fifo-fp.c - * - * Description: Tx fifo object functionality (fast path) - * - * Created: 10 June 2004 - */ - #ifdef XGE_DEBUG_FP #include <dev/nxge/include/xgehal-fifo.h> #endif @@ -46,7 +38,7 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh) xge_assert(txdp); txdl_priv = (xge_hal_fifo_txdl_priv_t *) - (ulong_t)txdp->host_control; + (ulong_t)txdp->host_control; xge_assert(txdl_priv); 
xge_assert(txdl_priv->dma_object); @@ -59,19 +51,19 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh) __HAL_STATIC_FIFO __HAL_INLINE_FIFO void __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - u64 ctrl_1) + u64 ctrl_1) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_hal_fifo_hw_pair_t *hw_pair = fifo->hw_pair; xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; xge_hal_fifo_txdl_priv_t *txdl_priv; - u64 ctrl; + u64 ctrl; txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA; #ifdef XGE_DEBUG_ASSERT - /* make sure Xena overwrites the (illegal) t_code value on completion */ - XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5); + /* make sure Xena overwrites the (illegal) t_code value on completion */ + XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5); #endif txdl_priv = __hal_fifo_txdl_priv(dtrh); @@ -80,14 +72,14 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, /* sync the TxDL to device */ xge_os_dma_sync(fifo->channel.pdev, txdl_priv->dma_handle, - txdl_priv->dma_addr, - txdl_priv->dma_offset, - txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */, - XGE_OS_DMA_DIR_TODEVICE); + txdl_priv->dma_addr, + txdl_priv->dma_offset, + txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */, + XGE_OS_DMA_DIR_TODEVICE); #endif /* write the pointer first */ xge_os_pio_mem_write64(fifo->channel.pdev, - fifo->channel.regh1, + fifo->channel.regh1, txdl_priv->dma_addr, &hw_pair->txdl_pointer); @@ -97,7 +89,7 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, ctrl |= fifo->no_snoop_bits; if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) { - ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC; + ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC; } /* @@ -118,89 +110,89 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, __hal_channel_dtr_post(channelh, dtrh); xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1, - ctrl, &hw_pair->list_control); + ctrl, &hw_pair->list_control); xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" " - "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr, - (unsigned long long)ctrl, - (unsigned long long)(ulong_t)&hw_pair->txdl_pointer); + "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr, + (unsigned long long)ctrl, + (unsigned long long)(ulong_t)&hw_pair->txdl_pointer); #ifdef XGE_HAL_FIFO_DUMP_TXD xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":" - XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT, - txdp->control_1, txdp->control_2, txdp->buffer_pointer, - txdp->host_control, txdl_priv->dma_addr); + XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT, + txdp->control_1, txdp->control_2, txdp->buffer_pointer, + txdp->host_control, txdl_priv->dma_addr); #endif fifo->channel.stats.total_posts++; fifo->channel.usage_cnt++; if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt) - fifo->channel.stats.usage_max = fifo->channel.usage_cnt; + fifo->channel.stats.usage_max = fifo->channel.usage_cnt; } __HAL_STATIC_FIFO __HAL_INLINE_FIFO void __hal_fifo_txdl_free_many(xge_hal_channel_h channelh, - xge_hal_fifo_txd_t *txdp, int list_size, int frags) + xge_hal_fifo_txd_t *txdp, int list_size, int frags) { xge_hal_fifo_txdl_priv_t *current_txdl_priv; xge_hal_fifo_txdl_priv_t *next_txdl_priv; int invalid_frags = frags % list_size; if (invalid_frags){ - xge_debug_fifo(XGE_ERR, - "freeing corrupt dtrh %p, fragments %d list size %d", - txdp, frags, list_size); - xge_assert(invalid_frags == 0); + 
xge_debug_fifo(XGE_ERR, + "freeing corrupt dtrh %p, fragments %d list size %d", + txdp, frags, list_size); + xge_assert(invalid_frags == 0); } while(txdp){ - xge_debug_fifo(XGE_TRACE, - "freeing linked dtrh %p, fragments %d list size %d", - txdp, frags, list_size); - current_txdl_priv = __hal_fifo_txdl_priv(txdp); + xge_debug_fifo(XGE_TRACE, + "freeing linked dtrh %p, fragments %d list size %d", + txdp, frags, list_size); + current_txdl_priv = __hal_fifo_txdl_priv(txdp); #if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK) - current_txdl_priv->allocated = 0; + current_txdl_priv->allocated = 0; #endif - __hal_channel_dtr_free(channelh, txdp); - next_txdl_priv = current_txdl_priv->next_txdl_priv; - xge_assert(frags); - frags -= list_size; - if (next_txdl_priv) { - current_txdl_priv->next_txdl_priv = NULL; - txdp = next_txdl_priv->first_txdp; - } - else { - xge_debug_fifo(XGE_TRACE, - "freed linked dtrh fragments %d list size %d", - frags, list_size); - break; - } + __hal_channel_dtr_free(channelh, txdp); + next_txdl_priv = current_txdl_priv->next_txdl_priv; + xge_assert(frags); + frags -= list_size; + if (next_txdl_priv) { + current_txdl_priv->next_txdl_priv = NULL; + txdp = next_txdl_priv->first_txdp; + } + else { + xge_debug_fifo(XGE_TRACE, + "freed linked dtrh fragments %d list size %d", + frags, list_size); + break; + } } xge_assert(frags == 0) } __HAL_STATIC_FIFO __HAL_INLINE_FIFO void __hal_fifo_txdl_restore_many(xge_hal_channel_h channelh, - xge_hal_fifo_txd_t *txdp, int txdl_count) + xge_hal_fifo_txd_t *txdp, int txdl_count) { xge_hal_fifo_txdl_priv_t *current_txdl_priv; xge_hal_fifo_txdl_priv_t *next_txdl_priv; int i = txdl_count; xge_assert(((xge_hal_channel_t *)channelh)->reserve_length + - txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial); + txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial); current_txdl_priv = __hal_fifo_txdl_priv(txdp); do{ - xge_assert(i); + xge_assert(i); #if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK) - current_txdl_priv->allocated = 0; + current_txdl_priv->allocated = 0; #endif - next_txdl_priv = current_txdl_priv->next_txdl_priv; - txdp = current_txdl_priv->first_txdp; - current_txdl_priv->next_txdl_priv = NULL; - __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i); - xge_debug_fifo(XGE_TRACE, - "dtrh %p restored at offset %d", txdp, i); - current_txdl_priv = next_txdl_priv; + next_txdl_priv = current_txdl_priv->next_txdl_priv; + txdp = current_txdl_priv->first_txdp; + current_txdl_priv->next_txdl_priv = NULL; + __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i); + xge_debug_fifo(XGE_TRACE, + "dtrh %p restored at offset %d", txdp, i); + current_txdl_priv = next_txdl_priv; } while(current_txdl_priv); __hal_channel_dtr_restore(channelh, NULL, txdl_count); } @@ -222,7 +214,7 @@ xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh) xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; return ((char *)(ulong_t)txdp->host_control) + - sizeof(xge_hal_fifo_txdl_priv_t); + sizeof(xge_hal_fifo_txdl_priv_t); } /** @@ -247,7 +239,7 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh) } /** * xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more - * than single txdl. + * than single txdl. * @channelh: Channel handle. * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter * with a valid handle. 
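The xge_hal_fifo_dtr_private() accessor shown a few hunks above returns the ULD-owned slice of a descriptor's host_control area, immediately past the HAL's own xge_hal_fifo_txdl_priv_t. A minimal sketch of how an upper-layer driver might use that space to remember the packet behind a TxDL; the uld_txd_priv_t layout and helper name are hypothetical (not part of this patch), and the sketch assumes the channel was opened with enough per-descriptor space for the structure.

#include <dev/nxge/include/xgehal-fifo.h>

/* Hypothetical per-descriptor bookkeeping kept by the upper-layer driver. */
typedef struct uld_txd_priv {
	void	*pkt_handle;	/* e.g. the mbuf chain backing this TxDL   */
	int	nfrags;		/* number of fragments mapped for this TxDL */
} uld_txd_priv_t;

static void
uld_txd_priv_attach(xge_hal_dtr_h dtrh, void *pkt, int nfrags)
{
	/* The HAL hands back the area past xge_hal_fifo_txdl_priv_t. */
	uld_txd_priv_t *priv =
	    (uld_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh);

	priv->pkt_handle = pkt;
	priv->nfrags     = nfrags;
}

On completion the same accessor recovers the packet handle before the descriptor is returned with xge_hal_fifo_dtr_free().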
@@ -267,7 +259,7 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh) */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, - xge_hal_dtr_h *dtrh, const int frags) + xge_hal_dtr_h *dtrh, const int frags) { xge_hal_status_e status = XGE_HAL_OK; int alloc_frags = 0, dang_frags = 0; @@ -281,7 +273,7 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, unsigned long flags=0; #endif xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d", - frags); + frags); xge_assert(frags < (fifo->txdl_per_memblock * max_frags)); #if defined(XGE_HAL_TX_MULTI_RESERVE) xge_os_spin_lock(&fifo->channel.reserve_lock); @@ -289,68 +281,68 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags); #endif while(alloc_frags < frags) { - status = __hal_channel_dtr_alloc(channelh, - (xge_hal_dtr_h *)(void*)&next_txdp); - if (status != XGE_HAL_OK){ - xge_debug_fifo(XGE_ERR, - "failed to allocate linked fragments rc %d", - status); - xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS); - if (*dtrh) { - xge_assert(alloc_frags/max_frags); - __hal_fifo_txdl_restore_many(channelh, - (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags); - } - if (dang_dtrh) { - xge_assert(dang_frags/max_frags); - __hal_fifo_txdl_restore_many(channelh, - (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags); - } - break; - } - xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p" - " for frags %d", next_txdp, frags); - next_txdl_priv = __hal_fifo_txdl_priv(next_txdp); - xge_assert(next_txdl_priv); - xge_assert(next_txdl_priv->first_txdp == next_txdp); - next_txdl_priv->dang_txdl = NULL; - next_txdl_priv->dang_frags = 0; - next_txdl_priv->next_txdl_priv = NULL; + status = __hal_channel_dtr_alloc(channelh, + (xge_hal_dtr_h *)(void*)&next_txdp); + if (status != XGE_HAL_OK){ + xge_debug_fifo(XGE_ERR, + "failed to allocate linked fragments rc %d", + status); + xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS); + if (*dtrh) { + xge_assert(alloc_frags/max_frags); + __hal_fifo_txdl_restore_many(channelh, + (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags); + } + if (dang_dtrh) { + xge_assert(dang_frags/max_frags); + __hal_fifo_txdl_restore_many(channelh, + (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags); + } + break; + } + xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p" + " for frags %d", next_txdp, frags); + next_txdl_priv = __hal_fifo_txdl_priv(next_txdp); + xge_assert(next_txdl_priv); + xge_assert(next_txdl_priv->first_txdp == next_txdp); + next_txdl_priv->dang_txdl = NULL; + next_txdl_priv->dang_frags = 0; + next_txdl_priv->next_txdl_priv = NULL; #if defined(XGE_OS_MEMORY_CHECK) - next_txdl_priv->allocated = 1; + next_txdl_priv->allocated = 1; #endif - if (!curr_txdp || !curr_txdl_priv) { - curr_txdp = next_txdp; - curr_txdl_priv = next_txdl_priv; - *dtrh = (xge_hal_dtr_h)next_txdp; - alloc_frags = max_frags; - continue; - } - if (curr_txdl_priv->memblock == - next_txdl_priv->memblock) { - xge_debug_fifo(XGE_TRACE, - "linking dtrh %p, with %p", - *dtrh, next_txdp); - xge_assert (next_txdp == - curr_txdp + max_frags); - alloc_frags += max_frags; - curr_txdl_priv->next_txdl_priv = next_txdl_priv; - } - else { - xge_assert(*dtrh); - xge_assert(dang_dtrh == NULL); - dang_dtrh = *dtrh; - dang_frags = alloc_frags; - xge_debug_fifo(XGE_TRACE, - "dangling dtrh %p, linked with dtrh %p", - *dtrh, next_txdp); - next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh; - next_txdl_priv->dang_frags = 
alloc_frags; - alloc_frags = max_frags; - *dtrh = next_txdp; - } - curr_txdp = next_txdp; - curr_txdl_priv = next_txdl_priv; + if (!curr_txdp || !curr_txdl_priv) { + curr_txdp = next_txdp; + curr_txdl_priv = next_txdl_priv; + *dtrh = (xge_hal_dtr_h)next_txdp; + alloc_frags = max_frags; + continue; + } + if (curr_txdl_priv->memblock == + next_txdl_priv->memblock) { + xge_debug_fifo(XGE_TRACE, + "linking dtrh %p, with %p", + *dtrh, next_txdp); + xge_assert (next_txdp == + curr_txdp + max_frags); + alloc_frags += max_frags; + curr_txdl_priv->next_txdl_priv = next_txdl_priv; + } + else { + xge_assert(*dtrh); + xge_assert(dang_dtrh == NULL); + dang_dtrh = *dtrh; + dang_frags = alloc_frags; + xge_debug_fifo(XGE_TRACE, + "dangling dtrh %p, linked with dtrh %p", + *dtrh, next_txdp); + next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh; + next_txdl_priv->dang_frags = alloc_frags; + alloc_frags = max_frags; + *dtrh = next_txdp; + } + curr_txdp = next_txdp; + curr_txdl_priv = next_txdl_priv; } #if defined(XGE_HAL_TX_MULTI_RESERVE) @@ -360,30 +352,30 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, #endif if (status == XGE_HAL_OK) { - xge_hal_fifo_txdl_priv_t * txdl_priv; - xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh; - xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats; - txdl_priv = __hal_fifo_txdl_priv(txdp); - /* reset the TxDL's private */ - txdl_priv->align_dma_offset = 0; - txdl_priv->align_vaddr_start = txdl_priv->align_vaddr; - txdl_priv->align_used_frags = 0; - txdl_priv->frags = 0; - txdl_priv->bytes_sent = 0; - txdl_priv->alloc_frags = alloc_frags; - /* reset TxD0 */ - txdp->control_1 = txdp->control_2 = 0; + xge_hal_fifo_txdl_priv_t * txdl_priv; + xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh; + xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats; + txdl_priv = __hal_fifo_txdl_priv(txdp); + /* reset the TxDL's private */ + txdl_priv->align_dma_offset = 0; + txdl_priv->align_vaddr_start = txdl_priv->align_vaddr; + txdl_priv->align_used_frags = 0; + txdl_priv->frags = 0; + txdl_priv->bytes_sent = 0; + txdl_priv->alloc_frags = alloc_frags; + /* reset TxD0 */ + txdp->control_1 = txdp->control_2 = 0; #if defined(XGE_OS_MEMORY_CHECK) - txdl_priv->allocated = 1; + txdl_priv->allocated = 1; #endif - /* update statistics */ - statsp->total_posts_dtrs_many++; - statsp->total_posts_frags_many += txdl_priv->alloc_frags; - if (txdl_priv->dang_frags){ - statsp->total_posts_dang_dtrs++; - statsp->total_posts_dang_frags += txdl_priv->dang_frags; - } + /* update statistics */ + statsp->total_posts_dtrs_many++; + statsp->total_posts_frags_many += txdl_priv->alloc_frags; + if (txdl_priv->dang_frags){ + statsp->total_posts_dang_dtrs++; + statsp->total_posts_dang_frags += txdl_priv->dang_frags; + } } return status; @@ -436,28 +428,28 @@ xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) #endif if (status == XGE_HAL_OK) { - xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh; - xge_hal_fifo_txdl_priv_t *txdl_priv; - - txdl_priv = __hal_fifo_txdl_priv(txdp); - - /* reset the TxDL's private */ - txdl_priv->align_dma_offset = 0; - txdl_priv->align_vaddr_start = txdl_priv->align_vaddr; - txdl_priv->align_used_frags = 0; - txdl_priv->frags = 0; - txdl_priv->alloc_frags = - ((xge_hal_fifo_t *)channelh)->config->max_frags; - txdl_priv->dang_txdl = NULL; - txdl_priv->dang_frags = 0; - txdl_priv->next_txdl_priv = NULL; - txdl_priv->bytes_sent = 0; - - /* reset TxD0 */ - txdp->control_1 = txdp->control_2 = 0; + xge_hal_fifo_txd_t *txdp = 
(xge_hal_fifo_txd_t *)*dtrh; + xge_hal_fifo_txdl_priv_t *txdl_priv; + + txdl_priv = __hal_fifo_txdl_priv(txdp); + + /* reset the TxDL's private */ + txdl_priv->align_dma_offset = 0; + txdl_priv->align_vaddr_start = txdl_priv->align_vaddr; + txdl_priv->align_used_frags = 0; + txdl_priv->frags = 0; + txdl_priv->alloc_frags = + ((xge_hal_fifo_t *)channelh)->config->max_frags; + txdl_priv->dang_txdl = NULL; + txdl_priv->dang_frags = 0; + txdl_priv->next_txdl_priv = NULL; + txdl_priv->bytes_sent = 0; + + /* reset TxD0 */ + txdp->control_1 = txdp->control_2 = 0; #if defined(XGE_OS_MEMORY_CHECK) - txdl_priv->allocated = 1; + txdl_priv->allocated = 1; #endif } @@ -488,7 +480,7 @@ xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size, - xge_hal_dtr_h dtr_sp) + xge_hal_dtr_h dtr_sp) { /* FIXME: implement */ return XGE_HAL_OK; @@ -536,7 +528,7 @@ xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) #endif __hal_fifo_dtr_post_single(channelh, dtrh, - (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST)); + (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST)); #if defined(XGE_HAL_TX_MULTI_POST) xge_os_spin_unlock(fifo->post_lock_ptr); @@ -563,7 +555,7 @@ xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num, - xge_hal_dtr_h dtrs[]) + xge_hal_dtr_h dtrs[]) { int i; xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; @@ -582,7 +574,7 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num, txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]); txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] + - (txdl_priv_last->frags - 1); + (txdl_priv_last->frags - 1); txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST; #if defined(XGE_HAL_TX_MULTI_POST) @@ -593,22 +585,22 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num, #endif for (i=0; i<num; i++) { - xge_hal_fifo_txdl_priv_t *txdl_priv; - u64 val64; - xge_hal_dtr_h dtrh = dtrs[i]; - - txdl_priv = __hal_fifo_txdl_priv(dtrh); - txdl_priv = txdl_priv; /* Cheat lint */ - - val64 = 0; - if (i == 0) { - val64 |= XGE_HAL_TX_FIFO_FIRST_LIST; - } else if (i == num -1) { - val64 |= XGE_HAL_TX_FIFO_LAST_LIST; - } - - val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC; - __hal_fifo_dtr_post_single(channelh, dtrh, val64); + xge_hal_fifo_txdl_priv_t *txdl_priv; + u64 val64; + xge_hal_dtr_h dtrh = dtrs[i]; + + txdl_priv = __hal_fifo_txdl_priv(dtrh); + txdl_priv = txdl_priv; /* Cheat lint */ + + val64 = 0; + if (i == 0) { + val64 |= XGE_HAL_TX_FIFO_FIRST_LIST; + } else if (i == num -1) { + val64 |= XGE_HAL_TX_FIFO_LAST_LIST; + } + + val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC; + __hal_fifo_dtr_post_single(channelh, dtrh, val64); } #if defined(XGE_HAL_TX_MULTI_POST) @@ -658,7 +650,7 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num, */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, - xge_hal_dtr_h *dtrh, u8 *t_code) + xge_hal_dtr_h *dtrh, u8 *t_code) { xge_hal_fifo_txd_t *txdp; xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; @@ -669,7 +661,7 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, __hal_channel_dtr_try_complete(channelh, dtrh); txdp = (xge_hal_fifo_txd_t *)*dtrh; if (txdp == NULL) { - return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; + return 
XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) @@ -680,28 +672,28 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, * Note: 16bytes means Control_1 & Control_2 */ xge_os_dma_sync(fifo->channel.pdev, txdl_priv->dma_handle, - txdl_priv->dma_addr, - txdl_priv->dma_offset, - 16, - XGE_OS_DMA_DIR_FROMDEVICE); + txdl_priv->dma_addr, + txdl_priv->dma_offset, + 16, + XGE_OS_DMA_DIR_FROMDEVICE); #endif /* check whether host owns it */ if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) { - xge_assert(txdp->host_control!=0); + xge_assert(txdp->host_control!=0); - __hal_channel_dtr_complete(channelh); + __hal_channel_dtr_complete(channelh); - *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1); + *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1); - /* see XGE_HAL_SET_TXD_T_CODE() above.. */ - xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5); + /* see XGE_HAL_SET_TXD_T_CODE() above.. */ + xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5); - if (fifo->channel.usage_cnt > 0) - fifo->channel.usage_cnt--; + if (fifo->channel.usage_cnt > 0) + fifo->channel.usage_cnt--; - return XGE_HAL_OK; + return XGE_HAL_OK; } /* no more completions */ @@ -742,7 +734,7 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr) unsigned long flags = 0; #endif xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv( - (xge_hal_fifo_txd_t *)dtr); + (xge_hal_fifo_txd_t *)dtr); int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags; #if defined(XGE_HAL_TX_MULTI_FREE) xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock); @@ -752,35 +744,35 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr) #endif if (txdl_priv->alloc_frags > max_frags) { - xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *) - txdl_priv->dang_txdl; - int dang_frags = txdl_priv->dang_frags; - int alloc_frags = txdl_priv->alloc_frags; - txdl_priv->dang_txdl = NULL; - txdl_priv->dang_frags = 0; - txdl_priv->alloc_frags = 0; - /* dtrh must have a linked list of dtrh */ - xge_assert(txdl_priv->next_txdl_priv); - - /* free any dangling dtrh first */ - if (dang_txdp) { - xge_debug_fifo(XGE_TRACE, - "freeing dangled dtrh %p for %d fragments", - dang_txdp, dang_frags); - __hal_fifo_txdl_free_many(channelh, dang_txdp, - max_frags, dang_frags); - } - - /* now free the reserved dtrh list */ - xge_debug_fifo(XGE_TRACE, - "freeing dtrh %p list of %d fragments", dtr, - alloc_frags); - __hal_fifo_txdl_free_many(channelh, - (xge_hal_fifo_txd_t *)dtr, max_frags, - alloc_frags); + xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *) + txdl_priv->dang_txdl; + int dang_frags = txdl_priv->dang_frags; + int alloc_frags = txdl_priv->alloc_frags; + txdl_priv->dang_txdl = NULL; + txdl_priv->dang_frags = 0; + txdl_priv->alloc_frags = 0; + /* dtrh must have a linked list of dtrh */ + xge_assert(txdl_priv->next_txdl_priv); + + /* free any dangling dtrh first */ + if (dang_txdp) { + xge_debug_fifo(XGE_TRACE, + "freeing dangled dtrh %p for %d fragments", + dang_txdp, dang_frags); + __hal_fifo_txdl_free_many(channelh, dang_txdp, + max_frags, dang_frags); + } + + /* now free the reserved dtrh list */ + xge_debug_fifo(XGE_TRACE, + "freeing dtrh %p list of %d fragments", dtr, + alloc_frags); + __hal_fifo_txdl_free_many(channelh, + (xge_hal_fifo_txd_t *)dtr, max_frags, + alloc_frags); } else - __hal_channel_dtr_free(channelh, dtr); + __hal_channel_dtr_free(channelh, dtr); ((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent; 
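Taken together, the fifo fast-path entry points touched above form the usual transmit cycle: reserve a descriptor, attach a DMA-mapped buffer, post it, then walk the completed list and free. A minimal sketch under those assumptions; the uld_* wrappers are hypothetical, and DMA mapping, locking, and error accounting are left out.

#include <dev/nxge/include/xgehal-fifo.h>

/* Post one pre-mapped buffer as a single-fragment TxDL. */
static void
uld_xmit_one(xge_hal_channel_h channelh, dma_addr_t dma_pointer, int size)
{
	xge_hal_dtr_h dtrh;

	if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK)
		return;		/* out of descriptors; retry later */

	/* frag_idx 0: first (and only) buffer of this descriptor. */
	xge_hal_fifo_dtr_buffer_set(channelh, dtrh, 0, dma_pointer, size);
	xge_hal_fifo_dtr_post(channelh, dtrh);
}

/* Reap completed TxDLs and hand them back to the HAL. */
static void
uld_tx_complete(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;
	u8 t_code;

	while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh, &t_code)
	    == XGE_HAL_OK) {
		/* Inspect t_code for transfer errors, release the packet. */
		xge_hal_fifo_dtr_free(channelh, dtrh);
	}
}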
@@ -844,8 +836,8 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr) */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, - xge_hal_dtr_h dtrh, int frag_idx, void *vaddr, - dma_addr_t dma_pointer, int size, int misaligned_size) + xge_hal_dtr_h dtrh, int frag_idx, void *vaddr, + dma_addr_t dma_pointer, int size, int misaligned_size) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -857,7 +849,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags; if (frag_idx != 0) { - txdp->control_1 = txdp->control_2 = 0; + txdp->control_1 = txdp->control_2 = 0; } /* On some systems buffer size could be zero. @@ -866,17 +858,17 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, xge_assert(size > 0); xge_assert(frag_idx < txdl_priv->alloc_frags); xge_assert(misaligned_size != 0 && - misaligned_size <= fifo->config->alignment_size); + misaligned_size <= fifo->config->alignment_size); remaining_size = size - misaligned_size; xge_assert(remaining_size >= 0); xge_os_memcpy((char*)txdl_priv->align_vaddr_start, - vaddr, misaligned_size); + vaddr, misaligned_size); - if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) { + if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) { return XGE_HAL_ERR_OUT_ALIGNED_FRAGS; - } + } /* setup new buffer */ prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr; @@ -887,29 +879,29 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, txdl_priv->frags++; txdl_priv->align_used_frags++; txdl_priv->align_vaddr_start += fifo->config->alignment_size; - txdl_priv->align_dma_offset = 0; + txdl_priv->align_dma_offset = 0; #if defined(XGE_OS_DMA_REQUIRES_SYNC) /* sync new buffer */ xge_os_dma_sync(fifo->channel.pdev, - txdl_priv->align_dma_handle, - txdp->buffer_pointer, - 0, - misaligned_size, - XGE_OS_DMA_DIR_TODEVICE); + txdl_priv->align_dma_handle, + txdp->buffer_pointer, + 0, + misaligned_size, + XGE_OS_DMA_DIR_TODEVICE); #endif if (remaining_size) { - xge_assert(frag_idx < txdl_priv->alloc_frags); - txdp++; - txdp->buffer_pointer = (u64)dma_pointer + - misaligned_size; - txdp->control_1 = - XGE_HAL_TXD_BUFFER0_SIZE(remaining_size); - txdl_priv->bytes_sent += remaining_size; - txdp->control_2 = 0; - fifo->channel.stats.total_buffers++; - txdl_priv->frags++; + xge_assert(frag_idx < txdl_priv->alloc_frags); + txdp++; + txdp->buffer_pointer = (u64)dma_pointer + + misaligned_size; + txdp->control_1 = + XGE_HAL_TXD_BUFFER0_SIZE(remaining_size); + txdl_priv->bytes_sent += remaining_size; + txdp->control_2 = 0; + fifo->channel.stats.total_buffers++; + txdl_priv->frags++; } return XGE_HAL_OK; @@ -936,7 +928,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh, */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - void *vaddr, int size) + void *vaddr, int size) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -952,7 +944,7 @@ xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, return XGE_HAL_ERR_OUT_ALIGNED_FRAGS; xge_os_memcpy((char*)txdl_priv->align_vaddr_start + - txdl_priv->align_dma_offset, vaddr, size); + txdl_priv->align_dma_offset, vaddr, size); fifo->channel.stats.copied_frags++; @@ -977,7 +969,7 @@ xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h 
channelh, xge_hal_dtr_h dtrh, */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int frag_idx) + int frag_idx) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -990,13 +982,13 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags; if (frag_idx != 0) { - txdp->control_1 = txdp->control_2 = 0; + txdp->control_1 = txdp->control_2 = 0; } prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr; txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff; txdp->control_1 |= - XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset); + XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset); txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset; fifo->channel.stats.total_buffers++; fifo->channel.stats.copied_buffers++; @@ -1006,16 +998,16 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, #if defined(XGE_OS_DMA_REQUIRES_SYNC) /* sync pre-mapped buffer */ xge_os_dma_sync(fifo->channel.pdev, - txdl_priv->align_dma_handle, - txdp->buffer_pointer, - 0, - txdl_priv->align_dma_offset, - XGE_OS_DMA_DIR_TODEVICE); + txdl_priv->align_dma_handle, + txdp->buffer_pointer, + 0, + txdl_priv->align_dma_offset, + XGE_OS_DMA_DIR_TODEVICE); #endif /* increment vaddr_start for the next buffer_append() iteration */ txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset; - txdl_priv->align_dma_offset = 0; + txdl_priv->align_dma_offset = 0; } /** @@ -1048,7 +1040,7 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, */ __HAL_STATIC_FIFO __HAL_INLINE_FIFO void xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - int frag_idx, dma_addr_t dma_pointer, int size) + int frag_idx, dma_addr_t dma_pointer, int size) { xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -1058,7 +1050,7 @@ xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags; if (frag_idx != 0) { - txdp->control_1 = txdp->control_2 = 0; + txdp->control_1 = txdp->control_2 = 0; } /* Note: @@ -1161,13 +1153,13 @@ xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh) __hal_channel_dtr_try_complete(channelh, &dtrh); txdp = (xge_hal_fifo_txd_t *)dtrh; if (txdp == NULL) { - return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; + return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } /* check whether host owns it */ if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) { - xge_assert(txdp->host_control!=0); - return XGE_HAL_OK; + xge_assert(txdp->host_control!=0); + return XGE_HAL_OK; } /* no more completions */ diff --git a/sys/dev/nxge/xgehal/xgehal-fifo.c b/sys/dev/nxge/xgehal/xgehal-fifo.c index de6befd..303afaf 100644 --- a/sys/dev/nxge/xgehal/xgehal-fifo.c +++ b/sys/dev/nxge/xgehal/xgehal-fifo.c @@ -26,26 +26,18 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-fifo.c - * - * Description: fifo object implementation - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xgehal-fifo.h> #include <dev/nxge/include/xgehal-device.h> static xge_hal_status_e __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh, - void *memblock, - int memblock_index, - xge_hal_mempool_dma_t *dma_object, - void *item, - int index, - int is_last, - void *userdata) + void *memblock, + int memblock_index, + xge_hal_mempool_dma_t *dma_object, + void *item, 
+ int index, + int is_last, + void *userdata) { int memblock_item_idx; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -54,10 +46,10 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh, xge_assert(item); txdl_priv = (xge_hal_fifo_txdl_priv_t *) \ - __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, - memblock_index, - item, - &memblock_item_idx); + __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, + memblock_index, + item, + &memblock_item_idx); xge_assert(txdl_priv); /* pre-format HAL's TxDL's private */ @@ -85,22 +77,22 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh, xge_hal_status_e status; if (fifo->config->alignment_size) { status =__hal_fifo_dtr_align_alloc_map(fifo, txdp); - if (status != XGE_HAL_OK) { - xge_debug_mm(XGE_ERR, - "align buffer[%d] %d bytes, status %d", - index, - fifo->align_size, - status); - return status; - } + if (status != XGE_HAL_OK) { + xge_debug_mm(XGE_ERR, + "align buffer[%d] %d bytes, status %d", + index, + fifo->align_size, + status); + return status; + } } } #endif #endif if (fifo->channel.dtr_init) { - fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index, - fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL); + fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index, + fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL); } return XGE_HAL_OK; @@ -109,13 +101,13 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh, static xge_hal_status_e __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh, - void *memblock, - int memblock_index, - xge_hal_mempool_dma_t *dma_object, - void *item, - int index, - int is_last, - void *userdata) + void *memblock, + int memblock_index, + xge_hal_mempool_dma_t *dma_object, + void *item, + int index, + int is_last, + void *userdata) { int memblock_item_idx; xge_hal_fifo_txdl_priv_t *txdl_priv; @@ -126,33 +118,33 @@ __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh, xge_assert(item); txdl_priv = (xge_hal_fifo_txdl_priv_t *) \ - __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, - memblock_index, - item, - &memblock_item_idx); + __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, + memblock_index, + item, + &memblock_item_idx); xge_assert(txdl_priv); #ifdef XGE_HAL_ALIGN_XMIT if (fifo->config->alignment_size) { - if (txdl_priv->align_dma_addr != 0) { - xge_os_dma_unmap(fifo->channel.pdev, - txdl_priv->align_dma_handle, - txdl_priv->align_dma_addr, - fifo->align_size, - XGE_OS_DMA_DIR_TODEVICE); - - txdl_priv->align_dma_addr = 0; - } - - if (txdl_priv->align_vaddr != NULL) { - xge_os_dma_free(fifo->channel.pdev, - txdl_priv->align_vaddr, - fifo->align_size, - &txdl_priv->align_dma_acch, - &txdl_priv->align_dma_handle); - - txdl_priv->align_vaddr = NULL; - } + if (txdl_priv->align_dma_addr != 0) { + xge_os_dma_unmap(fifo->channel.pdev, + txdl_priv->align_dma_handle, + txdl_priv->align_dma_addr, + fifo->align_size, + XGE_OS_DMA_DIR_TODEVICE); + + txdl_priv->align_dma_addr = 0; + } + + if (txdl_priv->align_vaddr != NULL) { + xge_os_dma_free(fifo->channel.pdev, + txdl_priv->align_vaddr, + fifo->align_size, + &txdl_priv->align_dma_acch, + &txdl_priv->align_dma_handle); + + txdl_priv->align_vaddr = NULL; + } } #endif @@ -180,46 +172,46 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) #endif #if defined(XGE_HAL_TX_MULTI_POST) if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - fifo->post_lock_ptr = &hldev->xena_post_lock; + fifo->post_lock_ptr = &hldev->xena_post_lock; } else { xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev); - 
fifo->post_lock_ptr = &fifo->channel.post_lock; + fifo->post_lock_ptr = &fifo->channel.post_lock; } #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { - fifo->post_lock_ptr = &hldev->xena_post_lock; + fifo->post_lock_ptr = &hldev->xena_post_lock; } else { xge_os_spin_lock_init_irq(&fifo->channel.post_lock, - hldev->irqh); - fifo->post_lock_ptr = &fifo->channel.post_lock; + hldev->irqh); + fifo->post_lock_ptr = &fifo->channel.post_lock; } #endif fifo->align_size = - fifo->config->alignment_size * fifo->config->max_aligned_frags; + fifo->config->alignment_size * fifo->config->max_aligned_frags; /* Initializing the BAR1 address as the start of * the FIFO queue pointer and as a location of FIFO control * word. */ fifo->hw_pair = (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 + - (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET)); + (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET)); /* apply "interrupts per txdl" attribute */ fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ; if (queue->intr) { - fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST; + fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST; } fifo->no_snoop_bits = - (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits)); + (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits)); /* * FIFO memory management strategy: * * TxDL splitted into three independent parts: - * - set of TxD's - * - TxD HAL private part - * - upper layer private part + * - set of TxD's + * - TxD HAL private part + * - upper layer private part * * Adaptative memory allocation used. i.e. Memory allocated on * demand with the size which will fit into one memory block. @@ -239,18 +231,18 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) + attr->per_dtr_space; fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) / - __xge_os_cacheline_size) * - __xge_os_cacheline_size; + __xge_os_cacheline_size) * + __xge_os_cacheline_size; /* recompute txdl size to be cacheline aligned */ fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t); txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) / - __xge_os_cacheline_size) * __xge_os_cacheline_size; + __xge_os_cacheline_size) * __xge_os_cacheline_size; if (fifo->txdl_size != txdl_size) xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? 
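__hal_fifo_open(), above, rounds both the per-TxDL private area and the TxDL itself up to a whole number of cachelines before carving them out of memblocks. The helper below restates that round-up arithmetic on its own; it is an illustration, not code lifted from the driver.

#include <stddef.h>

/* Round size up to the next multiple of the cacheline size, exactly as
 * ((size + cl - 1) / cl) * cl is used in the fifo-open path. */
static size_t roundup_cacheline(size_t size, size_t cacheline)
{
	return ((size + cacheline - 1) / cacheline) * cacheline;
}

/* e.g. roundup_cacheline(136, 128) == 256, so a 136-byte TxDL
 * occupies two full cachelines in its memblock. */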
): %d, %d, %d, %d", - fifo->config->max_frags, fifo->txdl_size, txdl_size, - __xge_os_cacheline_size); + fifo->config->max_frags, fifo->txdl_size, txdl_size, + __xge_os_cacheline_size); fifo->txdl_size = txdl_size; @@ -260,62 +252,62 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) fifo->channel.dtr_init = attr->dtr_init; fifo->channel.userdata = attr->userdata; fifo->txdl_per_memblock = fifo->config->memblock_size / - fifo->txdl_size; + fifo->txdl_size; fifo->mempool = __hal_mempool_create(hldev->pdev, - fifo->config->memblock_size, - fifo->txdl_size, - fifo->priv_size, - queue->initial, - queue->max, - __hal_fifo_mempool_item_alloc, - __hal_fifo_mempool_item_free, - fifo); + fifo->config->memblock_size, + fifo->txdl_size, + fifo->priv_size, + queue->initial, + queue->max, + __hal_fifo_mempool_item_alloc, + __hal_fifo_mempool_item_free, + fifo); if (fifo->mempool == NULL) { - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; } status = __hal_channel_initialize(channelh, attr, - (void **) __hal_mempool_items_arr(fifo->mempool), - queue->initial, queue->max, - fifo->config->reserve_threshold); + (void **) __hal_mempool_items_arr(fifo->mempool), + queue->initial, queue->max, + fifo->config->reserve_threshold); if (status != XGE_HAL_OK) { - __hal_fifo_close(channelh); - return status; + __hal_fifo_close(channelh); + return status; } xge_debug_fifo(XGE_TRACE, - "DTR reserve_length:%d reserve_top:%d\n" - "max_frags:%d reserve_threshold:%d\n" - "memblock_size:%d alignment_size:%d max_aligned_frags:%d", - fifo->channel.reserve_length, fifo->channel.reserve_top, - fifo->config->max_frags, fifo->config->reserve_threshold, - fifo->config->memblock_size, fifo->config->alignment_size, - fifo->config->max_aligned_frags); + "DTR reserve_length:%d reserve_top:%d\n" + "max_frags:%d reserve_threshold:%d\n" + "memblock_size:%d alignment_size:%d max_aligned_frags:%d", + fifo->channel.reserve_length, fifo->channel.reserve_top, + fifo->config->max_frags, fifo->config->reserve_threshold, + fifo->config->memblock_size, fifo->config->alignment_size, + fifo->config->max_aligned_frags); #ifdef XGE_DEBUG_ASSERT for ( i = 0; i < fifo->channel.reserve_length; i++) { - xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d" - " handle:%p", i, fifo->channel.reserve_arr[i]); + xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d" + " handle:%p", i, fifo->channel.reserve_arr[i]); } #endif xge_assert(fifo->channel.reserve_length); /* reverse the FIFO dtr array */ - max_arr_index = fifo->channel.reserve_length - 1; - max_arr_index -=fifo->channel.reserve_top; + max_arr_index = fifo->channel.reserve_length - 1; + max_arr_index -=fifo->channel.reserve_top; xge_assert(max_arr_index); mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2; for (i = 0; i < mid_point; i++) { - dtrh = fifo->channel.reserve_arr[i]; - fifo->channel.reserve_arr[i] = - fifo->channel.reserve_arr[max_arr_index - i]; - fifo->channel.reserve_arr[max_arr_index - i] = dtrh; + dtrh = fifo->channel.reserve_arr[i]; + fifo->channel.reserve_arr[i] = + fifo->channel.reserve_arr[max_arr_index - i]; + fifo->channel.reserve_arr[max_arr_index - i] = dtrh; } #ifdef XGE_DEBUG_ASSERT for ( i = 0; i < fifo->channel.reserve_length; i++) { - xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d" - " handle:%p", i, fifo->channel.reserve_arr[i]); + xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d" + " handle:%p", i, fifo->channel.reserve_arr[i]); } #endif @@ -329,7 +321,7 @@ 
__hal_fifo_close(xge_hal_channel_h channelh) xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh; if (fifo->mempool) { - __hal_mempool_destroy(fifo->mempool); + __hal_mempool_destroy(fifo->mempool); } __hal_channel_terminate(channelh); @@ -341,10 +333,10 @@ __hal_fifo_close(xge_hal_channel_h channelh) #endif if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { #if defined(XGE_HAL_TX_MULTI_POST) - xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev); + xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev); #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) - xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock, - hldev->pdev); + xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock, + hldev->pdev); #endif } } @@ -383,59 +375,59 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh) * FIFOs are enabled! page 6-77 user guide */ if (!hldev->config.rts_qos_en) { - /* all zeroes for Round-Robin */ - for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, - tx_fifo_wrr[i]); - } - - /* reset all of them but '0' */ - for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, - tx_fifo_partitions[i]); - } + /* all zeroes for Round-Robin */ + for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, + tx_fifo_wrr[i]); + } + + /* reset all of them but '0' */ + for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, + tx_fifo_partitions[i]); + } } else { /* Change the default settings */ - for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - tx_fifo_wrr_value[i], tx_fifo_wrr[i]); - } + for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + tx_fifo_wrr_value[i], tx_fifo_wrr[i]); + } } /* configure only configured FIFOs */ val64 = 0; part0 = 0; for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { - int reg_half = i % 2; - int reg_num = i / 2; - - if (hldev->config.fifo.queue[i].configured) { - int priority = hldev->config.fifo.queue[i].priority; - val64 |= - vBIT((hldev->config.fifo.queue[i].max-1), - (((reg_half) * 32) + 19), - 13) | vBIT(priority, (((reg_half)*32) + 5), 3); - } - - /* NOTE: do write operation for each second u64 half - * or force for first one if configured number - * is even */ - if (reg_half) { - if (reg_num == 0) { - /* skip partition '0', must write it once at - * the end */ - part0 = val64; - } else { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, tx_fifo_partitions[reg_num]); - xge_debug_fifo(XGE_TRACE, - "fifo partition_%d at: " - "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, - reg_num, (unsigned long long)(ulong_t) - tx_fifo_partitions[reg_num], - (unsigned long long)val64); - } - val64 = 0; - } + int reg_half = i % 2; + int reg_num = i / 2; + + if (hldev->config.fifo.queue[i].configured) { + int priority = hldev->config.fifo.queue[i].priority; + val64 |= + vBIT((hldev->config.fifo.queue[i].max-1), + (((reg_half) * 32) + 19), + 13) | vBIT(priority, (((reg_half)*32) + 5), 3); + } + + /* NOTE: do write operation for each second u64 half + * or force for first one if configured number + * is even */ + if (reg_half) { + if (reg_num == 0) { + /* skip partition '0', must write it once at + * the end */ + part0 = val64; + } else { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, tx_fifo_partitions[reg_num]); + xge_debug_fifo(XGE_TRACE, + "fifo partition_%d at: " + 
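The __hal_fifo_hw_initialize() hunk above packs two transmit FIFOs into each 64-bit tx_fifo_partitions register: per 32-bit half, the queue length goes in via vBIT(max - 1, half*32 + 19, 13) and the priority via vBIT(priority, half*32 + 5, 3), and the register is written out once the odd half has been filled. The sketch below reproduces that packing with a local macro; the shift convention (bit 0 being the most significant bit) is an assumption carried over from the HAL's usual vBIT definition.

#include <stdint.h>

/* Assumed convention: bit 0 is the MSB of the 64-bit register. */
#define VBIT(val, loc, sz)   (((uint64_t)(val)) << (64 - (loc) - (sz)))

/* Pack one FIFO's length and priority into its half (0 or 1) of a
 * tx_fifo_partitions-style register. */
static uint64_t fifo_partition_half(int half, int fifo_len, int priority)
{
	return VBIT(fifo_len - 1, half * 32 + 19, 13) |
	       VBIT(priority,     half * 32 +  5,  3);
}

/* Two queues of 2048 descriptors with priorities 0 and 1 share one
 * register:  fifo_partition_half(0, 2048, 0) | fifo_partition_half(1, 2048, 1) */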
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, + reg_num, (unsigned long long)(ulong_t) + tx_fifo_partitions[reg_num], + (unsigned long long)val64); + } + val64 = 0; + } } part0 |= BIT(0); /* to enable the FIFO partition. */ @@ -445,10 +437,10 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh) __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32), tx_fifo_partitions[0]); xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: " - "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, - (unsigned long long)(ulong_t) - tx_fifo_partitions[0], - (unsigned long long) part0); + "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, + (unsigned long long)(ulong_t) + tx_fifo_partitions[0], + (unsigned long long) part0); /* * Initialization of Tx_PA_CONFIG register to ignore packet @@ -457,9 +449,9 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->tx_pa_cfg); val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR | - XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI | - XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL | - XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR; + XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI | + XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL | + XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->tx_pa_cfg); @@ -467,30 +459,30 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh) * Assign MSI-X vectors */ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { - xge_list_t *item; - xge_hal_channel_t *channel = NULL; - - if (!hldev->config.fifo.queue[i].configured || - !hldev->config.fifo.queue[i].intr_vector || - !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX) - continue; - - /* find channel */ - xge_list_for_each(item, &hldev->free_channels) { - xge_hal_channel_t *tmp; - tmp = xge_container_of(item, xge_hal_channel_t, - item); - if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO && - tmp->post_qid == i) { - channel = tmp; - break; - } - } - - if (channel) { - xge_hal_channel_msix_set(channel, - hldev->config.fifo.queue[i].intr_vector); - } + xge_list_t *item; + xge_hal_channel_t *channel = NULL; + + if (!hldev->config.fifo.queue[i].configured || + !hldev->config.fifo.queue[i].intr_vector || + !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX) + continue; + + /* find channel */ + xge_list_for_each(item, &hldev->free_channels) { + xge_hal_channel_t *tmp; + tmp = xge_container_of(item, xge_hal_channel_t, + item); + if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO && + tmp->post_qid == i) { + channel = tmp; + break; + } + } + + if (channel) { + xge_hal_channel_msix_set(channel, + hldev->config.fifo.queue[i].intr_vector); + } } xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized"); @@ -500,23 +492,23 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh) void __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - xge_hal_fifo_txdl_priv_t *txdl_priv; + xge_hal_fifo_txdl_priv_t *txdl_priv; xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; txdl_priv = __hal_fifo_txdl_priv(txdp); if (txdl_priv->align_dma_addr != 0) { - xge_os_dma_unmap(fifo->channel.pdev, - txdl_priv->align_dma_handle, - txdl_priv->align_dma_addr, - fifo->align_size, - XGE_OS_DMA_DIR_TODEVICE); + xge_os_dma_unmap(fifo->channel.pdev, + txdl_priv->align_dma_handle, + txdl_priv->align_dma_addr, + fifo->align_size, + XGE_OS_DMA_DIR_TODEVICE); - txdl_priv->align_dma_addr = 0; + txdl_priv->align_dma_addr = 0; } - if (txdl_priv->align_vaddr != NULL) { + if (txdl_priv->align_vaddr != NULL) { xge_os_dma_free(fifo->channel.pdev, txdl_priv->align_vaddr, 
fifo->align_size, @@ -525,13 +517,13 @@ __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) txdl_priv->align_vaddr = NULL; - } + } } xge_hal_status_e __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - xge_hal_fifo_txdl_priv_t *txdl_priv; + xge_hal_fifo_txdl_priv_t *txdl_priv; xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; @@ -540,25 +532,25 @@ __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) txdl_priv = __hal_fifo_txdl_priv(txdp); /* allocate alignment DMA-buffer */ - txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev, - fifo->align_size, - XGE_OS_DMA_CACHELINE_ALIGNED | - XGE_OS_DMA_STREAMING, - &txdl_priv->align_dma_handle, - &txdl_priv->align_dma_acch); + txdl_priv->align_vaddr = (char *)xge_os_dma_malloc(fifo->channel.pdev, + fifo->align_size, + XGE_OS_DMA_CACHELINE_ALIGNED | + XGE_OS_DMA_STREAMING, + &txdl_priv->align_dma_handle, + &txdl_priv->align_dma_acch); if (txdl_priv->align_vaddr == NULL) { - return XGE_HAL_ERR_OUT_OF_MEMORY; + return XGE_HAL_ERR_OUT_OF_MEMORY; } /* map it */ txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev, - txdl_priv->align_dma_handle, txdl_priv->align_vaddr, - fifo->align_size, - XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING); + txdl_priv->align_dma_handle, txdl_priv->align_vaddr, + fifo->align_size, + XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING); if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) { - __hal_fifo_dtr_align_free_unmap(channelh, dtrh); - return XGE_HAL_ERR_OUT_OF_MAPPING; + __hal_fifo_dtr_align_free_unmap(channelh, dtrh); + return XGE_HAL_ERR_OUT_OF_MAPPING; } return XGE_HAL_OK; diff --git a/sys/dev/nxge/xgehal/xgehal-mgmt.c b/sys/dev/nxge/xgehal/xgehal-mgmt.c index 3e30e25..7f8346b 100644 --- a/sys/dev/nxge/xgehal/xgehal-mgmt.c +++ b/sys/dev/nxge/xgehal/xgehal-mgmt.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-mgmt.c - * - * Description: Xframe-family management facility implementation - * - * Created: 1 September 2004 - */ - #include <dev/nxge/include/xgehal-mgmt.h> #include <dev/nxge/include/xgehal-driver.h> #include <dev/nxge/include/xgehal-device.h> @@ -57,37 +49,37 @@ */ xge_hal_status_e xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info, - int size) + int size) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_mgmt_about_info_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, vendor_id), - &about_info->vendor); + xge_offsetof(xge_hal_pci_config_le_t, vendor_id), + &about_info->vendor); xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, device_id), - &about_info->device); + xge_offsetof(xge_hal_pci_config_le_t, device_id), + &about_info->device); xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), - &about_info->subsys_vendor); + xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), + &about_info->subsys_vendor); xge_os_pci_read16(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), - &about_info->subsys_device); + xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), + &about_info->subsys_device); 
xge_os_pci_read8(hldev->pdev, hldev->cfgh, - xge_offsetof(xge_hal_pci_config_le_t, revision), - &about_info->board_rev); + xge_offsetof(xge_hal_pci_config_le_t, revision), + &about_info->board_rev); xge_os_strcpy(about_info->vendor_name, XGE_DRIVER_VENDOR); xge_os_strcpy(about_info->chip_name, XGE_CHIP_FAMILY); @@ -104,7 +96,7 @@ xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info, xge_os_strcpy(about_info->ll_build, XGELL_VERSION_BUILD); about_info->transponder_temperature = - xge_hal_read_xfp_current_temp(devh); + xge_hal_read_xfp_current_temp(devh); return XGE_HAL_OK; } @@ -127,38 +119,38 @@ xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info, */ xge_hal_status_e xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset, - u64 *value) + u64 *value) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (bar_id == 0) { - if (offset > sizeof(xge_hal_pci_bar0_t)-8) { - return XGE_HAL_ERR_INVALID_OFFSET; - } - *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - (void *)(hldev->bar0 + offset)); + if (offset > sizeof(xge_hal_pci_bar0_t)-8) { + return XGE_HAL_ERR_INVALID_OFFSET; + } + *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + (void *)(hldev->bar0 + offset)); } else if (bar_id == 1 && - (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA || - xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) { - int i; - for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) { - if (offset == i*0x2000 || offset == i*0x2000+0x18) { - break; - } - } - if (i == XGE_HAL_MAX_FIFO_NUM_HERC) { - return XGE_HAL_ERR_INVALID_OFFSET; - } - *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh1, - (void *)(hldev->bar1 + offset)); + (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA || + xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) { + int i; + for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) { + if (offset == i*0x2000 || offset == i*0x2000+0x18) { + break; + } + } + if (i == XGE_HAL_MAX_FIFO_NUM_HERC) { + return XGE_HAL_ERR_INVALID_OFFSET; + } + *value = xge_os_pio_mem_read64(hldev->pdev, hldev->regh1, + (void *)(hldev->bar1 + offset)); } else if (bar_id == 1) { - /* FIXME: check TITAN BAR1 offsets */ + /* FIXME: check TITAN BAR1 offsets */ } else { - return XGE_HAL_ERR_INVALID_BAR_ID; + return XGE_HAL_ERR_INVALID_BAR_ID; } return XGE_HAL_OK; @@ -183,38 +175,38 @@ xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset, */ xge_hal_status_e xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset, - u64 value) + u64 value) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (bar_id == 0) { - if (offset > sizeof(xge_hal_pci_bar0_t)-8) { - return XGE_HAL_ERR_INVALID_OFFSET; - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, value, - (void *)(hldev->bar0 + offset)); + if (offset > sizeof(xge_hal_pci_bar0_t)-8) { + return XGE_HAL_ERR_INVALID_OFFSET; + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, value, + (void *)(hldev->bar0 + offset)); } else if (bar_id == 1 && - (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA || - xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) { - int i; - for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) { - if (offset == i*0x2000 || offset == i*0x2000+0x18) { - break; - } - } - 
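xge_hal_mgmt_reg_read() above (and xge_hal_mgmt_reg_write() just after it) accepts a BAR1 offset only if it lands on one of the two per-FIFO doorbell registers, i.e. i*0x2000 or i*0x2000 + 0x18 for some FIFO index below XGE_HAL_MAX_FIFO_NUM_HERC. A stand-alone restatement of that check, with the FIFO count passed in rather than taken from the header, is:

#include <stdbool.h>

/* Valid BAR1 offsets address one of the two doorbell words that sit
 * 0x18 bytes apart at the base of each FIFO's 0x2000-byte window. */
static bool bar1_offset_is_valid(unsigned int offset, unsigned int max_fifos)
{
	unsigned int i;

	for (i = 0; i < max_fifos; i++) {
		if (offset == i * 0x2000 || offset == i * 0x2000 + 0x18)
			return true;
	}
	return false;
}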
if (i == XGE_HAL_MAX_FIFO_NUM_HERC) { - return XGE_HAL_ERR_INVALID_OFFSET; - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh1, value, - (void *)(hldev->bar1 + offset)); + (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA || + xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)) { + int i; + for (i=0; i<XGE_HAL_MAX_FIFO_NUM_HERC; i++) { + if (offset == i*0x2000 || offset == i*0x2000+0x18) { + break; + } + } + if (i == XGE_HAL_MAX_FIFO_NUM_HERC) { + return XGE_HAL_ERR_INVALID_OFFSET; + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh1, value, + (void *)(hldev->bar1 + offset)); } else if (bar_id == 1) { - /* FIXME: check TITAN BAR1 offsets */ + /* FIXME: check TITAN BAR1 offsets */ } else { - return XGE_HAL_ERR_INVALID_BAR_ID; + return XGE_HAL_ERR_INVALID_BAR_ID; } return XGE_HAL_OK; @@ -237,24 +229,24 @@ xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset, */ xge_hal_status_e xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats, - int size) + int size) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; - xge_hal_stats_hw_info_t *hw_info; + xge_hal_stats_hw_info_t *hw_info; xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN); if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_stats_hw_info_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } if ((status = xge_hal_stats_hw (devh, &hw_info)) != XGE_HAL_OK) { - return status; + return status; } xge_os_memcpy(hw_stats, hw_info, sizeof(xge_hal_stats_hw_info_t)); @@ -280,21 +272,21 @@ xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; - xge_hal_stats_hw_info_t *hw_info; + xge_hal_stats_hw_info_t *hw_info; xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN); if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (off > sizeof(xge_hal_stats_hw_info_t)-4 || size > 8) { - return XGE_HAL_ERR_INVALID_OFFSET; + return XGE_HAL_ERR_INVALID_OFFSET; } if ((status = xge_hal_stats_hw (devh, &hw_info)) != XGE_HAL_OK) { - return status; + return status; } xge_os_memcpy(out, (char*)hw_info + off, size); @@ -319,28 +311,28 @@ xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out) */ xge_hal_status_e xge_hal_mgmt_pcim_stats(xge_hal_device_h devh, - xge_hal_mgmt_pcim_stats_t *pcim_stats, int size) + xge_hal_mgmt_pcim_stats_t *pcim_stats, int size) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; - xge_hal_stats_pcim_info_t *pcim_info; + xge_hal_stats_pcim_info_t *pcim_info; xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN); if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_stats_pcim_info_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } if ((status = xge_hal_stats_pcim (devh, &pcim_info)) != XGE_HAL_OK) { - return status; + return status; } xge_os_memcpy(pcim_stats, pcim_info, - sizeof(xge_hal_stats_pcim_info_t)); + sizeof(xge_hal_stats_pcim_info_t)); return XGE_HAL_OK; } @@ -360,25 +352,25 @@ xge_hal_mgmt_pcim_stats(xge_hal_device_h devh, */ xge_hal_status_e xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int 
off, int size, - char *out) + char *out) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; - xge_hal_stats_pcim_info_t *pcim_info; + xge_hal_stats_pcim_info_t *pcim_info; xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN); if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (off > sizeof(xge_hal_stats_pcim_info_t)-8 || size > 8) { - return XGE_HAL_ERR_INVALID_OFFSET; + return XGE_HAL_ERR_INVALID_OFFSET; } if ((status = xge_hal_stats_pcim (devh, &pcim_info)) != XGE_HAL_OK) { - return status; + return status; } xge_os_memcpy(out, (char*)pcim_info + off, size); @@ -404,21 +396,21 @@ xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size, */ xge_hal_status_e xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *sw_stats, - int size) + int size) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_stats_sw_err_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } if (!hldev->stats.is_initialized || !hldev->stats.is_enabled) { - return XGE_HAL_INF_STATS_IS_NOT_READY; + return XGE_HAL_INF_STATS_IS_NOT_READY; } /* Updating xpak stats value */ @@ -451,27 +443,27 @@ xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *sw_stats, */ xge_hal_status_e xge_hal_mgmt_device_stats(xge_hal_device_h devh, - xge_hal_mgmt_device_stats_t *device_stats, int size) + xge_hal_mgmt_device_stats_t *device_stats, int size) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; xge_hal_stats_device_info_t *device_info; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_stats_device_info_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } if ((status = xge_hal_stats_device (devh, &device_info)) != XGE_HAL_OK) { - return status; + return status; } xge_os_memcpy(device_stats, device_info, - sizeof(xge_hal_stats_device_info_t)); + sizeof(xge_hal_stats_device_info_t)); return XGE_HAL_OK; } @@ -495,7 +487,7 @@ __hal_update_ring_bump(xge_hal_device_t *hldev, int queue, void * addr; addr = (reg == 1)? 
(&bar0->ring_bump_counter2) : - (&bar0->ring_bump_counter1); + (&bar0->ring_bump_counter1); rbc = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, addr); chinfo->ring_bump_cnt = XGE_HAL_RING_BUMP_CNT(queue, rbc); } @@ -521,27 +513,27 @@ __hal_update_ring_bump(xge_hal_device_t *hldev, int queue, */ xge_hal_status_e xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh, - xge_hal_mgmt_channel_stats_t *channel_stats, int size) + xge_hal_mgmt_channel_stats_t *channel_stats, int size) { xge_hal_status_e status; xge_hal_stats_channel_info_t *channel_info; xge_hal_channel_t *channel = (xge_hal_channel_t* ) channelh; if (size != sizeof(xge_hal_stats_channel_info_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } if ((status = xge_hal_stats_channel (channelh, &channel_info)) != - XGE_HAL_OK) { - return status; + XGE_HAL_OK) { + return status; } if (xge_hal_device_check_id(channel->devh) == XGE_HAL_CARD_HERC) { - __hal_update_ring_bump( (xge_hal_device_t *) channel->devh, channel->post_qid, channel_info); + __hal_update_ring_bump( (xge_hal_device_t *) channel->devh, channel->post_qid, channel_info); } xge_os_memcpy(channel_stats, channel_info, - sizeof(xge_hal_stats_channel_info_t)); + sizeof(xge_hal_stats_channel_info_t)); return XGE_HAL_OK; } @@ -566,27 +558,27 @@ xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh, */ xge_hal_status_e xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset, - int value_bits, u32 *value) + int value_bits, u32 *value) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (offset > sizeof(xge_hal_pci_config_t)-value_bits/8) { - return XGE_HAL_ERR_INVALID_OFFSET; + return XGE_HAL_ERR_INVALID_OFFSET; } if (value_bits == 8) { - xge_os_pci_read8(hldev->pdev, hldev->cfgh, offset, (u8*)value); + xge_os_pci_read8(hldev->pdev, hldev->cfgh, offset, (u8*)value); } else if (value_bits == 16) { - xge_os_pci_read16(hldev->pdev, hldev->cfgh, offset, - (u16*)value); + xge_os_pci_read16(hldev->pdev, hldev->cfgh, offset, + (u16*)value); } else if (value_bits == 32) { - xge_os_pci_read32(hldev->pdev, hldev->cfgh, offset, value); + xge_os_pci_read32(hldev->pdev, hldev->cfgh, offset, value); } else { - return XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE; + return XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE; } return XGE_HAL_OK; @@ -610,16 +602,16 @@ xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset, */ xge_hal_status_e xge_hal_mgmt_device_config(xge_hal_device_h devh, - xge_hal_mgmt_device_config_t *dev_config, int size) + xge_hal_mgmt_device_config_t *dev_config, int size) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_mgmt_device_config_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } xge_os_memcpy(dev_config, &hldev->config, @@ -648,15 +640,15 @@ xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config, int size) { if (g_xge_hal_driver == NULL) { - return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; + return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; } if (size != sizeof(xge_hal_mgmt_driver_config_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } xge_os_memcpy(drv_config, &g_xge_hal_driver->config, - sizeof(xge_hal_mgmt_driver_config_t)); + 
sizeof(xge_hal_mgmt_driver_config_t)); return XGE_HAL_OK; } @@ -678,27 +670,27 @@ xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config, int size) */ xge_hal_status_e xge_hal_mgmt_pci_config(xge_hal_device_h devh, - xge_hal_mgmt_pci_config_t *pci_config, int size) + xge_hal_mgmt_pci_config_t *pci_config, int size) { int i; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (size != sizeof(xge_hal_mgmt_pci_config_t)) { - return XGE_HAL_ERR_VERSION_CONFLICT; + return XGE_HAL_ERR_VERSION_CONFLICT; } /* refresh PCI config space */ for (i = 0; i < 0x68/4+1; i++) { - xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, - (u32*)&hldev->pci_config_space + i); + xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, + (u32*)&hldev->pci_config_space + i); } xge_os_memcpy(pci_config, &hldev->pci_config_space, - sizeof(xge_hal_mgmt_pci_config_t)); + sizeof(xge_hal_mgmt_pci_config_t)); return XGE_HAL_OK; } @@ -719,17 +711,17 @@ xge_hal_mgmt_pci_config(xge_hal_device_h devh, * */ xge_hal_status_e -xge_hal_mgmt_trace_read (char *buffer, - unsigned buf_size, - unsigned *offset, - unsigned *read_length) +xge_hal_mgmt_trace_read (char *buffer, + unsigned buf_size, + unsigned *offset, + unsigned *read_length) { int data_offset; int start_offset; if ((g_xge_os_tracebuf == NULL) || - (g_xge_os_tracebuf->offset == g_xge_os_tracebuf->size - 2)) { - return XGE_HAL_EOF_TRACE_BUF; + (g_xge_os_tracebuf->offset == g_xge_os_tracebuf->size - 2)) { + return XGE_HAL_EOF_TRACE_BUF; } data_offset = g_xge_os_tracebuf->offset + 1; @@ -737,7 +729,7 @@ xge_hal_mgmt_trace_read (char *buffer, if (*offset >= (unsigned)xge_os_strlen(g_xge_os_tracebuf->data + data_offset)) { - return XGE_HAL_EOF_TRACE_BUF; + return XGE_HAL_EOF_TRACE_BUF; } xge_os_memzero(buffer, buf_size); @@ -747,7 +739,7 @@ xge_hal_mgmt_trace_read (char *buffer, start_offset); if (*read_length >= buf_size) { - *read_length = buf_size - 1; + *read_length = buf_size - 1; } xge_os_memcpy(buffer, g_xge_os_tracebuf->data + start_offset, @@ -783,17 +775,17 @@ xge_hal_restore_link_led(xge_hal_device_h devh) */ if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) && (xge_hal_device_rev(hldev) <= 3)) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - if (hldev->link_state == XGE_HAL_LINK_UP) { - val64 |= XGE_HAL_ADAPTER_LED_ON; - } else { - val64 &= ~XGE_HAL_ADAPTER_LED_ON; - } - - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); - return; + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + if (hldev->link_state == XGE_HAL_LINK_UP) { + val64 |= XGE_HAL_ADAPTER_LED_ON; + } else { + val64 &= ~XGE_HAL_ADAPTER_LED_ON; + } + + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->adapter_control); + return; } /* @@ -803,15 +795,15 @@ xge_hal_restore_link_led(xge_hal_device_h devh) * represents the gpio control register. In the case of Herc, LED * handling is done by beacon control register as opposed to gpio * control register in Xena. Beacon control is used only to toggle - * and the value written into it does not depend on the link state. - * It is upto the ULD to toggle the LED even number of times which - * brings the LED to it's original state. + * and the value written into it does not depend on the link state. 
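xge_hal_mgmt_trace_read(), reindented above, reads the circular trace buffer starting one slot past the current write offset, resumes where the caller's *offset left off, and clamps each chunk to buf_size - 1 after zeroing the destination so the result stays NUL-terminated. The clamp-and-copy step in isolation, with illustrative names, looks like:

#include <stddef.h>
#include <string.h>

/* Copy at most buf_size - 1 bytes of trace text and NUL-terminate,
 * returning how much the caller should advance its offset. */
static size_t trace_copy_chunk(char *buffer, size_t buf_size,
    const char *trace, size_t avail)
{
	size_t n = avail;

	if (n >= buf_size)
		n = buf_size - 1;        /* leave room for the terminator */

	memset(buffer, 0, buf_size);
	memcpy(buffer, trace, n);
	return n;
}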
+ * It is upto the ULD to toggle the LED even number of times which + * brings the LED to it's original state. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->beacon_control); - val64 |= 0x0000800000000000ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->beacon_control); + &bar0->beacon_control); + val64 |= 0x0000800000000000ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + val64, &bar0->beacon_control); } /** @@ -834,12 +826,12 @@ xge_hal_flick_link_led(xge_hal_device_h devh) */ if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) && (xge_hal_device_rev(hldev) <= 3)) { - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); - val64 ^= XGE_HAL_ADAPTER_LED_ON; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); - return; + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->adapter_control); + val64 ^= XGE_HAL_ADAPTER_LED_ON; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->adapter_control); + return; } /* @@ -850,10 +842,10 @@ xge_hal_flick_link_led(xge_hal_device_h devh) * in Xena. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->beacon_control); + &bar0->beacon_control); val64 ^= XGE_HAL_GPIO_CTRL_GPIO_0; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->beacon_control); + &bar0->beacon_control); } /** @@ -878,20 +870,20 @@ xge_hal_read_eeprom(xge_hal_device_h devh, int off, u32* data) xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; val64 = XGE_HAL_I2C_CONTROL_DEV_ID(XGE_DEV_ID) | - XGE_HAL_I2C_CONTROL_ADDR(off) | - XGE_HAL_I2C_CONTROL_BYTE_CNT(0x3) | - XGE_HAL_I2C_CONTROL_READ | XGE_HAL_I2C_CONTROL_CNTL_START; + XGE_HAL_I2C_CONTROL_ADDR(off) | + XGE_HAL_I2C_CONTROL_BYTE_CNT(0x3) | + XGE_HAL_I2C_CONTROL_READ | XGE_HAL_I2C_CONTROL_CNTL_START; __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control); while (exit_cnt < 5) { - val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control); - if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) { - *data = XGE_HAL_I2C_CONTROL_GET_DATA(val64); - ret = XGE_HAL_OK; - break; - } - exit_cnt++; + val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control); + if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) { + *data = XGE_HAL_I2C_CONTROL_GET_DATA(val64); + ret = XGE_HAL_OK; + break; + } + exit_cnt++; } return ret; @@ -922,20 +914,20 @@ xge_hal_write_eeprom(xge_hal_device_h devh, int off, u32 data, int cnt) xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; val64 = XGE_HAL_I2C_CONTROL_DEV_ID(XGE_DEV_ID) | - XGE_HAL_I2C_CONTROL_ADDR(off) | - XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) | - XGE_HAL_I2C_CONTROL_SET_DATA(data) | - XGE_HAL_I2C_CONTROL_CNTL_START; + XGE_HAL_I2C_CONTROL_ADDR(off) | + XGE_HAL_I2C_CONTROL_BYTE_CNT(cnt) | + XGE_HAL_I2C_CONTROL_SET_DATA(data) | + XGE_HAL_I2C_CONTROL_CNTL_START; __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control); while (exit_cnt < 5) { - val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control); - if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) { - if (!(val64 & XGE_HAL_I2C_CONTROL_NACK)) - ret = XGE_HAL_OK; - break; - } - exit_cnt++; + val64 = __hal_serial_mem_read64(hldev, &bar0->i2c_control); + if (XGE_HAL_I2C_CONTROL_CNTL_END(val64)) { + if (!(val64 & XGE_HAL_I2C_CONTROL_NACK)) + ret = XGE_HAL_OK; + break; + } + exit_cnt++; } return ret; @@ -962,51 +954,51 @@ xge_hal_register_test(xge_hal_device_h devh, u64 *data) int fail = 0; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->pif_rd_swapper_fb); + 
&bar0->pif_rd_swapper_fb); if (val64 != 0x123456789abcdefULL) { - fail = 1; - xge_debug_osdep(XGE_TRACE, "Read Test level 1 fails"); + fail = 1; + xge_debug_osdep(XGE_TRACE, "Read Test level 1 fails"); } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rmac_pause_cfg); + &bar0->rmac_pause_cfg); if (val64 != 0xc000ffff00000000ULL) { - fail = 1; - xge_debug_osdep(XGE_TRACE, "Read Test level 2 fails"); + fail = 1; + xge_debug_osdep(XGE_TRACE, "Read Test level 2 fails"); } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rx_queue_cfg); + &bar0->rx_queue_cfg); if (val64 != 0x0808080808080808ULL) { - fail = 1; - xge_debug_osdep(XGE_TRACE, "Read Test level 3 fails"); + fail = 1; + xge_debug_osdep(XGE_TRACE, "Read Test level 3 fails"); } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xgxs_efifo_cfg); + &bar0->xgxs_efifo_cfg); if (val64 != 0x000000001923141EULL) { - fail = 1; - xge_debug_osdep(XGE_TRACE, "Read Test level 4 fails"); + fail = 1; + xge_debug_osdep(XGE_TRACE, "Read Test level 4 fails"); } val64 = 0x5A5A5A5A5A5A5A5AULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->xmsi_data); + &bar0->xmsi_data); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_data); + &bar0->xmsi_data); if (val64 != 0x5A5A5A5A5A5A5A5AULL) { - fail = 1; - xge_debug_osdep(XGE_ERR, "Write Test level 1 fails"); + fail = 1; + xge_debug_osdep(XGE_ERR, "Write Test level 1 fails"); } val64 = 0xA5A5A5A5A5A5A5A5ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->xmsi_data); + &bar0->xmsi_data); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->xmsi_data); + &bar0->xmsi_data); if (val64 != 0xA5A5A5A5A5A5A5A5ULL) { - fail = 1; - xge_debug_osdep(XGE_ERR, "Write Test level 2 fails"); + fail = 1; + xge_debug_osdep(XGE_ERR, "Write Test level 2 fails"); } *data = fail; @@ -1034,19 +1026,19 @@ xge_hal_rldram_test(xge_hal_device_h devh, u64 *data) int cnt, iteration = 0, test_pass = 0; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_control); + &bar0->adapter_control); val64 &= ~XGE_HAL_ADAPTER_ECC_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->adapter_control); + &bar0->adapter_control); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mc_rldram_test_ctrl); + &bar0->mc_rldram_test_ctrl); val64 |= XGE_HAL_MC_RLDRAM_TEST_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_ctrl); + &bar0->mc_rldram_test_ctrl); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mc_rldram_mrs); + &bar0->mc_rldram_mrs); val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE; __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control); @@ -1054,85 +1046,85 @@ xge_hal_rldram_test(xge_hal_device_h devh, u64 *data) __hal_serial_mem_write64(hldev, val64, &bar0->i2c_control); while (iteration < 2) { - val64 = 0x55555555aaaa0000ULL; - if (iteration == 1) { - val64 ^= 0xFFFFFFFFFFFF0000ULL; - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_d0); - - val64 = 0xaaaa5a5555550000ULL; - if (iteration == 1) { - val64 ^= 0xFFFFFFFFFFFF0000ULL; - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_d1); - - val64 = 0x55aaaaaaaa5a0000ULL; - if (iteration == 1) { - val64 ^= 0xFFFFFFFFFFFF0000ULL; - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_d2); - - val64 = (u64) (0x0000003fffff0000ULL); - xge_os_pio_mem_write64(hldev->pdev, 
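xge_hal_register_test() above first reads registers whose reset values are fixed (pif_rd_swapper_fb must read back 0x0123456789abcdef, and so on) and then does write/read-back passes over xmsi_data with the complementary 0x5A... and 0xA5... patterns. A generic version of the write/read-back step is sketched here; the accessors are illustrative function pointers, not HAL calls.

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t (*reg_read_fn)(void *dev, unsigned int off);
typedef void     (*reg_write_fn)(void *dev, unsigned int off, uint64_t val);

/* Write a test pattern and compare what reads back, as in the
 * xmsi_data write-test levels.  Returns true on mismatch. */
static bool reg_write_readback_fails(void *dev, unsigned int off,
    reg_read_fn rd, reg_write_fn wr, uint64_t pattern)
{
	wr(dev, off, pattern);
	return rd(dev, off) != pattern;
}

/* Typical use:
 *   fail |= reg_write_readback_fails(dev, off, rd, wr, 0x5A5A5A5A5A5A5A5AULL);
 *   fail |= reg_write_readback_fails(dev, off, rd, wr, 0xA5A5A5A5A5A5A5A5ULL);
 */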
hldev->regh0, val64, - &bar0->mc_rldram_test_add); - - - val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_ctrl); - - val64 |= - XGE_HAL_MC_RLDRAM_TEST_MODE | XGE_HAL_MC_RLDRAM_TEST_WRITE | - XGE_HAL_MC_RLDRAM_TEST_GO; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_ctrl); - - for (cnt = 0; cnt < 5; cnt++) { - val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->mc_rldram_test_ctrl); - if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE) - break; - xge_os_mdelay(200); - } - - if (cnt == 5) - break; - - val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_ctrl); - - val64 |= XGE_HAL_MC_RLDRAM_TEST_MODE | - XGE_HAL_MC_RLDRAM_TEST_GO; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_test_ctrl); - - for (cnt = 0; cnt < 5; cnt++) { - val64 = xge_os_pio_mem_read64(hldev->pdev, - hldev->regh0, &bar0->mc_rldram_test_ctrl); - if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE) - break; - xge_os_mdelay(500); - } - - if (cnt == 5) - break; - - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->mc_rldram_test_ctrl); - if (val64 & XGE_HAL_MC_RLDRAM_TEST_PASS) - test_pass = 1; - - iteration++; + val64 = 0x55555555aaaa0000ULL; + if (iteration == 1) { + val64 ^= 0xFFFFFFFFFFFF0000ULL; + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_d0); + + val64 = 0xaaaa5a5555550000ULL; + if (iteration == 1) { + val64 ^= 0xFFFFFFFFFFFF0000ULL; + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_d1); + + val64 = 0x55aaaaaaaa5a0000ULL; + if (iteration == 1) { + val64 ^= 0xFFFFFFFFFFFF0000ULL; + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_d2); + + val64 = (u64) (0x0000003fffff0000ULL); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_add); + + + val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + val64 |= + XGE_HAL_MC_RLDRAM_TEST_MODE | XGE_HAL_MC_RLDRAM_TEST_WRITE | + XGE_HAL_MC_RLDRAM_TEST_GO; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + for (cnt = 0; cnt < 5; cnt++) { + val64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->mc_rldram_test_ctrl); + if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE) + break; + xge_os_mdelay(200); + } + + if (cnt == 5) + break; + + val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + val64 |= XGE_HAL_MC_RLDRAM_TEST_MODE | + XGE_HAL_MC_RLDRAM_TEST_GO; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_test_ctrl); + + for (cnt = 0; cnt < 5; cnt++) { + val64 = xge_os_pio_mem_read64(hldev->pdev, + hldev->regh0, &bar0->mc_rldram_test_ctrl); + if (val64 & XGE_HAL_MC_RLDRAM_TEST_DONE) + break; + xge_os_mdelay(500); + } + + if (cnt == 5) + break; + + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->mc_rldram_test_ctrl); + if (val64 & XGE_HAL_MC_RLDRAM_TEST_PASS) + test_pass = 1; + + iteration++; } if (!test_pass) - *data = 1; + *data = 1; else - *data = 0; + *data = 0; return XGE_HAL_OK; } @@ -1161,30 +1153,30 @@ xge_hal_pma_loopback( xge_hal_device_h devh, int enable ) #if 0 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); - if ( enable ) - { - val64 |= ( 
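The RLDRAM test above runs a write pass and then a read pass and, after each, polls mc_rldram_test_ctrl up to five times with a few hundred milliseconds between attempts, bailing out if TEST_DONE never appears; the whole sequence is repeated once more with the data patterns inverted. That bounded-poll idiom, restated in isolation with placeholder callbacks:

#include <stdbool.h>

/* Poll a hardware "done" condition a fixed number of times with a
 * delay between attempts; give up if it never sets, mirroring the
 * cnt == 5 bail-out in the RLDRAM test loop. */
static bool poll_until_done(bool (*done)(void *ctx), void *ctx,
    void (*delay_ms)(unsigned int ms), unsigned int ms, int attempts)
{
	int i;

	for (i = 0; i < attempts; i++) {
		if (done(ctx))
			return true;
		delay_ms(ms);
	}
	return false;
}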
XGE_HAL_MAC_CFG_TMAC_LOOPBACK | XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); - } + if ( enable ) + { + val64 |= ( XGE_HAL_MAC_CFG_TMAC_LOOPBACK | XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); + } __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, - (u32)(val64 >> 32), (char*)&bar0->mac_cfg); + (u32)(val64 >> 32), (char*)&bar0->mac_cfg); xge_os_mdelay(1); #endif val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); @@ -1197,42 +1189,42 @@ xge_hal_pma_loopback( xge_hal_device_h devh, int enable ) #define _HAL_LOOPBK_PMA 1 if( enable ) - data |= 1; + data |= 1; else - data &= 0xfe; + data &= 0xfe; val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DATA(data) | - XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_DATA(data) | + XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); - return XGE_HAL_OK; + return XGE_HAL_OK; } u16 @@ -1246,27 +1238,27 @@ 
xge_hal_mdio_read( xge_hal_device_h devh, u32 mmd_type, u64 addr ) /* address transaction */ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); do { - val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); - if (i++ > 10) - { - break; - } + val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); + if (i++ > 10) + { + break; + } }while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) != XGE_HAL_MDIO_CONTROL_MMD_CTRL(1)); /* Data transaction */ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); @@ -1276,11 +1268,11 @@ xge_hal_mdio_read( xge_hal_device_h devh, u32 mmd_type, u64 addr ) do { - val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); - if (i++ > 10) - { - break; - } + val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); + if (i++ > 10) + { + break; + } }while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) != XGE_HAL_MDIO_CONTROL_MMD_CTRL(1)); rval16 = (u16)XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(val64); @@ -1298,9 +1290,9 @@ xge_hal_mdio_write( xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value ) /* address transaction */ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); @@ -1308,23 +1300,23 @@ xge_hal_mdio_write( xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value ) do { - val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); - if (i++ > 10) - { - break; - } + val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); + if (i++ > 10) + { + break; + } } while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) != - XGE_HAL_MDIO_CONTROL_MMD_CTRL(1)); + XGE_HAL_MDIO_CONTROL_MMD_CTRL(1)); /* Data transaction */ val64 = 0x0; val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) | - XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | - XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | - XGE_HAL_MDIO_CONTROL_MMD_DATA(value) | - XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE); + XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) | + XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) | + XGE_HAL_MDIO_CONTROL_MMD_DATA(value) | + XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE); __hal_serial_mem_write64(hldev, val64, &bar0->mdio_control); val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START); @@ -1334,11 +1326,11 @@ xge_hal_mdio_write( xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value ) do { - val64 = 
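xge_hal_mdio_read() above is a two-phase MDIO access: the address transaction is programmed, kicked off with the START control value and polled (capped at roughly ten reads) until the control field reports completion, and only then is the read transaction issued the same way and the 16-bit data field extracted. The outline below captures that sequencing with placeholder hooks; none of the names come from the HAL.

#include <stdint.h>
#include <stdbool.h>

/* start_and_wait() stands in for "program mdio_control, set START,
 * poll until the control field reads back as done"; read_ctrl()
 * re-reads mdio_control afterwards. */
static int mdio_read16(void *dev,
    bool (*start_and_wait)(void *dev, uint64_t ctrl_word),
    uint64_t (*read_ctrl)(void *dev),
    uint64_t addr_word, uint64_t read_word, uint16_t *out)
{
	if (!start_and_wait(dev, addr_word))    /* address transaction */
		return -1;
	if (!start_and_wait(dev, read_word))    /* data transaction */
		return -1;

	/* the low 16 bits stand in for the HAL's MMD_DATA_GET accessor */
	*out = (uint16_t)(read_ctrl(dev) & 0xffff);
	return 0;
}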
__hal_serial_mem_read64(hldev, &bar0->mdio_control); - if (i++ > 10) - { - break; - } + val64 = __hal_serial_mem_read64(hldev, &bar0->mdio_control); + if (i++ > 10) + { + break; + } }while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) != XGE_HAL_MDIO_CONTROL_MMD_CTRL(1)); return XGE_HAL_OK; @@ -1365,51 +1357,51 @@ xge_hal_eeprom_test(xge_hal_device_h devh, u64 *data) /* Test Write Error at offset 0 */ if (!xge_hal_write_eeprom(hldev, 0, 0, 3)) - fail = 1; + fail = 1; /* Test Write at offset 4f0 */ if (xge_hal_write_eeprom(hldev, 0x4F0, 0x01234567, 3)) - fail = 1; + fail = 1; if (xge_hal_read_eeprom(hldev, 0x4F0, &ret_data)) - fail = 1; + fail = 1; if (ret_data != 0x01234567) - fail = 1; + fail = 1; /* Reset the EEPROM data go FFFF */ (void) xge_hal_write_eeprom(hldev, 0x4F0, 0xFFFFFFFF, 3); /* Test Write Request Error at offset 0x7c */ if (!xge_hal_write_eeprom(hldev, 0x07C, 0, 3)) - fail = 1; + fail = 1; /* Test Write Request at offset 0x7fc */ if (xge_hal_write_eeprom(hldev, 0x7FC, 0x01234567, 3)) - fail = 1; + fail = 1; if (xge_hal_read_eeprom(hldev, 0x7FC, &ret_data)) - fail = 1; + fail = 1; if (ret_data != 0x01234567) - fail = 1; + fail = 1; /* Reset the EEPROM data go FFFF */ (void) xge_hal_write_eeprom(hldev, 0x7FC, 0xFFFFFFFF, 3); /* Test Write Error at offset 0x80 */ if (!xge_hal_write_eeprom(hldev, 0x080, 0, 3)) - fail = 1; + fail = 1; /* Test Write Error at offset 0xfc */ if (!xge_hal_write_eeprom(hldev, 0x0FC, 0, 3)) - fail = 1; + fail = 1; /* Test Write Error at offset 0x100 */ if (!xge_hal_write_eeprom(hldev, 0x100, 0, 3)) - fail = 1; + fail = 1; /* Test Write Error at offset 4ec */ if (!xge_hal_write_eeprom(hldev, 0x4EC, 0, 3)) - fail = 1; + fail = 1; *data = fail; return XGE_HAL_OK; @@ -1441,14 +1433,14 @@ xge_hal_bist_test(xge_hal_device_h devh, u64 *data) xge_os_pci_write8(hldev->pdev, hldev->cfgh, 0x0f, bist); while (cnt < 20) { - xge_os_pci_read8(hldev->pdev, hldev->cfgh, 0x0f, &bist); - if (!(bist & 0x40)) { - *data = (bist & 0x0f); - ret = XGE_HAL_OK; - break; - } - xge_os_mdelay(100); - cnt++; + xge_os_pci_read8(hldev->pdev, hldev->cfgh, 0x0f, &bist); + if (!(bist & 0x40)) { + *data = (bist & 0x0f); + ret = XGE_HAL_OK; + break; + } + xge_os_mdelay(100); + cnt++; } return ret; @@ -1473,9 +1465,9 @@ xge_hal_link_test(xge_hal_device_h devh, u64 *data) u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->adapter_status); + &bar0->adapter_status); if (val64 & XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) - *data = 1; + *data = 1; return XGE_HAL_OK; } @@ -1498,11 +1490,11 @@ void xge_hal_getpause_data(xge_hal_device_h devh, int *tx, int *rx) u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rmac_pause_cfg); + &bar0->rmac_pause_cfg); if (val64 & XGE_HAL_RMAC_PAUSE_GEN_EN) - *tx = 1; + *tx = 1; if (val64 & XGE_HAL_RMAC_PAUSE_RCV_EN) - *rx = 1; + *rx = 1; } /** @@ -1526,17 +1518,17 @@ int xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx) u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rmac_pause_cfg); + &bar0->rmac_pause_cfg); if (tx) - val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; + val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; else - val64 &= ~XGE_HAL_RMAC_PAUSE_GEN_EN; + val64 &= ~XGE_HAL_RMAC_PAUSE_GEN_EN; if (rx) - val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; + val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; else - val64 &= ~XGE_HAL_RMAC_PAUSE_RCV_EN; + val64 &= ~XGE_HAL_RMAC_PAUSE_RCV_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->rmac_pause_cfg); + val64, &bar0->rmac_pause_cfg); return 0; } @@ -1549,39 
+1541,39 @@ int xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx) */ u32 xge_hal_read_xfp_current_temp(xge_hal_device_h hldev) { - u16 val_1, val_2, i = 0; - u32 actual; + u16 val_1, val_2, i = 0; + u32 actual; - /* First update the NVRAM table of XFP. */ + /* First update the NVRAM table of XFP. */ - (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000, 0x3); + (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000, 0x3); - /* Now wait for the transfer to complete */ - do - { - xge_os_mdelay( 50 ); // wait 50 milliseonds + /* Now wait for the transfer to complete */ + do + { + xge_os_mdelay( 50 ); // wait 50 milliseonds - val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000); + val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000); - if ( i++ > 10 ) - { - // waited 500 ms which should be plenty of time. - break; - } - }while (( val_1 & 0x000C ) != 0x0004); + if ( i++ > 10 ) + { + // waited 500 ms which should be plenty of time. + break; + } + }while (( val_1 & 0x000C ) != 0x0004); - /* Now NVRAM table of XFP should be updated, so read the temp */ - val_1 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8067); - val_2 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8068); + /* Now NVRAM table of XFP should be updated, so read the temp */ + val_1 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8067); + val_2 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8068); - actual = ((val_1 << 8) | val_2); + actual = ((val_1 << 8) | val_2); - if (actual >= 32768) - actual = actual- 65536; - actual = actual/256; + if (actual >= 32768) + actual = actual- 65536; + actual = actual/256; - return actual; + return actual; } /** @@ -1603,52 +1595,52 @@ void __hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value) */ if(value == 3) { - switch(type) - { - case 1: - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_temp = 0; - - /* - * Notify the ULD on Excess Xpak temperature alarm msg - */ - if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { - g_xge_hal_driver->uld_callbacks.xpak_alarm_log( - hldev->upper_layer_info, - XGE_HAL_XPAK_ALARM_EXCESS_TEMP); - } - break; - case 2: - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_bias_current = 0; - - /* - * Notify the ULD on Excess xpak bias current alarm msg - */ - if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { - g_xge_hal_driver->uld_callbacks.xpak_alarm_log( - hldev->upper_layer_info, - XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT); - } - break; - case 3: - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_laser_output = 0; - - /* - * Notify the ULD on Excess Xpak Laser o/p power - * alarm msg - */ - if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { - g_xge_hal_driver->uld_callbacks.xpak_alarm_log( - hldev->upper_layer_info, - XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT); - } - break; - default: - xge_debug_osdep(XGE_TRACE, "Incorrect XPAK Alarm " - "type "); - } + switch(type) + { + case 1: + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_temp = 0; + + /* + * Notify the ULD on Excess Xpak temperature alarm msg + */ + if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { + g_xge_hal_driver->uld_callbacks.xpak_alarm_log( + hldev->upper_layer_info, + XGE_HAL_XPAK_ALARM_EXCESS_TEMP); + } + break; + case 2: + hldev->stats.sw_dev_err_stats.xpak_counter. 
+ excess_bias_current = 0; + + /* + * Notify the ULD on Excess xpak bias current alarm msg + */ + if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { + g_xge_hal_driver->uld_callbacks.xpak_alarm_log( + hldev->upper_layer_info, + XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT); + } + break; + case 3: + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_laser_output = 0; + + /* + * Notify the ULD on Excess Xpak Laser o/p power + * alarm msg + */ + if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) { + g_xge_hal_driver->uld_callbacks.xpak_alarm_log( + hldev->upper_layer_info, + XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT); + } + break; + default: + xge_debug_osdep(XGE_TRACE, "Incorrect XPAK Alarm " + "type "); + } } } @@ -1671,102 +1663,102 @@ void __hal_updt_stats_xpak(xge_hal_device_t *hldev) val_1 = 0x0; val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); if((val_1 == 0xFFFF) || (val_1 == 0x0000)) - { - xge_debug_osdep(XGE_TRACE, "ERR: MDIO slave access failed - " - "Returned %x", val_1); - return; - } + { + xge_debug_osdep(XGE_TRACE, "ERR: MDIO slave access failed - " + "Returned %x", val_1); + return; + } /* Check for the expected value of 2040 at PMA address 0x0000 */ if(val_1 != 0x2040) - { - xge_debug_osdep(XGE_TRACE, "Incorrect value at PMA address 0x0000 - "); - xge_debug_osdep(XGE_TRACE, "Returned: %llx- Expected: 0x2040", - (unsigned long long)(unsigned long)val_1); - return; - } + { + xge_debug_osdep(XGE_TRACE, "Incorrect value at PMA address 0x0000 - "); + xge_debug_osdep(XGE_TRACE, "Returned: %llx- Expected: 0x2040", + (unsigned long long)(unsigned long)val_1); + return; + } /* Loading the DOM register to MDIO register */ - addr = 0xA100; - (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr, 0x0); - val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); + addr = 0xA100; + (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr, 0x0); + val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); /* * Reading the Alarm flags */ - addr = 0xA070; - val_1 = 0x0; - val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); + addr = 0xA070; + val_1 = 0x0; + val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); if(CHECKBIT(val_1, 0x7)) { - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_transceiver_temp_high++; - hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp++; - __hal_chk_xpak_counter(hldev, 0x1, - hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp); + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_transceiver_temp_high++; + hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp++; + __hal_chk_xpak_counter(hldev, 0x1, + hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp); } else { - hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp = 0; + hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp = 0; } if(CHECKBIT(val_1, 0x6)) - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_transceiver_temp_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_transceiver_temp_low++; if(CHECKBIT(val_1, 0x3)) { - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_laser_bias_current_high++; - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_bias_current++; - __hal_chk_xpak_counter(hldev, 0x2, - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_bias_current); + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_laser_bias_current_high++; + hldev->stats.sw_dev_err_stats.xpak_counter. 
+ excess_bias_current++; + __hal_chk_xpak_counter(hldev, 0x2, + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_bias_current); } else { - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_bias_current = 0; + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_bias_current = 0; } if(CHECKBIT(val_1, 0x2)) - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_laser_bias_current_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_laser_bias_current_low++; if(CHECKBIT(val_1, 0x1)) { - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_laser_output_power_high++; - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_laser_output++; - __hal_chk_xpak_counter(hldev, 0x3, - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_laser_output); + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_laser_output_power_high++; + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_laser_output++; + __hal_chk_xpak_counter(hldev, 0x3, + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_laser_output); } else { - hldev->stats.sw_dev_err_stats.xpak_counter. - excess_laser_output = 0; + hldev->stats.sw_dev_err_stats.xpak_counter. + excess_laser_output = 0; } if(CHECKBIT(val_1, 0x0)) - hldev->stats.sw_dev_err_stats.stats_xpak. - alarm_laser_output_power_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. + alarm_laser_output_power_low++; /* * Reading the warning flags */ - addr = 0xA074; - val_1 = 0x0; - val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); + addr = 0xA074; + val_1 = 0x0; + val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr); if(CHECKBIT(val_1, 0x7)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_transceiver_temp_high++; + hldev->stats.sw_dev_err_stats.stats_xpak. + warn_transceiver_temp_high++; if(CHECKBIT(val_1, 0x6)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_transceiver_temp_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. + warn_transceiver_temp_low++; if(CHECKBIT(val_1, 0x3)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_laser_bias_current_high++; + hldev->stats.sw_dev_err_stats.stats_xpak. + warn_laser_bias_current_high++; if(CHECKBIT(val_1, 0x2)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_laser_bias_current_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. + warn_laser_bias_current_low++; if(CHECKBIT(val_1, 0x1)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_laser_output_power_high++; + hldev->stats.sw_dev_err_stats.stats_xpak. + warn_laser_output_power_high++; if(CHECKBIT(val_1, 0x0)) - hldev->stats.sw_dev_err_stats.stats_xpak. - warn_laser_output_power_low++; + hldev->stats.sw_dev_err_stats.stats_xpak. 
+ warn_laser_output_power_low++; } diff --git a/sys/dev/nxge/xgehal/xgehal-mgmtaux.c b/sys/dev/nxge/xgehal/xgehal-mgmtaux.c index e2f0046..00550d8 100644 --- a/sys/dev/nxge/xgehal/xgehal-mgmtaux.c +++ b/sys/dev/nxge/xgehal/xgehal-mgmtaux.c @@ -26,31 +26,23 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-mgmtaux.c - * - * Description: Xframe-family management auxiliary API implementation - * - * Created: 1 September 2004 - */ - #include <dev/nxge/include/xgehal-mgmt.h> #include <dev/nxge/include/xgehal-driver.h> #include <dev/nxge/include/xgehal-device.h> #ifdef XGE_OS_HAS_SNPRINTF #define __hal_aux_snprintf(retbuf, bufsize, fmt, key, value, retsize) \ - if (bufsize <= 0) return XGE_HAL_ERR_OUT_OF_SPACE; \ - retsize = xge_os_snprintf(retbuf, bufsize, fmt, key, \ - XGE_HAL_AUX_SEPA, value); \ - if (retsize < 0 || retsize >= bufsize) return XGE_HAL_ERR_OUT_OF_SPACE; + if (bufsize <= 0) return XGE_HAL_ERR_OUT_OF_SPACE; \ + retsize = xge_os_snprintf(retbuf, bufsize, fmt, key, \ + XGE_HAL_AUX_SEPA, value); \ + if (retsize < 0 || retsize >= bufsize) return XGE_HAL_ERR_OUT_OF_SPACE; #else #define __hal_aux_snprintf(retbuf, bufsize, fmt, key, value, retsize) \ if (bufsize <= 0) return XGE_HAL_ERR_OUT_OF_SPACE; \ - retsize = xge_os_sprintf(retbuf, fmt, key, XGE_HAL_AUX_SEPA, value); \ + retsize = xge_os_sprintf(retbuf, fmt, key, XGE_HAL_AUX_SEPA, value); \ xge_assert(retsize < bufsize); \ if (retsize < 0 || retsize >= bufsize) \ - return XGE_HAL_ERR_OUT_OF_SPACE; + return XGE_HAL_ERR_OUT_OF_SPACE; #endif #define __HAL_AUX_ENTRY_DECLARE(size, buf) \ @@ -65,14 +57,14 @@ leftsize -= entrysize; \ *retsize = bufsize - leftsize; -#define __hal_aux_pci_link_info(name, index, var) { \ - __HAL_AUX_ENTRY(name, \ - (unsigned long long)pcim.link_info[index].var, "%llu") \ +#define __hal_aux_pci_link_info(name, index, var) { \ + __HAL_AUX_ENTRY(name, \ + (unsigned long long)pcim.link_info[index].var, "%llu") \ } -#define __hal_aux_pci_aggr_info(name, index, var) { \ - __HAL_AUX_ENTRY(name, \ - (unsigned long long)pcim.aggr_info[index].var, "%llu") \ +#define __hal_aux_pci_aggr_info(name, index, var) { \ + __HAL_AUX_ENTRY(name, \ + (unsigned long long)pcim.aggr_info[index].var, "%llu") \ } /** @@ -95,23 +87,23 @@ * See also: xge_hal_mgmt_reg_read(). */ xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh, - unsigned int offset, int bufsize, char *retbuf, - int *retsize) + unsigned int offset, int bufsize, char *retbuf, + int *retsize) { xge_hal_status_e status; u64 retval; status = xge_hal_mgmt_reg_read(devh, 0, offset, &retval); if (status != XGE_HAL_OK) { - return status; + return status; } if (bufsize < XGE_OS_SPRINTF_STRLEN) { - return XGE_HAL_ERR_OUT_OF_SPACE; + return XGE_HAL_ERR_OUT_OF_SPACE; } *retsize = xge_os_sprintf(retbuf, "0x%04X%c0x%08X%08X\n", offset, - XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval); + XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval); return XGE_HAL_OK; } @@ -135,24 +127,24 @@ xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh, * See also: xge_hal_mgmt_reg_read(). 
*/ xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh, - unsigned int offset, int bufsize, char *retbuf, - int *retsize) + unsigned int offset, int bufsize, char *retbuf, + int *retsize) { xge_hal_status_e status; u64 retval; status = xge_hal_mgmt_reg_read(devh, 1, offset, &retval); if (status != XGE_HAL_OK) { - return status; + return status; } if (bufsize < XGE_OS_SPRINTF_STRLEN) { - return XGE_HAL_ERR_OUT_OF_SPACE; + return XGE_HAL_ERR_OUT_OF_SPACE; } - *retsize = xge_os_sprintf(retbuf, "0x%04X%c0x%08X%08X\n", - offset, - XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval); + *retsize = xge_os_sprintf(retbuf, "0x%04X%c0x%08X%08X\n", + offset, + XGE_HAL_AUX_SEPA, (u32)(retval>>32), (u32)retval); return XGE_HAL_OK; } @@ -174,13 +166,13 @@ xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh, * See also: xge_hal_mgmt_reg_write(). */ xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh, - unsigned int offset, u64 value) + unsigned int offset, u64 value) { xge_hal_status_e status; status = xge_hal_mgmt_reg_write(devh, 0, offset, value); if (status != XGE_HAL_OK) { - return status; + return status; } return XGE_HAL_OK; @@ -204,16 +196,16 @@ xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh, * See also: xge_hal_mgmt_about(), xge_hal_aux_device_dump(). */ xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize) + char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_mgmt_about_info_t about_info; __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); status = xge_hal_mgmt_about(devh, &about_info, - sizeof(xge_hal_mgmt_about_info_t)); + sizeof(xge_hal_mgmt_about_info_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("vendor", about_info.vendor, "0x%x"); @@ -234,7 +226,7 @@ xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY("ll_build", about_info.ll_build, "%s"); __HAL_AUX_ENTRY("transponder_temperature", - about_info.transponder_temperature, "%d C"); + about_info.transponder_temperature, "%d C"); __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -259,8 +251,8 @@ xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize, * xge_hal_aux_stats_pci_read(), * xge_hal_aux_device_dump(). 
*/ -xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize) +xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; @@ -268,113 +260,113 @@ xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - xge_hal_mgmt_hw_stats_t hw; - - status = xge_hal_mgmt_hw_stats(devh, &hw, - sizeof(xge_hal_mgmt_hw_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - - __HAL_AUX_ENTRY("tmac_data_octets", hw.tmac_data_octets, "%u"); - __HAL_AUX_ENTRY("tmac_frms", hw.tmac_frms, "%u"); - __HAL_AUX_ENTRY("tmac_drop_frms", (unsigned long long) - hw.tmac_drop_frms, "%llu"); - __HAL_AUX_ENTRY("tmac_bcst_frms", hw.tmac_bcst_frms, "%u"); - __HAL_AUX_ENTRY("tmac_mcst_frms", hw.tmac_mcst_frms, "%u"); - __HAL_AUX_ENTRY("tmac_pause_ctrl_frms", (unsigned long long) - hw.tmac_pause_ctrl_frms, "%llu"); - __HAL_AUX_ENTRY("tmac_ucst_frms", hw.tmac_ucst_frms, "%u"); - __HAL_AUX_ENTRY("tmac_ttl_octets", hw.tmac_ttl_octets, "%u"); - __HAL_AUX_ENTRY("tmac_any_err_frms", hw.tmac_any_err_frms, "%u"); - __HAL_AUX_ENTRY("tmac_nucst_frms", hw.tmac_nucst_frms, "%u"); - __HAL_AUX_ENTRY("tmac_ttl_less_fb_octets", (unsigned long long) - hw.tmac_ttl_less_fb_octets, "%llu"); - __HAL_AUX_ENTRY("tmac_vld_ip_octets", (unsigned long long) - hw.tmac_vld_ip_octets, "%llu"); - __HAL_AUX_ENTRY("tmac_drop_ip", hw.tmac_drop_ip, "%u"); - __HAL_AUX_ENTRY("tmac_vld_ip", hw.tmac_vld_ip, "%u"); - __HAL_AUX_ENTRY("tmac_rst_tcp", hw.tmac_rst_tcp, "%u"); - __HAL_AUX_ENTRY("tmac_icmp", hw.tmac_icmp, "%u"); - __HAL_AUX_ENTRY("tmac_tcp", (unsigned long long) - hw.tmac_tcp, "%llu"); - __HAL_AUX_ENTRY("reserved_0", hw.reserved_0, "%u"); - __HAL_AUX_ENTRY("tmac_udp", hw.tmac_udp, "%u"); + xge_hal_mgmt_hw_stats_t hw; + + status = xge_hal_mgmt_hw_stats(devh, &hw, + sizeof(xge_hal_mgmt_hw_stats_t)); + if (status != XGE_HAL_OK) { + return status; + } + + __HAL_AUX_ENTRY("tmac_data_octets", hw.tmac_data_octets, "%u"); + __HAL_AUX_ENTRY("tmac_frms", hw.tmac_frms, "%u"); + __HAL_AUX_ENTRY("tmac_drop_frms", (unsigned long long) + hw.tmac_drop_frms, "%llu"); + __HAL_AUX_ENTRY("tmac_bcst_frms", hw.tmac_bcst_frms, "%u"); + __HAL_AUX_ENTRY("tmac_mcst_frms", hw.tmac_mcst_frms, "%u"); + __HAL_AUX_ENTRY("tmac_pause_ctrl_frms", (unsigned long long) + hw.tmac_pause_ctrl_frms, "%llu"); + __HAL_AUX_ENTRY("tmac_ucst_frms", hw.tmac_ucst_frms, "%u"); + __HAL_AUX_ENTRY("tmac_ttl_octets", hw.tmac_ttl_octets, "%u"); + __HAL_AUX_ENTRY("tmac_any_err_frms", hw.tmac_any_err_frms, "%u"); + __HAL_AUX_ENTRY("tmac_nucst_frms", hw.tmac_nucst_frms, "%u"); + __HAL_AUX_ENTRY("tmac_ttl_less_fb_octets", (unsigned long long) + hw.tmac_ttl_less_fb_octets, "%llu"); + __HAL_AUX_ENTRY("tmac_vld_ip_octets", (unsigned long long) + hw.tmac_vld_ip_octets, "%llu"); + __HAL_AUX_ENTRY("tmac_drop_ip", hw.tmac_drop_ip, "%u"); + __HAL_AUX_ENTRY("tmac_vld_ip", hw.tmac_vld_ip, "%u"); + __HAL_AUX_ENTRY("tmac_rst_tcp", hw.tmac_rst_tcp, "%u"); + __HAL_AUX_ENTRY("tmac_icmp", hw.tmac_icmp, "%u"); + __HAL_AUX_ENTRY("tmac_tcp", (unsigned long long) + hw.tmac_tcp, "%llu"); + __HAL_AUX_ENTRY("reserved_0", hw.reserved_0, "%u"); + __HAL_AUX_ENTRY("tmac_udp", hw.tmac_udp, "%u"); } else { - int i; - xge_hal_mgmt_pcim_stats_t pcim; - status = xge_hal_mgmt_pcim_stats(devh, &pcim, - 
sizeof(xge_hal_mgmt_pcim_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - - for (i = 0; i < XGE_HAL_MAC_LINKS; i++) { - __hal_aux_pci_link_info("tx_frms", i, - tx_frms); - __hal_aux_pci_link_info("tx_ttl_eth_octets", - i, tx_ttl_eth_octets ); - __hal_aux_pci_link_info("tx_data_octets", i, - tx_data_octets); - __hal_aux_pci_link_info("tx_mcst_frms", i, - tx_mcst_frms); - __hal_aux_pci_link_info("tx_bcst_frms", i, - tx_bcst_frms); - __hal_aux_pci_link_info("tx_ucst_frms", i, - tx_ucst_frms); - __hal_aux_pci_link_info("tx_tagged_frms", i, - tx_tagged_frms); - __hal_aux_pci_link_info("tx_vld_ip", i, - tx_vld_ip); - __hal_aux_pci_link_info("tx_vld_ip_octets", i, - tx_vld_ip_octets); - __hal_aux_pci_link_info("tx_icmp", i, - tx_icmp); - __hal_aux_pci_link_info("tx_tcp", i, - tx_tcp); - __hal_aux_pci_link_info("tx_rst_tcp", i, - tx_rst_tcp); - __hal_aux_pci_link_info("tx_udp", i, - tx_udp); - __hal_aux_pci_link_info("tx_unknown_protocol", i, - tx_unknown_protocol); - __hal_aux_pci_link_info("tx_parse_error", i, - tx_parse_error); - __hal_aux_pci_link_info("tx_pause_ctrl_frms", i, - tx_pause_ctrl_frms); - __hal_aux_pci_link_info("tx_lacpdu_frms", i, - tx_lacpdu_frms); - __hal_aux_pci_link_info("tx_marker_pdu_frms", i, - tx_marker_pdu_frms); - __hal_aux_pci_link_info("tx_marker_resp_pdu_frms", i, - tx_marker_resp_pdu_frms); - __hal_aux_pci_link_info("tx_drop_ip", i, - tx_drop_ip); - __hal_aux_pci_link_info("tx_xgmii_char1_match", i, - tx_xgmii_char1_match); - __hal_aux_pci_link_info("tx_xgmii_char2_match", i, - tx_xgmii_char2_match); - __hal_aux_pci_link_info("tx_xgmii_column1_match", i, - tx_xgmii_column1_match); - __hal_aux_pci_link_info("tx_xgmii_column2_match", i, - tx_xgmii_column2_match); - __hal_aux_pci_link_info("tx_drop_frms", i, - tx_drop_frms); - __hal_aux_pci_link_info("tx_any_err_frms", i, - tx_any_err_frms); - } - - for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) { - __hal_aux_pci_aggr_info("tx_frms", i, tx_frms); - __hal_aux_pci_aggr_info("tx_mcst_frms", i, - tx_mcst_frms); - __hal_aux_pci_aggr_info("tx_bcst_frms", i, - tx_bcst_frms); - __hal_aux_pci_aggr_info("tx_discarded_frms", i, - tx_discarded_frms); - __hal_aux_pci_aggr_info("tx_errored_frms", i, - tx_errored_frms); - } + int i; + xge_hal_mgmt_pcim_stats_t pcim; + status = xge_hal_mgmt_pcim_stats(devh, &pcim, + sizeof(xge_hal_mgmt_pcim_stats_t)); + if (status != XGE_HAL_OK) { + return status; + } + + for (i = 0; i < XGE_HAL_MAC_LINKS; i++) { + __hal_aux_pci_link_info("tx_frms", i, + tx_frms); + __hal_aux_pci_link_info("tx_ttl_eth_octets", + i, tx_ttl_eth_octets ); + __hal_aux_pci_link_info("tx_data_octets", i, + tx_data_octets); + __hal_aux_pci_link_info("tx_mcst_frms", i, + tx_mcst_frms); + __hal_aux_pci_link_info("tx_bcst_frms", i, + tx_bcst_frms); + __hal_aux_pci_link_info("tx_ucst_frms", i, + tx_ucst_frms); + __hal_aux_pci_link_info("tx_tagged_frms", i, + tx_tagged_frms); + __hal_aux_pci_link_info("tx_vld_ip", i, + tx_vld_ip); + __hal_aux_pci_link_info("tx_vld_ip_octets", i, + tx_vld_ip_octets); + __hal_aux_pci_link_info("tx_icmp", i, + tx_icmp); + __hal_aux_pci_link_info("tx_tcp", i, + tx_tcp); + __hal_aux_pci_link_info("tx_rst_tcp", i, + tx_rst_tcp); + __hal_aux_pci_link_info("tx_udp", i, + tx_udp); + __hal_aux_pci_link_info("tx_unknown_protocol", i, + tx_unknown_protocol); + __hal_aux_pci_link_info("tx_parse_error", i, + tx_parse_error); + __hal_aux_pci_link_info("tx_pause_ctrl_frms", i, + tx_pause_ctrl_frms); + __hal_aux_pci_link_info("tx_lacpdu_frms", i, + tx_lacpdu_frms); + 
__hal_aux_pci_link_info("tx_marker_pdu_frms", i, + tx_marker_pdu_frms); + __hal_aux_pci_link_info("tx_marker_resp_pdu_frms", i, + tx_marker_resp_pdu_frms); + __hal_aux_pci_link_info("tx_drop_ip", i, + tx_drop_ip); + __hal_aux_pci_link_info("tx_xgmii_char1_match", i, + tx_xgmii_char1_match); + __hal_aux_pci_link_info("tx_xgmii_char2_match", i, + tx_xgmii_char2_match); + __hal_aux_pci_link_info("tx_xgmii_column1_match", i, + tx_xgmii_column1_match); + __hal_aux_pci_link_info("tx_xgmii_column2_match", i, + tx_xgmii_column2_match); + __hal_aux_pci_link_info("tx_drop_frms", i, + tx_drop_frms); + __hal_aux_pci_link_info("tx_any_err_frms", i, + tx_any_err_frms); + } + + for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) { + __hal_aux_pci_aggr_info("tx_frms", i, tx_frms); + __hal_aux_pci_aggr_info("tx_mcst_frms", i, + tx_mcst_frms); + __hal_aux_pci_aggr_info("tx_bcst_frms", i, + tx_bcst_frms); + __hal_aux_pci_aggr_info("tx_discarded_frms", i, + tx_discarded_frms); + __hal_aux_pci_aggr_info("tx_errored_frms", i, + tx_errored_frms); + } } __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -400,8 +392,8 @@ xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize, * xge_hal_aux_stats_pci_read(), xge_hal_aux_stats_tmac_read(), * xge_hal_aux_device_dump(). */ -xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize) +xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; @@ -409,274 +401,274 @@ xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - xge_hal_mgmt_hw_stats_t hw; - - status = xge_hal_mgmt_hw_stats(devh, &hw, - sizeof(xge_hal_mgmt_hw_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - - __HAL_AUX_ENTRY("rmac_data_octets", hw.rmac_data_octets, "%u"); - __HAL_AUX_ENTRY("rmac_vld_frms", hw.rmac_vld_frms, "%u"); - __HAL_AUX_ENTRY("rmac_fcs_err_frms", (unsigned long long) - hw.rmac_fcs_err_frms, "%llu"); - __HAL_AUX_ENTRY("mac_drop_frms", (unsigned long long) - hw.rmac_drop_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_vld_bcst_frms", hw.rmac_vld_bcst_frms, - "%u"); - __HAL_AUX_ENTRY("rmac_vld_mcst_frms", hw.rmac_vld_mcst_frms, - "%u"); - __HAL_AUX_ENTRY("rmac_out_rng_len_err_frms", - hw.rmac_out_rng_len_err_frms, "%u"); - __HAL_AUX_ENTRY("rmac_in_rng_len_err_frms", - hw.rmac_in_rng_len_err_frms, "%u"); - __HAL_AUX_ENTRY("rmac_long_frms", (unsigned long long) - hw.rmac_long_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_pause_ctrl_frms", (unsigned long long) - hw.rmac_pause_ctrl_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_unsup_ctrl_frms", (unsigned long long) - hw.rmac_unsup_ctrl_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_accepted_ucst_frms", - hw.rmac_accepted_ucst_frms, "%u"); - __HAL_AUX_ENTRY("rmac_ttl_octets", hw.rmac_ttl_octets, "%u"); - __HAL_AUX_ENTRY("rmac_discarded_frms", hw.rmac_discarded_frms, - "%u"); - __HAL_AUX_ENTRY("rmac_accepted_nucst_frms", - hw.rmac_accepted_nucst_frms, "%u"); - __HAL_AUX_ENTRY("reserved_1", hw.reserved_1, "%u"); - __HAL_AUX_ENTRY("rmac_drop_events", hw.rmac_drop_events, "%u"); - __HAL_AUX_ENTRY("rmac_ttl_less_fb_octets", (unsigned long long) - hw.rmac_ttl_less_fb_octets, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_frms", (unsigned long long) - hw.rmac_ttl_frms, "%llu"); - __HAL_AUX_ENTRY("reserved_2", (unsigned long long) - hw.reserved_2, "%llu"); - 
__HAL_AUX_ENTRY("rmac_usized_frms", hw.rmac_usized_frms, "%u"); - __HAL_AUX_ENTRY("reserved_3", hw.reserved_3, "%u"); - __HAL_AUX_ENTRY("rmac_frag_frms", hw.rmac_frag_frms, "%u"); - __HAL_AUX_ENTRY("rmac_osized_frms", hw.rmac_osized_frms, "%u"); - __HAL_AUX_ENTRY("reserved_4", hw.reserved_4, "%u"); - __HAL_AUX_ENTRY("rmac_jabber_frms", hw.rmac_jabber_frms, "%u"); - __HAL_AUX_ENTRY("rmac_ttl_64_frms", (unsigned long long) - hw.rmac_ttl_64_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_65_127_frms", (unsigned long long) - hw.rmac_ttl_65_127_frms, "%llu"); - __HAL_AUX_ENTRY("reserved_5", (unsigned long long) - hw.reserved_5, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_128_255_frms", (unsigned long long) - hw.rmac_ttl_128_255_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_256_511_frms", (unsigned long long) - hw.rmac_ttl_256_511_frms, "%llu"); - __HAL_AUX_ENTRY("reserved_6", (unsigned long long) - hw.reserved_6, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_512_1023_frms", (unsigned long long) - hw.rmac_ttl_512_1023_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_ttl_1024_1518_frms", (unsigned long long) - hw.rmac_ttl_1024_1518_frms, "%llu"); - __HAL_AUX_ENTRY("rmac_ip", hw.rmac_ip, "%u"); - __HAL_AUX_ENTRY("reserved_7", hw.reserved_7, "%u"); - __HAL_AUX_ENTRY("rmac_ip_octets", (unsigned long long) - hw.rmac_ip_octets, "%llu"); - __HAL_AUX_ENTRY("rmac_drop_ip", hw.rmac_drop_ip, "%u"); - __HAL_AUX_ENTRY("rmac_hdr_err_ip", hw.rmac_hdr_err_ip, "%u"); - __HAL_AUX_ENTRY("reserved_8", hw.reserved_8, "%u"); - __HAL_AUX_ENTRY("rmac_icmp", hw.rmac_icmp, "%u"); - __HAL_AUX_ENTRY("rmac_tcp", (unsigned long long) - hw.rmac_tcp, "%llu"); - __HAL_AUX_ENTRY("rmac_err_drp_udp", hw.rmac_err_drp_udp, "%u"); - __HAL_AUX_ENTRY("rmac_udp", hw.rmac_udp, "%u"); - __HAL_AUX_ENTRY("rmac_xgmii_err_sym", (unsigned long long) - hw.rmac_xgmii_err_sym, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q0", (unsigned long long) - hw.rmac_frms_q0, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q1", (unsigned long long) - hw.rmac_frms_q1, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q2", (unsigned long long) - hw.rmac_frms_q2, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q3", (unsigned long long) - hw.rmac_frms_q3, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q4", (unsigned long long) - hw.rmac_frms_q4, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q5", (unsigned long long) - hw.rmac_frms_q5, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q6", (unsigned long long) - hw.rmac_frms_q6, "%llu"); - __HAL_AUX_ENTRY("rmac_frms_q7", (unsigned long long) - hw.rmac_frms_q7, "%llu"); - __HAL_AUX_ENTRY("rmac_full_q3", hw.rmac_full_q3, "%d"); - __HAL_AUX_ENTRY("rmac_full_q2", hw.rmac_full_q2, "%d"); - __HAL_AUX_ENTRY("rmac_full_q1", hw.rmac_full_q1, "%d"); - __HAL_AUX_ENTRY("rmac_full_q0", hw.rmac_full_q0, "%d"); - __HAL_AUX_ENTRY("rmac_full_q7", hw.rmac_full_q7, "%d"); - __HAL_AUX_ENTRY("rmac_full_q6", hw.rmac_full_q6, "%d"); - __HAL_AUX_ENTRY("rmac_full_q5", hw.rmac_full_q5, "%d"); - __HAL_AUX_ENTRY("rmac_full_q4", hw.rmac_full_q4, "%d"); - __HAL_AUX_ENTRY("reserved_9", hw.reserved_9, "%u"); - __HAL_AUX_ENTRY("rmac_pause_cnt", hw.rmac_pause_cnt, "%u"); - __HAL_AUX_ENTRY("rmac_xgmii_data_err_cnt", (unsigned long long) - hw.rmac_xgmii_data_err_cnt, "%llu"); - __HAL_AUX_ENTRY("rmac_xgmii_ctrl_err_cnt", (unsigned long long) - hw.rmac_xgmii_ctrl_err_cnt, "%llu"); - __HAL_AUX_ENTRY("rmac_err_tcp", hw.rmac_err_tcp, "%u"); - __HAL_AUX_ENTRY("rmac_accepted_ip", hw.rmac_accepted_ip, "%u"); + xge_hal_mgmt_hw_stats_t hw; + + status = xge_hal_mgmt_hw_stats(devh, &hw, + sizeof(xge_hal_mgmt_hw_stats_t)); + if (status != XGE_HAL_OK) { + 
return status; + } + + __HAL_AUX_ENTRY("rmac_data_octets", hw.rmac_data_octets, "%u"); + __HAL_AUX_ENTRY("rmac_vld_frms", hw.rmac_vld_frms, "%u"); + __HAL_AUX_ENTRY("rmac_fcs_err_frms", (unsigned long long) + hw.rmac_fcs_err_frms, "%llu"); + __HAL_AUX_ENTRY("mac_drop_frms", (unsigned long long) + hw.rmac_drop_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_vld_bcst_frms", hw.rmac_vld_bcst_frms, + "%u"); + __HAL_AUX_ENTRY("rmac_vld_mcst_frms", hw.rmac_vld_mcst_frms, + "%u"); + __HAL_AUX_ENTRY("rmac_out_rng_len_err_frms", + hw.rmac_out_rng_len_err_frms, "%u"); + __HAL_AUX_ENTRY("rmac_in_rng_len_err_frms", + hw.rmac_in_rng_len_err_frms, "%u"); + __HAL_AUX_ENTRY("rmac_long_frms", (unsigned long long) + hw.rmac_long_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_pause_ctrl_frms", (unsigned long long) + hw.rmac_pause_ctrl_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_unsup_ctrl_frms", (unsigned long long) + hw.rmac_unsup_ctrl_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_accepted_ucst_frms", + hw.rmac_accepted_ucst_frms, "%u"); + __HAL_AUX_ENTRY("rmac_ttl_octets", hw.rmac_ttl_octets, "%u"); + __HAL_AUX_ENTRY("rmac_discarded_frms", hw.rmac_discarded_frms, + "%u"); + __HAL_AUX_ENTRY("rmac_accepted_nucst_frms", + hw.rmac_accepted_nucst_frms, "%u"); + __HAL_AUX_ENTRY("reserved_1", hw.reserved_1, "%u"); + __HAL_AUX_ENTRY("rmac_drop_events", hw.rmac_drop_events, "%u"); + __HAL_AUX_ENTRY("rmac_ttl_less_fb_octets", (unsigned long long) + hw.rmac_ttl_less_fb_octets, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_frms", (unsigned long long) + hw.rmac_ttl_frms, "%llu"); + __HAL_AUX_ENTRY("reserved_2", (unsigned long long) + hw.reserved_2, "%llu"); + __HAL_AUX_ENTRY("rmac_usized_frms", hw.rmac_usized_frms, "%u"); + __HAL_AUX_ENTRY("reserved_3", hw.reserved_3, "%u"); + __HAL_AUX_ENTRY("rmac_frag_frms", hw.rmac_frag_frms, "%u"); + __HAL_AUX_ENTRY("rmac_osized_frms", hw.rmac_osized_frms, "%u"); + __HAL_AUX_ENTRY("reserved_4", hw.reserved_4, "%u"); + __HAL_AUX_ENTRY("rmac_jabber_frms", hw.rmac_jabber_frms, "%u"); + __HAL_AUX_ENTRY("rmac_ttl_64_frms", (unsigned long long) + hw.rmac_ttl_64_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_65_127_frms", (unsigned long long) + hw.rmac_ttl_65_127_frms, "%llu"); + __HAL_AUX_ENTRY("reserved_5", (unsigned long long) + hw.reserved_5, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_128_255_frms", (unsigned long long) + hw.rmac_ttl_128_255_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_256_511_frms", (unsigned long long) + hw.rmac_ttl_256_511_frms, "%llu"); + __HAL_AUX_ENTRY("reserved_6", (unsigned long long) + hw.reserved_6, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_512_1023_frms", (unsigned long long) + hw.rmac_ttl_512_1023_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_ttl_1024_1518_frms", (unsigned long long) + hw.rmac_ttl_1024_1518_frms, "%llu"); + __HAL_AUX_ENTRY("rmac_ip", hw.rmac_ip, "%u"); + __HAL_AUX_ENTRY("reserved_7", hw.reserved_7, "%u"); + __HAL_AUX_ENTRY("rmac_ip_octets", (unsigned long long) + hw.rmac_ip_octets, "%llu"); + __HAL_AUX_ENTRY("rmac_drop_ip", hw.rmac_drop_ip, "%u"); + __HAL_AUX_ENTRY("rmac_hdr_err_ip", hw.rmac_hdr_err_ip, "%u"); + __HAL_AUX_ENTRY("reserved_8", hw.reserved_8, "%u"); + __HAL_AUX_ENTRY("rmac_icmp", hw.rmac_icmp, "%u"); + __HAL_AUX_ENTRY("rmac_tcp", (unsigned long long) + hw.rmac_tcp, "%llu"); + __HAL_AUX_ENTRY("rmac_err_drp_udp", hw.rmac_err_drp_udp, "%u"); + __HAL_AUX_ENTRY("rmac_udp", hw.rmac_udp, "%u"); + __HAL_AUX_ENTRY("rmac_xgmii_err_sym", (unsigned long long) + hw.rmac_xgmii_err_sym, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q0", (unsigned long long) + hw.rmac_frms_q0, "%llu"); + 
__HAL_AUX_ENTRY("rmac_frms_q1", (unsigned long long) + hw.rmac_frms_q1, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q2", (unsigned long long) + hw.rmac_frms_q2, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q3", (unsigned long long) + hw.rmac_frms_q3, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q4", (unsigned long long) + hw.rmac_frms_q4, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q5", (unsigned long long) + hw.rmac_frms_q5, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q6", (unsigned long long) + hw.rmac_frms_q6, "%llu"); + __HAL_AUX_ENTRY("rmac_frms_q7", (unsigned long long) + hw.rmac_frms_q7, "%llu"); + __HAL_AUX_ENTRY("rmac_full_q3", hw.rmac_full_q3, "%d"); + __HAL_AUX_ENTRY("rmac_full_q2", hw.rmac_full_q2, "%d"); + __HAL_AUX_ENTRY("rmac_full_q1", hw.rmac_full_q1, "%d"); + __HAL_AUX_ENTRY("rmac_full_q0", hw.rmac_full_q0, "%d"); + __HAL_AUX_ENTRY("rmac_full_q7", hw.rmac_full_q7, "%d"); + __HAL_AUX_ENTRY("rmac_full_q6", hw.rmac_full_q6, "%d"); + __HAL_AUX_ENTRY("rmac_full_q5", hw.rmac_full_q5, "%d"); + __HAL_AUX_ENTRY("rmac_full_q4", hw.rmac_full_q4, "%d"); + __HAL_AUX_ENTRY("reserved_9", hw.reserved_9, "%u"); + __HAL_AUX_ENTRY("rmac_pause_cnt", hw.rmac_pause_cnt, "%u"); + __HAL_AUX_ENTRY("rmac_xgmii_data_err_cnt", (unsigned long long) + hw.rmac_xgmii_data_err_cnt, "%llu"); + __HAL_AUX_ENTRY("rmac_xgmii_ctrl_err_cnt", (unsigned long long) + hw.rmac_xgmii_ctrl_err_cnt, "%llu"); + __HAL_AUX_ENTRY("rmac_err_tcp", hw.rmac_err_tcp, "%u"); + __HAL_AUX_ENTRY("rmac_accepted_ip", hw.rmac_accepted_ip, "%u"); } else { - int i; - xge_hal_mgmt_pcim_stats_t pcim; - status = xge_hal_mgmt_pcim_stats(devh, &pcim, - sizeof(xge_hal_mgmt_pcim_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - for (i = 0; i < XGE_HAL_MAC_LINKS; i++) { - __hal_aux_pci_link_info("rx_ttl_frms", i, - rx_ttl_frms); - __hal_aux_pci_link_info("rx_vld_frms", i, - rx_vld_frms); - __hal_aux_pci_link_info("rx_offld_frms", i, - rx_offld_frms); - __hal_aux_pci_link_info("rx_ttl_eth_octets", i, - rx_ttl_eth_octets); - __hal_aux_pci_link_info("rx_data_octets", i, - rx_data_octets); - __hal_aux_pci_link_info("rx_offld_octets", i, - rx_offld_octets); - __hal_aux_pci_link_info("rx_vld_mcst_frms", i, - rx_vld_mcst_frms); - __hal_aux_pci_link_info("rx_vld_bcst_frms", i, - rx_vld_bcst_frms); - __hal_aux_pci_link_info("rx_accepted_ucst_frms", i, - rx_accepted_ucst_frms); - __hal_aux_pci_link_info("rx_accepted_nucst_frms", i, - rx_accepted_nucst_frms); - __hal_aux_pci_link_info("rx_tagged_frms", i, - rx_tagged_frms); - __hal_aux_pci_link_info("rx_long_frms", i, - rx_long_frms); - __hal_aux_pci_link_info("rx_usized_frms", i, - rx_usized_frms); - __hal_aux_pci_link_info("rx_osized_frms", i, - rx_osized_frms); - __hal_aux_pci_link_info("rx_frag_frms", i, - rx_frag_frms); - __hal_aux_pci_link_info("rx_jabber_frms", i, - rx_jabber_frms); - __hal_aux_pci_link_info("rx_ttl_64_frms", i, - rx_ttl_64_frms); - __hal_aux_pci_link_info("rx_ttl_65_127_frms", i, - rx_ttl_65_127_frms); - __hal_aux_pci_link_info("rx_ttl_128_255_frms", i, - rx_ttl_128_255_frms); - __hal_aux_pci_link_info("rx_ttl_256_511_frms", i, - rx_ttl_256_511_frms); - __hal_aux_pci_link_info("rx_ttl_512_1023_frms", i, - rx_ttl_512_1023_frms); - __hal_aux_pci_link_info("rx_ttl_1024_1518_frms", i, - rx_ttl_1024_1518_frms); - __hal_aux_pci_link_info("rx_ttl_1519_4095_frms", i, - rx_ttl_1519_4095_frms); - __hal_aux_pci_link_info("rx_ttl_40956_8191_frms", i, - rx_ttl_40956_8191_frms); - __hal_aux_pci_link_info("rx_ttl_8192_max_frms", i, - rx_ttl_8192_max_frms); - __hal_aux_pci_link_info("rx_ttl_gt_max_frms", i, - 
rx_ttl_gt_max_frms); - __hal_aux_pci_link_info("rx_ip", i, - rx_ip); - __hal_aux_pci_link_info("rx_ip_octets", i, - rx_ip_octets); - - __hal_aux_pci_link_info("rx_hdr_err_ip", i, - rx_hdr_err_ip); - - __hal_aux_pci_link_info("rx_icmp", i, - rx_icmp); - __hal_aux_pci_link_info("rx_tcp", i, - rx_tcp); - __hal_aux_pci_link_info("rx_udp", i, - rx_udp); - __hal_aux_pci_link_info("rx_err_tcp", i, - rx_err_tcp); - __hal_aux_pci_link_info("rx_pause_cnt", i, - rx_pause_cnt); - __hal_aux_pci_link_info("rx_pause_ctrl_frms", i, - rx_pause_ctrl_frms); - __hal_aux_pci_link_info("rx_unsup_ctrl_frms", i, - rx_pause_cnt); - __hal_aux_pci_link_info("rx_in_rng_len_err_frms", i, - rx_in_rng_len_err_frms); - __hal_aux_pci_link_info("rx_out_rng_len_err_frms", i, - rx_out_rng_len_err_frms); - __hal_aux_pci_link_info("rx_drop_frms", i, - rx_drop_frms); - __hal_aux_pci_link_info("rx_discarded_frms", i, - rx_discarded_frms); - __hal_aux_pci_link_info("rx_drop_ip", i, - rx_drop_ip); - __hal_aux_pci_link_info("rx_err_drp_udp", i, - rx_err_drp_udp); - __hal_aux_pci_link_info("rx_lacpdu_frms", i, - rx_lacpdu_frms); - __hal_aux_pci_link_info("rx_marker_pdu_frms", i, - rx_marker_pdu_frms); - __hal_aux_pci_link_info("rx_marker_resp_pdu_frms", i, - rx_marker_resp_pdu_frms); - __hal_aux_pci_link_info("rx_unknown_pdu_frms", i, - rx_unknown_pdu_frms); - __hal_aux_pci_link_info("rx_illegal_pdu_frms", i, - rx_illegal_pdu_frms); - __hal_aux_pci_link_info("rx_fcs_discard", i, - rx_fcs_discard); - __hal_aux_pci_link_info("rx_len_discard", i, - rx_len_discard); - __hal_aux_pci_link_info("rx_pf_discard", i, - rx_pf_discard); - __hal_aux_pci_link_info("rx_trash_discard", i, - rx_trash_discard); - __hal_aux_pci_link_info("rx_rts_discard", i, - rx_trash_discard); - __hal_aux_pci_link_info("rx_wol_discard", i, - rx_wol_discard); - __hal_aux_pci_link_info("rx_red_discard", i, - rx_red_discard); - __hal_aux_pci_link_info("rx_ingm_full_discard", i, - rx_ingm_full_discard); - __hal_aux_pci_link_info("rx_xgmii_data_err_cnt", i, - rx_xgmii_data_err_cnt); - __hal_aux_pci_link_info("rx_xgmii_ctrl_err_cnt", i, - rx_xgmii_ctrl_err_cnt); - __hal_aux_pci_link_info("rx_xgmii_err_sym", i, - rx_xgmii_err_sym); - __hal_aux_pci_link_info("rx_xgmii_char1_match", i, - rx_xgmii_char1_match); - __hal_aux_pci_link_info("rx_xgmii_char2_match", i, - rx_xgmii_char2_match); - __hal_aux_pci_link_info("rx_xgmii_column1_match", i, - rx_xgmii_column1_match); - __hal_aux_pci_link_info("rx_xgmii_column2_match", i, - rx_xgmii_column2_match); - __hal_aux_pci_link_info("rx_local_fault", i, - rx_local_fault); - __hal_aux_pci_link_info("rx_remote_fault", i, - rx_remote_fault); - __hal_aux_pci_link_info("rx_queue_full", i, - rx_queue_full); - } - for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) { - __hal_aux_pci_aggr_info("rx_frms", i, rx_frms); - __hal_aux_pci_link_info("rx_data_octets", i, - rx_data_octets); - __hal_aux_pci_aggr_info("rx_mcst_frms", i, - rx_mcst_frms); - __hal_aux_pci_aggr_info("rx_bcst_frms", i, - rx_bcst_frms); - __hal_aux_pci_aggr_info("rx_discarded_frms", i, - rx_discarded_frms); - __hal_aux_pci_aggr_info("rx_errored_frms", i, - rx_errored_frms); - __hal_aux_pci_aggr_info("rx_unknown_protocol_frms", i, - rx_unknown_protocol_frms); - } + int i; + xge_hal_mgmt_pcim_stats_t pcim; + status = xge_hal_mgmt_pcim_stats(devh, &pcim, + sizeof(xge_hal_mgmt_pcim_stats_t)); + if (status != XGE_HAL_OK) { + return status; + } + for (i = 0; i < XGE_HAL_MAC_LINKS; i++) { + __hal_aux_pci_link_info("rx_ttl_frms", i, + rx_ttl_frms); + __hal_aux_pci_link_info("rx_vld_frms", 
i, + rx_vld_frms); + __hal_aux_pci_link_info("rx_offld_frms", i, + rx_offld_frms); + __hal_aux_pci_link_info("rx_ttl_eth_octets", i, + rx_ttl_eth_octets); + __hal_aux_pci_link_info("rx_data_octets", i, + rx_data_octets); + __hal_aux_pci_link_info("rx_offld_octets", i, + rx_offld_octets); + __hal_aux_pci_link_info("rx_vld_mcst_frms", i, + rx_vld_mcst_frms); + __hal_aux_pci_link_info("rx_vld_bcst_frms", i, + rx_vld_bcst_frms); + __hal_aux_pci_link_info("rx_accepted_ucst_frms", i, + rx_accepted_ucst_frms); + __hal_aux_pci_link_info("rx_accepted_nucst_frms", i, + rx_accepted_nucst_frms); + __hal_aux_pci_link_info("rx_tagged_frms", i, + rx_tagged_frms); + __hal_aux_pci_link_info("rx_long_frms", i, + rx_long_frms); + __hal_aux_pci_link_info("rx_usized_frms", i, + rx_usized_frms); + __hal_aux_pci_link_info("rx_osized_frms", i, + rx_osized_frms); + __hal_aux_pci_link_info("rx_frag_frms", i, + rx_frag_frms); + __hal_aux_pci_link_info("rx_jabber_frms", i, + rx_jabber_frms); + __hal_aux_pci_link_info("rx_ttl_64_frms", i, + rx_ttl_64_frms); + __hal_aux_pci_link_info("rx_ttl_65_127_frms", i, + rx_ttl_65_127_frms); + __hal_aux_pci_link_info("rx_ttl_128_255_frms", i, + rx_ttl_128_255_frms); + __hal_aux_pci_link_info("rx_ttl_256_511_frms", i, + rx_ttl_256_511_frms); + __hal_aux_pci_link_info("rx_ttl_512_1023_frms", i, + rx_ttl_512_1023_frms); + __hal_aux_pci_link_info("rx_ttl_1024_1518_frms", i, + rx_ttl_1024_1518_frms); + __hal_aux_pci_link_info("rx_ttl_1519_4095_frms", i, + rx_ttl_1519_4095_frms); + __hal_aux_pci_link_info("rx_ttl_40956_8191_frms", i, + rx_ttl_40956_8191_frms); + __hal_aux_pci_link_info("rx_ttl_8192_max_frms", i, + rx_ttl_8192_max_frms); + __hal_aux_pci_link_info("rx_ttl_gt_max_frms", i, + rx_ttl_gt_max_frms); + __hal_aux_pci_link_info("rx_ip", i, + rx_ip); + __hal_aux_pci_link_info("rx_ip_octets", i, + rx_ip_octets); + + __hal_aux_pci_link_info("rx_hdr_err_ip", i, + rx_hdr_err_ip); + + __hal_aux_pci_link_info("rx_icmp", i, + rx_icmp); + __hal_aux_pci_link_info("rx_tcp", i, + rx_tcp); + __hal_aux_pci_link_info("rx_udp", i, + rx_udp); + __hal_aux_pci_link_info("rx_err_tcp", i, + rx_err_tcp); + __hal_aux_pci_link_info("rx_pause_cnt", i, + rx_pause_cnt); + __hal_aux_pci_link_info("rx_pause_ctrl_frms", i, + rx_pause_ctrl_frms); + __hal_aux_pci_link_info("rx_unsup_ctrl_frms", i, + rx_pause_cnt); + __hal_aux_pci_link_info("rx_in_rng_len_err_frms", i, + rx_in_rng_len_err_frms); + __hal_aux_pci_link_info("rx_out_rng_len_err_frms", i, + rx_out_rng_len_err_frms); + __hal_aux_pci_link_info("rx_drop_frms", i, + rx_drop_frms); + __hal_aux_pci_link_info("rx_discarded_frms", i, + rx_discarded_frms); + __hal_aux_pci_link_info("rx_drop_ip", i, + rx_drop_ip); + __hal_aux_pci_link_info("rx_err_drp_udp", i, + rx_err_drp_udp); + __hal_aux_pci_link_info("rx_lacpdu_frms", i, + rx_lacpdu_frms); + __hal_aux_pci_link_info("rx_marker_pdu_frms", i, + rx_marker_pdu_frms); + __hal_aux_pci_link_info("rx_marker_resp_pdu_frms", i, + rx_marker_resp_pdu_frms); + __hal_aux_pci_link_info("rx_unknown_pdu_frms", i, + rx_unknown_pdu_frms); + __hal_aux_pci_link_info("rx_illegal_pdu_frms", i, + rx_illegal_pdu_frms); + __hal_aux_pci_link_info("rx_fcs_discard", i, + rx_fcs_discard); + __hal_aux_pci_link_info("rx_len_discard", i, + rx_len_discard); + __hal_aux_pci_link_info("rx_pf_discard", i, + rx_pf_discard); + __hal_aux_pci_link_info("rx_trash_discard", i, + rx_trash_discard); + __hal_aux_pci_link_info("rx_rts_discard", i, + rx_trash_discard); + __hal_aux_pci_link_info("rx_wol_discard", i, + rx_wol_discard); + 
__hal_aux_pci_link_info("rx_red_discard", i, + rx_red_discard); + __hal_aux_pci_link_info("rx_ingm_full_discard", i, + rx_ingm_full_discard); + __hal_aux_pci_link_info("rx_xgmii_data_err_cnt", i, + rx_xgmii_data_err_cnt); + __hal_aux_pci_link_info("rx_xgmii_ctrl_err_cnt", i, + rx_xgmii_ctrl_err_cnt); + __hal_aux_pci_link_info("rx_xgmii_err_sym", i, + rx_xgmii_err_sym); + __hal_aux_pci_link_info("rx_xgmii_char1_match", i, + rx_xgmii_char1_match); + __hal_aux_pci_link_info("rx_xgmii_char2_match", i, + rx_xgmii_char2_match); + __hal_aux_pci_link_info("rx_xgmii_column1_match", i, + rx_xgmii_column1_match); + __hal_aux_pci_link_info("rx_xgmii_column2_match", i, + rx_xgmii_column2_match); + __hal_aux_pci_link_info("rx_local_fault", i, + rx_local_fault); + __hal_aux_pci_link_info("rx_remote_fault", i, + rx_remote_fault); + __hal_aux_pci_link_info("rx_queue_full", i, + rx_queue_full); + } + for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) { + __hal_aux_pci_aggr_info("rx_frms", i, rx_frms); + __hal_aux_pci_link_info("rx_data_octets", i, + rx_data_octets); + __hal_aux_pci_aggr_info("rx_mcst_frms", i, + rx_mcst_frms); + __hal_aux_pci_aggr_info("rx_bcst_frms", i, + rx_bcst_frms); + __hal_aux_pci_aggr_info("rx_discarded_frms", i, + rx_discarded_frms); + __hal_aux_pci_aggr_info("rx_errored_frms", i, + rx_errored_frms); + __hal_aux_pci_aggr_info("rx_unknown_protocol_frms", i, + rx_unknown_protocol_frms); + } } __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -702,7 +694,7 @@ xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize, * xge_hal_aux_device_dump(). */ xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize) + int bufsize, char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_mgmt_hw_stats_t hw; @@ -712,31 +704,31 @@ xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) { - __HAL_AUX_ENTRY_END(bufsize, retsize); + __HAL_AUX_ENTRY_END(bufsize, retsize); - return XGE_HAL_OK; + return XGE_HAL_OK; } status = xge_hal_mgmt_hw_stats(devh, &hw, - sizeof(xge_hal_mgmt_hw_stats_t)); + sizeof(xge_hal_mgmt_hw_stats_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("tmac_frms_oflow", hw.tmac_frms_oflow, "%u"); __HAL_AUX_ENTRY("tmac_data_octets_oflow", hw.tmac_data_octets_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("tmac_mcst_frms_oflow", hw.tmac_mcst_frms_oflow, "%u"); __HAL_AUX_ENTRY("tmac_bcst_frms_oflow", hw.tmac_bcst_frms_oflow, "%u"); __HAL_AUX_ENTRY("tmac_ttl_octets_oflow", hw.tmac_ttl_octets_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("tmac_ucst_frms_oflow", hw.tmac_ucst_frms_oflow, "%u"); __HAL_AUX_ENTRY("tmac_nucst_frms_oflow", hw.tmac_nucst_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("tmac_any_err_frms_oflow", hw.tmac_any_err_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("tmac_vlan_frms", (unsigned long long)hw.tmac_vlan_frms, - "%llu"); + "%llu"); __HAL_AUX_ENTRY("tmac_vld_ip_oflow", hw.tmac_vld_ip_oflow, "%u"); __HAL_AUX_ENTRY("tmac_drop_ip_oflow", hw.tmac_drop_ip_oflow, "%u"); __HAL_AUX_ENTRY("tmac_icmp_oflow", hw.tmac_icmp_oflow, "%u"); @@ -746,51 +738,51 @@ xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, __HAL_AUX_ENTRY("tpa_parse_failure", hw.tpa_parse_failure, "%u"); __HAL_AUX_ENTRY("rmac_vld_frms_oflow", hw.rmac_vld_frms_oflow, "%u"); __HAL_AUX_ENTRY("rmac_data_octets_oflow", hw.rmac_data_octets_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_vld_mcst_frms_oflow", 
hw.rmac_vld_mcst_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_vld_bcst_frms_oflow", hw.rmac_vld_bcst_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_ttl_octets_oflow", hw.rmac_ttl_octets_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_accepted_ucst_frms_oflow", - hw.rmac_accepted_ucst_frms_oflow, "%u"); + hw.rmac_accepted_ucst_frms_oflow, "%u"); __HAL_AUX_ENTRY("rmac_accepted_nucst_frms_oflow", - hw.rmac_accepted_nucst_frms_oflow, "%u"); + hw.rmac_accepted_nucst_frms_oflow, "%u"); __HAL_AUX_ENTRY("rmac_discarded_frms_oflow", - hw.rmac_discarded_frms_oflow, "%u"); + hw.rmac_discarded_frms_oflow, "%u"); __HAL_AUX_ENTRY("rmac_drop_events_oflow", hw.rmac_drop_events_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_usized_frms_oflow", hw.rmac_usized_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_osized_frms_oflow", hw.rmac_osized_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_frag_frms_oflow", hw.rmac_frag_frms_oflow, "%u"); __HAL_AUX_ENTRY("rmac_jabber_frms_oflow", hw.rmac_jabber_frms_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_ip_oflow", hw.rmac_ip_oflow, "%u"); __HAL_AUX_ENTRY("rmac_drop_ip_oflow", hw.rmac_drop_ip_oflow, "%u"); __HAL_AUX_ENTRY("rmac_icmp_oflow", hw.rmac_icmp_oflow, "%u"); __HAL_AUX_ENTRY("rmac_udp_oflow", hw.rmac_udp_oflow, "%u"); __HAL_AUX_ENTRY("rmac_err_drp_udp_oflow", hw.rmac_err_drp_udp_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_pause_cnt_oflow", hw.rmac_pause_cnt_oflow, "%u"); __HAL_AUX_ENTRY("rmac_ttl_1519_4095_frms", - (unsigned long long)hw.rmac_ttl_1519_4095_frms, "%llu"); + (unsigned long long)hw.rmac_ttl_1519_4095_frms, "%llu"); __HAL_AUX_ENTRY("rmac_ttl_4096_8191_frms", - (unsigned long long)hw.rmac_ttl_4096_8191_frms, "%llu"); + (unsigned long long)hw.rmac_ttl_4096_8191_frms, "%llu"); __HAL_AUX_ENTRY("rmac_ttl_8192_max_frms", - (unsigned long long)hw.rmac_ttl_8192_max_frms, "%llu"); + (unsigned long long)hw.rmac_ttl_8192_max_frms, "%llu"); __HAL_AUX_ENTRY("rmac_ttl_gt_max_frms", - (unsigned long long)hw.rmac_ttl_gt_max_frms, "%llu"); + (unsigned long long)hw.rmac_ttl_gt_max_frms, "%llu"); __HAL_AUX_ENTRY("rmac_osized_alt_frms", - (unsigned long long)hw.rmac_osized_alt_frms, "%llu"); + (unsigned long long)hw.rmac_osized_alt_frms, "%llu"); __HAL_AUX_ENTRY("rmac_jabber_alt_frms", - (unsigned long long)hw.rmac_jabber_alt_frms, "%llu"); + (unsigned long long)hw.rmac_jabber_alt_frms, "%llu"); __HAL_AUX_ENTRY("rmac_gt_max_alt_frms", - (unsigned long long)hw.rmac_gt_max_alt_frms, "%llu"); + (unsigned long long)hw.rmac_gt_max_alt_frms, "%llu"); __HAL_AUX_ENTRY("rmac_vlan_frms", - (unsigned long long)hw.rmac_vlan_frms, "%llu"); + (unsigned long long)hw.rmac_vlan_frms, "%llu"); __HAL_AUX_ENTRY("rmac_fcs_discard", hw.rmac_fcs_discard, "%u"); __HAL_AUX_ENTRY("rmac_len_discard", hw.rmac_len_discard, "%u"); __HAL_AUX_ENTRY("rmac_da_discard", hw.rmac_da_discard, "%u"); @@ -798,9 +790,9 @@ xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, __HAL_AUX_ENTRY("rmac_rts_discard", hw.rmac_rts_discard, "%u"); __HAL_AUX_ENTRY("rmac_red_discard", hw.rmac_red_discard, "%u"); __HAL_AUX_ENTRY("rmac_ingm_full_discard", hw.rmac_ingm_full_discard, - "%u"); + "%u"); __HAL_AUX_ENTRY("rmac_accepted_ip_oflow", hw.rmac_accepted_ip_oflow, - "%u"); + "%u"); __HAL_AUX_ENTRY("link_fault_cnt", hw.link_fault_cnt, "%u"); __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -828,7 +820,7 @@ xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh, * xge_hal_aux_device_dump(). 
*/ xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize) + char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_mgmt_hw_stats_t hw; @@ -838,16 +830,16 @@ xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize, if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) { - __HAL_AUX_ENTRY_END(bufsize, retsize); + __HAL_AUX_ENTRY_END(bufsize, retsize); - return XGE_HAL_OK; + return XGE_HAL_OK; } status = xge_hal_mgmt_hw_stats(devh, &hw, - sizeof(xge_hal_mgmt_hw_stats_t)); + sizeof(xge_hal_mgmt_hw_stats_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("new_rd_req_cnt", hw.new_rd_req_cnt, "%u"); @@ -892,7 +884,7 @@ xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize, * See also: xge_hal_aux_device_dump(). */ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize) + int bufsize, char *retbuf, int *retsize) { xge_list_t *item; xge_hal_channel_t *channel; @@ -903,14 +895,14 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); status = xge_hal_mgmt_device_stats(hldev, &devstat, - sizeof(xge_hal_mgmt_device_stats_t)); + sizeof(xge_hal_mgmt_device_stats_t)); if (status != XGE_HAL_OK) { - return status; + return status; } if (!hldev->config.bimodal_interrupts) { - __HAL_AUX_ENTRY("rx_traffic_intr_cnt", - devstat.rx_traffic_intr_cnt, "%u"); + __HAL_AUX_ENTRY("rx_traffic_intr_cnt", + devstat.rx_traffic_intr_cnt, "%u"); } __HAL_AUX_ENTRY("tx_traffic_intr_cnt", devstat.tx_traffic_intr_cnt, "%u"); __HAL_AUX_ENTRY("txpic_intr_cnt", devstat.txpic_intr_cnt, "%u"); @@ -924,121 +916,121 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, __HAL_AUX_ENTRY("mc_intr_cnt", devstat.mc_intr_cnt, "%u"); __HAL_AUX_ENTRY("not_xge_intr_cnt", devstat.not_xge_intr_cnt, "%u"); __HAL_AUX_ENTRY("not_traffic_intr_cnt", - devstat.not_traffic_intr_cnt, "%u"); + devstat.not_traffic_intr_cnt, "%u"); __HAL_AUX_ENTRY("traffic_intr_cnt", devstat.traffic_intr_cnt, "%u"); __HAL_AUX_ENTRY("total_intr_cnt", devstat.total_intr_cnt, "%u"); __HAL_AUX_ENTRY("soft_reset_cnt", devstat.soft_reset_cnt, "%u"); if (hldev->config.rxufca_hi_lim != hldev->config.rxufca_lo_lim && hldev->config.rxufca_lo_lim != 0) { - __HAL_AUX_ENTRY("rxufca_lo_adjust_cnt", - devstat.rxufca_lo_adjust_cnt, "%u"); - __HAL_AUX_ENTRY("rxufca_hi_adjust_cnt", - devstat.rxufca_hi_adjust_cnt, "%u"); + __HAL_AUX_ENTRY("rxufca_lo_adjust_cnt", + devstat.rxufca_lo_adjust_cnt, "%u"); + __HAL_AUX_ENTRY("rxufca_hi_adjust_cnt", + devstat.rxufca_hi_adjust_cnt, "%u"); } if (hldev->config.bimodal_interrupts) { - __HAL_AUX_ENTRY("bimodal_lo_adjust_cnt", - devstat.bimodal_lo_adjust_cnt, "%u"); - __HAL_AUX_ENTRY("bimodal_hi_adjust_cnt", - devstat.bimodal_hi_adjust_cnt, "%u"); + __HAL_AUX_ENTRY("bimodal_lo_adjust_cnt", + devstat.bimodal_lo_adjust_cnt, "%u"); + __HAL_AUX_ENTRY("bimodal_hi_adjust_cnt", + devstat.bimodal_hi_adjust_cnt, "%u"); } #if defined(XGE_HAL_CONFIG_LRO) __HAL_AUX_ENTRY("tot_frms_lroised", - devstat.tot_frms_lroised, "%u"); + devstat.tot_frms_lroised, "%u"); __HAL_AUX_ENTRY("tot_lro_sessions", - devstat.tot_lro_sessions, "%u"); + devstat.tot_lro_sessions, "%u"); __HAL_AUX_ENTRY("lro_frm_len_exceed_cnt", - devstat.lro_frm_len_exceed_cnt, "%u"); + devstat.lro_frm_len_exceed_cnt, "%u"); __HAL_AUX_ENTRY("lro_sg_exceed_cnt", - devstat.lro_sg_exceed_cnt, "%u"); + devstat.lro_sg_exceed_cnt, "%u"); 
__HAL_AUX_ENTRY("lro_out_of_seq_pkt_cnt", - devstat.lro_out_of_seq_pkt_cnt, "%u"); + devstat.lro_out_of_seq_pkt_cnt, "%u"); __HAL_AUX_ENTRY("lro_dup_pkt_cnt", - devstat.lro_dup_pkt_cnt, "%u"); + devstat.lro_dup_pkt_cnt, "%u"); #endif /* for each opened rx channel */ xge_list_for_each(item, &hldev->ring_channels) { - char key[XGE_OS_SPRINTF_STRLEN]; - channel = xge_container_of(item, xge_hal_channel_t, item); - - status = xge_hal_mgmt_channel_stats(channel, &chstat, - sizeof(xge_hal_mgmt_channel_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - - (void) xge_os_sprintf(key, "ring%d_", channel->post_qid); - - xge_os_strcpy(key+6, "full_cnt"); - __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u"); - xge_os_strcpy(key+6, "usage_max"); - __HAL_AUX_ENTRY(key, chstat.usage_max, "%u"); - xge_os_strcpy(key+6, "usage_cnt"); - __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); - xge_os_strcpy(key+6, "reserve_free_swaps_cnt"); - __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u"); - if (!hldev->config.bimodal_interrupts) { - xge_os_strcpy(key+6, "avg_compl_per_intr_cnt"); - __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u"); - } - xge_os_strcpy(key+6, "total_compl_cnt"); - __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u"); - xge_os_strcpy(key+6, "bump_cnt"); - __HAL_AUX_ENTRY(key, chstat.ring_bump_cnt, "%u"); + char key[XGE_OS_SPRINTF_STRLEN]; + channel = xge_container_of(item, xge_hal_channel_t, item); + + status = xge_hal_mgmt_channel_stats(channel, &chstat, + sizeof(xge_hal_mgmt_channel_stats_t)); + if (status != XGE_HAL_OK) { + return status; + } + + (void) xge_os_sprintf(key, "ring%d_", channel->post_qid); + + xge_os_strcpy(key+6, "full_cnt"); + __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u"); + xge_os_strcpy(key+6, "usage_max"); + __HAL_AUX_ENTRY(key, chstat.usage_max, "%u"); + xge_os_strcpy(key+6, "usage_cnt"); + __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); + xge_os_strcpy(key+6, "reserve_free_swaps_cnt"); + __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u"); + if (!hldev->config.bimodal_interrupts) { + xge_os_strcpy(key+6, "avg_compl_per_intr_cnt"); + __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u"); + } + xge_os_strcpy(key+6, "total_compl_cnt"); + __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u"); + xge_os_strcpy(key+6, "bump_cnt"); + __HAL_AUX_ENTRY(key, chstat.ring_bump_cnt, "%u"); } /* for each opened tx channel */ xge_list_for_each(item, &hldev->fifo_channels) { - char key[XGE_OS_SPRINTF_STRLEN]; - channel = xge_container_of(item, xge_hal_channel_t, item); - - status = xge_hal_mgmt_channel_stats(channel, &chstat, - sizeof(xge_hal_mgmt_channel_stats_t)); - if (status != XGE_HAL_OK) { - return status; - } - - (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid); - - xge_os_strcpy(key+6, "full_cnt"); - __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u"); - xge_os_strcpy(key+6, "usage_max"); - __HAL_AUX_ENTRY(key, chstat.usage_max, "%u"); - xge_os_strcpy(key+6, "usage_cnt"); - __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); - xge_os_strcpy(key+6, "reserve_free_swaps_cnt"); - __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u"); - xge_os_strcpy(key+6, "avg_compl_per_intr_cnt"); - __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u"); - xge_os_strcpy(key+6, "total_compl_cnt"); - __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u"); - xge_os_strcpy(key+6, "total_posts"); - __HAL_AUX_ENTRY(key, chstat.total_posts, "%u"); - xge_os_strcpy(key+6, "total_posts_many"); - __HAL_AUX_ENTRY(key, chstat.total_posts_many, "%u"); - xge_os_strcpy(key+6, "copied_frags"); - 
__HAL_AUX_ENTRY(key, chstat.copied_frags, "%u"); - xge_os_strcpy(key+6, "copied_buffers"); - __HAL_AUX_ENTRY(key, chstat.copied_buffers, "%u"); - xge_os_strcpy(key+6, "total_buffers"); - __HAL_AUX_ENTRY(key, chstat.total_buffers, "%u"); - xge_os_strcpy(key+6, "avg_buffers_per_post"); - __HAL_AUX_ENTRY(key, chstat.avg_buffers_per_post, "%u"); - xge_os_strcpy(key+6, "avg_buffer_size"); - __HAL_AUX_ENTRY(key, chstat.avg_buffer_size, "%u"); - xge_os_strcpy(key+6, "avg_post_size"); - __HAL_AUX_ENTRY(key, chstat.avg_post_size, "%u"); - xge_os_strcpy(key+6, "total_posts_dtrs_many"); - __HAL_AUX_ENTRY(key, chstat.total_posts_dtrs_many, "%u"); - xge_os_strcpy(key+6, "total_posts_frags_many"); - __HAL_AUX_ENTRY(key, chstat.total_posts_frags_many, "%u"); - xge_os_strcpy(key+6, "total_posts_dang_dtrs"); - __HAL_AUX_ENTRY(key, chstat.total_posts_dang_dtrs, "%u"); - xge_os_strcpy(key+6, "total_posts_dang_frags"); - __HAL_AUX_ENTRY(key, chstat.total_posts_dang_frags, "%u"); + char key[XGE_OS_SPRINTF_STRLEN]; + channel = xge_container_of(item, xge_hal_channel_t, item); + + status = xge_hal_mgmt_channel_stats(channel, &chstat, + sizeof(xge_hal_mgmt_channel_stats_t)); + if (status != XGE_HAL_OK) { + return status; + } + + (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid); + + xge_os_strcpy(key+6, "full_cnt"); + __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u"); + xge_os_strcpy(key+6, "usage_max"); + __HAL_AUX_ENTRY(key, chstat.usage_max, "%u"); + xge_os_strcpy(key+6, "usage_cnt"); + __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); + xge_os_strcpy(key+6, "reserve_free_swaps_cnt"); + __HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u"); + xge_os_strcpy(key+6, "avg_compl_per_intr_cnt"); + __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u"); + xge_os_strcpy(key+6, "total_compl_cnt"); + __HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u"); + xge_os_strcpy(key+6, "total_posts"); + __HAL_AUX_ENTRY(key, chstat.total_posts, "%u"); + xge_os_strcpy(key+6, "total_posts_many"); + __HAL_AUX_ENTRY(key, chstat.total_posts_many, "%u"); + xge_os_strcpy(key+6, "copied_frags"); + __HAL_AUX_ENTRY(key, chstat.copied_frags, "%u"); + xge_os_strcpy(key+6, "copied_buffers"); + __HAL_AUX_ENTRY(key, chstat.copied_buffers, "%u"); + xge_os_strcpy(key+6, "total_buffers"); + __HAL_AUX_ENTRY(key, chstat.total_buffers, "%u"); + xge_os_strcpy(key+6, "avg_buffers_per_post"); + __HAL_AUX_ENTRY(key, chstat.avg_buffers_per_post, "%u"); + xge_os_strcpy(key+6, "avg_buffer_size"); + __HAL_AUX_ENTRY(key, chstat.avg_buffer_size, "%u"); + xge_os_strcpy(key+6, "avg_post_size"); + __HAL_AUX_ENTRY(key, chstat.avg_post_size, "%u"); + xge_os_strcpy(key+6, "total_posts_dtrs_many"); + __HAL_AUX_ENTRY(key, chstat.total_posts_dtrs_many, "%u"); + xge_os_strcpy(key+6, "total_posts_frags_many"); + __HAL_AUX_ENTRY(key, chstat.total_posts_frags_many, "%u"); + xge_os_strcpy(key+6, "total_posts_dang_dtrs"); + __HAL_AUX_ENTRY(key, chstat.total_posts_dang_dtrs, "%u"); + xge_os_strcpy(key+6, "total_posts_dang_frags"); + __HAL_AUX_ENTRY(key, chstat.total_posts_dang_frags, "%u"); } __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -1066,7 +1058,7 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, * See also: xge_hal_aux_device_dump(). 
*/ xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize) + int bufsize, char *retbuf, int *retsize) { xge_hal_device_t *hldev = (xge_hal_device_t*)devh; xge_hal_status_e status; @@ -1077,9 +1069,9 @@ xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); status = xge_hal_mgmt_sw_stats(hldev, &sw_dev_err_stats, - sizeof(xge_hal_mgmt_sw_stats_t)); + sizeof(xge_hal_mgmt_sw_stats_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("sm_err_cnt",sw_dev_err_stats.sm_err_cnt, "%u"); @@ -1092,39 +1084,39 @@ xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh, for (t_code = 1; t_code < 16; t_code++) { int t_code_cnt = sw_dev_err_stats.rxd_t_code_err_cnt[t_code]; if (t_code_cnt) { - (void) xge_os_sprintf(buf, "rxd_t_code_%d", t_code); - __HAL_AUX_ENTRY(buf, t_code_cnt, "%u"); + (void) xge_os_sprintf(buf, "rxd_t_code_%d", t_code); + __HAL_AUX_ENTRY(buf, t_code_cnt, "%u"); } t_code_cnt = sw_dev_err_stats.txd_t_code_err_cnt[t_code]; - if (t_code_cnt) { - (void) xge_os_sprintf(buf, "txd_t_code_%d", t_code); - __HAL_AUX_ENTRY(buf, t_code_cnt, "%u"); - } + if (t_code_cnt) { + (void) xge_os_sprintf(buf, "txd_t_code_%d", t_code); + __HAL_AUX_ENTRY(buf, t_code_cnt, "%u"); + } } __HAL_AUX_ENTRY("alarm_transceiver_temp_high",sw_dev_err_stats. - stats_xpak.alarm_transceiver_temp_high, "%u"); + stats_xpak.alarm_transceiver_temp_high, "%u"); __HAL_AUX_ENTRY("alarm_transceiver_temp_low",sw_dev_err_stats. - stats_xpak.alarm_transceiver_temp_low, "%u"); + stats_xpak.alarm_transceiver_temp_low, "%u"); __HAL_AUX_ENTRY("alarm_laser_bias_current_high",sw_dev_err_stats. - stats_xpak.alarm_laser_bias_current_high, "%u"); + stats_xpak.alarm_laser_bias_current_high, "%u"); __HAL_AUX_ENTRY("alarm_laser_bias_current_low",sw_dev_err_stats. - stats_xpak.alarm_laser_bias_current_low, "%u"); + stats_xpak.alarm_laser_bias_current_low, "%u"); __HAL_AUX_ENTRY("alarm_laser_output_power_high",sw_dev_err_stats. - stats_xpak.alarm_laser_output_power_high, "%u"); + stats_xpak.alarm_laser_output_power_high, "%u"); __HAL_AUX_ENTRY("alarm_laser_output_power_low",sw_dev_err_stats. - stats_xpak.alarm_laser_output_power_low, "%u"); + stats_xpak.alarm_laser_output_power_low, "%u"); __HAL_AUX_ENTRY("warn_transceiver_temp_high",sw_dev_err_stats. - stats_xpak.warn_transceiver_temp_high, "%u"); + stats_xpak.warn_transceiver_temp_high, "%u"); __HAL_AUX_ENTRY("warn_transceiver_temp_low",sw_dev_err_stats. - stats_xpak.warn_transceiver_temp_low, "%u"); + stats_xpak.warn_transceiver_temp_low, "%u"); __HAL_AUX_ENTRY("warn_laser_bias_current_high",sw_dev_err_stats. - stats_xpak.warn_laser_bias_current_high, "%u"); + stats_xpak.warn_laser_bias_current_high, "%u"); __HAL_AUX_ENTRY("warn_laser_bias_current_low",sw_dev_err_stats. - stats_xpak.warn_laser_bias_current_low, "%u"); + stats_xpak.warn_laser_bias_current_low, "%u"); __HAL_AUX_ENTRY("warn_laser_output_power_high",sw_dev_err_stats. - stats_xpak.warn_laser_output_power_high, "%u"); + stats_xpak.warn_laser_output_power_high, "%u"); __HAL_AUX_ENTRY("warn_laser_output_power_low",sw_dev_err_stats. - stats_xpak.warn_laser_output_power_low, "%u"); + stats_xpak.warn_laser_output_power_low, "%u"); __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -1148,17 +1140,17 @@ xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh, * * See also: xge_hal_mgmt_pci_config(), xge_hal_aux_device_dump(). 
*/ -xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, - char *retbuf, int *retsize) +xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, + char *retbuf, int *retsize) { xge_hal_status_e status; xge_hal_mgmt_pci_config_t pci_config; __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); status = xge_hal_mgmt_pci_config(devh, &pci_config, - sizeof(xge_hal_mgmt_pci_config_t)); + sizeof(xge_hal_mgmt_pci_config_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("vendor_id", pci_config.vendor_id, "0x%04X"); @@ -1170,7 +1162,7 @@ xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY("pciClass2", pci_config.pciClass[1], "0x%02X"); __HAL_AUX_ENTRY("pciClass3", pci_config.pciClass[2], "0x%02X"); __HAL_AUX_ENTRY("cache_line_size", - pci_config.cache_line_size, "0x%02X"); + pci_config.cache_line_size, "0x%02X"); __HAL_AUX_ENTRY("latency_timer", pci_config.latency_timer, "0x%02X"); __HAL_AUX_ENTRY("header_type", pci_config.header_type, "0x%02X"); __HAL_AUX_ENTRY("bist", pci_config.bist, "0x%02X"); @@ -1179,17 +1171,17 @@ xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY("base_addr1_lo", pci_config.base_addr1_lo, "0x%08X"); __HAL_AUX_ENTRY("base_addr1_hi", pci_config.base_addr1_hi, "0x%08X"); __HAL_AUX_ENTRY("not_Implemented1", - pci_config.not_Implemented1, "0x%08X"); + pci_config.not_Implemented1, "0x%08X"); __HAL_AUX_ENTRY("not_Implemented2", pci_config.not_Implemented2, - "0x%08X"); + "0x%08X"); __HAL_AUX_ENTRY("cardbus_cis_pointer", pci_config.cardbus_cis_pointer, - "0x%08X"); + "0x%08X"); __HAL_AUX_ENTRY("subsystem_vendor_id", pci_config.subsystem_vendor_id, - "0x%04X"); + "0x%04X"); __HAL_AUX_ENTRY("subsystem_id", pci_config.subsystem_id, "0x%04X"); __HAL_AUX_ENTRY("rom_base", pci_config.rom_base, "0x%08X"); __HAL_AUX_ENTRY("capabilities_pointer", - pci_config.capabilities_pointer, "0x%02X"); + pci_config.capabilities_pointer, "0x%02X"); __HAL_AUX_ENTRY("interrupt_line", pci_config.interrupt_line, "0x%02X"); __HAL_AUX_ENTRY("interrupt_pin", pci_config.interrupt_pin, "0x%02X"); __HAL_AUX_ENTRY("min_grant", pci_config.min_grant, "0x%02X"); @@ -1198,9 +1190,9 @@ xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY("msi_next_ptr", pci_config.msi_next_ptr, "0x%02X"); __HAL_AUX_ENTRY("msi_control", pci_config.msi_control, "0x%04X"); __HAL_AUX_ENTRY("msi_lower_address", pci_config.msi_lower_address, - "0x%08X"); + "0x%08X"); __HAL_AUX_ENTRY("msi_higher_address", pci_config.msi_higher_address, - "0x%08X"); + "0x%08X"); __HAL_AUX_ENTRY("msi_data", pci_config.msi_data, "0x%04X"); __HAL_AUX_ENTRY("msi_unused", pci_config.msi_unused, "0x%04X"); __HAL_AUX_ENTRY("vpd_cap_id", pci_config.vpd_cap_id, "0x%02X"); @@ -1213,16 +1205,16 @@ xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, __HAL_AUX_ENTRY("pcix_status", pci_config.pcix_status, "0x%08X"); if (xge_hal_device_check_id(devh) == XGE_HAL_CARD_HERC) { - char key[XGE_OS_SPRINTF_STRLEN]; - int i; - - for (i = 0; - i < (XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE - 0x68)/4; - i++) { - (void) xge_os_sprintf(key, "%03x:", 4*i + 0x68); - __HAL_AUX_ENTRY(key, *((int *)pci_config.rsvd_b1 + i), - "0x%08X"); - } + char key[XGE_OS_SPRINTF_STRLEN]; + int i; + + for (i = 0; + i < (XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE - 0x68)/4; + i++) { + (void) xge_os_sprintf(key, "%03x:", 4*i + 0x68); + __HAL_AUX_ENTRY(key, *((int *)pci_config.rsvd_b1 
+ i), + "0x%08X"); + } } __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -1246,7 +1238,7 @@ xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize, * See also: xge_hal_aux_device_dump(). */ xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize) + int bufsize, char *retbuf, int *retsize) { xge_list_t *item; xge_hal_channel_t *channel; @@ -1254,81 +1246,81 @@ xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); if (hldev->magic != XGE_HAL_MAGIC) { - return XGE_HAL_ERR_INVALID_DEVICE; - } + return XGE_HAL_ERR_INVALID_DEVICE; + } /* for each opened rx channel */ xge_list_for_each(item, &hldev->ring_channels) { - char key[XGE_OS_SPRINTF_STRLEN]; - channel = xge_container_of(item, xge_hal_channel_t, item); - - if (channel->is_open != 1) - continue; - - (void) xge_os_sprintf(key, "ring%d_", channel->post_qid); - xge_os_strcpy(key+6, "type"); - __HAL_AUX_ENTRY(key, channel->type, "%u"); - xge_os_strcpy(key+6, "length"); - __HAL_AUX_ENTRY(key, channel->length, "%u"); - xge_os_strcpy(key+6, "is_open"); - __HAL_AUX_ENTRY(key, channel->is_open, "%u"); - xge_os_strcpy(key+6, "reserve_initial"); - __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u"); - xge_os_strcpy(key+6, "reserve_max"); - __HAL_AUX_ENTRY(key, channel->reserve_max, "%u"); - xge_os_strcpy(key+6, "reserve_length"); - __HAL_AUX_ENTRY(key, channel->reserve_length, "%u"); - xge_os_strcpy(key+6, "reserve_top"); - __HAL_AUX_ENTRY(key, channel->reserve_top, "%u"); - xge_os_strcpy(key+6, "reserve_threshold"); - __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u"); - xge_os_strcpy(key+6, "free_length"); - __HAL_AUX_ENTRY(key, channel->free_length, "%u"); - xge_os_strcpy(key+6, "post_index"); - __HAL_AUX_ENTRY(key, channel->post_index, "%u"); - xge_os_strcpy(key+6, "compl_index"); - __HAL_AUX_ENTRY(key, channel->compl_index, "%u"); - xge_os_strcpy(key+6, "per_dtr_space"); - __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u"); - xge_os_strcpy(key+6, "usage_cnt"); - __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); + char key[XGE_OS_SPRINTF_STRLEN]; + channel = xge_container_of(item, xge_hal_channel_t, item); + + if (channel->is_open != 1) + continue; + + (void) xge_os_sprintf(key, "ring%d_", channel->post_qid); + xge_os_strcpy(key+6, "type"); + __HAL_AUX_ENTRY(key, channel->type, "%u"); + xge_os_strcpy(key+6, "length"); + __HAL_AUX_ENTRY(key, channel->length, "%u"); + xge_os_strcpy(key+6, "is_open"); + __HAL_AUX_ENTRY(key, channel->is_open, "%u"); + xge_os_strcpy(key+6, "reserve_initial"); + __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u"); + xge_os_strcpy(key+6, "reserve_max"); + __HAL_AUX_ENTRY(key, channel->reserve_max, "%u"); + xge_os_strcpy(key+6, "reserve_length"); + __HAL_AUX_ENTRY(key, channel->reserve_length, "%u"); + xge_os_strcpy(key+6, "reserve_top"); + __HAL_AUX_ENTRY(key, channel->reserve_top, "%u"); + xge_os_strcpy(key+6, "reserve_threshold"); + __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u"); + xge_os_strcpy(key+6, "free_length"); + __HAL_AUX_ENTRY(key, channel->free_length, "%u"); + xge_os_strcpy(key+6, "post_index"); + __HAL_AUX_ENTRY(key, channel->post_index, "%u"); + xge_os_strcpy(key+6, "compl_index"); + __HAL_AUX_ENTRY(key, channel->compl_index, "%u"); + xge_os_strcpy(key+6, "per_dtr_space"); + __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u"); + xge_os_strcpy(key+6, "usage_cnt"); + __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); } /* for each opened tx channel */ xge_list_for_each(item, 
&hldev->fifo_channels) { - char key[XGE_OS_SPRINTF_STRLEN]; - channel = xge_container_of(item, xge_hal_channel_t, item); - - if (channel->is_open != 1) - continue; - - (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid); - xge_os_strcpy(key+6, "type"); - __HAL_AUX_ENTRY(key, channel->type, "%u"); - xge_os_strcpy(key+6, "length"); - __HAL_AUX_ENTRY(key, channel->length, "%u"); - xge_os_strcpy(key+6, "is_open"); - __HAL_AUX_ENTRY(key, channel->is_open, "%u"); - xge_os_strcpy(key+6, "reserve_initial"); - __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u"); - xge_os_strcpy(key+6, "reserve_max"); - __HAL_AUX_ENTRY(key, channel->reserve_max, "%u"); - xge_os_strcpy(key+6, "reserve_length"); - __HAL_AUX_ENTRY(key, channel->reserve_length, "%u"); - xge_os_strcpy(key+6, "reserve_top"); - __HAL_AUX_ENTRY(key, channel->reserve_top, "%u"); - xge_os_strcpy(key+6, "reserve_threshold"); - __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u"); - xge_os_strcpy(key+6, "free_length"); - __HAL_AUX_ENTRY(key, channel->free_length, "%u"); - xge_os_strcpy(key+6, "post_index"); - __HAL_AUX_ENTRY(key, channel->post_index, "%u"); - xge_os_strcpy(key+6, "compl_index"); - __HAL_AUX_ENTRY(key, channel->compl_index, "%u"); - xge_os_strcpy(key+6, "per_dtr_space"); - __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u"); - xge_os_strcpy(key+6, "usage_cnt"); - __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); + char key[XGE_OS_SPRINTF_STRLEN]; + channel = xge_container_of(item, xge_hal_channel_t, item); + + if (channel->is_open != 1) + continue; + + (void) xge_os_sprintf(key, "fifo%d_", channel->post_qid); + xge_os_strcpy(key+6, "type"); + __HAL_AUX_ENTRY(key, channel->type, "%u"); + xge_os_strcpy(key+6, "length"); + __HAL_AUX_ENTRY(key, channel->length, "%u"); + xge_os_strcpy(key+6, "is_open"); + __HAL_AUX_ENTRY(key, channel->is_open, "%u"); + xge_os_strcpy(key+6, "reserve_initial"); + __HAL_AUX_ENTRY(key, channel->reserve_initial, "%u"); + xge_os_strcpy(key+6, "reserve_max"); + __HAL_AUX_ENTRY(key, channel->reserve_max, "%u"); + xge_os_strcpy(key+6, "reserve_length"); + __HAL_AUX_ENTRY(key, channel->reserve_length, "%u"); + xge_os_strcpy(key+6, "reserve_top"); + __HAL_AUX_ENTRY(key, channel->reserve_top, "%u"); + xge_os_strcpy(key+6, "reserve_threshold"); + __HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u"); + xge_os_strcpy(key+6, "free_length"); + __HAL_AUX_ENTRY(key, channel->free_length, "%u"); + xge_os_strcpy(key+6, "post_index"); + __HAL_AUX_ENTRY(key, channel->post_index, "%u"); + xge_os_strcpy(key+6, "compl_index"); + __HAL_AUX_ENTRY(key, channel->compl_index, "%u"); + xge_os_strcpy(key+6, "per_dtr_space"); + __HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u"); + xge_os_strcpy(key+6, "usage_cnt"); + __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u"); } __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -1368,22 +1360,22 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); for (offset = 0; offset < 1574; offset++) { - status = xge_hal_mgmt_reg_read(hldev, 0, offset*8, &retval); - if (status != XGE_HAL_OK) { - goto error; - } + status = xge_hal_mgmt_reg_read(hldev, 0, offset*8, &retval); + if (status != XGE_HAL_OK) { + goto error; + } - if (!retval) continue; + if (!retval) continue; - xge_os_printf("0x%04x 0x%08x%08x", offset*8, - (u32)(retval>>32), (u32)retval); + xge_os_printf("0x%04x 0x%08x%08x", offset*8, + (u32)(retval>>32), (u32)retval); } xge_os_println("\n"); @@ -1391,7 +1383,7 @@ 
xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1399,7 +1391,7 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1407,7 +1399,7 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1415,23 +1407,23 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - status = xge_hal_aux_stats_herc_enchanced(hldev, - XGE_HAL_DUMP_BUF_SIZE, hldev->dump_buf, &retsize); - if (status != XGE_HAL_OK) { - goto error; - } - xge_os_println(hldev->dump_buf); + status = xge_hal_aux_stats_herc_enchanced(hldev, + XGE_HAL_DUMP_BUF_SIZE, hldev->dump_buf, &retsize); + if (status != XGE_HAL_OK) { + goto error; + } + xge_os_println(hldev->dump_buf); } status = xge_hal_aux_stats_sw_dev_read(hldev, XGE_HAL_DUMP_BUF_SIZE, - hldev->dump_buf, &retsize); + hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1439,7 +1431,7 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1447,7 +1439,7 @@ xge_hal_aux_device_dump(xge_hal_device_h devh) hldev->dump_buf, &retsize); if (status != XGE_HAL_OK) { - goto error; + goto error; } xge_os_println(hldev->dump_buf); @@ -1479,13 +1471,13 @@ xge_hal_aux_driver_config_read(int bufsize, char *retbuf, int *retsize) __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); status = xge_hal_mgmt_driver_config(&drv_config, - sizeof(xge_hal_driver_config_t)); + sizeof(xge_hal_driver_config_t)); if (status != XGE_HAL_OK) { - return status; + return status; } __HAL_AUX_ENTRY("queue size initial", - drv_config.queue_size_initial, "%u"); + drv_config.queue_size_initial, "%u"); __HAL_AUX_ENTRY("queue size max", drv_config.queue_size_max, "%u"); __HAL_AUX_ENTRY_END(bufsize, retsize); @@ -1509,7 +1501,7 @@ xge_hal_aux_driver_config_read(int bufsize, char *retbuf, int *retsize) * See also: xge_hal_aux_driver_config_read(). 
*/ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh, - int bufsize, char *retbuf, int *retsize) + int bufsize, char *retbuf, int *retsize) { int i; xge_hal_status_e status; @@ -1519,36 +1511,36 @@ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh, __HAL_AUX_ENTRY_DECLARE(bufsize, retbuf); dev_config = (xge_hal_device_config_t *) xge_os_malloc(hldev->pdev, - sizeof(xge_hal_device_config_t)); + sizeof(xge_hal_device_config_t)); if (dev_config == NULL) { - return XGE_HAL_FAIL; + return XGE_HAL_FAIL; } status = xge_hal_mgmt_device_config(devh, dev_config, - sizeof(xge_hal_device_config_t)); + sizeof(xge_hal_device_config_t)); if (status != XGE_HAL_OK) { - xge_os_free(hldev->pdev, dev_config, - sizeof(xge_hal_device_config_t)); - return status; + xge_os_free(hldev->pdev, dev_config, + sizeof(xge_hal_device_config_t)); + return status; } __HAL_AUX_ENTRY("mtu", dev_config->mtu, "%u"); __HAL_AUX_ENTRY("isr_polling_count", dev_config->isr_polling_cnt, "%u"); __HAL_AUX_ENTRY("latency_timer", dev_config->latency_timer, "%u"); __HAL_AUX_ENTRY("max_splits_trans", - dev_config->max_splits_trans, "%u"); + dev_config->max_splits_trans, "%u"); __HAL_AUX_ENTRY("mmrb_count", dev_config->mmrb_count, "%d"); __HAL_AUX_ENTRY("shared_splits", dev_config->shared_splits, "%u"); __HAL_AUX_ENTRY("stats_refresh_time_sec", - dev_config->stats_refresh_time_sec, "%u"); + dev_config->stats_refresh_time_sec, "%u"); __HAL_AUX_ENTRY("pci_freq_mherz", dev_config->pci_freq_mherz, "%u"); __HAL_AUX_ENTRY("intr_mode", dev_config->intr_mode, "%u"); __HAL_AUX_ENTRY("ring_memblock_size", - dev_config->ring.memblock_size, "%u"); + dev_config->ring.memblock_size, "%u"); __HAL_AUX_ENTRY("sched_timer_us", dev_config->sched_timer_us, "%u"); __HAL_AUX_ENTRY("sched_timer_one_shot", - dev_config->sched_timer_one_shot, "%u"); + dev_config->sched_timer_one_shot, "%u"); __HAL_AUX_ENTRY("rxufca_intr_thres", dev_config->rxufca_intr_thres, "%u"); __HAL_AUX_ENTRY("rxufca_lo_lim", dev_config->rxufca_lo_lim, "%u"); __HAL_AUX_ENTRY("rxufca_hi_lim", dev_config->rxufca_hi_lim, "%u"); @@ -1556,175 +1548,175 @@ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh, for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_ring_queue_t *ring = &dev_config->ring.queue[i]; - xge_hal_rti_config_t *rti = &ring->rti; - - if (!ring->configured) - continue; - - (void) xge_os_sprintf(key, "ring%d_", i); - xge_os_strcpy(key+6, "inital"); - __HAL_AUX_ENTRY(key, ring->initial, "%u"); - xge_os_strcpy(key+6, "max"); - __HAL_AUX_ENTRY(key, ring->max, "%u"); - xge_os_strcpy(key+6, "buffer_mode"); - __HAL_AUX_ENTRY(key, ring->buffer_mode, "%u"); - xge_os_strcpy(key+6, "dram_size_mb"); - __HAL_AUX_ENTRY(key, ring->dram_size_mb, "%u"); - xge_os_strcpy(key+6, "backoff_interval_us"); - __HAL_AUX_ENTRY(key, ring->backoff_interval_us, "%u"); - xge_os_strcpy(key+6, "max_frame_len"); - __HAL_AUX_ENTRY(key, ring->max_frm_len, "%d"); - xge_os_strcpy(key+6, "priority"); - __HAL_AUX_ENTRY(key, ring->priority, "%u"); - xge_os_strcpy(key+6, "rth_en"); - __HAL_AUX_ENTRY(key, ring->rth_en, "%u"); - xge_os_strcpy(key+6, "no_snoop_bits"); - __HAL_AUX_ENTRY(key, ring->no_snoop_bits, "%u"); - xge_os_strcpy(key+6, "indicate_max_pkts"); - __HAL_AUX_ENTRY(key, ring->indicate_max_pkts, "%u"); - - xge_os_strcpy(key+6, "urange_a"); - __HAL_AUX_ENTRY(key, rti->urange_a, "%u"); - xge_os_strcpy(key+6, "ufc_a"); - __HAL_AUX_ENTRY(key, rti->ufc_a, "%u"); - xge_os_strcpy(key+6, "urange_b"); - __HAL_AUX_ENTRY(key, rti->urange_b, "%u"); - 
xge_os_strcpy(key+6, "ufc_b"); - __HAL_AUX_ENTRY(key, rti->ufc_b, "%u"); - xge_os_strcpy(key+6, "urange_c"); - __HAL_AUX_ENTRY(key, rti->urange_c, "%u"); - xge_os_strcpy(key+6, "ufc_c"); - __HAL_AUX_ENTRY(key, rti->ufc_c, "%u"); - xge_os_strcpy(key+6, "ufc_d"); - __HAL_AUX_ENTRY(key, rti->ufc_d, "%u"); - xge_os_strcpy(key+6, "timer_val_us"); - __HAL_AUX_ENTRY(key, rti->timer_val_us, "%u"); + xge_hal_ring_queue_t *ring = &dev_config->ring.queue[i]; + xge_hal_rti_config_t *rti = &ring->rti; + + if (!ring->configured) + continue; + + (void) xge_os_sprintf(key, "ring%d_", i); + xge_os_strcpy(key+6, "inital"); + __HAL_AUX_ENTRY(key, ring->initial, "%u"); + xge_os_strcpy(key+6, "max"); + __HAL_AUX_ENTRY(key, ring->max, "%u"); + xge_os_strcpy(key+6, "buffer_mode"); + __HAL_AUX_ENTRY(key, ring->buffer_mode, "%u"); + xge_os_strcpy(key+6, "dram_size_mb"); + __HAL_AUX_ENTRY(key, ring->dram_size_mb, "%u"); + xge_os_strcpy(key+6, "backoff_interval_us"); + __HAL_AUX_ENTRY(key, ring->backoff_interval_us, "%u"); + xge_os_strcpy(key+6, "max_frame_len"); + __HAL_AUX_ENTRY(key, ring->max_frm_len, "%d"); + xge_os_strcpy(key+6, "priority"); + __HAL_AUX_ENTRY(key, ring->priority, "%u"); + xge_os_strcpy(key+6, "rth_en"); + __HAL_AUX_ENTRY(key, ring->rth_en, "%u"); + xge_os_strcpy(key+6, "no_snoop_bits"); + __HAL_AUX_ENTRY(key, ring->no_snoop_bits, "%u"); + xge_os_strcpy(key+6, "indicate_max_pkts"); + __HAL_AUX_ENTRY(key, ring->indicate_max_pkts, "%u"); + + xge_os_strcpy(key+6, "urange_a"); + __HAL_AUX_ENTRY(key, rti->urange_a, "%u"); + xge_os_strcpy(key+6, "ufc_a"); + __HAL_AUX_ENTRY(key, rti->ufc_a, "%u"); + xge_os_strcpy(key+6, "urange_b"); + __HAL_AUX_ENTRY(key, rti->urange_b, "%u"); + xge_os_strcpy(key+6, "ufc_b"); + __HAL_AUX_ENTRY(key, rti->ufc_b, "%u"); + xge_os_strcpy(key+6, "urange_c"); + __HAL_AUX_ENTRY(key, rti->urange_c, "%u"); + xge_os_strcpy(key+6, "ufc_c"); + __HAL_AUX_ENTRY(key, rti->ufc_c, "%u"); + xge_os_strcpy(key+6, "ufc_d"); + __HAL_AUX_ENTRY(key, rti->ufc_d, "%u"); + xge_os_strcpy(key+6, "timer_val_us"); + __HAL_AUX_ENTRY(key, rti->timer_val_us, "%u"); } { - xge_hal_mac_config_t *mac= &dev_config->mac; - - __HAL_AUX_ENTRY("tmac_util_period", - mac->tmac_util_period, "%u"); - __HAL_AUX_ENTRY("rmac_util_period", - mac->rmac_util_period, "%u"); - __HAL_AUX_ENTRY("rmac_bcast_en", - mac->rmac_bcast_en, "%u"); - __HAL_AUX_ENTRY("rmac_pause_gen_en", - mac->rmac_pause_gen_en, "%d"); - __HAL_AUX_ENTRY("rmac_pause_rcv_en", - mac->rmac_pause_rcv_en, "%d"); - __HAL_AUX_ENTRY("rmac_pause_time", - mac->rmac_pause_time, "%u"); - __HAL_AUX_ENTRY("mc_pause_threshold_q0q3", - mac->mc_pause_threshold_q0q3, "%u"); - __HAL_AUX_ENTRY("mc_pause_threshold_q4q7", - mac->mc_pause_threshold_q4q7, "%u"); + xge_hal_mac_config_t *mac= &dev_config->mac; + + __HAL_AUX_ENTRY("tmac_util_period", + mac->tmac_util_period, "%u"); + __HAL_AUX_ENTRY("rmac_util_period", + mac->rmac_util_period, "%u"); + __HAL_AUX_ENTRY("rmac_bcast_en", + mac->rmac_bcast_en, "%u"); + __HAL_AUX_ENTRY("rmac_pause_gen_en", + mac->rmac_pause_gen_en, "%d"); + __HAL_AUX_ENTRY("rmac_pause_rcv_en", + mac->rmac_pause_rcv_en, "%d"); + __HAL_AUX_ENTRY("rmac_pause_time", + mac->rmac_pause_time, "%u"); + __HAL_AUX_ENTRY("mc_pause_threshold_q0q3", + mac->mc_pause_threshold_q0q3, "%u"); + __HAL_AUX_ENTRY("mc_pause_threshold_q4q7", + mac->mc_pause_threshold_q4q7, "%u"); } __HAL_AUX_ENTRY("fifo_max_frags", dev_config->fifo.max_frags, "%u"); __HAL_AUX_ENTRY("fifo_reserve_threshold", - dev_config->fifo.reserve_threshold, "%u"); + dev_config->fifo.reserve_threshold, 
"%u"); __HAL_AUX_ENTRY("fifo_memblock_size", - dev_config->fifo.memblock_size, "%u"); + dev_config->fifo.memblock_size, "%u"); #ifdef XGE_HAL_ALIGN_XMIT __HAL_AUX_ENTRY("fifo_alignment_size", - dev_config->fifo.alignment_size, "%u"); + dev_config->fifo.alignment_size, "%u"); #endif for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { - int j; - xge_hal_fifo_queue_t *fifo = &dev_config->fifo.queue[i]; - - if (!fifo->configured) - continue; - - (void) xge_os_sprintf(key, "fifo%d_", i); - xge_os_strcpy(key+6, "initial"); - __HAL_AUX_ENTRY(key, fifo->initial, "%u"); - xge_os_strcpy(key+6, "max"); - __HAL_AUX_ENTRY(key, fifo->max, "%u"); - xge_os_strcpy(key+6, "intr"); - __HAL_AUX_ENTRY(key, fifo->intr, "%u"); - xge_os_strcpy(key+6, "no_snoop_bits"); - __HAL_AUX_ENTRY(key, fifo->no_snoop_bits, "%u"); - - for (j = 0; j < XGE_HAL_MAX_FIFO_TTI_NUM; j++) { - xge_hal_tti_config_t *tti = - &dev_config->fifo.queue[i].tti[j]; - - if (!tti->enabled) - continue; - - (void) xge_os_sprintf(key, "fifo%d_tti%02d_", i, - i * XGE_HAL_MAX_FIFO_TTI_NUM + j); - xge_os_strcpy(key+12, "urange_a"); - __HAL_AUX_ENTRY(key, tti->urange_a, "%u"); - xge_os_strcpy(key+12, "ufc_a"); - __HAL_AUX_ENTRY(key, tti->ufc_a, "%u"); - xge_os_strcpy(key+12, "urange_b"); - __HAL_AUX_ENTRY(key, tti->urange_b, "%u"); - xge_os_strcpy(key+12, "ufc_b"); - __HAL_AUX_ENTRY(key, tti->ufc_b, "%u"); - xge_os_strcpy(key+12, "urange_c"); - __HAL_AUX_ENTRY(key, tti->urange_c, "%u"); - xge_os_strcpy(key+12, "ufc_c"); - __HAL_AUX_ENTRY(key, tti->ufc_c, "%u"); - xge_os_strcpy(key+12, "ufc_d"); - __HAL_AUX_ENTRY(key, tti->ufc_d, "%u"); - xge_os_strcpy(key+12, "timer_val_us"); - __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u"); - xge_os_strcpy(key+12, "timer_ci_en"); - __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u"); - } + int j; + xge_hal_fifo_queue_t *fifo = &dev_config->fifo.queue[i]; + + if (!fifo->configured) + continue; + + (void) xge_os_sprintf(key, "fifo%d_", i); + xge_os_strcpy(key+6, "initial"); + __HAL_AUX_ENTRY(key, fifo->initial, "%u"); + xge_os_strcpy(key+6, "max"); + __HAL_AUX_ENTRY(key, fifo->max, "%u"); + xge_os_strcpy(key+6, "intr"); + __HAL_AUX_ENTRY(key, fifo->intr, "%u"); + xge_os_strcpy(key+6, "no_snoop_bits"); + __HAL_AUX_ENTRY(key, fifo->no_snoop_bits, "%u"); + + for (j = 0; j < XGE_HAL_MAX_FIFO_TTI_NUM; j++) { + xge_hal_tti_config_t *tti = + &dev_config->fifo.queue[i].tti[j]; + + if (!tti->enabled) + continue; + + (void) xge_os_sprintf(key, "fifo%d_tti%02d_", i, + i * XGE_HAL_MAX_FIFO_TTI_NUM + j); + xge_os_strcpy(key+12, "urange_a"); + __HAL_AUX_ENTRY(key, tti->urange_a, "%u"); + xge_os_strcpy(key+12, "ufc_a"); + __HAL_AUX_ENTRY(key, tti->ufc_a, "%u"); + xge_os_strcpy(key+12, "urange_b"); + __HAL_AUX_ENTRY(key, tti->urange_b, "%u"); + xge_os_strcpy(key+12, "ufc_b"); + __HAL_AUX_ENTRY(key, tti->ufc_b, "%u"); + xge_os_strcpy(key+12, "urange_c"); + __HAL_AUX_ENTRY(key, tti->urange_c, "%u"); + xge_os_strcpy(key+12, "ufc_c"); + __HAL_AUX_ENTRY(key, tti->ufc_c, "%u"); + xge_os_strcpy(key+12, "ufc_d"); + __HAL_AUX_ENTRY(key, tti->ufc_d, "%u"); + xge_os_strcpy(key+12, "timer_val_us"); + __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u"); + xge_os_strcpy(key+12, "timer_ci_en"); + __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u"); + } } /* and bimodal TTIs */ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { - xge_hal_tti_config_t *tti = &hldev->bimodal_tti[i]; - - if (!tti->enabled) - continue; - - (void) xge_os_sprintf(key, "tti%02d_", - XGE_HAL_MAX_FIFO_TTI_RING_0 + i); - - xge_os_strcpy(key+6, "urange_a"); - __HAL_AUX_ENTRY(key, tti->urange_a, "%u"); - 
xge_os_strcpy(key+6, "ufc_a"); - __HAL_AUX_ENTRY(key, tti->ufc_a, "%u"); - xge_os_strcpy(key+6, "urange_b"); - __HAL_AUX_ENTRY(key, tti->urange_b, "%u"); - xge_os_strcpy(key+6, "ufc_b"); - __HAL_AUX_ENTRY(key, tti->ufc_b, "%u"); - xge_os_strcpy(key+6, "urange_c"); - __HAL_AUX_ENTRY(key, tti->urange_c, "%u"); - xge_os_strcpy(key+6, "ufc_c"); - __HAL_AUX_ENTRY(key, tti->ufc_c, "%u"); - xge_os_strcpy(key+6, "ufc_d"); - __HAL_AUX_ENTRY(key, tti->ufc_d, "%u"); - xge_os_strcpy(key+6, "timer_val_us"); - __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u"); - xge_os_strcpy(key+6, "timer_ac_en"); - __HAL_AUX_ENTRY(key, tti->timer_ac_en, "%u"); - xge_os_strcpy(key+6, "timer_ci_en"); - __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u"); + xge_hal_tti_config_t *tti = &hldev->bimodal_tti[i]; + + if (!tti->enabled) + continue; + + (void) xge_os_sprintf(key, "tti%02d_", + XGE_HAL_MAX_FIFO_TTI_RING_0 + i); + + xge_os_strcpy(key+6, "urange_a"); + __HAL_AUX_ENTRY(key, tti->urange_a, "%u"); + xge_os_strcpy(key+6, "ufc_a"); + __HAL_AUX_ENTRY(key, tti->ufc_a, "%u"); + xge_os_strcpy(key+6, "urange_b"); + __HAL_AUX_ENTRY(key, tti->urange_b, "%u"); + xge_os_strcpy(key+6, "ufc_b"); + __HAL_AUX_ENTRY(key, tti->ufc_b, "%u"); + xge_os_strcpy(key+6, "urange_c"); + __HAL_AUX_ENTRY(key, tti->urange_c, "%u"); + xge_os_strcpy(key+6, "ufc_c"); + __HAL_AUX_ENTRY(key, tti->ufc_c, "%u"); + xge_os_strcpy(key+6, "ufc_d"); + __HAL_AUX_ENTRY(key, tti->ufc_d, "%u"); + xge_os_strcpy(key+6, "timer_val_us"); + __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u"); + xge_os_strcpy(key+6, "timer_ac_en"); + __HAL_AUX_ENTRY(key, tti->timer_ac_en, "%u"); + xge_os_strcpy(key+6, "timer_ci_en"); + __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u"); } __HAL_AUX_ENTRY("dump_on_serr", dev_config->dump_on_serr, "%u"); __HAL_AUX_ENTRY("dump_on_eccerr", - dev_config->dump_on_eccerr, "%u"); + dev_config->dump_on_eccerr, "%u"); __HAL_AUX_ENTRY("dump_on_parityerr", - dev_config->dump_on_parityerr, "%u"); + dev_config->dump_on_parityerr, "%u"); __HAL_AUX_ENTRY("rth_en", dev_config->rth_en, "%u"); __HAL_AUX_ENTRY("rth_bucket_size", dev_config->rth_bucket_size, "%u"); __HAL_AUX_ENTRY_END(bufsize, retsize); xge_os_free(hldev->pdev, dev_config, - sizeof(xge_hal_device_config_t)); + sizeof(xge_hal_device_config_t)); return XGE_HAL_OK; } diff --git a/sys/dev/nxge/xgehal/xgehal-mm.c b/sys/dev/nxge/xgehal/xgehal-mm.c index d23f88a..a787dad 100644 --- a/sys/dev/nxge/xgehal/xgehal-mm.c +++ b/sys/dev/nxge/xgehal/xgehal-mm.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : hal-mm.c - * - * Description: chipset memory pool object implementation - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xge-os-pal.h> #include <dev/nxge/include/xgehal-mm.h> #include <dev/nxge/include/xge-debug.h> @@ -45,7 +37,7 @@ */ xge_hal_status_e __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate, - int *num_allocated) + int *num_allocated) { int i, first_time = mempool->memblocks_allocated == 0 ? 
1 : 0; int n_items = mempool->items_per_memblock; @@ -53,165 +45,165 @@ __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate, *num_allocated = 0; if ((mempool->memblocks_allocated + num_allocate) > - mempool->memblocks_max) { - xge_debug_mm(XGE_ERR, "%s", - "__hal_mempool_grow: can grow anymore"); - return XGE_HAL_ERR_OUT_OF_MEMORY; + mempool->memblocks_max) { + xge_debug_mm(XGE_ERR, "%s", + "__hal_mempool_grow: can grow anymore"); + return XGE_HAL_ERR_OUT_OF_MEMORY; } for (i = mempool->memblocks_allocated; i < mempool->memblocks_allocated + num_allocate; i++) { - int j; - int is_last = - ((mempool->memblocks_allocated+num_allocate-1) == i); - xge_hal_mempool_dma_t *dma_object = - mempool->memblocks_dma_arr + i; - void *the_memblock; - int dma_flags; - - dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED; + int j; + int is_last = + ((mempool->memblocks_allocated+num_allocate-1) == i); + xge_hal_mempool_dma_t *dma_object = + mempool->memblocks_dma_arr + i; + void *the_memblock; + int dma_flags; + + dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED; #ifdef XGE_HAL_DMA_DTR_CONSISTENT - dma_flags |= XGE_OS_DMA_CONSISTENT; + dma_flags |= XGE_OS_DMA_CONSISTENT; #else - dma_flags |= XGE_OS_DMA_STREAMING; + dma_flags |= XGE_OS_DMA_STREAMING; #endif - /* allocate DMA-capable memblock */ - mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev, - mempool->memblock_size, - dma_flags, - &dma_object->handle, - &dma_object->acc_handle); - if (mempool->memblocks_arr[i] == NULL) { - xge_debug_mm(XGE_ERR, - "memblock[%d]: out of DMA memory", i); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - xge_os_memzero(mempool->memblocks_arr[i], - mempool->memblock_size); - the_memblock = mempool->memblocks_arr[i]; - - /* allocate memblock's private part. Each DMA memblock - * has a space allocated for item's private usage upon - * mempool's user request. Each time mempool grows, it will - * allocate new memblock and its private part at once. - * This helps to minimize memory usage a lot. */ - mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev, - mempool->items_priv_size * n_items); - if (mempool->memblocks_priv_arr[i] == NULL) { - xge_os_dma_free(mempool->pdev, - the_memblock, - mempool->memblock_size, - &dma_object->acc_handle, - &dma_object->handle); - xge_debug_mm(XGE_ERR, - "memblock_priv[%d]: out of virtual memory, " - "requested %d(%d:%d) bytes", i, - mempool->items_priv_size * n_items, - mempool->items_priv_size, n_items); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - xge_os_memzero(mempool->memblocks_priv_arr[i], - mempool->items_priv_size * n_items); - - /* map memblock to physical memory */ - dma_object->addr = xge_os_dma_map(mempool->pdev, - dma_object->handle, - the_memblock, - mempool->memblock_size, - XGE_OS_DMA_DIR_BIDIRECTIONAL, + /* allocate DMA-capable memblock */ + mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev, + mempool->memblock_size, + dma_flags, + &dma_object->handle, + &dma_object->acc_handle); + if (mempool->memblocks_arr[i] == NULL) { + xge_debug_mm(XGE_ERR, + "memblock[%d]: out of DMA memory", i); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + xge_os_memzero(mempool->memblocks_arr[i], + mempool->memblock_size); + the_memblock = mempool->memblocks_arr[i]; + + /* allocate memblock's private part. Each DMA memblock + * has a space allocated for item's private usage upon + * mempool's user request. Each time mempool grows, it will + * allocate new memblock and its private part at once. + * This helps to minimize memory usage a lot. 
*/ + mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev, + mempool->items_priv_size * n_items); + if (mempool->memblocks_priv_arr[i] == NULL) { + xge_os_dma_free(mempool->pdev, + the_memblock, + mempool->memblock_size, + &dma_object->acc_handle, + &dma_object->handle); + xge_debug_mm(XGE_ERR, + "memblock_priv[%d]: out of virtual memory, " + "requested %d(%d:%d) bytes", i, + mempool->items_priv_size * n_items, + mempool->items_priv_size, n_items); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + xge_os_memzero(mempool->memblocks_priv_arr[i], + mempool->items_priv_size * n_items); + + /* map memblock to physical memory */ + dma_object->addr = xge_os_dma_map(mempool->pdev, + dma_object->handle, + the_memblock, + mempool->memblock_size, + XGE_OS_DMA_DIR_BIDIRECTIONAL, #ifdef XGE_HAL_DMA_DTR_CONSISTENT - XGE_OS_DMA_CONSISTENT + XGE_OS_DMA_CONSISTENT #else - XGE_OS_DMA_STREAMING + XGE_OS_DMA_STREAMING #endif - ); - if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) { - xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i], - mempool->items_priv_size * - n_items); - xge_os_dma_free(mempool->pdev, - the_memblock, - mempool->memblock_size, - &dma_object->acc_handle, - &dma_object->handle); - return XGE_HAL_ERR_OUT_OF_MAPPING; - } - - /* fill the items hash array */ - for (j=0; j<n_items; j++) { - int index = i*n_items + j; - - if (first_time && index >= mempool->items_initial) { - break; - } - - mempool->items_arr[index] = - ((char *)the_memblock + j*mempool->item_size); - - /* let caller to do more job on each item */ - if (mempool->item_func_alloc != NULL) { - xge_hal_status_e status; - - if ((status = mempool->item_func_alloc( - mempool, - the_memblock, - i, - dma_object, - mempool->items_arr[index], - index, - is_last, - mempool->userdata)) != XGE_HAL_OK) { - - if (mempool->item_func_free != NULL) { - int k; - - for (k=0; k<j; k++) { - - index =i*n_items + k; - - (void)mempool->item_func_free( - mempool, the_memblock, - i, dma_object, - mempool->items_arr[index], - index, is_last, - mempool->userdata); - } - } - - xge_os_free(mempool->pdev, - mempool->memblocks_priv_arr[i], - mempool->items_priv_size * - n_items); - xge_os_dma_unmap(mempool->pdev, - dma_object->handle, - dma_object->addr, - mempool->memblock_size, - XGE_OS_DMA_DIR_BIDIRECTIONAL); - xge_os_dma_free(mempool->pdev, - the_memblock, - mempool->memblock_size, - &dma_object->acc_handle, - &dma_object->handle); - return status; - } - } - - mempool->items_current = index + 1; - } - - xge_debug_mm(XGE_TRACE, - "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", " - "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024, - (unsigned long long)(ulong_t)mempool->memblocks_arr[i], - (unsigned long long)dma_object->addr); - - (*num_allocated)++; - - if (first_time && mempool->items_current == - mempool->items_initial) { - break; - } + ); + if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) { + xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i], + mempool->items_priv_size * + n_items); + xge_os_dma_free(mempool->pdev, + the_memblock, + mempool->memblock_size, + &dma_object->acc_handle, + &dma_object->handle); + return XGE_HAL_ERR_OUT_OF_MAPPING; + } + + /* fill the items hash array */ + for (j=0; j<n_items; j++) { + int index = i*n_items + j; + + if (first_time && index >= mempool->items_initial) { + break; + } + + mempool->items_arr[index] = + ((char *)the_memblock + j*mempool->item_size); + + /* let caller to do more job on each item */ + if (mempool->item_func_alloc != NULL) { + xge_hal_status_e status; + + if ((status = 
mempool->item_func_alloc( + mempool, + the_memblock, + i, + dma_object, + mempool->items_arr[index], + index, + is_last, + mempool->userdata)) != XGE_HAL_OK) { + + if (mempool->item_func_free != NULL) { + int k; + + for (k=0; k<j; k++) { + + index =i*n_items + k; + + (void)mempool->item_func_free( + mempool, the_memblock, + i, dma_object, + mempool->items_arr[index], + index, is_last, + mempool->userdata); + } + } + + xge_os_free(mempool->pdev, + mempool->memblocks_priv_arr[i], + mempool->items_priv_size * + n_items); + xge_os_dma_unmap(mempool->pdev, + dma_object->handle, + dma_object->addr, + mempool->memblock_size, + XGE_OS_DMA_DIR_BIDIRECTIONAL); + xge_os_dma_free(mempool->pdev, + the_memblock, + mempool->memblock_size, + &dma_object->acc_handle, + &dma_object->handle); + return status; + } + } + + mempool->items_current = index + 1; + } + + xge_debug_mm(XGE_TRACE, + "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", " + "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024, + (unsigned long long)(ulong_t)mempool->memblocks_arr[i], + (unsigned long long)dma_object->addr); + + (*num_allocated)++; + + if (first_time && mempool->items_current == + mempool->items_initial) { + break; + } } /* increment actual number of allocated memblocks */ @@ -236,9 +228,9 @@ __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate, */ xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size, - int items_priv_size, int items_initial, int items_max, - xge_hal_mempool_item_f item_func_alloc, - xge_hal_mempool_item_f item_func_free, void *userdata) + int items_priv_size, int items_initial, int items_max, + xge_hal_mempool_item_f item_func_alloc, + xge_hal_mempool_item_f item_func_free, void *userdata) { xge_hal_status_e status; int memblocks_to_allocate; @@ -246,114 +238,114 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size, int allocated; if (memblock_size < item_size) { - xge_debug_mm(XGE_ERR, - "memblock_size %d < item_size %d: misconfiguration", - memblock_size, item_size); - return NULL; + xge_debug_mm(XGE_ERR, + "memblock_size %d < item_size %d: misconfiguration", + memblock_size, item_size); + return NULL; } mempool = (xge_hal_mempool_t *) \ - xge_os_malloc(pdev, sizeof(xge_hal_mempool_t)); + xge_os_malloc(pdev, sizeof(xge_hal_mempool_t)); if (mempool == NULL) { - xge_debug_mm(XGE_ERR, "mempool allocation failure"); - return NULL; + xge_debug_mm(XGE_ERR, "mempool allocation failure"); + return NULL; } xge_os_memzero(mempool, sizeof(xge_hal_mempool_t)); - mempool->pdev = pdev; - mempool->memblock_size = memblock_size; - mempool->items_max = items_max; - mempool->items_initial = items_initial; - mempool->item_size = item_size; - mempool->items_priv_size = items_priv_size; - mempool->item_func_alloc = item_func_alloc; - mempool->item_func_free = item_func_free; - mempool->userdata = userdata; + mempool->pdev = pdev; + mempool->memblock_size = memblock_size; + mempool->items_max = items_max; + mempool->items_initial = items_initial; + mempool->item_size = item_size; + mempool->items_priv_size = items_priv_size; + mempool->item_func_alloc = item_func_alloc; + mempool->item_func_free = item_func_free; + mempool->userdata = userdata; mempool->memblocks_allocated = 0; mempool->items_per_memblock = memblock_size / item_size; mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / - mempool->items_per_memblock; + mempool->items_per_memblock; /* allocate array of memblocks */ mempool->memblocks_arr = (void ** ) 
xge_os_malloc(mempool->pdev, - sizeof(void*) * mempool->memblocks_max); + sizeof(void*) * mempool->memblocks_max); if (mempool->memblocks_arr == NULL) { - xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_os_memzero(mempool->memblocks_arr, - sizeof(void*) * mempool->memblocks_max); + sizeof(void*) * mempool->memblocks_max); /* allocate array of private parts of items per memblocks */ mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev, - sizeof(void*) * mempool->memblocks_max); + sizeof(void*) * mempool->memblocks_max); if (mempool->memblocks_priv_arr == NULL) { - xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_os_memzero(mempool->memblocks_priv_arr, - sizeof(void*) * mempool->memblocks_max); + sizeof(void*) * mempool->memblocks_max); /* allocate array of memblocks DMA objects */ mempool->memblocks_dma_arr = - (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev, - sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max); + (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev, + sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max); if (mempool->memblocks_dma_arr == NULL) { - xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_os_memzero(mempool->memblocks_dma_arr, - sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max); + sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max); /* allocate hash array of items */ mempool->items_arr = (void **) xge_os_malloc(mempool->pdev, - sizeof(void*) * mempool->items_max); + sizeof(void*) * mempool->items_max); if (mempool->items_arr == NULL) { - xge_debug_mm(XGE_ERR, "items_arr allocation failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "items_arr allocation failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max); mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev, - sizeof(void*) * mempool->items_max); + sizeof(void*) * mempool->items_max); if (mempool->shadow_items_arr == NULL) { - xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_os_memzero(mempool->shadow_items_arr, - sizeof(void *) * mempool->items_max); + sizeof(void *) * mempool->items_max); /* calculate initial number of memblocks */ memblocks_to_allocate = (mempool->items_initial + - mempool->items_per_memblock - 1) / - mempool->items_per_memblock; + mempool->items_per_memblock - 1) / + mempool->items_per_memblock; xge_debug_mm(XGE_TRACE, "allocating %d memblocks, " - "%d items per memblock", memblocks_to_allocate, - mempool->items_per_memblock); + "%d items per memblock", memblocks_to_allocate, + mempool->items_per_memblock); /* pre-allocate the mempool */ status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated); xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr, - sizeof(void*) * mempool->items_max); + 
sizeof(void*) * mempool->items_max); if (status != XGE_HAL_OK) { - xge_debug_mm(XGE_ERR, "mempool_grow failure"); - __hal_mempool_destroy(mempool); - return NULL; + xge_debug_mm(XGE_ERR, "mempool_grow failure"); + __hal_mempool_destroy(mempool); + return NULL; } xge_debug_mm(XGE_TRACE, - "total: allocated %dk of DMA-capable memory", - mempool->memblock_size * allocated / 1024); + "total: allocated %dk of DMA-capable memory", + mempool->memblock_size * allocated / 1024); return mempool; } @@ -367,69 +359,69 @@ __hal_mempool_destroy(xge_hal_mempool_t *mempool) int i, j; for (i=0; i<mempool->memblocks_allocated; i++) { - xge_hal_mempool_dma_t *dma_object; + xge_hal_mempool_dma_t *dma_object; - xge_assert(mempool->memblocks_arr[i]); - xge_assert(mempool->memblocks_dma_arr + i); + xge_assert(mempool->memblocks_arr[i]); + xge_assert(mempool->memblocks_dma_arr + i); - dma_object = mempool->memblocks_dma_arr + i; + dma_object = mempool->memblocks_dma_arr + i; - for (j=0; j<mempool->items_per_memblock; j++) { - int index = i*mempool->items_per_memblock + j; + for (j=0; j<mempool->items_per_memblock; j++) { + int index = i*mempool->items_per_memblock + j; - /* to skip last partially filled(if any) memblock */ - if (index >= mempool->items_current) { - break; - } + /* to skip last partially filled(if any) memblock */ + if (index >= mempool->items_current) { + break; + } - /* let caller to do more job on each item */ - if (mempool->item_func_free != NULL) { + /* let caller to do more job on each item */ + if (mempool->item_func_free != NULL) { - mempool->item_func_free(mempool, - mempool->memblocks_arr[i], - i, dma_object, - mempool->shadow_items_arr[index], - index, /* unused */ -1, - mempool->userdata); - } - } + mempool->item_func_free(mempool, + mempool->memblocks_arr[i], + i, dma_object, + mempool->shadow_items_arr[index], + index, /* unused */ -1, + mempool->userdata); + } + } - xge_os_dma_unmap(mempool->pdev, + xge_os_dma_unmap(mempool->pdev, dma_object->handle, dma_object->addr, - mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL); + mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL); - xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i], - mempool->items_priv_size * mempool->items_per_memblock); + xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i], + mempool->items_priv_size * mempool->items_per_memblock); - xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i], - mempool->memblock_size, &dma_object->acc_handle, - &dma_object->handle); + xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i], + mempool->memblock_size, &dma_object->acc_handle, + &dma_object->handle); } if (mempool->items_arr) { - xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) * - mempool->items_max); + xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) * + mempool->items_max); } if (mempool->shadow_items_arr) { - xge_os_free(mempool->pdev, mempool->shadow_items_arr, - sizeof(void*) * mempool->items_max); + xge_os_free(mempool->pdev, mempool->shadow_items_arr, + sizeof(void*) * mempool->items_max); } if (mempool->memblocks_dma_arr) { - xge_os_free(mempool->pdev, mempool->memblocks_dma_arr, - sizeof(xge_hal_mempool_dma_t) * - mempool->memblocks_max); + xge_os_free(mempool->pdev, mempool->memblocks_dma_arr, + sizeof(xge_hal_mempool_dma_t) * + mempool->memblocks_max); } if (mempool->memblocks_priv_arr) { - xge_os_free(mempool->pdev, mempool->memblocks_priv_arr, - sizeof(void*) * mempool->memblocks_max); + xge_os_free(mempool->pdev, mempool->memblocks_priv_arr, + sizeof(void*) * 
mempool->memblocks_max); } if (mempool->memblocks_arr) { - xge_os_free(mempool->pdev, mempool->memblocks_arr, - sizeof(void*) * mempool->memblocks_max); + xge_os_free(mempool->pdev, mempool->memblocks_arr, + sizeof(void*) * mempool->memblocks_max); } xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t)); diff --git a/sys/dev/nxge/xgehal/xgehal-ring-fp.c b/sys/dev/nxge/xgehal/xgehal-ring-fp.c index 9d5a09e..e25a265 100644 --- a/sys/dev/nxge/xgehal/xgehal-ring-fp.c +++ b/sys/dev/nxge/xgehal/xgehal-ring-fp.c @@ -26,133 +26,125 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-ring-fp.c - * - * Description: HAL Rx ring object functionality (fast path) - * - * Created: 10 June 2004 - */ - #ifdef XGE_DEBUG_FP #include <dev/nxge/include/xgehal-ring.h> #endif -__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t* -__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh) +__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t* +__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; - xge_hal_ring_rxd_priv_t *rxd_priv; + xge_hal_ring_rxd_priv_t *rxd_priv; xge_assert(rxdp); -#if defined(XGE_HAL_USE_5B_MODE) +#if defined(XGE_HAL_USE_5B_MODE) xge_assert(ring); if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { - xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh; -#if defined (XGE_OS_PLATFORM_64BIT) - int memblock_idx = rxdp_5->host_control >> 16; - int i = rxdp_5->host_control & 0xFFFF; - rxd_priv = (xge_hal_ring_rxd_priv_t *) - ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i); + xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh; +#if defined (XGE_OS_PLATFORM_64BIT) + int memblock_idx = rxdp_5->host_control >> 16; + int i = rxdp_5->host_control & 0xFFFF; + rxd_priv = (xge_hal_ring_rxd_priv_t *) + ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i); #else - /* 32-bit case */ - rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control; + /* 32-bit case */ + rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control; #endif } else #endif { - rxd_priv = (xge_hal_ring_rxd_priv_t *) - (ulong_t)rxdp->host_control; + rxd_priv = (xge_hal_ring_rxd_priv_t *) + (ulong_t)rxdp->host_control; } xge_assert(rxd_priv); xge_assert(rxd_priv->dma_object); - xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle); + xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle); - xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset == - rxd_priv->dma_addr); + xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset == + rxd_priv->dma_addr); return rxd_priv; } -__HAL_STATIC_RING __HAL_INLINE_RING int +__HAL_STATIC_RING __HAL_INLINE_RING int __hal_ring_block_memblock_idx(xge_hal_ring_block_t *block) { return (int)*((u64 *)(void *)((char *)block + - XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)); + XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)); } -__HAL_STATIC_RING __HAL_INLINE_RING void +__HAL_STATIC_RING __HAL_INLINE_RING void __hal_ring_block_memblock_idx_set(xge_hal_ring_block_t*block, int memblock_idx) { *((u64 *)(void *)((char *)block + - XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) = - memblock_idx; + XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) = + memblock_idx; } -__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t +__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t __hal_ring_block_next_pointer(xge_hal_ring_block_t *block) { return (dma_addr_t)*((u64 *)(void *)((char *)block + - XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)); + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)); 
} -__HAL_STATIC_RING __HAL_INLINE_RING void +__HAL_STATIC_RING __HAL_INLINE_RING void __hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block, - dma_addr_t dma_next) + dma_addr_t dma_next) { - *((u64 *)(void *)((char *)block + - XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; + *((u64 *)(void *)((char *)block + + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; } /** - * xge_hal_ring_dtr_private - Get ULD private per-descriptor data. + * xge_hal_ring_dtr_private - Get ULD private per-descriptor data. * @channelh: Channel handle. * @dtrh: Descriptor handle. * - * Returns: private ULD info associated with the descriptor. - * ULD requests per-descriptor space via xge_hal_channel_open(). + * Returns: private ULD info associated with the descriptor. + * ULD requests per-descriptor space via xge_hal_channel_open(). * * See also: xge_hal_fifo_dtr_private(). * Usage: See ex_rx_compl{}. */ -__HAL_STATIC_RING __HAL_INLINE_RING void* +__HAL_STATIC_RING __HAL_INLINE_RING void* xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) + - sizeof(xge_hal_ring_rxd_priv_t); + return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) + + sizeof(xge_hal_ring_rxd_priv_t); } /** - * xge_hal_ring_dtr_reserve - Reserve ring descriptor. + * xge_hal_ring_dtr_reserve - Reserve ring descriptor. * @channelh: Channel handle. - * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter - * with a valid handle. + * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter + * with a valid handle. * - * Reserve Rx descriptor for the subsequent filling-in (by upper layer - * driver (ULD)) and posting on the corresponding channel (@channelh) + * Reserve Rx descriptor for the subsequent filling-in (by upper layer + * driver (ULD)) and posting on the corresponding channel (@channelh) * via xge_hal_ring_dtr_post(). * - * Returns: XGE_HAL_OK - success. - * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available. + * Returns: XGE_HAL_OK - success. + * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available. * * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(), * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}. * Usage: See ex_post_all_rx{}. 
*/ -__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e +__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) { xge_hal_status_e status; -#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) +#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) unsigned long flags; #endif -#if defined(XGE_HAL_RX_MULTI_RESERVE) +#if defined(XGE_HAL_RX_MULTI_RESERVE) xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock); #elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock, @@ -161,21 +153,21 @@ xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) status = __hal_channel_dtr_alloc(channelh, dtrh); -#if defined(XGE_HAL_RX_MULTI_RESERVE) +#if defined(XGE_HAL_RX_MULTI_RESERVE) xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock); #elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock, - flags); + flags); #endif if (status == XGE_HAL_OK) { - xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh; + xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh; - /* instead of memset: reset this RxD */ - rxdp->control_1 = rxdp->control_2 = 0; + /* instead of memset: reset this RxD */ + rxdp->control_1 = rxdp->control_2 = 0; -#if defined(XGE_OS_MEMORY_CHECK) - __hal_ring_rxd_priv((xge_hal_ring_t *) channelh, rxdp)->allocated = 1; +#if defined(XGE_OS_MEMORY_CHECK) + __hal_ring_rxd_priv((xge_hal_ring_t *) channelh, rxdp)->allocated = 1; #endif } @@ -183,110 +175,110 @@ xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh) } /** - * xge_hal_ring_dtr_info_get - Get extended information associated with + * xge_hal_ring_dtr_info_get - Get extended information associated with * a completed receive descriptor for 1b mode. * @channelh: Channel handle. * @dtrh: Descriptor handle. - * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL. + * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL. * - * Retrieve extended information associated with a completed receive descriptor. + * Retrieve extended information associated with a completed receive descriptor. * * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(), * xge_hal_ring_dtr_5b_get(). */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - xge_hal_dtr_info_t *ext_info) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + xge_hal_dtr_info_t *ext_info) { - /* cast to 1-buffer mode RxD: the code below relies on the fact - * that control_1 and control_2 are formatted the same way.. */ + /* cast to 1-buffer mode RxD: the code below relies on the fact + * that control_1 and control_2 are formatted the same way.. */ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1); ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1); - ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1); - ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1); + ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1); + ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1); ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2); - /* Herc only, a few extra cycles imposed on Xena and/or - * when RTH is not enabled. 
- * Alternatively, could check - * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */ + /* Herc only, a few extra cycles imposed on Xena and/or + * when RTH is not enabled. + * Alternatively, could check + * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */ ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1); ext_info->rth_spdm_hit = XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1); - ext_info->rth_hash_type = + ext_info->rth_hash_type = XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1); - ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2); + ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2); } /** - * xge_hal_ring_dtr_info_nb_get - Get extended information associated - * with a completed receive descriptor for 3b or 5b + * xge_hal_ring_dtr_info_nb_get - Get extended information associated + * with a completed receive descriptor for 3b or 5b * modes. * @channelh: Channel handle. * @dtrh: Descriptor handle. - * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL. + * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL. * - * Retrieve extended information associated with a completed receive descriptor. + * Retrieve extended information associated with a completed receive descriptor. * * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(), - * xge_hal_ring_dtr_5b_get(). + * xge_hal_ring_dtr_5b_get(). */ -__HAL_STATIC_RING __HAL_INLINE_RING void +__HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - xge_hal_dtr_info_t *ext_info) + xge_hal_dtr_info_t *ext_info) { - /* cast to 1-buffer mode RxD: the code below relies on the fact - * that control_1 and control_2 are formatted the same way.. */ + /* cast to 1-buffer mode RxD: the code below relies on the fact + * that control_1 and control_2 are formatted the same way.. */ xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1); ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1); - ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1); - ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1); - ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2); - /* Herc only, a few extra cycles imposed on Xena and/or - * when RTH is not enabled. Same comment as above. */ + ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1); + ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1); + ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2); + /* Herc only, a few extra cycles imposed on Xena and/or + * when RTH is not enabled. Same comment as above. */ ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1); ext_info->rth_spdm_hit = XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1); - ext_info->rth_hash_type = + ext_info->rth_hash_type = XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1); - ext_info->rth_value = (u32)rxdp->buffer0_ptr; + ext_info->rth_value = (u32)rxdp->buffer0_ptr; } /** * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor. * @dtrh: Descriptor handle. - * @dma_pointer: DMA address of a single receive buffer this descriptor - * should carry. Note that by the time - * xge_hal_ring_dtr_1b_set - * is called, the receive buffer should be already mapped - * to the corresponding Xframe device. + * @dma_pointer: DMA address of a single receive buffer this descriptor + * should carry. 
Note that by the time + * xge_hal_ring_dtr_1b_set + * is called, the receive buffer should be already mapped + * to the corresponding Xframe device. * @size: Size of the receive @dma_pointer buffer. * - * Prepare 1-buffer-mode Rx descriptor for posting - * (via xge_hal_ring_dtr_post()). + * Prepare 1-buffer-mode Rx descriptor for posting + * (via xge_hal_ring_dtr_post()). * - * This inline helper-function does not return any parameters and always + * This inline helper-function does not return any parameters and always * succeeds. * - * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set(). + * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set(). * Usage: See ex_post_all_rx{}. */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; - rxdp->buffer0_ptr = dma_pointer; - rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size); + rxdp->buffer0_ptr = dma_pointer; + rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size); xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p", - (xge_hal_ring_rxd_1_t *)dtrh, - rxdp->control_2, - rxdp->buffer0_ptr); + (xge_hal_ring_rxd_1_t *)dtrh, + rxdp->control_2, + rxdp->buffer0_ptr); } /** @@ -294,21 +286,21 @@ xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size) * descriptor. * @channelh: Channel handle. * @dtrh: Descriptor handle. - * @dma_pointer: DMA address of a single receive buffer _this_ descriptor - * carries. Returned by HAL. - * @pkt_length: Length (in bytes) of the data in the buffer pointed by - * @dma_pointer. Returned by HAL. + * @dma_pointer: DMA address of a single receive buffer _this_ descriptor + * carries. Returned by HAL. + * @pkt_length: Length (in bytes) of the data in the buffer pointed by + * @dma_pointer. Returned by HAL. * - * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor. - * This inline helper-function uses completed descriptor to populate receive - * buffer pointer and other "out" parameters. The function always succeeds. + * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor. + * This inline helper-function uses completed descriptor to populate receive + * buffer pointer and other "out" parameters. The function always succeeds. * - * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). + * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). * Usage: See ex_rx_compl{}. */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t *dma_pointer, int *pkt_length) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + dma_addr_t *dma_pointer, int *pkt_length) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; @@ -321,35 +313,35 @@ xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, /** * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor. * @dtrh: Descriptor handle. - * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers - * _this_ descriptor should carry. 
- * Note that by the time xge_hal_ring_dtr_3b_set - * is called, the receive buffers should be mapped - * to the corresponding Xframe device. - * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per - * buffer from @dma_pointers. - * - * Prepare 3-buffer-mode Rx descriptor for posting (via + * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers + * _this_ descriptor should carry. + * Note that by the time xge_hal_ring_dtr_3b_set + * is called, the receive buffers should be mapped + * to the corresponding Xframe device. + * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per + * buffer from @dma_pointers. + * + * Prepare 3-buffer-mode Rx descriptor for posting (via * xge_hal_ring_dtr_post()). - * This inline helper-function does not return any parameters and always + * This inline helper-function does not return any parameters and always * succeeds. * - * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set(). + * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set(). */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], - int sizes[]) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], + int sizes[]) { xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh; - rxdp->buffer0_ptr = dma_pointers[0]; - rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]); - rxdp->buffer1_ptr = dma_pointers[1]; - rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]); - rxdp->buffer2_ptr = dma_pointers[2]; - rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]); + rxdp->buffer0_ptr = dma_pointers[0]; + rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]); + rxdp->buffer1_ptr = dma_pointers[1]; + rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]); + rxdp->buffer2_ptr = dma_pointers[2]; + rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]); } /** @@ -357,76 +349,76 @@ xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], * descriptor. * @channelh: Channel handle. * @dtrh: Descriptor handle. - * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor - * carries. The first two buffers contain ethernet and - * (IP + transport) headers. The 3rd buffer contains packet - * data. - * Returned by HAL. - * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per + * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor + * carries. The first two buffers contain ethernet and + * (IP + transport) headers. The 3rd buffer contains packet + * data. + * Returned by HAL. + * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per * buffer from @dma_pointers. Returned by HAL. * - * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor. - * This inline helper-function uses completed descriptor to populate receive - * buffer pointer and other "out" parameters. The function always succeeds. + * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor. 
+ * This inline helper-function uses completed descriptor to populate receive + * buffer pointer and other "out" parameters. The function always succeeds. * - * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). + * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t dma_pointers[], int sizes[]) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + dma_addr_t dma_pointers[], int sizes[]) { xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh; - dma_pointers[0] = rxdp->buffer0_ptr; + dma_pointers[0] = rxdp->buffer0_ptr; sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2); - dma_pointers[1] = rxdp->buffer1_ptr; + dma_pointers[1] = rxdp->buffer1_ptr; sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2); - dma_pointers[2] = rxdp->buffer2_ptr; + dma_pointers[2] = rxdp->buffer2_ptr; sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2); ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] + - sizes[2]; + sizes[2]; } /** * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor. * @dtrh: Descriptor handle. - * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers - * _this_ descriptor should carry. - * Note that by the time xge_hal_ring_dtr_5b_set - * is called, the receive buffers should be mapped - * to the corresponding Xframe device. - * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per - * buffer from @dma_pointers. - * - * Prepare 3-buffer-mode Rx descriptor for posting (via + * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers + * _this_ descriptor should carry. + * Note that by the time xge_hal_ring_dtr_5b_set + * is called, the receive buffers should be mapped + * to the corresponding Xframe device. + * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per + * buffer from @dma_pointers. + * + * Prepare 3-buffer-mode Rx descriptor for posting (via * xge_hal_ring_dtr_post()). - * This inline helper-function does not return any parameters and always + * This inline helper-function does not return any parameters and always * succeeds. * - * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set(). + * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set(). 
*/ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], - int sizes[]) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], + int sizes[]) { xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh; - rxdp->buffer0_ptr = dma_pointers[0]; - rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]); - rxdp->buffer1_ptr = dma_pointers[1]; - rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]); - rxdp->buffer2_ptr = dma_pointers[2]; - rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE); - rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]); - rxdp->buffer3_ptr = dma_pointers[3]; - rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE); - rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]); - rxdp->buffer4_ptr = dma_pointers[4]; - rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE); + rxdp->buffer0_ptr = dma_pointers[0]; + rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]); + rxdp->buffer1_ptr = dma_pointers[1]; + rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]); + rxdp->buffer2_ptr = dma_pointers[2]; + rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE); + rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]); + rxdp->buffer3_ptr = dma_pointers[3]; + rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE); + rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]); + rxdp->buffer4_ptr = dma_pointers[4]; + rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE); rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]); } @@ -435,42 +427,42 @@ xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[], * descriptor. * @channelh: Channel handle. * @dtrh: Descriptor handle. - * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor - * carries. The first 4 buffers contains L2 (ethernet) through - * L5 headers. The 5th buffer contain received (applicaion) - * data. Returned by HAL. - * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per + * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor + * carries. The first 4 buffers contains L2 (ethernet) through + * L5 headers. The 5th buffer contain received (applicaion) + * data. Returned by HAL. + * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per * buffer from @dma_pointers. Returned by HAL. * - * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor. - * This inline helper-function uses completed descriptor to populate receive - * buffer pointer and other "out" parameters. The function always succeeds. + * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor. + * This inline helper-function uses completed descriptor to populate receive + * buffer pointer and other "out" parameters. The function always succeeds. * - * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). + * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get(). 
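/*
 * Editorial aside (not part of this patch): a sketch of filling and
 * posting one 5-buffer-mode RxD with the calls documented above.  The
 * wrapper name and its parameters are invented; the five buffers are
 * assumed to be DMA-mapped already, as xge_hal_ring_dtr_5b_set()
 * requires, and the sketch builds only against the HAL headers.
 */
#include <dev/nxge/include/xgehal-ring.h>

static xge_hal_status_e
sketch_post_one_5b_rxd(xge_hal_channel_h channelh,
	dma_addr_t bufs[5], int lens[5])
{
	xge_hal_dtr_h dtrh;
	xge_hal_status_e status;

	status = xge_hal_ring_dtr_reserve(channelh, &dtrh);
	if (status != XGE_HAL_OK)
		return (status);	/* e.g. XGE_HAL_INF_OUT_OF_DESCRIPTORS */

	/* buffers 0..3 carry the split headers, buffer 4 the payload */
	xge_hal_ring_dtr_5b_set(dtrh, bufs, lens);
	xge_hal_ring_dtr_post(channelh, dtrh);
	return (XGE_HAL_OK);
}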
*/ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, - dma_addr_t dma_pointers[], int sizes[]) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, + dma_addr_t dma_pointers[], int sizes[]) { xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh; - dma_pointers[0] = rxdp->buffer0_ptr; + dma_pointers[0] = rxdp->buffer0_ptr; sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2); - dma_pointers[1] = rxdp->buffer1_ptr; + dma_pointers[1] = rxdp->buffer1_ptr; sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2); - dma_pointers[2] = rxdp->buffer2_ptr; + dma_pointers[2] = rxdp->buffer2_ptr; sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2); - dma_pointers[3] = rxdp->buffer3_ptr; + dma_pointers[3] = rxdp->buffer3_ptr; sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3); - dma_pointers[4] = rxdp->buffer4_ptr; + dma_pointers[4] = rxdp->buffer4_ptr; sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3); ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] + - sizes[2] + sizes[3] + sizes[4]; + sizes[2] + sizes[3] + sizes[4]; } @@ -481,101 +473,101 @@ xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, * * TBD */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) - xge_hal_ring_rxd_priv_t *priv; +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) + xge_hal_ring_rxd_priv_t *priv; xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; #endif -#if defined(XGE_HAL_RX_MULTI_POST_IRQ) +#if defined(XGE_HAL_RX_MULTI_POST_IRQ) unsigned long flags; #endif - rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED; + rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED; #ifdef XGE_DEBUG_ASSERT - /* make sure Xena overwrites the (illegal) t_code on completion */ - XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C); + /* make sure Xena overwrites the (illegal) t_code on completion */ + XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C); #endif - xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d", - (unsigned long long)(ulong_t)dtrh, - ((xge_hal_ring_t *)channelh)->channel.post_index, - ((xge_hal_ring_t *)channelh)->channel.post_qid); + xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d", + (unsigned long long)(ulong_t)dtrh, + ((xge_hal_ring_t *)channelh)->channel.post_index, + ((xge_hal_ring_t *)channelh)->channel.post_qid); -#if defined(XGE_HAL_RX_MULTI_POST) +#if defined(XGE_HAL_RX_MULTI_POST) xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock); #elif defined(XGE_HAL_RX_MULTI_POST_IRQ) xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock, flags); #endif -#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER) +#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER) { - xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; - - if (channel->post_index != 0) { - xge_hal_dtr_h prev_dtrh; - xge_hal_ring_rxd_priv_t *rxdp_priv; - - rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp); - prev_dtrh = channel->work_arr[channel->post_index - 
1]; - - if (prev_dtrh != NULL && - (rxdp_priv->dma_offset & (~0xFFF)) != - rxdp_priv->dma_offset) { - xge_assert((char *)prev_dtrh + - ((xge_hal_ring_t*)channel)->rxd_size == dtrh); - } - } + xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; + + if (channel->post_index != 0) { + xge_hal_dtr_h prev_dtrh; + xge_hal_ring_rxd_priv_t *rxdp_priv; + + rxdp_priv = __hal_ring_rxd_priv((xge_hal_ring_t*)channel, rxdp); + prev_dtrh = channel->work_arr[channel->post_index - 1]; + + if (prev_dtrh != NULL && + (rxdp_priv->dma_offset & (~0xFFF)) != + rxdp_priv->dma_offset) { + xge_assert((char *)prev_dtrh + + ((xge_hal_ring_t*)channel)->rxd_size == dtrh); + } + } } #endif __hal_channel_dtr_post(channelh, dtrh); -#if defined(XGE_HAL_RX_MULTI_POST) +#if defined(XGE_HAL_RX_MULTI_POST) xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock); #elif defined(XGE_HAL_RX_MULTI_POST_IRQ) xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock, - flags); + flags); #endif } /** - * xge_hal_ring_dtr_post_post - FIXME. + * xge_hal_ring_dtr_post_post - FIXME. * @channelh: Channel handle. * @dtrh: Descriptor handle. * * TBD */ -__HAL_STATIC_RING __HAL_INLINE_RING void +__HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) - xge_hal_ring_rxd_priv_t *priv; +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) + xge_hal_ring_rxd_priv_t *priv; #endif /* do POST */ - rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME; + rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) priv = __hal_ring_rxd_priv(ring, rxdp); xge_os_dma_sync(ring->channel.pdev, - priv->dma_handle, priv->dma_addr, - priv->dma_offset, ring->rxd_size, - XGE_OS_DMA_DIR_TODEVICE); + priv->dma_handle, priv->dma_addr, + priv->dma_offset, ring->rxd_size, + XGE_OS_DMA_DIR_TODEVICE); #endif xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post: rxdp %p control_1 %p", - (xge_hal_ring_rxd_1_t *)dtrh, - rxdp->control_1); + (xge_hal_ring_rxd_1_t *)dtrh, + rxdp->control_1); - if (ring->channel.usage_cnt > 0) - ring->channel.usage_cnt--; + if (ring->channel.usage_cnt > 0) + ring->channel.usage_cnt--; } /** @@ -585,192 +577,192 @@ xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) * * Similar as xge_hal_ring_dtr_post_post, but in addition it does memory barrier. 
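/*
 * Editorial aside (not part of this patch): the reserve/set/post
 * sequence the documentation above keeps referring to, in the spirit of
 * the ex_post_all_rx{} example.  The wrapper name and the
 * caller-supplied array of pre-mapped buffers are invented for
 * illustration; only the documented HAL calls are used.
 */
#include <dev/nxge/include/xgehal-ring.h>

static void
sketch_post_all_rx_1b(xge_hal_channel_h channelh,
	dma_addr_t mapped_bufs[], int buf_len, int nbufs)
{
	xge_hal_dtr_h dtrh;
	int i;

	for (i = 0; i < nbufs; i++) {
		/* stop once the ring has no free descriptors left */
		if (xge_hal_ring_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK)
			break;

		/* attach one pre-mapped receive buffer to the RxD ... */
		xge_hal_ring_dtr_1b_set(dtrh, mapped_bufs[i], buf_len);

		/* ... and hand it to the Xframe (pre_post + post_post) */
		xge_hal_ring_dtr_post(channelh, dtrh);
	}
}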
*/ -__HAL_STATIC_RING __HAL_INLINE_RING void +__HAL_STATIC_RING __HAL_INLINE_RING void xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) - xge_hal_ring_rxd_priv_t *priv; +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) + xge_hal_ring_rxd_priv_t *priv; #endif - /* Do memory barrier before changing the ownership */ - xge_os_wmb(); - + /* Do memory barrier before changing the ownership */ + xge_os_wmb(); + /* do POST */ - rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME; + rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) priv = __hal_ring_rxd_priv(ring, rxdp); xge_os_dma_sync(ring->channel.pdev, - priv->dma_handle, priv->dma_addr, - priv->dma_offset, ring->rxd_size, - XGE_OS_DMA_DIR_TODEVICE); + priv->dma_handle, priv->dma_addr, + priv->dma_offset, ring->rxd_size, + XGE_OS_DMA_DIR_TODEVICE); #endif - if (ring->channel.usage_cnt > 0) - ring->channel.usage_cnt--; + if (ring->channel.usage_cnt > 0) + ring->channel.usage_cnt--; xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d", - (xge_hal_ring_rxd_1_t *)dtrh, - rxdp->control_1, ring->channel.usage_cnt); + (xge_hal_ring_rxd_1_t *)dtrh, + rxdp->control_1, ring->channel.usage_cnt); } /** - * xge_hal_ring_dtr_post - Post descriptor on the ring channel. + * xge_hal_ring_dtr_post - Post descriptor on the ring channel. * @channelh: Channel handle. * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve(). * - * Post descriptor on the 'ring' type channel. - * Prior to posting the descriptor should be filled in accordance with - * Host/Xframe interface specification for a given service (LL, etc.). + * Post descriptor on the 'ring' type channel. + * Prior to posting the descriptor should be filled in accordance with + * Host/Xframe interface specification for a given service (LL, etc.). * * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post(). * Usage: See ex_post_all_rx{}. */ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { - xge_hal_ring_dtr_pre_post(channelh, dtrh); + xge_hal_ring_dtr_pre_post(channelh, dtrh); xge_hal_ring_dtr_post_post(channelh, dtrh); } /** - * xge_hal_ring_dtr_next_completed - Get the _next_ completed + * xge_hal_ring_dtr_next_completed - Get the _next_ completed * descriptor. * @channelh: Channel handle. * @dtrh: Descriptor handle. Returned by HAL. - * @t_code: Transfer code, as per Xframe User Guide, - * Receive Descriptor Format. Returned by HAL. + * @t_code: Transfer code, as per Xframe User Guide, + * Receive Descriptor Format. Returned by HAL. * - * Retrieve the _next_ completed descriptor. - * HAL uses channel callback (*xge_hal_channel_callback_f) to notifiy - * upper-layer driver (ULD) of new completed descriptors. After that + * Retrieve the _next_ completed descriptor. + * HAL uses channel callback (*xge_hal_channel_callback_f) to notifiy + * upper-layer driver (ULD) of new completed descriptors. 
After that * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest - * completions (the very first completion is passed by HAL via + * completions (the very first completion is passed by HAL via * xge_hal_channel_callback_f). * - * Implementation-wise, the upper-layer driver is free to call + * Implementation-wise, the upper-layer driver is free to call * xge_hal_ring_dtr_next_completed either immediately from inside the - * channel callback, or in a deferred fashion and separate (from HAL) + * channel callback, or in a deferred fashion and separate (from HAL) * context. * - * Non-zero @t_code means failure to fill-in receive buffer(s) + * Non-zero @t_code means failure to fill-in receive buffer(s) * of the descriptor. - * For instance, parity error detected during the data transfer. - * In this case Xframe will complete the descriptor and indicate - * for the host that the received data is not to be used. - * For details please refer to Xframe User Guide. + * For instance, parity error detected during the data transfer. + * In this case Xframe will complete the descriptor and indicate + * for the host that the received data is not to be used. + * For details please refer to Xframe User Guide. * - * Returns: XGE_HAL_OK - success. - * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors + * Returns: XGE_HAL_OK - success. + * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors * are currently available for processing. * * See also: xge_hal_channel_callback_f{}, * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}. * Usage: See ex_rx_compl{}. */ -__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e -xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, - u8 *t_code) +__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e +xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, + u8 *t_code) { - xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */ + xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... 
*/ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) - xge_hal_ring_rxd_priv_t *priv; +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) + xge_hal_ring_rxd_priv_t *priv; #endif __hal_channel_dtr_try_complete(ring, dtrh); rxdp = (xge_hal_ring_rxd_1_t *)*dtrh; - if (rxdp == NULL) { - return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; + if (rxdp == NULL) { + return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } -#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) - /* Note: 24 bytes at most means: - * - Control_3 in case of 5-buffer mode - * - Control_1 and Control_2 +#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) + /* Note: 24 bytes at most means: + * - Control_3 in case of 5-buffer mode + * - Control_1 and Control_2 * - * This is the only length needs to be invalidated - * type of channels.*/ + * This is the only length needs to be invalidated + * type of channels.*/ priv = __hal_ring_rxd_priv(ring, rxdp); xge_os_dma_sync(ring->channel.pdev, - priv->dma_handle, priv->dma_addr, - priv->dma_offset, 24, - XGE_OS_DMA_DIR_FROMDEVICE); + priv->dma_handle, priv->dma_addr, + priv->dma_offset, 24, + XGE_OS_DMA_DIR_FROMDEVICE); #endif - /* check whether it is not the end */ - if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) && - !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) { -#ifndef XGE_HAL_IRQ_POLLING - if (++ring->cmpl_cnt > ring->indicate_max_pkts) { - /* reset it. since we don't want to return - * garbage to the ULD */ - *dtrh = 0; - return XGE_HAL_COMPLETIONS_REMAIN; - } + /* check whether it is not the end */ + if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) && + !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) { +#ifndef XGE_HAL_IRQ_POLLING + if (++ring->cmpl_cnt > ring->indicate_max_pkts) { + /* reset it. since we don't want to return + * garbage to the ULD */ + *dtrh = 0; + return XGE_HAL_COMPLETIONS_REMAIN; + } #endif #ifdef XGE_DEBUG_ASSERT -#if defined(XGE_HAL_USE_5B_MODE) -#if !defined(XGE_OS_PLATFORM_64BIT) - if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { - xge_assert(((xge_hal_ring_rxd_5_t *) - rxdp)->host_control!=0); - } +#if defined(XGE_HAL_USE_5B_MODE) +#if !defined(XGE_OS_PLATFORM_64BIT) + if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { + xge_assert(((xge_hal_ring_rxd_5_t *) + rxdp)->host_control!=0); + } #endif #else - xge_assert(rxdp->host_control!=0); + xge_assert(rxdp->host_control!=0); #endif #endif - __hal_channel_dtr_complete(ring); + __hal_channel_dtr_complete(ring); - *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1); + *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1); - /* see XGE_HAL_SET_RXD_T_CODE() above.. */ - xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C); + /* see XGE_HAL_SET_RXD_T_CODE() above.. 
*/ + xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C); - xge_debug_ring(XGE_TRACE, - "compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT, - ((xge_hal_channel_t*)ring)->compl_index, - ((xge_hal_channel_t*)ring)->post_qid, *t_code, - (unsigned long long)(ulong_t)rxdp); + xge_debug_ring(XGE_TRACE, + "compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT, + ((xge_hal_channel_t*)ring)->compl_index, + ((xge_hal_channel_t*)ring)->post_qid, *t_code, + (unsigned long long)(ulong_t)rxdp); - ring->channel.usage_cnt++; - if (ring->channel.stats.usage_max < ring->channel.usage_cnt) - ring->channel.stats.usage_max = ring->channel.usage_cnt; + ring->channel.usage_cnt++; + if (ring->channel.stats.usage_max < ring->channel.usage_cnt) + ring->channel.stats.usage_max = ring->channel.usage_cnt; - return XGE_HAL_OK; + return XGE_HAL_OK; } - /* reset it. since we don't want to return + /* reset it. since we don't want to return * garbage to the ULD */ - *dtrh = 0; + *dtrh = 0; return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } /** - * xge_hal_ring_dtr_free - Free descriptor. + * xge_hal_ring_dtr_free - Free descriptor. * @channelh: Channel handle. * @dtrh: Descriptor handle. * - * Free the reserved descriptor. This operation is "symmetrical" to - * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's + * Free the reserved descriptor. This operation is "symmetrical" to + * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's * lifecycle. * - * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can + * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can * be: * * - reserved (xge_hal_ring_dtr_reserve); * - * - posted (xge_hal_ring_dtr_post); + * - posted (xge_hal_ring_dtr_post); * * - completed (xge_hal_ring_dtr_next_completed); * - * - and recycled again (xge_hal_ring_dtr_free). + * - and recycled again (xge_hal_ring_dtr_free). * * For alternative state transitions and more details please refer to * the design doc. @@ -778,14 +770,14 @@ xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh, * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free(). * Usage: See ex_rx_compl{}. 
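/*
 * Editorial aside (not part of this patch): a completion loop in the
 * spirit of the ex_rx_compl{} example referenced above, combining
 * next_completed/1b_get/info_get/free as documented.  The wrapper name
 * and the deliver() hook are invented; a real ULD would unmap the
 * buffer and hand it to the stack before freeing the descriptor.
 */
#include <dev/nxge/include/xgehal-ring.h>

static void
sketch_rx_compl_1b(xge_hal_channel_h channelh,
	void (*deliver)(dma_addr_t buf, int len, int vlan))
{
	xge_hal_dtr_h dtrh;
	xge_hal_dtr_info_t ext_info;
	dma_addr_t buf;
	int len;
	u8 t_code;

	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
	    &t_code) == XGE_HAL_OK) {
		if (t_code != 0) {
			/* transfer error: drop the data, recycle the RxD */
			xge_hal_ring_dtr_free(channelh, dtrh);
			continue;
		}

		/* buffer address/length plus extended (cksum, VLAN) info */
		xge_hal_ring_dtr_1b_get(channelh, dtrh, &buf, &len);
		xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);

		deliver(buf, len, ext_info.vlan);

		/* the descriptor can now be reserved and posted again */
		xge_hal_ring_dtr_free(channelh, dtrh);
	}
}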
*/ -__HAL_STATIC_RING __HAL_INLINE_RING void -xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) +__HAL_STATIC_RING __HAL_INLINE_RING void +xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) { -#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) +#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) unsigned long flags; #endif -#if defined(XGE_HAL_RX_MULTI_FREE) +#if defined(XGE_HAL_RX_MULTI_FREE) xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock); #elif defined(XGE_HAL_RX_MULTI_FREE_IRQ) xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock, @@ -793,11 +785,11 @@ xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) #endif __hal_channel_dtr_free(channelh, dtrh); -#if defined(XGE_OS_MEMORY_CHECK) +#if defined(XGE_OS_MEMORY_CHECK) __hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtrh)->allocated = 0; #endif -#if defined(XGE_HAL_RX_MULTI_FREE) +#if defined(XGE_HAL_RX_MULTI_FREE) xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock); #elif defined(XGE_HAL_RX_MULTI_FREE_IRQ) xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock, @@ -806,46 +798,46 @@ xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) } /** - * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed + * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed * @channelh: Channel handle. * - * Checks if the the _next_ completed descriptor is in host memory + * Checks if the the _next_ completed descriptor is in host memory * - * Returns: XGE_HAL_OK - success. - * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors + * Returns: XGE_HAL_OK - success. + * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors * are currently available for processing. */ -__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e +__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh) { - xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */ + xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... 
*/ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; xge_hal_dtr_h dtrh; __hal_channel_dtr_try_complete(ring, &dtrh); rxdp = (xge_hal_ring_rxd_1_t *)dtrh; - if (rxdp == NULL) { - return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; + if (rxdp == NULL) { + return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; } - /* check whether it is not the end */ - if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) && - !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) { + /* check whether it is not the end */ + if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) && + !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) { #ifdef XGE_DEBUG_ASSERT -#if defined(XGE_HAL_USE_5B_MODE) -#if !defined(XGE_OS_PLATFORM_64BIT) - if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { - xge_assert(((xge_hal_ring_rxd_5_t *) - rxdp)->host_control!=0); - } +#if defined(XGE_HAL_USE_5B_MODE) +#if !defined(XGE_OS_PLATFORM_64BIT) + if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { + xge_assert(((xge_hal_ring_rxd_5_t *) + rxdp)->host_control!=0); + } #endif #else - xge_assert(rxdp->host_control!=0); + xge_assert(rxdp->host_control!=0); #endif #endif - return XGE_HAL_OK; + return XGE_HAL_OK; } return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; diff --git a/sys/dev/nxge/xgehal/xgehal-ring.c b/sys/dev/nxge/xgehal/xgehal-ring.c index 84e8f9b..444e10c 100644 --- a/sys/dev/nxge/xgehal/xgehal-ring.c +++ b/sys/dev/nxge/xgehal/xgehal-ring.c @@ -26,21 +26,13 @@ * $FreeBSD$ */ -/* - * FileName : hal-ring.c - * - * Description: Rx ring object implementation - * - * Created: 10 May 2004 - */ - #include <dev/nxge/include/xgehal-ring.h> #include <dev/nxge/include/xgehal-device.h> #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) static ptrdiff_t __hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh, - void *item) + void *item) { int memblock_idx; void *memblock; @@ -57,7 +49,7 @@ __hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh, static dma_addr_t __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item, - pci_dma_h *dma_handle) + pci_dma_h *dma_handle) { int memblock_idx; void *memblock; @@ -69,12 +61,12 @@ __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item, /* get owner memblock by memblock index */ memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh, - memblock_idx); + memblock_idx); /* get memblock DMA object by memblock index */ memblock_dma_object = - __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh, - memblock_idx); + __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh, + memblock_idx); /* calculate offset in the memblock of this item */ dma_item_offset = (char*)item - (char*)memblock; @@ -86,7 +78,7 @@ __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item, static void __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh, - xge_hal_ring_t *ring, int from, int to) + xge_hal_ring_t *ring, int from, int to) { xge_hal_ring_block_t *to_item, *from_item; dma_addr_t to_dma, from_dma; @@ -94,12 +86,12 @@ __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh, /* get "from" RxD block */ from_item = (xge_hal_ring_block_t *) - __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from); + __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from); xge_assert(from_item); /* get "to" RxD block */ to_item = (xge_hal_ring_block_t *) - __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to); + __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to); xge_assert(to_item); /* return address of the beginning of previous RxD block */ @@ -111,33 +103,33 @@ 
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh, /* return "from" RxD block's DMA start address */ from_dma = - __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle); + __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle); #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING) /* we must sync "from" RxD block, so hardware will see it */ xge_os_dma_sync(ring->channel.pdev, from_dma_handle, - from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, - __hal_ring_item_dma_offset(mempoolh, from_item) + - XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, - sizeof(u64), - XGE_OS_DMA_DIR_TODEVICE); + from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, + __hal_ring_item_dma_offset(mempoolh, from_item) + + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, + sizeof(u64), + XGE_OS_DMA_DIR_TODEVICE); #endif xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT, - from, (unsigned long long)from_dma, to, - (unsigned long long)to_dma); + from, (unsigned long long)from_dma, to, + (unsigned long long)to_dma); } static xge_hal_status_e __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh, - void *memblock, - int memblock_index, - xge_hal_mempool_dma_t *dma_object, - void *item, - int index, - int is_last, - void *userdata) + void *memblock, + int memblock_index, + xge_hal_mempool_dma_t *dma_object, + void *item, + int index, + int is_last, + void *userdata) { int i; xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata; @@ -148,71 +140,71 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh, /* format rxds array */ for (i=ring->rxds_per_block-1; i>=0; i--) { - void *rxdblock_priv; - xge_hal_ring_rxd_priv_t *rxd_priv; - xge_hal_ring_rxd_1_t *rxdp; - int reserve_index = index * ring->rxds_per_block + i; - int memblock_item_idx; - - ring->reserved_rxds_arr[reserve_index] = (char *)item + - (ring->rxds_per_block - 1 - i) * ring->rxd_size; - - /* Note: memblock_item_idx is index of the item within - * the memblock. For instance, in case of three RxD-blocks - * per memblock this value can be 0,1 or 2. */ - rxdblock_priv = - __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, - memblock_index, item, - &memblock_item_idx); - rxdp = (xge_hal_ring_rxd_1_t *) - ring->reserved_rxds_arr[reserve_index]; - rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *) - ((char*)rxdblock_priv + ring->rxd_priv_size * i); - - /* pre-format per-RxD Ring's private */ - rxd_priv->dma_offset = (char*)rxdp - (char*)memblock; - rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset; - rxd_priv->dma_handle = dma_object->handle; + void *rxdblock_priv; + xge_hal_ring_rxd_priv_t *rxd_priv; + xge_hal_ring_rxd_1_t *rxdp; + int reserve_index = index * ring->rxds_per_block + i; + int memblock_item_idx; + + ring->reserved_rxds_arr[reserve_index] = (char *)item + + (ring->rxds_per_block - 1 - i) * ring->rxd_size; + + /* Note: memblock_item_idx is index of the item within + * the memblock. For instance, in case of three RxD-blocks + * per memblock this value can be 0,1 or 2. 
*/ + rxdblock_priv = + __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, + memblock_index, item, + &memblock_item_idx); + rxdp = (xge_hal_ring_rxd_1_t *) + ring->reserved_rxds_arr[reserve_index]; + rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *) + ((char*)rxdblock_priv + ring->rxd_priv_size * i); + + /* pre-format per-RxD Ring's private */ + rxd_priv->dma_offset = (char*)rxdp - (char*)memblock; + rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset; + rxd_priv->dma_handle = dma_object->handle; #ifdef XGE_DEBUG_ASSERT - rxd_priv->dma_object = dma_object; + rxd_priv->dma_object = dma_object; #endif - /* pre-format Host_Control */ + /* pre-format Host_Control */ #if defined(XGE_HAL_USE_5B_MODE) - if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { - xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp; + if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { + xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp; #if defined(XGE_OS_PLATFORM_64BIT) - xge_assert(memblock_index <= 0xFFFF); - xge_assert(i <= 0xFFFF); - /* store memblock's index */ - rxdp_5->host_control = (u32)memblock_index << 16; - /* store index of memblock's private */ - rxdp_5->host_control |= (u32)(memblock_item_idx * - ring->rxds_per_block + i); + xge_assert(memblock_index <= 0xFFFF); + xge_assert(i <= 0xFFFF); + /* store memblock's index */ + rxdp_5->host_control = (u32)memblock_index << 16; + /* store index of memblock's private */ + rxdp_5->host_control |= (u32)(memblock_item_idx * + ring->rxds_per_block + i); #else - /* 32-bit case */ - rxdp_5->host_control = (u32)rxd_priv; + /* 32-bit case */ + rxdp_5->host_control = (u32)rxd_priv; #endif - } else { - /* 1b and 3b modes */ - rxdp->host_control = (u64)(ulong_t)rxd_priv; - } + } else { + /* 1b and 3b modes */ + rxdp->host_control = (u64)(ulong_t)rxd_priv; + } #else - /* 1b and 3b modes */ - rxdp->host_control = (u64)(ulong_t)rxd_priv; + /* 1b and 3b modes */ + rxdp->host_control = (u64)(ulong_t)rxd_priv; #endif } __hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index); if (is_last) { - /* link last one with first one */ - __hal_ring_rxdblock_link(mempoolh, ring, 0, index); + /* link last one with first one */ + __hal_ring_rxdblock_link(mempoolh, ring, 0, index); } if (index > 0 ) { - /* link this RxD block with previous one */ - __hal_ring_rxdblock_link(mempoolh, ring, index, index-1); + /* link this RxD block with previous one */ + __hal_ring_rxdblock_link(mempoolh, ring, index, index-1); } return XGE_HAL_OK; @@ -220,30 +212,30 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh, xge_hal_status_e __hal_ring_initial_replenish(xge_hal_channel_t *channel, - xge_hal_channel_reopen_e reopen) + xge_hal_channel_reopen_e reopen) { - xge_hal_dtr_h dtr; + xge_hal_dtr_h dtr = NULL; while (xge_hal_channel_dtr_count(channel) > 0) { - xge_hal_status_e status; - - status = xge_hal_ring_dtr_reserve(channel, &dtr); - xge_assert(status == XGE_HAL_OK); - - if (channel->dtr_init) { - status = channel->dtr_init(channel, - dtr, channel->reserve_length, - channel->userdata, - reopen); - if (status != XGE_HAL_OK) { - xge_hal_ring_dtr_free(channel, dtr); - xge_hal_channel_abort(channel, - XGE_HAL_CHANNEL_OC_NORMAL); - return status; - } - } - - xge_hal_ring_dtr_post(channel, dtr); + xge_hal_status_e status; + + status = xge_hal_ring_dtr_reserve(channel, &dtr); + xge_assert(status == XGE_HAL_OK); + + if (channel->dtr_init) { + status = channel->dtr_init(channel, + dtr, channel->reserve_length, + channel->userdata, + 
reopen); + if (status != XGE_HAL_OK) { + xge_hal_ring_dtr_free(channel, dtr); + xge_hal_channel_abort(channel, + XGE_HAL_CHANNEL_OC_NORMAL); + return status; + } + } + + xge_hal_ring_dtr_post(channel, dtr); } return XGE_HAL_OK; @@ -282,7 +274,7 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode); ring->rxd_priv_size = - sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space; + sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space; /* how many RxDs can fit into one block. Depends on configured * buffer_mode. */ @@ -292,44 +284,44 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev, - sizeof(void*) * queue->max * ring->rxds_per_block); + sizeof(void*) * queue->max * ring->rxds_per_block); if (ring->reserved_rxds_arr == NULL) { - __hal_ring_close(channelh); - return XGE_HAL_ERR_OUT_OF_MEMORY; + __hal_ring_close(channelh); + return XGE_HAL_ERR_OUT_OF_MEMORY; } ring->mempool = __hal_mempool_create( - hldev->pdev, - ring->config->memblock_size, - XGE_HAL_RING_RXDBLOCK_SIZE, - ring->rxdblock_priv_size, - queue->initial, queue->max, - __hal_ring_mempool_item_alloc, - NULL, /* nothing to free */ - ring); + hldev->pdev, + ring->config->memblock_size, + XGE_HAL_RING_RXDBLOCK_SIZE, + ring->rxdblock_priv_size, + queue->initial, queue->max, + __hal_ring_mempool_item_alloc, + NULL, /* nothing to free */ + ring); if (ring->mempool == NULL) { - __hal_ring_close(channelh); - return XGE_HAL_ERR_OUT_OF_MEMORY; + __hal_ring_close(channelh); + return XGE_HAL_ERR_OUT_OF_MEMORY; } status = __hal_channel_initialize(channelh, - attr, - ring->reserved_rxds_arr, - queue->initial * ring->rxds_per_block, - queue->max * ring->rxds_per_block, - 0 /* no threshold for ring! */); + attr, + ring->reserved_rxds_arr, + queue->initial * ring->rxds_per_block, + queue->max * ring->rxds_per_block, + 0 /* no threshold for ring! */); if (status != XGE_HAL_OK) { - __hal_ring_close(channelh); - return status; + __hal_ring_close(channelh); + return status; } /* sanity check that everything formatted ok */ xge_assert(ring->reserved_rxds_arr[0] == - (char *)ring->mempool->items_arr[0] + - (ring->rxds_per_block * ring->rxd_size - ring->rxd_size)); + (char *)ring->mempool->items_arr[0] + + (ring->rxds_per_block * ring->rxd_size - ring->rxd_size)); - /* Note: + /* Note: * Specifying dtr_init callback means two things: * 1) dtrs need to be initialized by ULD at channel-open time; * 2) dtrs need to be posted at channel-open time @@ -337,13 +329,13 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) * Currently we don't have a case when the 1) is done without the 2). 
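/*
 * Editorial aside (not part of this patch): the shape of a ULD dtr_init
 * hook as __hal_ring_initial_replenish() above invokes it -- called once
 * per reserved RxD at channel-open time, expected to attach a receive
 * buffer and return XGE_HAL_OK.  The function name, the userdata layout
 * and the exact parameter types are assumptions for illustration; the
 * authoritative prototype is the dtr_init member of the channel
 * attributes in the HAL headers.
 */
#include <dev/nxge/include/xgehal-ring.h>

/* hypothetical per-ring state handed to the HAL as attr->userdata */
struct sketch_ring_ctx {
	dma_addr_t	*mapped_bufs;	/* pre-mapped receive buffers */
	int		buf_len;
	int		next;
};

static xge_hal_status_e
sketch_rxd_init(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	int index, void *userdata, xge_hal_channel_reopen_e reopen)
{
	struct sketch_ring_ctx *ctx = userdata;

	(void)channelh; (void)index; (void)reopen;

	/* attach the next pre-mapped buffer; the HAL posts the RxD next */
	xge_hal_ring_dtr_1b_set(dtrh, ctx->mapped_bufs[ctx->next++],
	    ctx->buf_len);
	return (XGE_HAL_OK);
}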
*/ if (ring->channel.dtr_init) { - if ((status = __hal_ring_initial_replenish ( - (xge_hal_channel_t *) channelh, - XGE_HAL_CHANNEL_OC_NORMAL) ) - != XGE_HAL_OK) { - __hal_ring_close(channelh); - return status; - } + if ((status = __hal_ring_initial_replenish ( + (xge_hal_channel_t *) channelh, + XGE_HAL_CHANNEL_OC_NORMAL) ) + != XGE_HAL_OK) { + __hal_ring_close(channelh); + return status; + } } /* initial replenish will increment the counter in its post() routine, @@ -359,7 +351,7 @@ __hal_ring_close(xge_hal_channel_h channelh) xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh; xge_hal_ring_queue_t *queue; #if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\ - defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ) + defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ) xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh; #endif @@ -368,13 +360,13 @@ __hal_ring_close(xge_hal_channel_h channelh) queue = &ring->config->queue[ring->channel.post_qid]; if (ring->mempool) { - __hal_mempool_destroy(ring->mempool); + __hal_mempool_destroy(ring->mempool); } if (ring->reserved_rxds_arr) { - xge_os_free(ring->channel.pdev, - ring->reserved_rxds_arr, - sizeof(void*) * queue->max * ring->rxds_per_block); + xge_os_free(ring->channel.pdev, + ring->reserved_rxds_arr, + sizeof(void*) * queue->max * ring->rxds_per_block); } __hal_channel_terminate(channelh); @@ -406,12 +398,12 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh) xge_assert(ring); xge_assert(ring->channel.pdev); bar0 = (xge_hal_pci_bar0_t *) (void *) - ((xge_hal_device_t *)ring->channel.devh)->bar0; + ((xge_hal_device_t *)ring->channel.devh)->bar0; queue = &ring->config->queue[ring->channel.post_qid]; xge_assert(queue->buffer_mode == 1 || - queue->buffer_mode == 3 || - queue->buffer_mode == 5); + queue->buffer_mode == 3 || + queue->buffer_mode == 5); /* last block in fact becomes first. 
This is just the way it * is filled up and linked by item_alloc() */ @@ -419,43 +411,43 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh) block_num = queue->initial; first_block = __hal_mempool_item(ring->mempool, block_num - 1); val64 = __hal_ring_item_dma_addr(ring->mempool, - first_block, &dma_handle); + first_block, &dma_handle); xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0, - val64, &bar0->prc_rxd0_n[ring->channel.post_qid]); + val64, &bar0->prc_rxd0_n[ring->channel.post_qid]); xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized", - ring->channel.post_qid, (unsigned long long)val64); + ring->channel.post_qid, (unsigned long long)val64); val64 = xge_os_pio_mem_read64(ring->channel.pdev, - ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]); + ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC && !queue->rth_en) { - val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE; + val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE; } val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED; val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */ val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF); val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL( - (hldev->config.pci_freq_mherz * queue->backoff_interval_us)); + (hldev->config.pci_freq_mherz * queue->backoff_interval_us)); /* Beware: no snoop by the bridge if (no_snoop_bits) */ val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits); - /* Herc: always use group_reads */ + /* Herc: always use group_reads */ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - val64 |= XGE_HAL_PRC_CTRL_GROUP_READS; + val64 |= XGE_HAL_PRC_CTRL_GROUP_READS; if (hldev->config.bimodal_interrupts) - if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) - val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT; + if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) + val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT; xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0, - val64, &bar0->prc_ctrl_n[ring->channel.post_qid]); + val64, &bar0->prc_ctrl_n[ring->channel.post_qid]); /* Configure Receive Protocol Assist */ val64 = xge_os_pio_mem_read64(ring->channel.pdev, - ring->channel.regh0, &bar0->rx_pa_cfg); + ring->channel.regh0, &bar0->rx_pa_cfg); val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode); val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL); /* Clean STRIP_VLAN_TAG bit and set as config from upper layer */ @@ -463,10 +455,10 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh) val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag); xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0, - val64, &bar0->rx_pa_cfg); + val64, &bar0->rx_pa_cfg); xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d", - ring->channel.post_qid, queue->buffer_mode); + ring->channel.post_qid, queue->buffer_mode); } void @@ -479,14 +471,14 @@ __hal_ring_prc_disable(xge_hal_channel_h channelh) xge_assert(ring); xge_assert(ring->channel.pdev); bar0 = (xge_hal_pci_bar0_t *) (void *) - ((xge_hal_device_t *)ring->channel.devh)->bar0; + ((xge_hal_device_t *)ring->channel.devh)->bar0; val64 = xge_os_pio_mem_read64(ring->channel.pdev, ring->channel.regh0, - &bar0->prc_ctrl_n[ring->channel.post_qid]); + &bar0->prc_ctrl_n[ring->channel.post_qid]); val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED); xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0, - val64, &bar0->prc_ctrl_n[ring->channel.post_qid]); + 
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]); } void @@ -501,78 +493,78 @@ __hal_ring_hw_initialize(xge_hal_device_h devh) val64 = 0; for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!hldev->config.ring.queue[i].configured) - continue; - val64 |= vBIT(hldev->config.ring.queue[i].priority, - (5 + (i * 8)), 3); + if (!hldev->config.ring.queue[i].configured) + continue; + val64 |= vBIT(hldev->config.ring.queue[i].priority, + (5 + (i * 8)), 3); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rx_queue_priority); + &bar0->rx_queue_priority); xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT, - (unsigned long long)val64); + (unsigned long long)val64); /* Configuring ring queues according to per-ring configuration */ val64 = 0; for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!hldev->config.ring.queue[i].configured) - continue; - val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8); + if (!hldev->config.ring.queue[i].configured) + continue; + val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_queue_cfg); xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT, - (unsigned long long)val64); + (unsigned long long)val64); if (!hldev->config.rts_qos_en && !hldev->config.rts_port_en && !hldev->config.rts_mac_en) { - /* - * Activate default (QoS-based) Rx steering - */ - - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->rts_qos_steering); - for (j = 0; j < 8 /* QoS max */; j++) - { - for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) - { - if (!hldev->config.ring.queue[i].configured) - continue; - if (!hldev->config.ring.queue[i].rth_en) - val64 |= (BIT(i) >> (j*8)); - } - } - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->rts_qos_steering); - xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT, - (unsigned long long)val64); + /* + * Activate default (QoS-based) Rx steering + */ + + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + &bar0->rts_qos_steering); + for (j = 0; j < 8 /* QoS max */; j++) + { + for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) + { + if (!hldev->config.ring.queue[i].configured) + continue; + if (!hldev->config.ring.queue[i].rth_en) + val64 |= (BIT(i) >> (j*8)); + } + } + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->rts_qos_steering); + xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT, + (unsigned long long)val64); } /* Note: If a queue does not exist, it should be assigned a maximum - * length of zero. Otherwise, packet loss could occur. - * P. 4-4 User guide. + * length of zero. Otherwise, packet loss could occur. + * P. 4-4 User guide. * * All configured rings will be properly set at device open time * by utilizing device_mtu_set() API call. 
*/ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (hldev->config.ring.queue[i].configured) - continue; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, - &bar0->rts_frm_len_n[i]); + if (hldev->config.ring.queue[i].configured) + continue; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, + &bar0->rts_frm_len_n[i]); } #ifdef XGE_HAL_HERC_EMULATION val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */ + ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */ val64 |= 0x0000000000010000; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - ((u8 *)bar0 + 0x2e60)); + ((u8 *)bar0 + 0x2e60)); val64 |= 0x003a000000000000; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */ + ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */ xge_os_mdelay(2000); #endif @@ -580,7 +572,7 @@ __hal_ring_hw_initialize(xge_hal_device_h devh) val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mc_rldram_mrs); val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE | - XGE_HAL_MC_RLDRAM_MRS_ENABLE; + XGE_HAL_MC_RLDRAM_MRS_ENABLE; __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32), &bar0->mc_rldram_mrs); xge_os_wmb(); @@ -592,50 +584,50 @@ __hal_ring_hw_initialize(xge_hal_device_h devh) /* Temporary fixes for Herc RLDRAM */ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279); - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_ref_per_herc); + val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279); + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_ref_per_herc); - val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, + val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mc_rldram_mrs_herc); - xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT, - (unsigned long long)val64); + xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT, + (unsigned long long)val64); - val64 = 0x0003570003010300ULL; - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->mc_rldram_mrs_herc); + val64 = 0x0003570003010300ULL; + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, + &bar0->mc_rldram_mrs_herc); - xge_os_mdelay(1); + xge_os_mdelay(1); } /* * Assign MSI-X vectors */ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - xge_list_t *item; - xge_hal_channel_t *channel = NULL; - - if (!hldev->config.ring.queue[i].configured || - !hldev->config.ring.queue[i].intr_vector || - !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX) - continue; - - /* find channel */ - xge_list_for_each(item, &hldev->free_channels) { - xge_hal_channel_t *tmp; - tmp = xge_container_of(item, xge_hal_channel_t, - item); - if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING && - tmp->post_qid == i) { - channel = tmp; - break; - } - } - - if (channel) { - xge_hal_channel_msix_set(channel, - hldev->config.ring.queue[i].intr_vector); - } + xge_list_t *item; + xge_hal_channel_t *channel = NULL; + + if (!hldev->config.ring.queue[i].configured || + !hldev->config.ring.queue[i].intr_vector || + !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX) + continue; + + /* find channel */ + xge_list_for_each(item, &hldev->free_channels) { + xge_hal_channel_t *tmp; + tmp = xge_container_of(item, xge_hal_channel_t, + item); + if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING && + tmp->post_qid == i) { + channel = tmp; + break; + } + } + + if (channel) { + xge_hal_channel_msix_set(channel, + 
hldev->config.ring.queue[i].intr_vector); + } } xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized"); @@ -649,21 +641,21 @@ __hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen) xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { - if (!hldev->config.ring.queue[i].configured) - continue; - if (hldev->config.ring.queue[i].max_frm_len != - XGE_HAL_RING_USE_MTU) { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_MAC_RTS_FRM_LEN_SET( - hldev->config.ring.queue[i].max_frm_len), - &bar0->rts_frm_len_n[i]); - } else { - xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen), - &bar0->rts_frm_len_n[i]); - } + if (!hldev->config.ring.queue[i].configured) + continue; + if (hldev->config.ring.queue[i].max_frm_len != + XGE_HAL_RING_USE_MTU) { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_MAC_RTS_FRM_LEN_SET( + hldev->config.ring.queue[i].max_frm_len), + &bar0->rts_frm_len_n[i]); + } else { + xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, + XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen), + &bar0->rts_frm_len_n[i]); + } } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen), - &bar0->rmac_max_pyld_len); + XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen), + &bar0->rmac_max_pyld_len); } diff --git a/sys/dev/nxge/xgehal/xgehal-stats.c b/sys/dev/nxge/xgehal/xgehal-stats.c index 2755ebb..c5f59b6 100644 --- a/sys/dev/nxge/xgehal/xgehal-stats.c +++ b/sys/dev/nxge/xgehal/xgehal-stats.c @@ -26,14 +26,6 @@ * $FreeBSD$ */ -/* - * FileName : xgehal-stats.c - * - * Description: statistics object implementation - * - * Created: 2 June 2004 - */ - #include <dev/nxge/include/xgehal-stats.h> #include <dev/nxge/include/xgehal-device.h> @@ -61,132 +53,132 @@ __hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh) dma_flags |= XGE_OS_DMA_STREAMING; #endif if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - stats->hw_info = - (xge_hal_stats_hw_info_t *) xge_os_dma_malloc( - hldev->pdev, - sizeof(xge_hal_stats_hw_info_t), - dma_flags, - &stats->hw_info_dmah, - &stats->hw_info_dma_acch); - - if (stats->hw_info == NULL) { - xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc"); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - xge_os_memzero(stats->hw_info, - sizeof(xge_hal_stats_hw_info_t)); - xge_os_memzero(&stats->hw_info_saved, - sizeof(xge_hal_stats_hw_info_t)); - xge_os_memzero(&stats->hw_info_latest, - sizeof(xge_hal_stats_hw_info_t)); - - - - stats->dma_addr = xge_os_dma_map(hldev->pdev, + stats->hw_info = + (xge_hal_stats_hw_info_t *) xge_os_dma_malloc( + hldev->pdev, + sizeof(xge_hal_stats_hw_info_t), + dma_flags, + &stats->hw_info_dmah, + &stats->hw_info_dma_acch); + + if (stats->hw_info == NULL) { + xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc"); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + xge_os_memzero(stats->hw_info, + sizeof(xge_hal_stats_hw_info_t)); + xge_os_memzero(&stats->hw_info_saved, + sizeof(xge_hal_stats_hw_info_t)); + xge_os_memzero(&stats->hw_info_latest, + sizeof(xge_hal_stats_hw_info_t)); + + + + stats->dma_addr = xge_os_dma_map(hldev->pdev, stats->hw_info_dmah, - stats->hw_info, - sizeof(xge_hal_stats_hw_info_t), - XGE_OS_DMA_DIR_FROMDEVICE, - XGE_OS_DMA_CACHELINE_ALIGNED | + stats->hw_info, + sizeof(xge_hal_stats_hw_info_t), + XGE_OS_DMA_DIR_FROMDEVICE, + XGE_OS_DMA_CACHELINE_ALIGNED | #ifdef XGE_HAL_DMA_STATS_CONSISTENT - XGE_OS_DMA_CONSISTENT + XGE_OS_DMA_CONSISTENT #else - XGE_OS_DMA_STREAMING + XGE_OS_DMA_STREAMING 
#endif - ); - if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) { - xge_debug_stats(XGE_ERR, - "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA", - (unsigned long long)(ulong_t)stats->hw_info); - xge_os_dma_free(hldev->pdev, - stats->hw_info, - sizeof(xge_hal_stats_hw_info_t), - &stats->hw_info_dma_acch, - &stats->hw_info_dmah); - return XGE_HAL_ERR_OUT_OF_MAPPING; - } + ); + if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) { + xge_debug_stats(XGE_ERR, + "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA", + (unsigned long long)(ulong_t)stats->hw_info); + xge_os_dma_free(hldev->pdev, + stats->hw_info, + sizeof(xge_hal_stats_hw_info_t), + &stats->hw_info_dma_acch, + &stats->hw_info_dmah); + return XGE_HAL_ERR_OUT_OF_MAPPING; + } } else { - stats->pcim_info_saved = - (xge_hal_stats_pcim_info_t *)xge_os_malloc( - hldev->pdev, sizeof(xge_hal_stats_pcim_info_t)); - if (stats->pcim_info_saved == NULL) { - xge_debug_stats(XGE_ERR, "%s", "can not alloc"); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - stats->pcim_info_latest = - (xge_hal_stats_pcim_info_t *)xge_os_malloc( - hldev->pdev, sizeof(xge_hal_stats_pcim_info_t)); - if (stats->pcim_info_latest == NULL) { - xge_os_free(hldev->pdev, stats->pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); - xge_debug_stats(XGE_ERR, "%s", "can not alloc"); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - stats->pcim_info = - (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc( - hldev->pdev, - sizeof(xge_hal_stats_pcim_info_t), - dma_flags, - &stats->hw_info_dmah, - &stats->hw_info_dma_acch); - - if (stats->pcim_info == NULL) { - xge_os_free(hldev->pdev, stats->pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); - xge_os_free(hldev->pdev, stats->pcim_info_latest, - sizeof(xge_hal_stats_pcim_info_t)); - xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc"); - return XGE_HAL_ERR_OUT_OF_MEMORY; - } - - - xge_os_memzero(stats->pcim_info, - sizeof(xge_hal_stats_pcim_info_t)); - xge_os_memzero(stats->pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); - xge_os_memzero(stats->pcim_info_latest, - sizeof(xge_hal_stats_pcim_info_t)); - - - - stats->dma_addr = xge_os_dma_map(hldev->pdev, + stats->pcim_info_saved = + (xge_hal_stats_pcim_info_t *)xge_os_malloc( + hldev->pdev, sizeof(xge_hal_stats_pcim_info_t)); + if (stats->pcim_info_saved == NULL) { + xge_debug_stats(XGE_ERR, "%s", "can not alloc"); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + + stats->pcim_info_latest = + (xge_hal_stats_pcim_info_t *)xge_os_malloc( + hldev->pdev, sizeof(xge_hal_stats_pcim_info_t)); + if (stats->pcim_info_latest == NULL) { + xge_os_free(hldev->pdev, stats->pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); + xge_debug_stats(XGE_ERR, "%s", "can not alloc"); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + + stats->pcim_info = + (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc( + hldev->pdev, + sizeof(xge_hal_stats_pcim_info_t), + dma_flags, + &stats->hw_info_dmah, + &stats->hw_info_dma_acch); + + if (stats->pcim_info == NULL) { + xge_os_free(hldev->pdev, stats->pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); + xge_os_free(hldev->pdev, stats->pcim_info_latest, + sizeof(xge_hal_stats_pcim_info_t)); + xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc"); + return XGE_HAL_ERR_OUT_OF_MEMORY; + } + + + xge_os_memzero(stats->pcim_info, + sizeof(xge_hal_stats_pcim_info_t)); + xge_os_memzero(stats->pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); + xge_os_memzero(stats->pcim_info_latest, + sizeof(xge_hal_stats_pcim_info_t)); + + + + stats->dma_addr = xge_os_dma_map(hldev->pdev, stats->hw_info_dmah, - 
stats->pcim_info, - sizeof(xge_hal_stats_pcim_info_t), - XGE_OS_DMA_DIR_FROMDEVICE, - XGE_OS_DMA_CACHELINE_ALIGNED | + stats->pcim_info, + sizeof(xge_hal_stats_pcim_info_t), + XGE_OS_DMA_DIR_FROMDEVICE, + XGE_OS_DMA_CACHELINE_ALIGNED | #ifdef XGE_HAL_DMA_STATS_CONSISTENT - XGE_OS_DMA_CONSISTENT + XGE_OS_DMA_CONSISTENT #else - XGE_OS_DMA_STREAMING + XGE_OS_DMA_STREAMING #endif - ); - if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) { - xge_debug_stats(XGE_ERR, - "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA", - (unsigned long long)(ulong_t)stats->hw_info); - - xge_os_dma_free(hldev->pdev, - stats->pcim_info, - sizeof(xge_hal_stats_pcim_info_t), - &stats->hw_info_dma_acch, - &stats->hw_info_dmah); - - xge_os_free(hldev->pdev, stats->pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); - - xge_os_free(hldev->pdev, stats->pcim_info_latest, - sizeof(xge_hal_stats_pcim_info_t)); - - return XGE_HAL_ERR_OUT_OF_MAPPING; - } + ); + if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) { + xge_debug_stats(XGE_ERR, + "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA", + (unsigned long long)(ulong_t)stats->hw_info); + + xge_os_dma_free(hldev->pdev, + stats->pcim_info, + sizeof(xge_hal_stats_pcim_info_t), + &stats->hw_info_dma_acch, + &stats->hw_info_dmah); + + xge_os_free(hldev->pdev, stats->pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); + + xge_os_free(hldev->pdev, stats->pcim_info_latest, + sizeof(xge_hal_stats_pcim_info_t)); + + return XGE_HAL_ERR_OUT_OF_MAPPING; + } } stats->devh = devh; xge_os_memzero(&stats->sw_dev_info_stats, - sizeof(xge_hal_stats_device_info_t)); + sizeof(xge_hal_stats_device_info_t)); stats->is_initialized = 1; @@ -199,19 +191,19 @@ __hal_stats_save (xge_hal_stats_t *stats) xge_hal_device_t *hldev = (xge_hal_device_t*)stats->devh; if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - xge_hal_stats_hw_info_t *latest; + xge_hal_stats_hw_info_t *latest; - (void) xge_hal_stats_hw(stats->devh, &latest); + (void) xge_hal_stats_hw(stats->devh, &latest); - xge_os_memcpy(&stats->hw_info_saved, stats->hw_info, - sizeof(xge_hal_stats_hw_info_t)); + xge_os_memcpy(&stats->hw_info_saved, stats->hw_info, + sizeof(xge_hal_stats_hw_info_t)); } else { - xge_hal_stats_pcim_info_t *latest; + xge_hal_stats_pcim_info_t *latest; - (void) xge_hal_stats_pcim(stats->devh, &latest); + (void) xge_hal_stats_pcim(stats->devh, &latest); - xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info, - sizeof(xge_hal_stats_pcim_info_t)); + xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info, + sizeof(xge_hal_stats_pcim_info_t)); } } @@ -237,16 +229,16 @@ __hal_stats_disable (xge_hal_stats_t *stats) bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->stat_cfg); + &bar0->stat_cfg); val64 &= ~XGE_HAL_STAT_CFG_STAT_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, - &bar0->stat_cfg); + &bar0->stat_cfg); /* flush the write */ (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, - &bar0->stat_cfg); + &bar0->stat_cfg); xge_debug_stats(XGE_TRACE, "stats disabled at 0x"XGE_OS_LLXFMT, - (unsigned long long)stats->dma_addr); + (unsigned long long)stats->dma_addr); stats->is_enabled = 0; } @@ -268,35 +260,35 @@ __hal_stats_terminate (xge_hal_stats_t *stats) xge_assert(hldev); xge_assert(stats->is_initialized); if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - xge_os_dma_unmap(hldev->pdev, + xge_os_dma_unmap(hldev->pdev, stats->hw_info_dmah, - stats->dma_addr, - sizeof(xge_hal_stats_hw_info_t), - XGE_OS_DMA_DIR_FROMDEVICE); - - 
xge_os_dma_free(hldev->pdev, - stats->hw_info, - sizeof(xge_hal_stats_hw_info_t), - &stats->hw_info_dma_acch, - &stats->hw_info_dmah); + stats->dma_addr, + sizeof(xge_hal_stats_hw_info_t), + XGE_OS_DMA_DIR_FROMDEVICE); + + xge_os_dma_free(hldev->pdev, + stats->hw_info, + sizeof(xge_hal_stats_hw_info_t), + &stats->hw_info_dma_acch, + &stats->hw_info_dmah); } else { - xge_os_dma_unmap(hldev->pdev, + xge_os_dma_unmap(hldev->pdev, stats->hw_info_dmah, - stats->dma_addr, - sizeof(xge_hal_stats_pcim_info_t), - XGE_OS_DMA_DIR_FROMDEVICE); + stats->dma_addr, + sizeof(xge_hal_stats_pcim_info_t), + XGE_OS_DMA_DIR_FROMDEVICE); - xge_os_dma_free(hldev->pdev, - stats->pcim_info, - sizeof(xge_hal_stats_pcim_info_t), - &stats->hw_info_dma_acch, - &stats->hw_info_dmah); + xge_os_dma_free(hldev->pdev, + stats->pcim_info, + sizeof(xge_hal_stats_pcim_info_t), + &stats->hw_info_dma_acch, + &stats->hw_info_dmah); - xge_os_free(hldev->pdev, stats->pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); + xge_os_free(hldev->pdev, stats->pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); - xge_os_free(hldev->pdev, stats->pcim_info_latest, - sizeof(xge_hal_stats_pcim_info_t)); + xge_os_free(hldev->pdev, stats->pcim_info_latest, + sizeof(xge_hal_stats_pcim_info_t)); } @@ -333,13 +325,13 @@ __hal_stats_enable (xge_hal_stats_t *stats) * For Titan stat_addr offset == 0x09d8, and stat_cfg offset == 0x09d0 */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - stats->dma_addr, &bar0->stat_addr); + stats->dma_addr, &bar0->stat_addr); refresh_time_pci_clocks = XGE_HAL_XENA_PER_SEC * - hldev->config.stats_refresh_time_sec; + hldev->config.stats_refresh_time_sec; refresh_time_pci_clocks = - __hal_fix_time_ival_herc(hldev, - refresh_time_pci_clocks); + __hal_fix_time_ival_herc(hldev, + refresh_time_pci_clocks); #ifdef XGE_HAL_HERC_EMULATION /* @@ -351,18 +343,18 @@ __hal_stats_enable (xge_hal_stats_t *stats) */ val64 = (0x20C | XGE_HAL_STAT_CFG_STAT_RO | - XGE_HAL_STAT_CFG_STAT_EN); + XGE_HAL_STAT_CFG_STAT_EN); #else val64 = XGE_HAL_SET_UPDT_PERIOD(refresh_time_pci_clocks) | - XGE_HAL_STAT_CFG_STAT_RO | - XGE_HAL_STAT_CFG_STAT_EN; + XGE_HAL_STAT_CFG_STAT_RO | + XGE_HAL_STAT_CFG_STAT_EN; #endif xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, - val64, &bar0->stat_cfg); + val64, &bar0->stat_cfg); xge_debug_stats(XGE_TRACE, "stats enabled at 0x"XGE_OS_LLXFMT, - (unsigned long long)stats->dma_addr); + (unsigned long long)stats->dma_addr); stats->is_enabled = 1; } @@ -377,133 +369,133 @@ __hal_stats_pcim_update_latest(xge_hal_device_h devh) xge_hal_device_t *hldev = (xge_hal_device_t *)devh; int i; -#define set_latest_stat_link_cnt(_link, _p) \ - hldev->stats.pcim_info_latest->link_info[_link]._p = \ - ((hldev->stats.pcim_info->link_info[_link]._p >= \ - hldev->stats.pcim_info_saved->link_info[_link]._p) ? \ - hldev->stats.pcim_info->link_info[_link]._p - \ - hldev->stats.pcim_info_saved->link_info[_link]._p : \ - ((-1) - hldev->stats.pcim_info_saved->link_info[_link]._p) + \ - hldev->stats.pcim_info->link_info[_link]._p) +#define set_latest_stat_link_cnt(_link, _p) \ + hldev->stats.pcim_info_latest->link_info[_link]._p = \ + ((hldev->stats.pcim_info->link_info[_link]._p >= \ + hldev->stats.pcim_info_saved->link_info[_link]._p) ? 
\ + hldev->stats.pcim_info->link_info[_link]._p - \ + hldev->stats.pcim_info_saved->link_info[_link]._p : \ + ((-1) - hldev->stats.pcim_info_saved->link_info[_link]._p) + \ + hldev->stats.pcim_info->link_info[_link]._p) -#define set_latest_stat_aggr_cnt(_aggr, _p) \ - hldev->stats.pcim_info_latest->aggr_info[_aggr]._p = \ - ((hldev->stats.pcim_info->aggr_info[_aggr]._p >= \ - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) ? \ - hldev->stats.pcim_info->aggr_info[_aggr]._p - \ - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p : \ - ((-1) - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) + \ - hldev->stats.pcim_info->aggr_info[_aggr]._p) +#define set_latest_stat_aggr_cnt(_aggr, _p) \ + hldev->stats.pcim_info_latest->aggr_info[_aggr]._p = \ + ((hldev->stats.pcim_info->aggr_info[_aggr]._p >= \ + hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) ? \ + hldev->stats.pcim_info->aggr_info[_aggr]._p - \ + hldev->stats.pcim_info_saved->aggr_info[_aggr]._p : \ + ((-1) - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) + \ + hldev->stats.pcim_info->aggr_info[_aggr]._p) for (i = 0; i < XGE_HAL_MAC_LINKS; i++) { - set_latest_stat_link_cnt(i, tx_frms); - set_latest_stat_link_cnt(i, tx_ttl_eth_octets); - set_latest_stat_link_cnt(i, tx_data_octets); - set_latest_stat_link_cnt(i, tx_mcst_frms); - set_latest_stat_link_cnt(i, tx_bcst_frms); - set_latest_stat_link_cnt(i, tx_ucst_frms); - set_latest_stat_link_cnt(i, tx_tagged_frms); - set_latest_stat_link_cnt(i, tx_vld_ip); - set_latest_stat_link_cnt(i, tx_vld_ip_octets); - set_latest_stat_link_cnt(i, tx_icmp); - set_latest_stat_link_cnt(i, tx_tcp); - set_latest_stat_link_cnt(i, tx_rst_tcp); - set_latest_stat_link_cnt(i, tx_udp); - set_latest_stat_link_cnt(i, tx_unknown_protocol); - set_latest_stat_link_cnt(i, tx_parse_error); - set_latest_stat_link_cnt(i, tx_pause_ctrl_frms); - set_latest_stat_link_cnt(i, tx_lacpdu_frms); - set_latest_stat_link_cnt(i, tx_marker_pdu_frms); - set_latest_stat_link_cnt(i, tx_marker_resp_pdu_frms); - set_latest_stat_link_cnt(i, tx_drop_ip); - set_latest_stat_link_cnt(i, tx_xgmii_char1_match); - set_latest_stat_link_cnt(i, tx_xgmii_char2_match); - set_latest_stat_link_cnt(i, tx_xgmii_column1_match); - set_latest_stat_link_cnt(i, tx_xgmii_column2_match); - set_latest_stat_link_cnt(i, tx_drop_frms); - set_latest_stat_link_cnt(i, tx_any_err_frms); - set_latest_stat_link_cnt(i, rx_ttl_frms); - set_latest_stat_link_cnt(i, rx_vld_frms); - set_latest_stat_link_cnt(i, rx_offld_frms); - set_latest_stat_link_cnt(i, rx_ttl_eth_octets); - set_latest_stat_link_cnt(i, rx_data_octets); - set_latest_stat_link_cnt(i, rx_offld_octets); - set_latest_stat_link_cnt(i, rx_vld_mcst_frms); - set_latest_stat_link_cnt(i, rx_vld_bcst_frms); - set_latest_stat_link_cnt(i, rx_accepted_ucst_frms); - set_latest_stat_link_cnt(i, rx_accepted_nucst_frms); - set_latest_stat_link_cnt(i, rx_tagged_frms); - set_latest_stat_link_cnt(i, rx_long_frms); - set_latest_stat_link_cnt(i, rx_usized_frms); - set_latest_stat_link_cnt(i, rx_osized_frms); - set_latest_stat_link_cnt(i, rx_frag_frms); - set_latest_stat_link_cnt(i, rx_jabber_frms); - set_latest_stat_link_cnt(i, rx_ttl_64_frms); - set_latest_stat_link_cnt(i, rx_ttl_65_127_frms); - set_latest_stat_link_cnt(i, rx_ttl_128_255_frms); - set_latest_stat_link_cnt(i, rx_ttl_256_511_frms); - set_latest_stat_link_cnt(i, rx_ttl_512_1023_frms); - set_latest_stat_link_cnt(i, rx_ttl_1024_1518_frms); - set_latest_stat_link_cnt(i, rx_ttl_1519_4095_frms); - set_latest_stat_link_cnt(i, rx_ttl_40956_8191_frms); - 
set_latest_stat_link_cnt(i, rx_ttl_8192_max_frms); - set_latest_stat_link_cnt(i, rx_ttl_gt_max_frms); - set_latest_stat_link_cnt(i, rx_ip); - set_latest_stat_link_cnt(i, rx_ip_octets); - set_latest_stat_link_cnt(i, rx_hdr_err_ip); - set_latest_stat_link_cnt(i, rx_icmp); - set_latest_stat_link_cnt(i, rx_tcp); - set_latest_stat_link_cnt(i, rx_udp); - set_latest_stat_link_cnt(i, rx_err_tcp); - set_latest_stat_link_cnt(i, rx_pause_cnt); - set_latest_stat_link_cnt(i, rx_pause_ctrl_frms); - set_latest_stat_link_cnt(i, rx_unsup_ctrl_frms); - set_latest_stat_link_cnt(i, rx_in_rng_len_err_frms); - set_latest_stat_link_cnt(i, rx_out_rng_len_err_frms); - set_latest_stat_link_cnt(i, rx_drop_frms); - set_latest_stat_link_cnt(i, rx_discarded_frms); - set_latest_stat_link_cnt(i, rx_drop_ip); - set_latest_stat_link_cnt(i, rx_err_drp_udp); - set_latest_stat_link_cnt(i, rx_lacpdu_frms); - set_latest_stat_link_cnt(i, rx_marker_pdu_frms); - set_latest_stat_link_cnt(i, rx_marker_resp_pdu_frms); - set_latest_stat_link_cnt(i, rx_unknown_pdu_frms); - set_latest_stat_link_cnt(i, rx_illegal_pdu_frms); - set_latest_stat_link_cnt(i, rx_fcs_discard); - set_latest_stat_link_cnt(i, rx_len_discard); - set_latest_stat_link_cnt(i, rx_pf_discard); - set_latest_stat_link_cnt(i, rx_trash_discard); - set_latest_stat_link_cnt(i, rx_rts_discard); - set_latest_stat_link_cnt(i, rx_wol_discard); - set_latest_stat_link_cnt(i, rx_red_discard); - set_latest_stat_link_cnt(i, rx_ingm_full_discard); - set_latest_stat_link_cnt(i, rx_xgmii_data_err_cnt); - set_latest_stat_link_cnt(i, rx_xgmii_ctrl_err_cnt); - set_latest_stat_link_cnt(i, rx_xgmii_err_sym); - set_latest_stat_link_cnt(i, rx_xgmii_char1_match); - set_latest_stat_link_cnt(i, rx_xgmii_char2_match); - set_latest_stat_link_cnt(i, rx_xgmii_column1_match); - set_latest_stat_link_cnt(i, rx_xgmii_column2_match); - set_latest_stat_link_cnt(i, rx_local_fault); - set_latest_stat_link_cnt(i, rx_remote_fault); - set_latest_stat_link_cnt(i, rx_queue_full); + set_latest_stat_link_cnt(i, tx_frms); + set_latest_stat_link_cnt(i, tx_ttl_eth_octets); + set_latest_stat_link_cnt(i, tx_data_octets); + set_latest_stat_link_cnt(i, tx_mcst_frms); + set_latest_stat_link_cnt(i, tx_bcst_frms); + set_latest_stat_link_cnt(i, tx_ucst_frms); + set_latest_stat_link_cnt(i, tx_tagged_frms); + set_latest_stat_link_cnt(i, tx_vld_ip); + set_latest_stat_link_cnt(i, tx_vld_ip_octets); + set_latest_stat_link_cnt(i, tx_icmp); + set_latest_stat_link_cnt(i, tx_tcp); + set_latest_stat_link_cnt(i, tx_rst_tcp); + set_latest_stat_link_cnt(i, tx_udp); + set_latest_stat_link_cnt(i, tx_unknown_protocol); + set_latest_stat_link_cnt(i, tx_parse_error); + set_latest_stat_link_cnt(i, tx_pause_ctrl_frms); + set_latest_stat_link_cnt(i, tx_lacpdu_frms); + set_latest_stat_link_cnt(i, tx_marker_pdu_frms); + set_latest_stat_link_cnt(i, tx_marker_resp_pdu_frms); + set_latest_stat_link_cnt(i, tx_drop_ip); + set_latest_stat_link_cnt(i, tx_xgmii_char1_match); + set_latest_stat_link_cnt(i, tx_xgmii_char2_match); + set_latest_stat_link_cnt(i, tx_xgmii_column1_match); + set_latest_stat_link_cnt(i, tx_xgmii_column2_match); + set_latest_stat_link_cnt(i, tx_drop_frms); + set_latest_stat_link_cnt(i, tx_any_err_frms); + set_latest_stat_link_cnt(i, rx_ttl_frms); + set_latest_stat_link_cnt(i, rx_vld_frms); + set_latest_stat_link_cnt(i, rx_offld_frms); + set_latest_stat_link_cnt(i, rx_ttl_eth_octets); + set_latest_stat_link_cnt(i, rx_data_octets); + set_latest_stat_link_cnt(i, rx_offld_octets); + set_latest_stat_link_cnt(i, rx_vld_mcst_frms); + 
set_latest_stat_link_cnt(i, rx_vld_bcst_frms); + set_latest_stat_link_cnt(i, rx_accepted_ucst_frms); + set_latest_stat_link_cnt(i, rx_accepted_nucst_frms); + set_latest_stat_link_cnt(i, rx_tagged_frms); + set_latest_stat_link_cnt(i, rx_long_frms); + set_latest_stat_link_cnt(i, rx_usized_frms); + set_latest_stat_link_cnt(i, rx_osized_frms); + set_latest_stat_link_cnt(i, rx_frag_frms); + set_latest_stat_link_cnt(i, rx_jabber_frms); + set_latest_stat_link_cnt(i, rx_ttl_64_frms); + set_latest_stat_link_cnt(i, rx_ttl_65_127_frms); + set_latest_stat_link_cnt(i, rx_ttl_128_255_frms); + set_latest_stat_link_cnt(i, rx_ttl_256_511_frms); + set_latest_stat_link_cnt(i, rx_ttl_512_1023_frms); + set_latest_stat_link_cnt(i, rx_ttl_1024_1518_frms); + set_latest_stat_link_cnt(i, rx_ttl_1519_4095_frms); + set_latest_stat_link_cnt(i, rx_ttl_40956_8191_frms); + set_latest_stat_link_cnt(i, rx_ttl_8192_max_frms); + set_latest_stat_link_cnt(i, rx_ttl_gt_max_frms); + set_latest_stat_link_cnt(i, rx_ip); + set_latest_stat_link_cnt(i, rx_ip_octets); + set_latest_stat_link_cnt(i, rx_hdr_err_ip); + set_latest_stat_link_cnt(i, rx_icmp); + set_latest_stat_link_cnt(i, rx_tcp); + set_latest_stat_link_cnt(i, rx_udp); + set_latest_stat_link_cnt(i, rx_err_tcp); + set_latest_stat_link_cnt(i, rx_pause_cnt); + set_latest_stat_link_cnt(i, rx_pause_ctrl_frms); + set_latest_stat_link_cnt(i, rx_unsup_ctrl_frms); + set_latest_stat_link_cnt(i, rx_in_rng_len_err_frms); + set_latest_stat_link_cnt(i, rx_out_rng_len_err_frms); + set_latest_stat_link_cnt(i, rx_drop_frms); + set_latest_stat_link_cnt(i, rx_discarded_frms); + set_latest_stat_link_cnt(i, rx_drop_ip); + set_latest_stat_link_cnt(i, rx_err_drp_udp); + set_latest_stat_link_cnt(i, rx_lacpdu_frms); + set_latest_stat_link_cnt(i, rx_marker_pdu_frms); + set_latest_stat_link_cnt(i, rx_marker_resp_pdu_frms); + set_latest_stat_link_cnt(i, rx_unknown_pdu_frms); + set_latest_stat_link_cnt(i, rx_illegal_pdu_frms); + set_latest_stat_link_cnt(i, rx_fcs_discard); + set_latest_stat_link_cnt(i, rx_len_discard); + set_latest_stat_link_cnt(i, rx_pf_discard); + set_latest_stat_link_cnt(i, rx_trash_discard); + set_latest_stat_link_cnt(i, rx_rts_discard); + set_latest_stat_link_cnt(i, rx_wol_discard); + set_latest_stat_link_cnt(i, rx_red_discard); + set_latest_stat_link_cnt(i, rx_ingm_full_discard); + set_latest_stat_link_cnt(i, rx_xgmii_data_err_cnt); + set_latest_stat_link_cnt(i, rx_xgmii_ctrl_err_cnt); + set_latest_stat_link_cnt(i, rx_xgmii_err_sym); + set_latest_stat_link_cnt(i, rx_xgmii_char1_match); + set_latest_stat_link_cnt(i, rx_xgmii_char2_match); + set_latest_stat_link_cnt(i, rx_xgmii_column1_match); + set_latest_stat_link_cnt(i, rx_xgmii_column2_match); + set_latest_stat_link_cnt(i, rx_local_fault); + set_latest_stat_link_cnt(i, rx_remote_fault); + set_latest_stat_link_cnt(i, rx_queue_full); } for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) { - set_latest_stat_aggr_cnt(i, tx_frms); - set_latest_stat_aggr_cnt(i, tx_mcst_frms); - set_latest_stat_aggr_cnt(i, tx_bcst_frms); - set_latest_stat_aggr_cnt(i, tx_discarded_frms); - set_latest_stat_aggr_cnt(i, tx_errored_frms); - set_latest_stat_aggr_cnt(i, rx_frms); - set_latest_stat_aggr_cnt(i, rx_data_octets); - set_latest_stat_aggr_cnt(i, rx_mcst_frms); - set_latest_stat_aggr_cnt(i, rx_bcst_frms); - set_latest_stat_aggr_cnt(i, rx_discarded_frms); - set_latest_stat_aggr_cnt(i, rx_errored_frms); - set_latest_stat_aggr_cnt(i, rx_unknown_protocol_frms); + set_latest_stat_aggr_cnt(i, tx_frms); + set_latest_stat_aggr_cnt(i, tx_mcst_frms); + 
set_latest_stat_aggr_cnt(i, tx_bcst_frms); + set_latest_stat_aggr_cnt(i, tx_discarded_frms); + set_latest_stat_aggr_cnt(i, tx_errored_frms); + set_latest_stat_aggr_cnt(i, rx_frms); + set_latest_stat_aggr_cnt(i, rx_data_octets); + set_latest_stat_aggr_cnt(i, rx_mcst_frms); + set_latest_stat_aggr_cnt(i, rx_bcst_frms); + set_latest_stat_aggr_cnt(i, rx_discarded_frms); + set_latest_stat_aggr_cnt(i, rx_errored_frms); + set_latest_stat_aggr_cnt(i, rx_unknown_protocol_frms); } return; } @@ -518,14 +510,14 @@ __hal_stats_update_latest(xge_hal_device_h devh) xge_hal_device_t *hldev = (xge_hal_device_t *)devh; #define set_latest_stat_cnt(_dev, _p) \ - hldev->stats.hw_info_latest._p = \ + hldev->stats.hw_info_latest._p = \ ((hldev->stats.hw_info->_p >= hldev->stats.hw_info_saved._p) ? \ - hldev->stats.hw_info->_p - hldev->stats.hw_info_saved._p : \ + hldev->stats.hw_info->_p - hldev->stats.hw_info_saved._p : \ ((-1) - hldev->stats.hw_info_saved._p) + hldev->stats.hw_info->_p) if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) { - __hal_stats_pcim_update_latest(devh); - return; + __hal_stats_pcim_update_latest(devh); + return; } /* Tx MAC statistics counters. */ @@ -721,20 +713,20 @@ xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info) if (!hldev->stats.is_initialized || !hldev->stats.is_enabled) { - *hw_info = NULL; - return XGE_HAL_INF_STATS_IS_NOT_READY; + *hw_info = NULL; + return XGE_HAL_INF_STATS_IS_NOT_READY; } #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING) xge_os_dma_sync(hldev->pdev, hldev->stats.hw_info_dmah, - hldev->stats.dma_addr, - 0, - sizeof(xge_hal_stats_hw_info_t), - XGE_OS_DMA_DIR_FROMDEVICE); + hldev->stats.dma_addr, + 0, + sizeof(xge_hal_stats_hw_info_t), + XGE_OS_DMA_DIR_FROMDEVICE); #endif - /* + /* * update hw counters, taking into account * the "reset" or "saved" * values @@ -746,19 +738,19 @@ xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info) */ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA || xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { - u64 mcst, bcst; - xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest; + u64 mcst, bcst; + xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest; - mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) | - hwsta->rmac_vld_mcst_frms; + mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) | + hwsta->rmac_vld_mcst_frms; - bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) | - hwsta->rmac_vld_bcst_frms; + bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) | + hwsta->rmac_vld_bcst_frms; - mcst -= bcst; + mcst -= bcst; - hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32); - hwsta->rmac_vld_mcst_frms = (u32)mcst; + hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32); + hwsta->rmac_vld_mcst_frms = (u32)mcst; } *hw_info = &hldev->stats.hw_info_latest; @@ -786,20 +778,20 @@ xge_hal_stats_pcim(xge_hal_device_h devh, xge_hal_stats_pcim_info_t **hw_info) if (!hldev->stats.is_initialized || !hldev->stats.is_enabled) { - *hw_info = NULL; - return XGE_HAL_INF_STATS_IS_NOT_READY; + *hw_info = NULL; + return XGE_HAL_INF_STATS_IS_NOT_READY; } #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING) xge_os_dma_sync(hldev->pdev, hldev->stats.hw_info_dmah, - hldev->stats.dma_addr, - 0, - sizeof(xge_hal_stats_pcim_info_t), - XGE_OS_DMA_DIR_FROMDEVICE); + hldev->stats.dma_addr, + 0, + sizeof(xge_hal_stats_pcim_info_t), + XGE_OS_DMA_DIR_FROMDEVICE); #endif - /* + /* * update hw counters, taking into account * the "reset" or 
"saved" * values @@ -830,19 +822,19 @@ xge_hal_stats_pcim(xge_hal_device_h devh, xge_hal_stats_pcim_info_t **hw_info) */ xge_hal_status_e xge_hal_stats_device(xge_hal_device_h devh, - xge_hal_stats_device_info_t **device_info) + xge_hal_stats_device_info_t **device_info) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; if (!hldev->stats.is_initialized || !hldev->stats.is_enabled) { - *device_info = NULL; - return XGE_HAL_INF_STATS_IS_NOT_READY; + *device_info = NULL; + return XGE_HAL_INF_STATS_IS_NOT_READY; } hldev->stats.sw_dev_info_stats.traffic_intr_cnt = - hldev->stats.sw_dev_info_stats.total_intr_cnt - - hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt; + hldev->stats.sw_dev_info_stats.total_intr_cnt - + hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt; *device_info = &hldev->stats.sw_dev_info_stats; @@ -866,64 +858,64 @@ xge_hal_stats_device(xge_hal_device_h devh, */ xge_hal_status_e xge_hal_stats_channel(xge_hal_channel_h channelh, - xge_hal_stats_channel_info_t **channel_info) + xge_hal_stats_channel_info_t **channel_info) { - xge_hal_stats_hw_info_t *latest; + xge_hal_stats_hw_info_t *latest; xge_hal_channel_t *channel; xge_hal_device_t *hldev; channel = (xge_hal_channel_t *)channelh; + if ((channel == NULL) || (channel->magic != XGE_HAL_MAGIC)) { + return XGE_HAL_ERR_INVALID_DEVICE; + } hldev = (xge_hal_device_t *)channel->devh; if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; - } - if ((channel == NULL) || (channel->magic != XGE_HAL_MAGIC)) { - return XGE_HAL_ERR_INVALID_DEVICE; + return XGE_HAL_ERR_INVALID_DEVICE; } if (!hldev->stats.is_initialized || !hldev->stats.is_enabled || !channel->is_open) { - *channel_info = NULL; - return XGE_HAL_INF_STATS_IS_NOT_READY; + *channel_info = NULL; + return XGE_HAL_INF_STATS_IS_NOT_READY; } hldev->stats.sw_dev_info_stats.traffic_intr_cnt = - hldev->stats.sw_dev_info_stats.total_intr_cnt - - hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt; + hldev->stats.sw_dev_info_stats.total_intr_cnt - + hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt; if (hldev->stats.sw_dev_info_stats.traffic_intr_cnt) { - int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt; - int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; - if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - if (!txcnt) - txcnt = 1; - channel->stats.avg_compl_per_intr_cnt = - channel->stats.total_compl_cnt / txcnt; - } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING && - !hldev->config.bimodal_interrupts) { - if (!rxcnt) - rxcnt = 1; - channel->stats.avg_compl_per_intr_cnt = - channel->stats.total_compl_cnt / rxcnt; - } - if (channel->stats.avg_compl_per_intr_cnt == 0) { - /* to not confuse user */ - channel->stats.avg_compl_per_intr_cnt = 1; - } + int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt; + int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; + if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { + if (!txcnt) + txcnt = 1; + channel->stats.avg_compl_per_intr_cnt = + channel->stats.total_compl_cnt / txcnt; + } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING && + !hldev->config.bimodal_interrupts) { + if (!rxcnt) + rxcnt = 1; + channel->stats.avg_compl_per_intr_cnt = + channel->stats.total_compl_cnt / rxcnt; + } + if (channel->stats.avg_compl_per_intr_cnt == 0) { + /* to not confuse user */ + channel->stats.avg_compl_per_intr_cnt = 1; + } } (void) xge_hal_stats_hw(hldev, &latest); if (channel->stats.total_posts) { - channel->stats.avg_buffers_per_post = - 
channel->stats.total_buffers / - channel->stats.total_posts; + channel->stats.avg_buffers_per_post = + channel->stats.total_buffers / + channel->stats.total_posts; #ifdef XGE_OS_PLATFORM_64BIT if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - channel->stats.avg_post_size = - (u32)(latest->tmac_ttl_less_fb_octets / - channel->stats.total_posts); + channel->stats.avg_post_size = + (u32)(latest->tmac_ttl_less_fb_octets / + channel->stats.total_posts); } #endif } @@ -931,9 +923,9 @@ xge_hal_stats_channel(xge_hal_channel_h channelh, #ifdef XGE_OS_PLATFORM_64BIT if (channel->stats.total_buffers && channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { - channel->stats.avg_buffer_size = - (u32)(latest->tmac_ttl_less_fb_octets / - channel->stats.total_buffers); + channel->stats.avg_buffer_size = + (u32)(latest->tmac_ttl_less_fb_octets / + channel->stats.total_buffers); } #endif @@ -960,14 +952,14 @@ xge_hal_stats_reset(xge_hal_device_h devh) if (!hldev->stats.is_initialized || !hldev->stats.is_enabled) { - return XGE_HAL_INF_STATS_IS_NOT_READY; + return XGE_HAL_INF_STATS_IS_NOT_READY; } /* save hw stats to calculate the after-reset values */ __hal_stats_save(&hldev->stats); /* zero-out driver-maintained stats, don't reset the saved */ - __hal_stats_soft_reset(hldev, 0); + __hal_stats_soft_reset(hldev, 0); return XGE_HAL_OK; } @@ -982,19 +974,19 @@ __hal_stats_soft_reset (xge_hal_device_h devh, int reset_all) xge_hal_channel_t *channel; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; - if (reset_all) { - if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { - xge_os_memzero(&hldev->stats.hw_info_saved, - sizeof(xge_hal_stats_hw_info_t)); - xge_os_memzero(&hldev->stats.hw_info_latest, - sizeof(xge_hal_stats_hw_info_t)); - } else { - xge_os_memzero(&hldev->stats.pcim_info_saved, - sizeof(xge_hal_stats_pcim_info_t)); - xge_os_memzero(&hldev->stats.pcim_info_latest, - sizeof(xge_hal_stats_pcim_info_t)); - } - } + if (reset_all) { + if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) { + xge_os_memzero(&hldev->stats.hw_info_saved, + sizeof(xge_hal_stats_hw_info_t)); + xge_os_memzero(&hldev->stats.hw_info_latest, + sizeof(xge_hal_stats_hw_info_t)); + } else { + xge_os_memzero(&hldev->stats.pcim_info_saved, + sizeof(xge_hal_stats_pcim_info_t)); + xge_os_memzero(&hldev->stats.pcim_info_latest, + sizeof(xge_hal_stats_pcim_info_t)); + } + } /* Reset the "soft" error and informational statistics */ xge_os_memzero(&hldev->stats.sw_dev_err_stats, @@ -1004,16 +996,16 @@ __hal_stats_soft_reset (xge_hal_device_h devh, int reset_all) /* for each Rx channel */ xge_list_for_each(item, &hldev->ring_channels) { - channel = xge_container_of(item, xge_hal_channel_t, item); - xge_os_memzero(&channel->stats, - sizeof(xge_hal_stats_channel_info_t)); + channel = xge_container_of(item, xge_hal_channel_t, item); + xge_os_memzero(&channel->stats, + sizeof(xge_hal_stats_channel_info_t)); } /* for each Tx channel */ xge_list_for_each(item, &hldev->fifo_channels) { - channel = xge_container_of(item, xge_hal_channel_t, item); - xge_os_memzero(&channel->stats, - sizeof(xge_hal_stats_channel_info_t)); + channel = xge_container_of(item, xge_hal_channel_t, item); + xge_os_memzero(&channel->stats, + sizeof(xge_hal_stats_channel_info_t)); } } diff --git a/sys/dev/nxge/xgell-version.h b/sys/dev/nxge/xgell-version.h index f694833..df72482 100644 --- a/sys/dev/nxge/xgell-version.h +++ b/sys/dev/nxge/xgell-version.h @@ -26,20 +26,12 @@ * $FreeBSD$ */ -/* - * FileName : version.h - * - * Description: versioning file - * - * Created: 
3 September 2004 - */ - #ifndef XGELL_VERSION_H #define XGELL_VERSION_H #define XGELL_VERSION_MAJOR "2" #define XGELL_VERSION_MINOR "0" -#define XGELL_VERSION_FIX "7" +#define XGELL_VERSION_FIX "9" #define XGELL_VERSION_BUILD GENERATED_BUILD_VERSION #define XGELL_VERSION XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \ GENERATED_BUILD_VERSION diff --git a/sys/modules/nxge/Makefile b/sys/modules/nxge/Makefile index 0eea721..4994b90 100644 --- a/sys/modules/nxge/Makefile +++ b/sys/modules/nxge/Makefile @@ -22,14 +22,11 @@ CFLAGS_NXGE += -DXGE_DEBUG_MODULE_MASK=XGE_COMPONENT_LL CFLAGS_NXGE += -DXGE_DEBUG_ERR_MASK=XGE_COMPONENT_LL #CFLAGS_NXGE += -DXGE_DEBUG_TRACE_MASK=XGE_COMPONENT_LL -# 2-Buffer Mode -#CFLAGS_NXGE += -DXGE_BUFFER_MODE_2 - -# 3-Buffer Mode -#CFLAGS_NXGE += -DXGE_BUFFER_MODE_3 +# Check Memory +#CFLAGS_NXGE += -DXGE_OS_MEMORY_CHECK -# TSO (TCP Segmentation Offload) -CFLAGS_NXGE += -DXGE_FEATURE_TSO +# 2-Buffer Mode +#CFLAGS_NXGE += -DXGE_FEATURE_BUFFER_MODE_2 CFLAGS += $(CFLAGS_NXGE) diff --git a/tools/tools/nxge/xge_cmn.h b/tools/tools/nxge/xge_cmn.h index e4a8ada..f4eb9aa 100644 --- a/tools/tools/nxge/xge_cmn.h +++ b/tools/tools/nxge/xge_cmn.h @@ -25,6 +25,7 @@ * * $FreeBSD$ */ + #ifndef XGE_CMN_H #define XGE_CMN_H @@ -45,97 +46,99 @@ #define XGE_OS_HOST_BIG_ENDIAN 1 #endif -#define u64 unsigned long long -#define u32 unsigned int -#define u16 unsigned short -#define u8 unsigned char +#define u64 unsigned long long +#define u32 unsigned int +#define u16 unsigned short +#define u8 unsigned char #define XGE_COUNT_REGS 386 #define XGE_COUNT_STATS 160 #define XGE_COUNT_PCICONF 43 -#define XGE_COUNT_DEVCONF 1677 +#define XGE_COUNT_DEVCONF 1677 #ifdef CONFIG_LRO -#define XGE_COUNT_INTRSTAT 26 +#define XGE_COUNT_INTRSTAT 26 #else -#define XGE_COUNT_INTRSTAT 20 +#define XGE_COUNT_INTRSTAT 20 #endif -#define XGE_COUNT_TCODESTAT 54 +#define XGE_COUNT_SWSTAT 54 +#define XGE_COUNT_DRIVERSTATS 27 #define DEVICE_ID_XFRAME_II 0x5832 #define XGE_COUNT_EXTENDED_STATS 56 -#define XGE_PRINT(fd, fmt...) \ -{ \ - fprintf( fd, fmt ); \ - fprintf( fd, "\n" ); \ - printf( fmt ); \ - printf( "\n" ); \ +#define XGE_PRINT(fd, fmt...) { \ + fprintf(fd, fmt); \ + fprintf(fd, "\n"); \ + printf(fmt); \ + printf("\n"); \ } -#define XGE_PRINT_LINE(fd) XGE_PRINT(fd, line); +#define XGE_PRINT_LINE(fd) XGE_PRINT(fd, line); + /* Read & Write Register */ typedef struct barregister { - char option[2]; - u64 offset; - u64 value; -}bar0reg_t; + char option[2]; + u64 offset; + u64 value; +}xge_register_info_t; /* Register Dump */ typedef struct xge_pci_bar0_t { - u8 name[32]; /* Register name as in user guides */ - u64 offset; /* Offset from base address */ - u64 value; /* Value */ - char type; /* 1: XframeII, 0: Common */ - + u8 name[32]; /* Register name as in user guides */ + u64 offset; /* Offset from base address */ + u64 value; /* Value */ + char type; /* 1: XframeII, 0: Common */ } xge_pci_bar0_t; /* Hardware Statistics */ typedef struct xge_stats_hw_info_t { - u8 name[32]; /* Statistics name */ - u64 be_offset; /* Offset from base address (BE) */ - u64 le_offset; /* Offset from base address (LE) */ - u8 type; /* Type: 1, 2, 3 or 4 bytes */ - u64 value; /* Value */ - + u8 name[32]; /* Statistics name */ + u64 be_offset; /* Offset from base address (BE) */ + u64 le_offset; /* Offset from base address (LE) */ + u8 type; /* Type: 1, 2, 3 or 4 bytes */ + u64 value; /* Value */ } xge_stats_hw_info_t; /* PCI Configuration Space */ typedef struct xge_pci_config_t { - u8 name[32]; /* Pci conf. 
name */ - u64 be_offset; /* Offset from base address (BE) */ - u64 le_offset; /* Offset from base address (LE) */ - u64 value; /* Value */ - + u8 name[32]; /* Pci conf. name */ + u64 be_offset; /* Offset from base address (BE) */ + u64 le_offset; /* Offset from base address (LE) */ + u64 value; /* Value */ } xge_pci_config_t; /* Device Configuration */ typedef struct xge_device_config_t { - u8 name[32]; /* Device conf. name */ - u64 value; /* Value */ - + u8 name[32]; /* Device conf. name */ + u64 value; /* Value */ } xge_device_config_t; /* Interrupt Statistics */ typedef struct xge_stats_intr_info_t { - u8 name[32]; /* Interrupt entry name */ - u64 value; /* Value (count) */ - + u8 name[32]; /* Interrupt entry name */ + u64 value; /* Value (count) */ } xge_stats_intr_info_t; + /* Tcode Statistics */ typedef struct xge_stats_tcode_info_t { - u8 name[32]; /* Tcode entry name */ - u64 value; /* Value (count) */ - u8 type; /* Type: 1, 2, 3 or 4 bytes */ - u16 flag; - + u8 name[32]; /* Tcode entry name */ + u64 value; /* Value (count) */ + u8 type; /* Type: 1, 2, 3 or 4 bytes */ + u16 flag; }xge_stats_tcode_info_t; +typedef struct xge_stats_driver_info_t +{ + u8 name[32]; /* Driver statistics name */ + u64 value; /* Value */ +} xge_stats_driver_info_t; + #ifdef XGE_OS_HOST_BIG_ENDIAN #define GET_OFFSET_STATS(index) statsInfo[(index)].be_offset #define GET_OFFSET_PCICONF(index) pciconfInfo[(index)].be_offset diff --git a/tools/tools/nxge/xge_info.c b/tools/tools/nxge/xge_info.c index 714d89c..f392c5c 100644 --- a/tools/tools/nxge/xge_info.c +++ b/tools/tools/nxge/xge_info.c @@ -25,437 +25,582 @@ * * $FreeBSD$ */ -/****************************************** - * xge_info.c - * - * To get the Tx, Rx, PCI, Interrupt statistics, - * PCI configuration space and bar0 register - * values - ******************************************/ + #include "xge_info.h" int main( int argc, char *argv[] ) { - if(argc >= 4) { - if(!((strcmp(argv[2], "-r") == 0) || - (strcmp(argv[2], "-w") == 0) || - (strcmp(argv[2], "chgbufmode") == 0))) - { goto use; } - } - else { - - if(argc != 3) { goto out; } - - else - { - if(!((strcmp(argv[2], "stats") == 0) || - (strcmp(argv[2], "pciconf") == 0) || - (strcmp(argv[2], "devconf") == 0) || - (strcmp(argv[2], "reginfo") == 0) || - (strcmp(argv[2], "driverversion") == 0) || - (strcmp(argv[2], "swstats") == 0) || - (strcmp(argv[2], "getbufmode") == 0) || - (strcmp(argv[2], "intr") == 0))) - { goto out; } - } - } - - if((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) - { - printf("Creating socket failed\n"); - return EXIT_FAILURE; - } - - ifreqp.ifr_addr.sa_family = AF_INET; - strcpy(ifreqp.ifr_name, argv[1]); - - if (strcmp(argv[2], "pciconf") == 0) return getPciConf(); - else if(strcmp(argv[2], "devconf") == 0) return getDevConf(); - else if(strcmp(argv[2], "stats") == 0) return getStats(); - else if(strcmp(argv[2], "reginfo") == 0) return getRegInfo(); - else if(strcmp(argv[2], "intr") == 0) return getIntrStats(); - else if(strcmp(argv[2], "swstats") == 0) return getTcodeStats(); - else if(strcmp(argv[2], "driverversion") == 0) return getDriverVer(); - else if(strcmp(argv[2], "-r") == 0) return getReadReg(argv[2], - argv[3]); - else if(strcmp(argv[2], "-w") == 0) return getWriteReg(argv[2], - argv[3],argv[5]); - else if(strcmp(argv[2], "chgbufmode") == 0) return changeBufMode(argv[3]); - else if(strcmp(argv[2], "getbufmode") == 0) return getBufMode(); - else return EXIT_FAILURE; - -use: - printf("Usage:"); - printf("%s <INTERFACE> [-r] [-w] [chgbufmode]\n", argv[0]); - printf("\t 
-r <offset> : Read register \n"); - printf("\t -w <offset> -v <value> : Write register \n"); - printf("\t chgbufmode <Buffer mode> : Changes buffer mode \n"); - return EXIT_FAILURE; + int status = EXIT_FAILURE; + + if(argc >= 4) { + if(!((strcmp(argv[2], "getregister") == 0) || + (strcmp(argv[2], "setregister") == 0) || + (strcmp(argv[2], "setbufmode") == 0))) { + goto out; + } + } + else { + if(argc != 3) { + goto out; + } + else { + if(!((strcmp(argv[2], "hwstats") == 0) || + (strcmp(argv[2], "pciconf") == 0) || + (strcmp(argv[2], "devconf") == 0) || + (strcmp(argv[2], "registers") == 0) || + (strcmp(argv[2], "version") == 0) || + (strcmp(argv[2], "swstats") == 0) || + (strcmp(argv[2], "drvstats") == 0) || + (strcmp(argv[2], "getbufmode") == 0) || + (strcmp(argv[2], "devstats") == 0))) { + goto out; + } + } + } + + if((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + printf("Creating socket failed\n"); + goto _exit; + } + + ifreqp.ifr_addr.sa_family = AF_INET; + strcpy(ifreqp.ifr_name, argv[1]); + + if (strcmp(argv[2], "pciconf") == 0) + status = xge_get_pciconf(); + else if(strcmp(argv[2], "devconf") == 0) + status = xge_get_devconf(); + else if(strcmp(argv[2], "hwstats") == 0) + status = xge_get_hwstats(); + else if(strcmp(argv[2], "registers") == 0) + status = xge_get_registers(); + else if(strcmp(argv[2], "devstats") == 0) + status = xge_get_devstats(); + else if(strcmp(argv[2], "swstats") == 0) + status = xge_get_swstats(); + else if(strcmp(argv[2], "drvstats") == 0) + status = xge_get_drvstats(); + else if(strcmp(argv[2], "version") == 0) + status = xge_get_drv_version(); + else if(strcmp(argv[2], "getbufmode") == 0) + status = xge_get_buffer_mode(); + else if(strcmp(argv[2], "getregister") == 0) + status = xge_get_register(argv[3]); + else if(strcmp(argv[2], "setregister") == 0) + status = xge_set_register(argv[3], argv[4]); + else if(strcmp(argv[2], "setbufmode") == 0) + status = xge_change_buffer_mode(argv[3]); + goto _exit; out: - printf("Usage:"); - printf("%s <INTERFACE> <[stats] [reginfo] [pciconf] [devconf] ", argv[0]); - printf("[intr] [swstats] [driverversion] "); - printf("[getbufmode] [chgbufmode] [-r] [-w] >\n"); - printf("\tINTERFACE : Interface (xge0, xge1, xge2, ..)\n"); - printf("\tstats : Prints statistics \n"); - printf("\treginfo : Prints register values \n"); - printf("\tpciconf : Prints PCI configuration space \n"); - printf("\tdevconf : Prints device configuration \n"); - printf("\tintr : Prints interrupt statistics \n"); - printf("\tswstats : Prints sw statistics \n"); - printf("\tdriverversion : Prints driver version \n"); - printf("\tgetbufmode : Prints Buffer Mode \n"); - printf("\tchgbufmode : Changes buffer mode \n"); - printf("\t -r <offset> : Read register \n"); - printf("\t -w <offset> -v <value> : Write register \n"); - return EXIT_FAILURE; + printf("Usage: "); + printf("getinfo <INTERFACE> [hwstats] [swstats] [devstats] "); + printf("[drvstats] [version] [registers] [getregister offset] "); + printf("[setregister offset value] [pciconf] [devconf] [getbufmode] "); + printf("[setbufmode]\n"); + printf("\tINTERFACE : Interface (nxge0, nxge1, nxge2, ..) 
\n"); + printf("\thwstats : Prints hardware statistics \n"); + printf("\tswstats : Prints software statistics \n"); + printf("\tdevstats : Prints device statistics \n"); + printf("\tdrvstats : Prints driver statistics \n"); + printf("\tversion : Prints driver version \n"); + printf("\tregisters : Prints register values \n"); + printf("\tgetregister : Read a register \n"); + printf("\tsetregister : Write to a register \n"); + printf("\tpciconf : Prints PCI configuration space \n"); + printf("\tdevconf : Prints device configuration \n"); + printf("\tgetbufmode : Prints Buffer Mode \n"); + printf("\tsetbufmode : Changes buffer mode \n"); + +_exit: + return status; } +/** + * xge_get_hwstats + * Gets hardware statistics + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getStats() +xge_get_hwstats(void) { - void *hw_stats; - void *pci_cfg; - unsigned short device_id; - int index = 0; - bufferSize = GET_OFFSET_STATS(XGE_COUNT_STATS - 1) + 8; - - hw_stats = (void *) malloc(bufferSize); - if(!hw_stats) - { - printf("Allocating memory for hw_stats failed\n"); - return EXIT_FAILURE; - } - pAccess = (char *)hw_stats; - *pAccess = XGE_QUERY_STATS; - ifreqp.ifr_data = (caddr_t) hw_stats; - - if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) - { - printf("Getting hardware statistics failed\n"); - free(hw_stats); - return EXIT_FAILURE; - } - bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8; - - pci_cfg = (void *) malloc(bufferSize); - if(!pci_cfg) - { - printf("Allocating memory for pci_cfg failed\n"); - return EXIT_FAILURE; - } - - pAccess = (char *)pci_cfg; - *pAccess = XGE_QUERY_PCICONF; - ifreqp.ifr_data = (caddr_t)pci_cfg; - - if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) - { - printf("Getting pci configuration space failed\n"); - free(pci_cfg); - return EXIT_FAILURE; - } - device_id = *( ( u16 * )( ( unsigned char * )pci_cfg + - GET_OFFSET_PCICONF(index) ) ); - - logStats( hw_stats,device_id ); - free(hw_stats); - free(pci_cfg); - return EXIT_SUCCESS; + char *hw_stats = NULL, *pci_cfg = NULL; + unsigned short device_id; + int index = 0; + int status = EXIT_FAILURE; + + buffer_size = GET_OFFSET_STATS(XGE_COUNT_STATS - 1) + 8; + + hw_stats = (char *)malloc(buffer_size); + if(!hw_stats) { + printf("Allocating memory for hardware statistics failed\n"); + goto _exit; + } + *hw_stats = XGE_QUERY_STATS; + ifreqp.ifr_data = (caddr_t) hw_stats; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting hardware statistics failed\n"); + goto _exit1; + } + + buffer_size = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF - 1) + 8; + pci_cfg = (void *)malloc(buffer_size); + if(!pci_cfg) { + printf("Allocating memory for PCI configuration failed\n"); + goto _exit1; + } + + *pci_cfg = XGE_QUERY_PCICONF; + ifreqp.ifr_data = (caddr_t)pci_cfg; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting pci configuration space failed\n"); + goto _exit2; + } + device_id = *((u16 *)((unsigned char *)pci_cfg + + GET_OFFSET_PCICONF(index))); + + xge_print_hwstats(hw_stats,device_id); + status = EXIT_SUCCESS; + +_exit2: + free(pci_cfg); + +_exit1: + free(hw_stats); + +_exit: + return status; } +/** + * xge_get_pciconf + * Gets PCI configuration space + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getPciConf() +xge_get_pciconf(void) { - void *pci_cfg; - - indexer = 0; - bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8; - - pci_cfg = (void *) malloc(bufferSize); - if(!pci_cfg) - { - printf("Allocating memory for pci_cfg failed\n"); - return EXIT_FAILURE; - } - - pAccess = (char 
*)pci_cfg;
- *pAccess = XGE_QUERY_PCICONF;
- ifreqp.ifr_data = (caddr_t)pci_cfg;
-
- if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
- {
- printf("Getting pci configuration space failed\n");
- free(pci_cfg);
- return EXIT_FAILURE;
- }
-
- logPciConf( pci_cfg );
- free(pci_cfg);
- return EXIT_SUCCESS;
+ char *pci_cfg = NULL;
+ int status = EXIT_FAILURE;
+
+ buffer_size = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF - 1) + 8;
+
+ pci_cfg = (char *)malloc(buffer_size);
+ if(!pci_cfg) {
+ printf("Allocating memory for PCI configuration failed\n");
+ goto _exit;
+ }
+
+ *pci_cfg = XGE_QUERY_PCICONF;
+ ifreqp.ifr_data = (caddr_t)pci_cfg;
+
+ if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
+ printf("Getting PCI configuration space failed\n");
+ goto _exit1;
+ }
+
+ xge_print_pciconf( pci_cfg );
+ status = EXIT_SUCCESS;
+
+_exit1:
+ free(pci_cfg);
+
+_exit:
+ return status;
 }
+/**
+ * xge_get_devconf
+ * Gets device configuration
+ *
+ * Returns EXIT_SUCCESS or EXIT_FAILURE
+ */
 int
-getDevConf()
+xge_get_devconf(void)
 {
- void *device_cfg;
-
- indexer = 0;
- bufferSize = XGE_COUNT_DEVCONF * sizeof(int);
-
- device_cfg = (void *) malloc(bufferSize);
- if(!device_cfg)
- {
- printf("Allocating memory for device_cfg failed\n");
- return EXIT_FAILURE;
- }
- pAccess = (char *)device_cfg;
- *pAccess = XGE_QUERY_DEVCONF;
- ifreqp.ifr_data = (caddr_t)device_cfg;
-
- if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
- {
- printf("Getting Device Configuration failed\n");
- free(device_cfg);
- return EXIT_FAILURE;
- }
-
- logDevConf( device_cfg );
- free(device_cfg);
- return EXIT_SUCCESS;
+ char *device_cfg = NULL;
+ int status = EXIT_FAILURE;
+
+ buffer_size = XGE_COUNT_DEVCONF * sizeof(int);
+
+ device_cfg = (char *)malloc(buffer_size);
+ if(!device_cfg) {
+ printf("Allocating memory for device configuration failed\n");
+ goto _exit;
+ }
+
+ *device_cfg = XGE_QUERY_DEVCONF;
+ ifreqp.ifr_data = (caddr_t)device_cfg;
+
+ if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
+ printf("Getting Device Configuration failed\n");
+ goto _exit1;
+ }
+
+ xge_print_devconf( device_cfg );
+ status = EXIT_SUCCESS;
+
+_exit1:
+ free(device_cfg);
+
+_exit:
+ return status;
 }
+/**
+ * xge_get_buffer_mode
+ * Get current Rx buffer mode
+ *
+ * Return EXIT_SUCCESS or EXIT_FAILURE
+ */
 int
-getBufMode()
+xge_get_buffer_mode(void)
 {
- void *buf_mode = 0;
-
- buf_mode = (void *) malloc(sizeof(int));
- if(!buf_mode)
- {
- printf("Allocating memory for Buffer mode parameter failed\n");
- return EXIT_FAILURE;
- }
-
- pAccess = (char *)buf_mode;
- *pAccess = XGE_QUERY_BUFFER_MODE;
- ifreqp.ifr_data = (void *)buf_mode;
-
- if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
- {
- printf("Getting Buffer Mode failed\n");
- free(buf_mode);
- return EXIT_FAILURE;
- }
- printf("Buffer Mode is %d\n", *ifreqp.ifr_data);
- free(buf_mode);
- return EXIT_SUCCESS;
+ char *buf_mode = NULL;
+ int status = EXIT_FAILURE;
+
+ buf_mode = (char *)malloc(sizeof(int));
+ if(!buf_mode) {
+ printf("Allocating memory for buffer mode failed\n");
+ goto _exit;
+ }
+
+ *buf_mode = XGE_QUERY_BUFFER_MODE;
+ ifreqp.ifr_data = (void *)buf_mode;
+
+ if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
+ printf("Getting Buffer Mode failed\n");
+ goto _exit1;
+ }
+ printf("Rx Buffer Mode: %d\n", *ifreqp.ifr_data);
+ status = EXIT_SUCCESS;
+
+_exit1:
+ free(buf_mode);
+
+_exit:
+ return status;
 }
-
+/**
+ * xge_change_buffer_mode
+ * Change Rx buffer mode
+ *
+ * Returns EXIT_SUCCESS or EXIT_FAILURE
+ */
 int
-changeBufMode(char *bufmode)
 {
-
- char 
*print_msg; - pAccess = (char *)malloc(sizeof(char)); - - if(*bufmode == '1'){ - *pAccess = XGE_SET_BUFFER_MODE_1; - }else if (*bufmode == '2'){ - *pAccess = XGE_SET_BUFFER_MODE_2; - }else if (*bufmode == '3'){ - *pAccess = XGE_SET_BUFFER_MODE_3; - }else if (*bufmode == '5'){ - *pAccess = XGE_SET_BUFFER_MODE_5; - }else{ - printf("Invalid Buffer mode\n"); - return EXIT_FAILURE; - } - - ifreqp.ifr_data = (char *)pAccess; - if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 ) - { - printf( "Changing Buffer Mode Failed\n" ); - return EXIT_FAILURE; - } - print_msg = (char *)ifreqp.ifr_data; - if(*print_msg == 'Y') - printf("Requested buffer mode was already enabled\n"); - else if(*print_msg == 'N') - printf("Requested buffer mode is not implemented OR\nDynamic buffer changing is not supported in this driver\n"); - else if(*print_msg == 'C') - printf("Buffer mode changed to %c\n", *bufmode); - - return EXIT_SUCCESS; + char *print_msg = NULL; + int status = EXIT_FAILURE; + + print_msg = (char *)malloc(sizeof(char)); + if(print_msg == NULL) { + printf("Allocation of memory for message failed\n"); + goto _exit; + } + + if (*bufmode == '1') *print_msg = XGE_SET_BUFFER_MODE_1; + else if(*bufmode == '2') *print_msg = XGE_SET_BUFFER_MODE_2; + else if(*bufmode == '5') *print_msg = XGE_SET_BUFFER_MODE_5; + else { + printf("Invalid Buffer mode\n"); + goto _exit1; + } + + ifreqp.ifr_data = (char *)print_msg; + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Changing buffer mode failed\n"); + goto _exit1; + } + + if(*print_msg == 'Y') { + printf("Requested buffer mode was already enabled\n"); + } + else if(*print_msg == 'N') { + printf("Requested buffer mode is not implemented OR\n"); + printf("Dynamic buffer changing is not supported in this driver\n"); + } + else if(*print_msg == 'C') { + printf("Buffer mode changed to %c\n", *bufmode); + } + status = EXIT_SUCCESS; + +_exit1: + free(print_msg); + +_exit: + return status; } +/** + * xge_get_registers + * Gets register values + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getRegInfo() +xge_get_registers(void) { - void *regBuffer; - - indexer = 0; - bufferSize = regInfo[XGE_COUNT_REGS - 1].offset + 8; - - regBuffer = ( void * ) malloc ( bufferSize ); - if( !regBuffer ) - { - printf( "Allocating memory for register dump failed\n" ); - return EXIT_FAILURE; - } - - ifreqp.ifr_data = ( caddr_t )regBuffer; - if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 ) - { - printf( "Getting register dump failed\n" ); - free( regBuffer ); - return EXIT_FAILURE; - } - - logRegInfo( regBuffer ); - free( regBuffer ); - return EXIT_SUCCESS; + void *registers = NULL; + int status = EXIT_FAILURE; + + buffer_size = regInfo[XGE_COUNT_REGS - 1].offset + 8; + + registers = (void *)malloc(buffer_size); + if(!registers) { + printf("Allocating memory for register dump failed\n"); + goto _exit; + } + + ifreqp.ifr_data = (caddr_t)registers; + if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) { + printf("Getting register values failed\n"); + goto _exit1; + } + + xge_print_registers(registers); + status = EXIT_SUCCESS; + +_exit1: + free(registers); + +_exit: + return status; } -int -getReadReg(char *opt,char *offst) +/** + * xge_get_register + * Reads a register specified offset + * + * @offset Offset of register from base address + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ +int +xge_get_register(char *offset) { - bar0reg_t *reg; - - reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t)); - if( !reg ) - { - printf( "Allocating memory for reading register failed\n" ); - return 
EXIT_FAILURE; - } - strcpy(reg->option, opt); - sscanf(offst,"%x",®->offset); - ifreqp.ifr_data = ( caddr_t )reg; - if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 ) - { - printf( "Reading register failed\n" ); - free(reg); - return EXIT_FAILURE; - } - logReadReg ( reg->offset,reg->value ); - free(reg); - return EXIT_SUCCESS; -} + xge_register_info_t *register_info = NULL; + int status = EXIT_FAILURE; + register_info = + (xge_register_info_t *)malloc(sizeof(xge_register_info_t)); + if(!register_info) { + printf("Allocating memory for register info failed\n"); + goto _exit; + } + strcpy(register_info->option, "-r"); + sscanf(offset, "%x", ®ister_info->offset); + ifreqp.ifr_data = (caddr_t)register_info; + + if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) { + printf("Reading register failed\n"); + goto _exit1; + } + + xge_print_register(register_info->offset, register_info->value); + status = EXIT_SUCCESS; + +_exit1: + free(register_info); + +_exit: + return status; +} + +/** + * xge_set_register + * Writes to a register specified offset + * + * @offset Offset of register from base address + * @value Value to write to + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getWriteReg(char *opt,char *offst,char *val) +xge_set_register(char *offset, char *value) { - bar0reg_t *reg; - - reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t)); - if( !reg ) - { - printf( "Allocating memory for writing register failed\n" ); - return EXIT_FAILURE; - } - strcpy(reg->option, opt); - sscanf(offst,"%x",®->offset); - sscanf(val,"%llx",®->value); - ifreqp.ifr_data = ( caddr_t )reg; - if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 ) - { - printf( "Writing register failed\n" ); - free(reg); - return EXIT_FAILURE; - } - free(reg); - return EXIT_SUCCESS; + xge_register_info_t *register_info = NULL; + int status = EXIT_FAILURE; + + register_info = + (xge_register_info_t *)malloc(sizeof(xge_register_info_t)); + if(!register_info) { + printf("Allocating memory for register info failed\n"); + goto _exit; + } + + strcpy(register_info->option, "-w"); + sscanf(offset, "%x", ®ister_info->offset); + sscanf(value, "%llx", ®ister_info->value); + + ifreqp.ifr_data = (caddr_t)register_info; + if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) { + printf("Writing register failed\n"); + goto _exit1; + } + status = EXIT_SUCCESS; + +_exit1: + free(register_info); + +_exit: + return status; } +/** + * xge_get_devstats + * Gets device statistics + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ +int +xge_get_devstats(void) +{ + char *dev_stats = NULL; + int status = EXIT_FAILURE; + + buffer_size = XGE_COUNT_INTRSTAT * sizeof(u32); + + dev_stats = (char *)malloc(buffer_size); + if(!dev_stats) { + printf("Allocating memory for device statistics failed\n"); + goto _exit; + } + + *dev_stats = XGE_QUERY_DEVSTATS; + ifreqp.ifr_data = (caddr_t)dev_stats; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting device statistics failed\n"); + goto _exit1; + } + + xge_print_devstats(dev_stats); + status = EXIT_SUCCESS; + +_exit1: + free(dev_stats); + +_exit: + return status; +} +/** + * xge_get_swstats + * Gets software statistics + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getIntrStats() +xge_get_swstats(void) { - void *intr_stat; - - bufferSize = XGE_COUNT_INTRSTAT * sizeof(u32); - - intr_stat = (void *) malloc(bufferSize); - if(!intr_stat) - { - printf("Allocating memory for intr_stat failed\n"); - return EXIT_FAILURE; - } - pAccess = (char *)intr_stat; - *pAccess = XGE_QUERY_INTRSTATS ; - ifreqp.ifr_data = 
(caddr_t)intr_stat; - - if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) - { - printf("Getting interrupt statistics failed\n"); - free(intr_stat); - return EXIT_FAILURE; - } - intr_stat = (char *)ifreqp.ifr_data; - - logIntrStats( intr_stat ); - free(intr_stat); - return EXIT_SUCCESS; + char *sw_stats = NULL; + int status = EXIT_FAILURE; + + buffer_size = XGE_COUNT_SWSTAT * sizeof(u32); + + sw_stats = (char *) malloc(buffer_size); + if(!sw_stats) { + printf("Allocating memory for software statistics failed\n"); + goto _exit; + } + + *sw_stats = XGE_QUERY_SWSTATS; + ifreqp.ifr_data = (caddr_t)sw_stats; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting software statistics failed\n"); + goto _exit1; + } + + xge_print_swstats(sw_stats); + status = EXIT_SUCCESS; + +_exit1: + free(sw_stats); + +_exit: + return status; } +/** + * xge_get_drv_version + * Gets driver version + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getTcodeStats() +xge_get_drv_version(void) { - void *tcode_stat; - - bufferSize = XGE_COUNT_TCODESTAT * sizeof(u32); - - tcode_stat = (void *) malloc(bufferSize); - if(!tcode_stat) - { - printf("Allocating memory for tcode_stat failed\n"); - return EXIT_FAILURE; - } - pAccess = (char *)tcode_stat; - *pAccess = XGE_QUERY_TCODE ; - ifreqp.ifr_data = (caddr_t)tcode_stat; - if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) - { - printf("Getting tcode statistics failed\n"); - free(tcode_stat); - return EXIT_FAILURE; - } - tcode_stat = (char *)ifreqp.ifr_data; - - logTcodeStats( tcode_stat ); - free(tcode_stat); - return EXIT_SUCCESS; + char *version = NULL; + int status = EXIT_FAILURE; + + buffer_size = 20; + version = (char *)malloc(buffer_size); + if(!version) { + printf("Allocating memory for driver version failed\n"); + goto _exit; + } + + *version = XGE_READ_VERSION; + ifreqp.ifr_data = ( caddr_t )version; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting driver version failed\n"); + goto _exit1; + } + xge_print_drv_version(version); + status = EXIT_SUCCESS; + +_exit1: + free(version); + +_exit: + return status; } +/** + * xge_get_drvstats + * Gets driver statistics + * + * Returns EXIT_SUCCESS or EXIT_FAILURE + */ int -getDriverVer() +xge_get_drvstats(void) { - char *version; - bufferSize = 20; - version = ( char * ) malloc ( bufferSize ); - if( !version ) - { - printf( "Allocating memory for getting driver version failed\n" ); - return EXIT_FAILURE; - } - pAccess = version; - *pAccess = XGE_READ_VERSION; - - ifreqp.ifr_data = ( caddr_t )version; - if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 ) - { - printf( "Getting driver version failed\n" ); - free( version ); - return EXIT_FAILURE; - } - logDriverInfo(version); - free( version ); - return EXIT_SUCCESS; + char *driver_stats = NULL; + int status = EXIT_FAILURE; -} + buffer_size = XGE_COUNT_DRIVERSTATS * sizeof(u64); + + driver_stats = (char *)malloc(buffer_size); + if(!driver_stats) { + printf("Allocating memory for driver statistics failed\n"); + goto _exit; + } + *driver_stats = XGE_QUERY_DRIVERSTATS; + ifreqp.ifr_data = (caddr_t)driver_stats; + + if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) { + printf("Getting Driver Statistics failed\n"); + goto _exit1; + } + + xge_print_drvstats(driver_stats); + status = EXIT_SUCCESS; + +_exit1: + free(driver_stats); + +_exit: + return status; +} diff --git a/tools/tools/nxge/xge_info.h b/tools/tools/nxge/xge_info.h index 4829e69..3448651 100644 --- a/tools/tools/nxge/xge_info.h +++ b/tools/tools/nxge/xge_info.h @@ -25,51 +25,46 @@ * * 
$FreeBSD$ */ -/****************************************** - * getinfo.h - * - * To get the Tx, Rx, PCI, Interrupt statistics, - * PCI configuration space,device configuration - * and bar0 register values - ******************************************/ + #ifndef XGE_CMN_H #include "xge_cmn.h" #endif #define XGE_QUERY_STATS 1 #define XGE_QUERY_PCICONF 2 -#define XGE_QUERY_INTRSTATS 3 +#define XGE_QUERY_DEVSTATS 3 #define XGE_QUERY_DEVCONF 4 #define XGE_READ_VERSION 5 -#define XGE_QUERY_TCODE 6 -#define XGE_SET_BUFFER_MODE_1 7 -#define XGE_SET_BUFFER_MODE_2 8 -#define XGE_SET_BUFFER_MODE_3 9 +#define XGE_QUERY_SWSTATS 6 +#define XGE_QUERY_DRIVERSTATS 7 +#define XGE_SET_BUFFER_MODE_1 8 +#define XGE_SET_BUFFER_MODE_2 9 #define XGE_SET_BUFFER_MODE_5 10 #define XGE_QUERY_BUFFER_MODE 11 /* Function declerations */ -int getPciConf(); -int getDevConf(); -int getStats(); -int getRegInfo(); -int getIntrStats(); -int getTcodeStats(); -int getReadReg(char *,char *); -int getWriteReg(char *,char *,char *); -int getDriverVersion(); -int getBufMode(); -int changeBufMode(char *); - -void logStats(void *,unsigned short); -void logPciConf(void *); -void logDevConf(void *); -void logRegInfo(void *); -void logReadReg(u64,u64); -void logIntrStats(void *); -void logTcodeStats(void *); -void logDriverInfo(char *); +int xge_get_pciconf(void); +int xge_get_devconf(void); +int xge_get_hwstats(void); +int xge_get_registers(void); +int xge_get_devstats(void); +int xge_get_swstats(void); +int xge_get_drvstats(void); +int xge_get_register(char *); +int xge_set_register(char *,char *); +int xge_get_drv_version(void); +int xge_get_buffer_mode(void); +int xge_change_buffer_mode(char *); +void xge_print_hwstats(void *,unsigned short); +void xge_print_pciconf(void *); +void xge_print_devconf(void *); +void xge_print_registers(void *); +void xge_print_register(u64,u64); +void xge_print_devstats(void *); +void xge_print_swstats(void *); +void xge_print_drvstats(void *); +void xge_print_drv_version(char *); extern xge_pci_bar0_t regInfo[]; extern xge_pci_config_t pciconfInfo[]; @@ -77,7 +72,8 @@ extern xge_stats_hw_info_t statsInfo[]; extern xge_device_config_t devconfInfo[]; extern xge_stats_intr_info_t intrInfo[]; extern xge_stats_tcode_info_t tcodeInfo[]; +extern xge_stats_driver_info_t driverInfo[]; + struct ifreq ifreqp; -int sockfd, indexer, bufferSize = 0; -char *pAccess; +int sockfd, indexer, buffer_size = 0; diff --git a/tools/tools/nxge/xge_log.c b/tools/tools/nxge/xge_log.c index 5f8b17a..e08ad14 100644 --- a/tools/tools/nxge/xge_log.c +++ b/tools/tools/nxge/xge_log.c @@ -25,222 +25,297 @@ * * $FreeBSD$ */ + #include "xge_log.h" +/** + * xge_print_hwstats + * Prints/logs hardware statistics + * + * @hw_stats Hardware statistics + * @device_id Device ID + */ void -logStats( void *hwStats, unsigned short device_id ) +xge_print_hwstats(void *hw_stats, unsigned short device_id) { - int index = 0; - int count = 0; - count = XGE_COUNT_STATS - ((device_id == DEVICE_ID_XFRAME_II) ? 
0 : XGE_COUNT_EXTENDED_STATS); - fdAll = fopen( "stats.log", "w+" ); - if( fdAll ) - { - XGE_PRINT_HEADER_STATS(fdAll); - - for( index = 0; index < count ; index++ ) - { - switch( statsInfo[index].type ) - { - case 2: - { - statsInfo[index].value = - *( ( u16 * )( ( unsigned char * ) hwStats + - GET_OFFSET_STATS( index ) ) ); - break; - } - case 4: - { - statsInfo[index].value = - *( ( u32 * )( ( unsigned char * ) hwStats + - GET_OFFSET_STATS( index ) ) ); - break; - } - case 8: - { - statsInfo[index].value = - *( ( u64 * )( ( unsigned char * ) hwStats + - GET_OFFSET_STATS( index ) ) ); - break; - } - } - - XGE_PRINT_STATS(fdAll,(const char *) statsInfo[index].name, - statsInfo[index].value); - } - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + int index = 0, count = 0; + + count = XGE_COUNT_STATS - + ((device_id == DEVICE_ID_XFRAME_II) ? 0 : XGE_COUNT_EXTENDED_STATS); + + fdAll = fopen("stats.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_STATS(fdAll); + for(index = 0; index < count ; index++) { + switch(statsInfo[index].type) { + case 2: + statsInfo[index].value = + *((u16 *)((unsigned char *)hw_stats + + GET_OFFSET_STATS(index))); + break; + case 4: + statsInfo[index].value = + *((u32 *)((unsigned char *) hw_stats + + GET_OFFSET_STATS(index))); + break; + case 8: + statsInfo[index].value = + *((u64 *)((unsigned char *)hw_stats + + GET_OFFSET_STATS(index))); + break; + } + + XGE_PRINT_STATS(fdAll,(const char *) statsInfo[index].name, + statsInfo[index].value); + } + XGE_PRINT_LINE(fdAll); + fclose(fdAll); +_exit: + return; } -void -logPciConf( void * pciConf ) +/** + * xge_print_pciconf + * Prints/logs PCI configuration space + * + * @pci_conf PCI Configuration + */ +void +xge_print_pciconf(void * pci_conf) { - int index = 0; - - fdAll = fopen( "pciconf.log", "w+" ); - if( fdAll ) - { - XGE_PRINT_HEADER_PCICONF(fdAll); - - for( index = 0; index < XGE_COUNT_PCICONF; index++ ) - { - pciconfInfo[index].value = - *( ( u16 * )( ( unsigned char * )pciConf + - GET_OFFSET_PCICONF(index) ) ); - XGE_PRINT_PCICONF(fdAll,(const char *) pciconfInfo[index].name, - GET_OFFSET_PCICONF(index), pciconfInfo[index].value); - } - - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + int index = 0; + + fdAll = fopen("pciconf.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_PCICONF(fdAll); + for(index = 0; index < XGE_COUNT_PCICONF; index++) { + pciconfInfo[index].value = *((u16 *)((unsigned char *)pci_conf + + GET_OFFSET_PCICONF(index))); + XGE_PRINT_PCICONF(fdAll,(const char *) pciconfInfo[index].name, + GET_OFFSET_PCICONF(index), pciconfInfo[index].value); + } + + XGE_PRINT_LINE(fdAll); + fclose(fdAll); + +_exit: + return; } +/** + * xge_print_devconf + * Prints/logs Device Configuration + * + * @dev_conf Device Configuration + */ void -logDevConf( void * devConf ) +xge_print_devconf(void * dev_conf) { - int index = 0; - - fdAll = fopen( "devconf.log", "w+" ); - if( fdAll ) - { - XGE_PRINT_HEADER_DEVCONF(fdAll); - - for( index = 0; index < XGE_COUNT_DEVCONF; index++ ) - { - devconfInfo[index].value = - *( ( u32 * )( ( unsigned char * )devConf + - ( index * ( sizeof( int ) ) ) ) ); - XGE_PRINT_DEVCONF(fdAll,(const char *) devconfInfo[index].name, - devconfInfo[index].value); - } - - XGE_PRINT_LINE(fdAll); - fclose( fdAll ); - } + int index = 0; + + fdAll = fopen("devconf.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_DEVCONF(fdAll); + + for(index = 0; index < XGE_COUNT_DEVCONF; index++) { + devconfInfo[index].value = *((u32 *)((unsigned char *)dev_conf + + (index * 
(sizeof(int))))); + XGE_PRINT_DEVCONF(fdAll,(const char *) devconfInfo[index].name, + devconfInfo[index].value); + } + + XGE_PRINT_LINE(fdAll); + fclose( fdAll ); + +_exit: + return; } +/** + * xge_print_registers + * Prints/logs Register values + * + * @registers Register values + */ void -logRegInfo( void * regBuffer ) +xge_print_registers(void * registers) { - int index = 0; - - fdAll = fopen( "reginfo.log", "w+" ); - if( fdAll ) - { - XGE_PRINT_HEADER_REGS(fdAll); - - for( index = 0; index < XGE_COUNT_REGS; index++ ) - { - regInfo[index].value = - *( ( u64 * )( ( unsigned char * )regBuffer + - regInfo[index].offset ) ); - XGE_PRINT_REGS(fdAll,(const char *) regInfo[index].name, - regInfo[index].offset, regInfo[index].value); - } - - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + int index = 0; + + fdAll = fopen("reginfo.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_REGS(fdAll); + + for(index = 0; index < XGE_COUNT_REGS; index++) { + regInfo[index].value = *((u64 *)((unsigned char *)registers + + regInfo[index].offset)); + XGE_PRINT_REGS(fdAll,(const char *) regInfo[index].name, + regInfo[index].offset, regInfo[index].value); + } + + XGE_PRINT_LINE(fdAll); + fclose(fdAll); +_exit: + return; } + +/** + * xge_print_register + * Prints/logs a register value + * + * @offset Offset of the register + * @temp??? + */ void -logReadReg(u64 offset,u64 temp) +xge_print_register(u64 offset, u64 value) { - int index=0; - - fdAll = fopen( "readreg.log", "w+"); - if( fdAll ) - { + int index = 0; + + fdAll = fopen("readreg.log", "w+"); + if(!fdAll) + goto _exit; + XGE_PRINT_READ_HEADER_REGS(fdAll); - - regInfo[index].offset = offset ; + regInfo[index].offset = offset; + regInfo[index].value = value; + printf("0x%.8X\t0x%.16llX\n", regInfo[index].offset, + regInfo[index].value); + XGE_PRINT_LINE(fdAll); + fclose(fdAll); + +_exit: + return; +} + +/** + * xge_print_devstats + * Prints Device Statistics + * + * @dev_stats Device Statistics + */ +void +xge_print_devstats(void *dev_stats) +{ + int index = 0; + + fdAll = fopen("intrstats.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_STATS(fdAll); + for(index = 0; index < XGE_COUNT_INTRSTAT; index++) { + intrInfo[index].value = *((u32 *)((unsigned char *)dev_stats + + (index * (sizeof(u32))))); + XGE_PRINT_STATS(fdAll,(const char *) intrInfo[index].name, + intrInfo[index].value); + } - regInfo[index].value = temp ; - - printf("0x%.8X\t0x%.16llX\n",regInfo[index].offset, regInfo[index].value); - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + fclose(fdAll); +_exit: + return; } + +/** + * xge_print_swstats + * Prints/logs Software Statistics + * + * @sw_stats Software statistics + */ void -logIntrStats( void * intrStats ) +xge_print_swstats(void * sw_stats) { - int index = 0; - - fdAll = fopen( "intrstats.log", "w+" ); - if(fdAll) - { - XGE_PRINT_HEADER_STATS(fdAll); - - for( index = 0; index < XGE_COUNT_INTRSTAT; index++ ) - { - intrInfo[index].value = - *( ( u32 * )( ( unsigned char * )intrStats + - ( index * ( sizeof( u32 ) ) ) ) ); - XGE_PRINT_STATS(fdAll,(const char *) intrInfo[index].name, - intrInfo[index].value); - } - - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + int index = 0; + + fdAll = fopen("tcodestats.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_STATS(fdAll); + for(index = 0; index < XGE_COUNT_SWSTAT; index++) { + if(!(tcodeInfo[index].flag)) { + switch(tcodeInfo[index].type) { + case 2: + tcodeInfo[index].value = + *((u16 *)((unsigned char *)sw_stats + + (index * (sizeof(u16))))); + break; + case 4: + 
tcodeInfo[index].value = + *((u32 *)((unsigned char *)sw_stats + + (index * (sizeof(u32))))); + break; + } + XGE_PRINT_STATS(fdAll,(const char *) tcodeInfo[index].name, + tcodeInfo[index].value); + } + } + + XGE_PRINT_LINE(fdAll); + fclose(fdAll); + +_exit: + return; } +/** + * xge_print_drv_version + * Prints/logs driver version + * + * @version Driver version + */ void -logTcodeStats( void * tcodeStats ) +xge_print_drv_version(char *version) { - int index = 0; - - fdAll = fopen( "tcodestats.log", "w+" ); - if(fdAll) - { - XGE_PRINT_HEADER_STATS(fdAll); - - for( index = 0; index < XGE_COUNT_TCODESTAT; index++ ) - { - if(!(tcodeInfo[index].flag)) - { - switch( tcodeInfo[index].type ) - { - case 2: - { - tcodeInfo[index].value = - *( ( u16 * )( ( unsigned char * )tcodeStats + - ( index * ( sizeof( u16 ) ) ) ) ); - break; - } - case 4: - { - tcodeInfo[index].value = - *( ( u32 * )( ( unsigned char * )tcodeStats + - ( index * ( sizeof( u32 ) ) ) ) ); - break; - } - } - - XGE_PRINT_STATS(fdAll,(const char *) tcodeInfo[index].name, - tcodeInfo[index].value); - } - } - - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } + fdAll = fopen("driverinfo.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_LINE(fdAll); + printf("Driver Version: %s\n", version); + XGE_PRINT_LINE(fdAll); + fclose(fdAll); + +_exit: + return; } +/** + * xge_print_drvstats + * Prints/logs Driver Statistics + * + * @driver_stats Driver Statistics + */ void -logDriverInfo( char *version ) +xge_print_drvstats(void * driver_stats) { - fdAll = fopen( "driverinfo.log", "w+"); - if (fdAll) - { - XGE_PRINT_LINE(fdAll); - printf("DRIVER VERSION : %s\n",version); - XGE_PRINT_LINE(fdAll); - fclose(fdAll); - } - + int index = 0; + + fdAll = fopen("driver_stats.log", "w+"); + if(!fdAll) + goto _exit; + + XGE_PRINT_HEADER_STATS(fdAll); + + for(index = 0; index < XGE_COUNT_DRIVERSTATS; index++) { + driverInfo[index].value = *((u64 *)((unsigned char *)driver_stats + + (index * (sizeof(u64))))); + XGE_PRINT_STATS(fdAll,(const char *) driverInfo[index].name, + driverInfo[index].value); + } + + XGE_PRINT_LINE(fdAll); + fclose( fdAll ); + +_exit: + return; } diff --git a/tools/tools/nxge/xge_log.h b/tools/tools/nxge/xge_log.h index dc33d09..b8bfdec 100644 --- a/tools/tools/nxge/xge_log.h +++ b/tools/tools/nxge/xge_log.h @@ -25,6 +25,7 @@ * * $FreeBSD$ */ + #ifndef XGE_CMN_H #include "xge_cmn.h" #endif @@ -42,2532 +43,2550 @@ FILE *fdAll; int spaceCount = 0; -char line[68] = \ - "==================================================================="; - -#define XGE_PRINT_HEADER(fd, maxSize, pOffset) \ -{ \ - XGE_PRINT_LINE(fd); \ - spaceCount = (maxSize) - 9; \ - if( (pOffset) == 1 ) \ - { \ - XGE_PRINT(fd, "PARAMETER%*s\tOFFSET\tVALUE", spaceCount, " "); \ - } \ - else \ - { \ - XGE_PRINT(fd, "PARAMETER%*s\tVALUE", spaceCount, " "); \ - } \ - XGE_PRINT_LINE(fd); \ +char line[68] = \ + "==================================================================="; + +#define XGE_PRINT_HEADER(fd, maxSize, pOffset) { \ + XGE_PRINT_LINE(fd); \ + spaceCount = (maxSize) - 9; \ + if((pOffset) == 1) { \ + XGE_PRINT(fd, "PARAMETER%*s\tOFFSET\tVALUE", spaceCount, " "); \ + } \ + else { \ + XGE_PRINT(fd, "PARAMETER%*s\tVALUE", spaceCount, " "); \ + } \ + XGE_PRINT_LINE(fd); \ } -#define XGE_PRINT_READ_HEADER(fd, maxSize, pOffset) \ -{ \ - XGE_PRINT_LINE(fd); \ - spaceCount = (maxSize) - 9; \ - if( (pOffset) == 1 ) \ - { \ - XGE_PRINT(fd, "OFFSET\t\tVALUE", spaceCount, " "); \ - } \ - else \ - { \ - XGE_PRINT(fd, "VALUE", spaceCount, " "); \ - } \ - XGE_PRINT_LINE(fd); \ 
+ +#define XGE_PRINT_READ_HEADER(fd, maxSize, pOffset) { \ + XGE_PRINT_LINE(fd); \ + spaceCount = (maxSize) - 9; \ + if((pOffset) == 1) { \ + XGE_PRINT(fd, "OFFSET\t\tVALUE", spaceCount, " "); \ + } \ + else { \ + XGE_PRINT(fd, "VALUE", spaceCount, " "); \ + } \ + XGE_PRINT_LINE(fd); \ } -#define XGE_PRINT_HEADER_REGS(fd) \ - XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_REGS, 1); -#define XGE_PRINT_READ_HEADER_REGS(fd) \ - XGE_PRINT_READ_HEADER(fd, XGE_STR_MAX_LEN_REGS, 1); -#define XGE_PRINT_HEADER_PCICONF(fd) \ - XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_PCICONF, 1); -#define XGE_PRINT_HEADER_DEVCONF(fd) \ - XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_DEVCONF, 0); -#define XGE_PRINT_HEADER_STATS(fd) \ - XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_STATS, 0); - -#define XGE_PRINT_REGS(fd, parameter, offset, value) \ -{ \ - spaceCount = XGE_STR_MAX_LEN_REGS - strlen(parameter); \ - XGE_PRINT(fd, XGE_FORMAT_REGS, (parameter), (spaceCount), " ", (offset), \ - (value)); \ +#define XGE_PRINT_HEADER_REGS(fd) \ + XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_REGS, 1); +#define XGE_PRINT_READ_HEADER_REGS(fd) \ + XGE_PRINT_READ_HEADER(fd, XGE_STR_MAX_LEN_REGS, 1); +#define XGE_PRINT_HEADER_PCICONF(fd) \ + XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_PCICONF, 1); +#define XGE_PRINT_HEADER_DEVCONF(fd) \ + XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_DEVCONF, 0); +#define XGE_PRINT_HEADER_STATS(fd) \ + XGE_PRINT_HEADER(fd, XGE_STR_MAX_LEN_STATS, 0); + +#define XGE_PRINT_REGS(fd, parameter, offset, value) { \ + spaceCount = XGE_STR_MAX_LEN_REGS - strlen(parameter); \ + XGE_PRINT(fd, XGE_FORMAT_REGS, (parameter), (spaceCount), " ", \ + (offset), (value)); \ } -#define XGE_PRINT_PCICONF(fd, parameter, offset, value) \ -{ \ - spaceCount = XGE_STR_MAX_LEN_PCICONF - strlen(parameter); \ - XGE_PRINT(fd, XGE_FORMAT_PCICONF, (parameter), (spaceCount), " ", \ - (offset), (value)); \ +#define XGE_PRINT_PCICONF(fd, parameter, offset, value) { \ + spaceCount = XGE_STR_MAX_LEN_PCICONF - strlen(parameter); \ + XGE_PRINT(fd, XGE_FORMAT_PCICONF, (parameter), (spaceCount), " ", \ + (offset), (value)); \ } -#define XGE_PRINT_DEVCONF(fd, parameter, value) \ -{ \ - spaceCount = XGE_STR_MAX_LEN_DEVCONF - strlen(parameter); \ - XGE_PRINT(fd, XGE_FORMAT_DEVCONF, (parameter), (spaceCount), " ", \ - (value)); \ +#define XGE_PRINT_DEVCONF(fd, parameter, value) { \ + spaceCount = XGE_STR_MAX_LEN_DEVCONF - strlen(parameter); \ + XGE_PRINT(fd, XGE_FORMAT_DEVCONF, (parameter), (spaceCount), " ", \ + (value)); \ } -#define XGE_PRINT_STATS(fd, parameter, value) \ -{ \ - spaceCount = XGE_STR_MAX_LEN_STATS - strlen(parameter); \ - XGE_PRINT(fd, XGE_FORMAT_STATS, (parameter), (spaceCount), " ", (value)); \ +#define XGE_PRINT_STATS(fd, parameter, value) { \ + spaceCount = XGE_STR_MAX_LEN_STATS - strlen(parameter); \ + XGE_PRINT(fd, XGE_FORMAT_STATS, (parameter), (spaceCount), " ", \ + (value)); \ } -xge_pci_bar0_t regInfo[] = -{ - {"GENERAL_INT_STATUS", 0x0000, 0, 0}, - {"GENERAL_INT_MASK", 0x0008, 0, 0}, - {"SW_RESET", 0x0100, 0, 0}, - {"ADAPTER_STATUS", 0x0108, 0, 0}, - {"ADAPTER_CONTROL", 0x0110, 0, 0}, - {"SERR_SOURCE", 0x0118, 0, 0}, - {"PCI_MODE", 0x0120, 0, 1}, - {"RIC_STATUS", 0x0160, 0, 1}, - {"MBIST_STATUS", 0x0558, 0, 1}, - {"PIC_INT_STATUS", 0x0800, 0, 0}, - {"PIC_INT_MASK", 0x0808, 0, 0}, - {"TxPIC_INT_REG", 0x0810, 0, 0}, - {"TxPIC_INT_MASK", 0x0818, 0, 0}, - {"TxPIC_INT_ALARM", 0x0820, 0, 0}, - {"RxPIC_INT_REG", 0x0828, 0, 0}, - {"RxPIC_INT_MASK", 0x0830, 0, 0}, - {"RxPIC_INT_ALARM", 0x0838, 0, 0}, - {"FLSH_INT_REG", 0x0840, 0, 0}, - {"FLSH_INT_MASK", 0x0848, 0, 0}, - 
{"FLSH_INT_ALARM", 0x0850, 0, 0}, - {"MDIO_INT_REG", 0x0858, 0, 0}, - {"MDIO_INT_MASK", 0x0860, 0, 0}, - {"MDIO_INT_ALARM", 0x0868, 0, 0}, - {"IIC_INT_REG", 0x0870, 0, 0}, - {"IIC_INT_MASK", 0x0878, 0, 0}, - {"IIC_INT_ALARM", 0x0880, 0, 0}, - {"MSI_PENDING_REG", 0x0888, 0, 1}, - {"GPIO_INT_REG", 0x0890, 0, 0}, - {"GPIO_INT_MASK", 0x0898, 0, 0}, - {"GPIO_INT_ALARM", 0x08A0, 0, 0}, - {"MISC_INT_REG", 0x0890, 0, 1}, - {"MISC_INT_MASK", 0x0898, 0, 1}, - {"MISC_INT_ALARM", 0x08A0, 0, 1}, - {"MSI_TRIGGERED_REG", 0x08A8, 0, 1}, - {"XFP_GPIO_INT_REG", 0x08B0, 0, 1}, - {"XFP_GPIO_INT_MASK", 0x08B8, 0, 1}, - {"XFP_GPIO_ALARM", 0x08C0, 0, 1}, - {"TX_TRAFFIC_INT", 0x08E0, 0, 0}, - {"TX_TRAFFIC_INT_MASK", 0x08E8, 0, 0}, - {"RX_TRAFFIC_INT", 0x08F0, 0, 0}, - {"RX_TRAFFIC_INT_MASK", 0x08F8, 0, 0}, - {"PIC_CONTROL", 0x0900, 0, 0}, - {"SWAPPER_CTRL", 0x0908, 0, 0}, - {"PIF_RD_SWAPPER_Fb", 0x0910, 0, 0}, - {"SCHEDULED_INT_CTRL", 0x0918, 0, 0}, - {"TxReqTimeOut", 0x0920, 0, 0}, - {"STATSReqTimeOut", 0x0928, 0, 0}, - {"Read_Retry_Delay", 0x0930, 0, 0}, - {"Read_Retry_Acceleration", 0x0938, 0, 0}, - {"Write_Retry_Delay", 0x0940, 0, 0}, - {"Write_Retry_Acceleration", 0x0948, 0, 0}, - {"XMSI_Control", 0x0950, 0, 0}, - {"XMSI_Access", 0x0958, 0, 0}, - {"XMSI_Address", 0x0960, 0, 0}, - {"XMSI_Data", 0x0968, 0, 0}, - {"Rx_MAT", 0x0970, 0, 0}, - {"Tx_MAT0_7", 0x0980, 0, 0}, - {"Tx_MAT8_15", 0x0988, 0, 0}, - {"Tx_MAT16_23", 0x0990, 0, 0}, - {"Tx_MAT24_31", 0x0998, 0, 0}, - {"Tx_MAT32_39", 0x09A0, 0, 0}, - {"Tx_MAT40_47", 0x09A8, 0, 0}, - {"Tx_MAT48_55", 0x09B0, 0, 0}, - {"Tx_MAT56_63", 0x09B8, 0, 0}, - {"XMSI_MASK_REG", 0x09C0, 0, 1}, - {"STAT_BYTE_CNT", 0x09C8, 0, 1}, - {"STAT_CFG", 0x09D0, 0, 0}, - {"STAT_ADDR", 0x09D8, 0, 0}, - {"MDIO_CONTROL", 0x09E0, 0, 0}, - {"DTX_CONTROL", 0x09E8, 0, 0}, - {"I2C_CONTROL", 0x09F0, 0, 0}, - {"GPIO_CONTROL", 0x09F8, 0, 0}, - {"BEACON_CONTROL", 0x09F8, 0, 1}, - {"HOST_SCRATCH", 0x0A00, 0, 0}, - {"MISC_CONTROL", 0x0A00, 0, 1}, - {"XFP_CONTROL", 0x0A08, 0, 1}, - {"GPIO_CONTROL", 0x0A10, 0, 1}, - {"TxFIFO_DW_MASK", 0x0A18, 0, 1}, - {"SPLIT_TABLE_LINE_NO", 0x0A20, 0, 1}, - {"SC_TIMEOUT", 0x0A28, 0, 1}, - {"PIC_CONTROL_2", 0x0A30, 0, 1}, - {"INI_DPERR_CTRL", 0x0A38, 0, 1}, - {"WREQ_SPLIT_MASK", 0x0A40, 0, 1}, - {"QW_PER_RXD", 0x0A48, 0, 1}, - {"PIC_STATUS", 0x0B00, 0, 1}, - {"TXP_STATUS", 0x0B08, 0, 1}, - {"TXP_ERR_CONTEXT", 0x0B10, 0, 1}, - {"SPDM_BIR_OFFSET", 0x0B18, 0, 1}, - {"SPDM_OVERWRITE", 0x0B20, 0, 1}, - {"CFG_ADDR_ON_DPERR", 0x0B28, 0, 1}, - {"PIF_ADDR_ON_DPERR", 0x0B30, 0, 1}, - {"TAGS_IN_USE", 0x0B38, 0, 1}, - {"RD_REQ_TYPES", 0x0B40, 0, 1}, - {"SPLIT_TABLE_LINE", 0x0B48, 0, 1}, - {"UNXP_SPLIT_ADD_PH", 0x0B50, 0, 1}, - {"UNEXP_SPLIT_ATTR_PH", 0x0B58, 0, 1}, - {"SPLIT_MESSAGE", 0x0B60, 0, 1}, - {"SPDM_STRUCTURE", 0x0B68, 0, 1}, - {"TXDW_PTR_CNT_0", 0x0B70, 0, 1}, - {"TXDW_PTR_CNT_1", 0x0B78, 0, 1}, - {"TXDW_PTR_CNT_2", 0x0B80, 0, 1}, - {"TXDW_PTR_CNT_3", 0x0B88, 0, 1}, - {"TXDW_PTR_CNT_4", 0x0B90, 0, 1}, - {"TXDW_PTR_CNT_5", 0x0B98, 0, 1}, - {"TXDW_PTR_CNT_6", 0x0BA0, 0, 1}, - {"TXDW_PTR_CNT_7", 0x0BA8, 0, 1}, - {"RXDW_CNT_RING_0", 0x0BB0, 0, 1}, - {"RXDW_CNT_RING_1", 0x0BB8, 0, 1}, - {"RXDW_CNT_RING_2", 0x0BC0, 0, 1}, - {"RXDW_CNT_RING_3", 0x0BC8, 0, 1}, - {"RXDW_CNT_RING_4", 0x0BD0, 0, 1}, - {"RXDW_CNT_RING_5", 0x0BD8, 0, 1}, - {"RXDW_CNT_RING_6", 0x0BE0, 0, 1}, - {"RXDW_CNT_RING_7", 0x0BE8, 0, 1}, - {"TXDMA_INT_STATUS", 0x1000, 0, 0}, - {"TXDMA_INT_MASK", 0x1008, 0, 0}, - {"PFC_ERR_REG", 0x1010, 0, 0}, - {"PFC_ERR_MASK", 0x1018, 0, 0}, - {"PFC_ERR_ALARM", 0x1020, 0, 0}, - 
{"TDA_ERR_REG", 0x1028, 0, 0}, - {"TDA_ERR_MASK", 0x1030, 0, 0}, - {"TDA_ERR_ALARM", 0x1038, 0, 0}, - {"PCC_ERR_REG", 0x1040, 0, 0}, - {"PCC_ERR_MASK", 0x1048, 0, 0}, - {"PCC_ERR_ALARM", 0x1050, 0, 0}, - {"TTI_ERR_REG", 0x1058, 0, 0}, - {"TTI_ERR_MASK", 0x1060, 0, 0}, - {"TTI_ERR_ALARM", 0x1068, 0, 0}, - {"LSO_ERR_REG", 0x1070, 0, 0}, - {"LSO_ERR_MASK", 0x1078, 0, 0}, - {"LSO_ERR_ALARM", 0x1080, 0, 0}, - {"TPA_ERR_REG", 0x1088, 0, 0}, - {"TPA_ERR_MASK", 0x1090, 0, 0}, - {"TPA_ERR_ALARM", 0x1098, 0, 0}, - {"SM_ERR_REG", 0x10A0, 0, 0}, - {"SM_ERR_MASK", 0x10A8, 0, 0}, - {"SM_ERR_ALARM", 0x10B0, 0, 0}, - {"TX_FIFO_PARTITION_0", 0x1108, 0, 0}, - {"TX_FIFO_PARTITION_1", 0x1110, 0, 0}, - {"TX_FIFO_PARTITION_2", 0x1118, 0, 0}, - {"TX_FIFO_PARTITION_3", 0x1120, 0, 0}, - {"TX_W_ROUND_ROBIN_0", 0x1128, 0, 0}, - {"TX_W_ROUND_ROBIN_1", 0x1130, 0, 0}, - {"TX_W_ROUND_ROBIN_2", 0x1138, 0, 0}, - {"TX_W_ROUND_ROBIN_3", 0x1140, 0, 0}, - {"TX_W_ROUND_ROBIN_4", 0x1148, 0, 0}, - {"TTI_COMMAND_MEM", 0x1150, 0, 0}, - {"TTI_DATA1_MEM", 0x1158, 0, 0}, - {"TTI_DATA2_MEM", 0x1160, 0, 0}, - {"TX_PA_CFG", 0x1168, 0, 0}, - {"PCC_ENABLE", 0x1170, 0, 0}, - {"PFC_MONITOR_0", 0x1178, 0, 1}, - {"PFC_MONITOR_1", 0x1180, 0, 1}, - {"PFC_MONITOR_2", 0x1188, 0, 1}, - {"PFC_MONITOR_3", 0x1190, 0, 1}, - {"TXD_OWNERSHIP_CTRL", 0x1198, 0, 1}, - {"PFC_READ_CNTRL", 0x11A0, 0, 1}, - {"PFC_READ_DATA", 0x11A8, 0, 1}, - {"RXDMA_INT_STATUS", 0x1800, 0, 0}, - {"RXDMA_INT_MASK", 0x1808, 0, 0}, - {"RDA_ERR_REG", 0x1810, 0, 0}, - {"RDA_ERR_MASK", 0x1818, 0, 0}, - {"RDA_ERR_ALARM", 0x1820, 0, 0}, - {"RC_ERR_REG", 0x1828, 0, 0}, - {"RC_ERR_MASK", 0x1830, 0, 0}, - {"RC_ERR_ALARM", 0x1838, 0, 0}, - {"PRC_PCIX_ERR_REG", 0x1840, 0, 0}, - {"PRC_PCIX_ERR_MASK", 0x1848, 0, 0}, - {"PRC_PCIX_ERR_ALARM", 0x1850, 0, 0}, - {"RPA_ERR_REG", 0x1858, 0, 0}, - {"RPA_ERR_MASK", 0x1860, 0, 0}, - {"RPA_ERR_ALARM", 0x1868, 0, 0}, - {"RTI_ERR_REG", 0x1870, 0, 0}, - {"RTI_ERR_MASK", 0x1878, 0, 0}, - {"RTI_ERR_ALARM", 0x1880, 0, 0}, - {"RX_QUEUE_PRIORITY", 0x1900, 0, 0}, - {"RX_W_ROUND_ROBIN_0", 0x1908, 0, 0}, - {"RX_W_ROUND_ROBIN_1", 0x1910, 0, 0}, - {"RX_W_ROUND_ROBIN_2", 0x1918, 0, 0}, - {"RX_W_ROUND_ROBIN_3", 0x1920, 0, 0}, - {"RX_W_ROUND_ROBIN_4", 0x1928, 0, 0}, - {"PRC_RXD0_0", 0x1930, 0, 0}, - {"PRC_RXD0_1", 0x1938, 0, 0}, - {"PRC_RXD0_2", 0x1940, 0, 0}, - {"PRC_RXD0_3", 0x1948, 0, 0}, - {"PRC_RXD0_4", 0x1950, 0, 0}, - {"PRC_RXD0_5", 0x1958, 0, 0}, - {"PRC_RXD0_6", 0x1960, 0, 0}, - {"PRC_RXD0_7", 0x1968, 0, 0}, - {"PRC_CTRL_0", 0x1970, 0, 0}, - {"PRC_CTRL_1", 0x1978, 0, 0}, - {"PRC_CTRL_2", 0x1980, 0, 0}, - {"PRC_CTRL_3", 0x1988, 0, 0}, - {"PRC_CTRL_4", 0x1990, 0, 0}, - {"PRC_CTRL_5", 0x1998, 0, 0}, - {"PRC_CTRL_6", 0x19A0, 0, 0}, - {"PRC_CTRL_7", 0x19A8, 0, 0}, - {"PRC_ALARM_ACTION", 0x19B0, 0, 0}, - {"RTI_COMMAND_MEM", 0x19B8, 0, 0}, - {"RTI_DATA1_MEM", 0x19C0, 0, 0}, - {"RTI_DATA2_MEM", 0x19C8, 0, 0}, - {"RX_PA_CFG", 0x19D0, 0, 0}, - {"RING_BUMP_COUNTER1", 0x19E0, 0, 1}, - {"RING_BUMP_COUNTER2", 0x19E8, 0, 1}, - {"MAC_INT_STATUS", 0x2000, 0, 0}, - {"MAC_INT_MASK", 0x2008, 0, 0}, - {"MAC_TMAC_ERR_REG", 0x2010, 0, 0}, - {"MAC_TMAC_ERR_MASK", 0x2018, 0, 0}, - {"MAC_TMAC_ERR_ALARM", 0x2020, 0, 0}, - {"MAC_RMAC_ERR_REG", 0x2028, 0, 0}, - {"MAC_RMAC_ERR_MASK", 0x2030, 0, 0}, - {"MAC_RMAC_ERR_ALARM", 0x2038, 0, 0}, - {"MAC_CFG", 0x2100, 0, 0}, - {"TMAC_AVG_IPG", 0x2108, 0, 0}, - {"RMAC_MAX_PYLD_LEN", 0x2110, 0, 0}, - {"RMAC_ERR_CFG", 0x2118, 0, 0}, - {"RMAC_CFG_KEY", 0x2120, 0, 0}, - {"RMAC_ADDR_CMD_MEM", 0x2128, 0, 0}, - {"RMAC_ADDR_DATA0_MEM", 0x2130, 0, 0}, - 
{"RMAC_ADDR_DATA1_MEM", 0x2138, 0, 0}, - {"TMAC_IPG_CFG", 0x2148, 0, 0}, - {"RMAC_PAUSE_CFG", 0x2150, 0, 0}, - {"RMAC_RED_CFG", 0x2158, 0, 0}, - {"RMAC_RED_RATE_Q0Q3", 0x2160, 0, 0}, - {"RMAC_RED_RATE_Q4Q7", 0x2168, 0, 0}, - {"MAC_LINK_UTIL", 0x2170, 0, 0}, - {"RMAC_INVALID_IPG", 0x2178, 0, 0}, - {"RTS_FRM_LEN_0", 0x2180, 0, 0}, - {"RTS_FRM_LEN_1", 0x2188, 0, 0}, - {"RTS_FRM_LEN_2", 0x2190, 0, 0}, - {"RTS_FRM_LEN_3", 0x2198, 0, 0}, - {"RTS_FRM_LEN_4", 0x21A0, 0, 0}, - {"RTS_FRM_LEN_5", 0x21A8, 0, 0}, - {"RTS_FRM_LEN_6", 0x21B0, 0, 0}, - {"RTS_FRM_LEN_7", 0x21B8, 0, 0}, - {"RTS_QOS_STEERING", 0x21C0, 0, 0}, - {"RTS_DIX_MAP_0", 0x21C8, 0, 0}, - {"RTS_DIX_MAP_1", 0x21D0, 0, 0}, - {"RTS_DIX_MAP_2", 0x21D8, 0, 0}, - {"RTS_DIX_MAP_3", 0x21E0, 0, 0}, - {"RTS_Q_ALTERNATES", 0x21E8, 0, 0}, - {"RTS_DEFAULT_Q", 0x21F0, 0, 1}, - {"RTS_CTRL", 0x21F8, 0, 0}, - {"RTS_PN_CAM_CTRL", 0x2200, 0, 0}, - {"RTS_PN_CAM_DATA", 0x2208, 0, 0}, - {"RTS_DS_MEM_CTRL", 0x2210, 0, 0}, - {"RTS_DS_MEM_DATA", 0x2218, 0, 0}, - {"RTS_VID_MEM_CTRL", 0x2308, 0, 1}, - {"RTS_VID_MEM_DATA", 0x2310, 0, 1}, - {"RTS_P0_P3_MAP", 0x2318, 0, 1}, - {"RTS_P4_P7_MAP", 0x2320, 0, 1}, - {"RTS_P8_P11_MAP", 0x2328, 0, 1}, - {"RTS_P12_P15_MAP", 0x2330, 0, 1}, - {"RTS_DA_CFG", 0x2338, 0, 1}, - {"RTS_RTH_CFG", 0x2380, 0, 1}, - {"RTS_RTH_MAP_MEM_CTRL", 0x2388, 0, 1}, - {"RTS_RTH_MAP_MEM_DATA", 0x2390, 0, 1}, - {"RTS_RTH_SPDM_MEM_CTRL", 0x2398, 0, 1}, - {"RTS_RTH_SPDM_MEM_DATA", 0x23A0, 0, 1}, - {"RTS_RTH_JHASH_CFG", 0x23A8, 0, 1}, - {"RTS_RTH_JHASH_MASK_0", 0x23B0, 0, 1}, - {"RTS_RTH_JHASH_MASK_1", 0x23B8, 0, 1}, - {"RTS_RTH_JHASH_MASK_2", 0x23C0, 0, 1}, - {"RTS_RTH_JHASH_MASK_3", 0x23C8, 0, 1}, - {"RTS_RTH_JHASH_MASK_4", 0x23D0, 0, 1}, - {"RTS_RTH_JHASH_MASK_5", 0x23D8, 0, 1}, - {"RTS_RTH_STATUS", 0x23E0, 0, 1}, - {"RMAC_RED_FINE_Q0Q3", 0x2400, 0, 1}, - {"RMAC_RED_FINE_Q4Q7", 0x2408, 0, 1}, - {"RMAC_PTHRESH_CROSS", 0x2410, 0, 1}, - {"RMAC_RTHRESH_CROSS", 0x2418, 0, 1}, - {"RMAC_PNUM_RANGE_0", 0x2420, 0, 1}, - {"RMAC_PNUM_RANGE_1", 0x2428, 0, 1}, - {"RMAC_PNUM_RANGE_2", 0x2430, 0, 1}, - {"RMAC_PNUM_RANGE_3", 0x2438, 0, 1}, - {"RMAC_PNUM_RANGE_4", 0x2440, 0, 1}, - {"RMAC_PNUM_RANGE_5", 0x2448, 0, 1}, - {"RMAC_PNUM_RANGE_6", 0x2450, 0, 1}, - {"RMAC_PNUM_RANGE_7", 0x2458, 0, 1}, - {"RMAC_PNUM_RANGE_8", 0x2460, 0, 1}, - {"RMAC_PNUM_RANGE_9", 0x2468, 0, 1}, - {"RMAC_PNUM_RANGE_10", 0x2470, 0, 1}, - {"RMAC_PNUM_RANGE_11", 0x2478, 0, 1}, - {"RMAC_PNUM_RANGE_12", 0x2480, 0, 1}, - {"RMAC_PNUM_RANGE_13", 0x2488, 0, 1}, - {"RMAC_PNUM_RANGE_14", 0x2490, 0, 1}, - {"RMAC_PNUM_RANGE_15", 0x2498, 0, 1}, - {"RMAC_PNUM_RANGE_16", 0x24A0, 0, 1}, - {"RMAC_PNUM_RANGE_17", 0x24A8, 0, 1}, - {"RMAC_PNUM_RANGE_18", 0x24B0, 0, 1}, - {"RMAC_PNUM_RANGE_19", 0x24B8, 0, 1}, - {"RMAC_PNUM_RANGE_20", 0x24C0, 0, 1}, - {"RMAC_PNUM_RANGE_21", 0x24C8, 0, 1}, - {"RMAC_PNUM_RANGE_22", 0x24D0, 0, 1}, - {"RMAC_PNUM_RANGE_23", 0x24D8, 0, 1}, - {"RMAC_PNUM_RANGE_24", 0x24E0, 0, 1}, - {"RMAC_PNUM_RANGE_25", 0x24E8, 0, 1}, - {"RMAC_PNUM_RANGE_26", 0x24F0, 0, 1}, - {"RMAC_PNUM_RANGE_27", 0x24F8, 0, 1}, - {"RMAC_PNUM_RANGE_28", 0x2500, 0, 1}, - {"RMAC_PNUM_RANGE_29", 0x2508, 0, 1}, - {"RMAC_PNUM_RANGE_30", 0x2510, 0, 1}, - {"RMAC_PNUM_RANGE_31", 0x2518, 0, 1}, - {"RMAC_MP_CRC_0", 0x2520, 0, 1}, - {"RMAC_MP_MASK_A_0", 0x2528, 0, 1}, - {"RMAC_MP_MASK_B_0", 0x2530, 0, 1}, - {"RMAC_MP_CRC_1", 0x2538, 0, 1}, - {"RMAC_MP_MASK_A_1", 0x2540, 0, 1}, - {"RMAC_MP_MASK_B_1", 0x2548, 0, 1}, - {"RMAC_MP_CRC_2", 0x2550, 0, 1}, - {"RMAC_MP_MASK_A_2", 0x2558, 0, 1}, - {"RMAC_MP_MASK_B_2", 0x2560, 0, 1}, - 
{"RMAC_MP_CRC_3", 0x2568, 0, 1}, - {"RMAC_MP_MASK_A_3", 0x2570, 0, 1}, - {"RMAC_MP_MASK_B_3", 0x2578, 0, 1}, - {"RMAC_MP_CRC_4", 0x2580, 0, 1}, - {"RMAC_MP_MASK_A_4", 0x2588, 0, 1}, - {"RMAC_MP_MASK_B_4", 0x2590, 0, 1}, - {"RMAC_MP_CRC_5", 0x2598, 0, 1}, - {"RMAC_MP_MASK_A_5", 0x25A0, 0, 1}, - {"RMAC_MP_MASK_B_5", 0x25A8, 0, 1}, - {"RMAC_MP_CRC_6", 0x25B0, 0, 1}, - {"RMAC_MP_MASK_A_6", 0x25B8, 0, 1}, - {"RMAC_MP_MASK_B_6", 0x25C0, 0, 1}, - {"RMAC_MP_CRC_7", 0x25C8, 0, 1}, - {"RMAC_MP_MASK_A_7", 0x25D0, 0, 1}, - {"RMAC_MP_MASK_B_7", 0x25D8, 0, 1}, - {"MAC_CTRL", 0x25E0, 0, 1}, - {"ACTIVITY_CONTROL", 0x25E8, 0, 1}, - {"MC_INT_STATUS", 0x2800, 0, 0}, - {"MC_INT_MASK", 0x2808, 0, 0}, - {"MC_ERR_REG", 0x2810, 0, 0}, - {"MC_ERR_MASK", 0x2818, 0, 0}, - {"MC_ERR_ALARM", 0x2820, 0, 0}, - {"RX_QUEUE_CFG", 0x2900, 0, 0}, - {"MC_RLDRAM_MRS", 0x2908, 0, 0}, - {"MC_RLDRAM_INTERLEAVE", 0x2910, 0, 0}, - {"MC_PAUSE_THRESH_Q0Q3", 0x2918, 0, 0}, - {"MC_PAUSE_THRESH_Q4Q7", 0x2920, 0, 0}, - {"MC_RED_THRESH_Q0", 0x2928, 0, 0}, - {"MC_RED_THRESH_Q1", 0x2930, 0, 0}, - {"MC_RED_THRESH_Q2", 0x2938, 0, 0}, - {"MC_RED_THRESH_Q3", 0x2940, 0, 0}, - {"MC_RED_THRESH_Q4", 0x2948, 0, 0}, - {"MC_RED_THRESH_Q5", 0x2950, 0, 0}, - {"MC_RED_THRESH_Q6", 0x2958, 0, 0}, - {"MC_RED_THRESH_Q7", 0x2960, 0, 0}, - {"MC_RLDRAM_REF_PER", 0x2A00, 0, 0}, - {"MC_RLDRAM_TEST_CTRL", 0x2A20, 0, 0}, - {"MC_RLDRAM_TEST_ADD", 0x2A40, 0, 0}, - {"MC_RLDRAM_TEST_D0", 0x2A60, 0, 0}, - {"MC_RLDRAM_TEST_D1", 0x2A80, 0, 0}, - {"MC_RLDRAM_TEST_D2", 0x2B00, 0, 0}, - {"MC_RLDRAM_TEST_READ_D0", 0x2C00, 0, 1}, - {"MC_RLDRAM_TEST_RAED_D1", 0x2C20, 0, 1}, - {"MC_RLDRAM_TEST_READ_D2", 0x2C40, 0, 1}, - {"MC_RLDRAM_TEST_ADD_BKG", 0x2C60, 0, 1}, - {"MC_RLDRAM_TEST_D0_BKG", 0x2C80, 0, 1}, - {"MC_RLDRAM_TEST_D1_BKG", 0x2D00, 0, 1}, - {"MC_RLDRAM_TEST_D2_BKG", 0x2D20, 0, 1}, - {"MC_RLDRAM_TEST_READ_D0_BKG", 0x2D40, 0, 1}, - {"MC_RLDRAM_TEST_READ_D1_BKG", 0x2D60, 0, 1}, - {"MC_RLDRAM_TEST_READ_D2_BKG", 0x2D80, 0, 1}, - {"MC_RLDRAM_GENERATION", 0x2E00, 0, 1}, - {"MC_DRIVER", 0x2E20, 0, 1}, - {"XGXS_INT_STATUS", 0x3000, 0, 0}, - {"XGXS_INT_MASK", 0x3008, 0, 0}, - {"XGXS_TXGXS_ERR_REG", 0x3010, 0, 0}, - {"XGXS_TXGXS_ERR_MASK", 0x3018, 0, 0}, - {"XGXS_TXGXS_ERR_ALARM", 0x3020, 0, 0}, - {"XGXS_RXGXS_ERR_REG", 0x3028, 0, 0}, - {"XGXS_RXGXS_ERR_MASK", 0x3030, 0, 0}, - {"XGXS_RXGXS_ERR_ALARM", 0x3038, 0, 0}, - {"SPI_ERR_REG", 0x3040, 0, 1}, - {"SPI_ERR_MASK", 0x3048, 0, 1}, - {"SPI_ERR_ALARM", 0x3050, 0, 1}, - {"XGXS_CFG", 0x3100, 0, 0}, - {"XGXS_STATUS", 0x3108, 0, 0}, - {"XGXS_CFG_KEY", 0x3110, 0, 0}, - {"XGXS_EFIFO_CFG", 0x3118, 0, 0}, - {"RXGXS_BER_0", 0x3120, 0, 0}, - {"RXGXS_BER_1", 0x3128, 0, 0}, - {"SPI_CONTROL", 0x3130, 0, 1}, - {"SPI_DATA", 0x3138, 0, 1}, - {"SPI_WRITE_PROTECT", 0x3140, 0, 1}, - {"XGXS_CFG_1", 0x3180, 0, 1} +xge_pci_bar0_t regInfo[] = { + {"GENERAL_INT_STATUS", 0x0000, 0, 0}, + {"GENERAL_INT_MASK", 0x0008, 0, 0}, + {"SW_RESET", 0x0100, 0, 0}, + {"ADAPTER_STATUS", 0x0108, 0, 0}, + {"ADAPTER_CONTROL", 0x0110, 0, 0}, + {"SERR_SOURCE", 0x0118, 0, 0}, + {"PCI_MODE", 0x0120, 0, 1}, + {"RIC_STATUS", 0x0160, 0, 1}, + {"MBIST_STATUS", 0x0558, 0, 1}, + {"PIC_INT_STATUS", 0x0800, 0, 0}, + {"PIC_INT_MASK", 0x0808, 0, 0}, + {"TxPIC_INT_REG", 0x0810, 0, 0}, + {"TxPIC_INT_MASK", 0x0818, 0, 0}, + {"TxPIC_INT_ALARM", 0x0820, 0, 0}, + {"RxPIC_INT_REG", 0x0828, 0, 0}, + {"RxPIC_INT_MASK", 0x0830, 0, 0}, + {"RxPIC_INT_ALARM", 0x0838, 0, 0}, + {"FLSH_INT_REG", 0x0840, 0, 0}, + {"FLSH_INT_MASK", 0x0848, 0, 0}, + {"FLSH_INT_ALARM", 0x0850, 0, 0}, + {"MDIO_INT_REG", 
0x0858, 0, 0}, + {"MDIO_INT_MASK", 0x0860, 0, 0}, + {"MDIO_INT_ALARM", 0x0868, 0, 0}, + {"IIC_INT_REG", 0x0870, 0, 0}, + {"IIC_INT_MASK", 0x0878, 0, 0}, + {"IIC_INT_ALARM", 0x0880, 0, 0}, + {"MSI_PENDING_REG", 0x0888, 0, 1}, + {"GPIO_INT_REG", 0x0890, 0, 0}, + {"GPIO_INT_MASK", 0x0898, 0, 0}, + {"GPIO_INT_ALARM", 0x08A0, 0, 0}, + {"MISC_INT_REG", 0x0890, 0, 1}, + {"MISC_INT_MASK", 0x0898, 0, 1}, + {"MISC_INT_ALARM", 0x08A0, 0, 1}, + {"MSI_TRIGGERED_REG", 0x08A8, 0, 1}, + {"XFP_GPIO_INT_REG", 0x08B0, 0, 1}, + {"XFP_GPIO_INT_MASK", 0x08B8, 0, 1}, + {"XFP_GPIO_ALARM", 0x08C0, 0, 1}, + {"TX_TRAFFIC_INT", 0x08E0, 0, 0}, + {"TX_TRAFFIC_INT_MASK", 0x08E8, 0, 0}, + {"RX_TRAFFIC_INT", 0x08F0, 0, 0}, + {"RX_TRAFFIC_INT_MASK", 0x08F8, 0, 0}, + {"PIC_CONTROL", 0x0900, 0, 0}, + {"SWAPPER_CTRL", 0x0908, 0, 0}, + {"PIF_RD_SWAPPER_Fb", 0x0910, 0, 0}, + {"SCHEDULED_INT_CTRL", 0x0918, 0, 0}, + {"TxReqTimeOut", 0x0920, 0, 0}, + {"STATSReqTimeOut", 0x0928, 0, 0}, + {"Read_Retry_Delay", 0x0930, 0, 0}, + {"Read_Retry_Acceleration", 0x0938, 0, 0}, + {"Write_Retry_Delay", 0x0940, 0, 0}, + {"Write_Retry_Acceleration", 0x0948, 0, 0}, + {"XMSI_Control", 0x0950, 0, 0}, + {"XMSI_Access", 0x0958, 0, 0}, + {"XMSI_Address", 0x0960, 0, 0}, + {"XMSI_Data", 0x0968, 0, 0}, + {"Rx_MAT", 0x0970, 0, 0}, + {"Tx_MAT0_7", 0x0980, 0, 0}, + {"Tx_MAT8_15", 0x0988, 0, 0}, + {"Tx_MAT16_23", 0x0990, 0, 0}, + {"Tx_MAT24_31", 0x0998, 0, 0}, + {"Tx_MAT32_39", 0x09A0, 0, 0}, + {"Tx_MAT40_47", 0x09A8, 0, 0}, + {"Tx_MAT48_55", 0x09B0, 0, 0}, + {"Tx_MAT56_63", 0x09B8, 0, 0}, + {"XMSI_MASK_REG", 0x09C0, 0, 1}, + {"STAT_BYTE_CNT", 0x09C8, 0, 1}, + {"STAT_CFG", 0x09D0, 0, 0}, + {"STAT_ADDR", 0x09D8, 0, 0}, + {"MDIO_CONTROL", 0x09E0, 0, 0}, + {"DTX_CONTROL", 0x09E8, 0, 0}, + {"I2C_CONTROL", 0x09F0, 0, 0}, + {"GPIO_CONTROL", 0x09F8, 0, 0}, + {"BEACON_CONTROL", 0x09F8, 0, 1}, + {"HOST_SCRATCH", 0x0A00, 0, 0}, + {"MISC_CONTROL", 0x0A00, 0, 1}, + {"XFP_CONTROL", 0x0A08, 0, 1}, + {"GPIO_CONTROL", 0x0A10, 0, 1}, + {"TxFIFO_DW_MASK", 0x0A18, 0, 1}, + {"SPLIT_TABLE_LINE_NO", 0x0A20, 0, 1}, + {"SC_TIMEOUT", 0x0A28, 0, 1}, + {"PIC_CONTROL_2", 0x0A30, 0, 1}, + {"INI_DPERR_CTRL", 0x0A38, 0, 1}, + {"WREQ_SPLIT_MASK", 0x0A40, 0, 1}, + {"QW_PER_RXD", 0x0A48, 0, 1}, + {"PIC_STATUS", 0x0B00, 0, 1}, + {"TXP_STATUS", 0x0B08, 0, 1}, + {"TXP_ERR_CONTEXT", 0x0B10, 0, 1}, + {"SPDM_BIR_OFFSET", 0x0B18, 0, 1}, + {"SPDM_OVERWRITE", 0x0B20, 0, 1}, + {"CFG_ADDR_ON_DPERR", 0x0B28, 0, 1}, + {"PIF_ADDR_ON_DPERR", 0x0B30, 0, 1}, + {"TAGS_IN_USE", 0x0B38, 0, 1}, + {"RD_REQ_TYPES", 0x0B40, 0, 1}, + {"SPLIT_TABLE_LINE", 0x0B48, 0, 1}, + {"UNXP_SPLIT_ADD_PH", 0x0B50, 0, 1}, + {"UNEXP_SPLIT_ATTR_PH", 0x0B58, 0, 1}, + {"SPLIT_MESSAGE", 0x0B60, 0, 1}, + {"SPDM_STRUCTURE", 0x0B68, 0, 1}, + {"TXDW_PTR_CNT_0", 0x0B70, 0, 1}, + {"TXDW_PTR_CNT_1", 0x0B78, 0, 1}, + {"TXDW_PTR_CNT_2", 0x0B80, 0, 1}, + {"TXDW_PTR_CNT_3", 0x0B88, 0, 1}, + {"TXDW_PTR_CNT_4", 0x0B90, 0, 1}, + {"TXDW_PTR_CNT_5", 0x0B98, 0, 1}, + {"TXDW_PTR_CNT_6", 0x0BA0, 0, 1}, + {"TXDW_PTR_CNT_7", 0x0BA8, 0, 1}, + {"RXDW_CNT_RING_0", 0x0BB0, 0, 1}, + {"RXDW_CNT_RING_1", 0x0BB8, 0, 1}, + {"RXDW_CNT_RING_2", 0x0BC0, 0, 1}, + {"RXDW_CNT_RING_3", 0x0BC8, 0, 1}, + {"RXDW_CNT_RING_4", 0x0BD0, 0, 1}, + {"RXDW_CNT_RING_5", 0x0BD8, 0, 1}, + {"RXDW_CNT_RING_6", 0x0BE0, 0, 1}, + {"RXDW_CNT_RING_7", 0x0BE8, 0, 1}, + {"TXDMA_INT_STATUS", 0x1000, 0, 0}, + {"TXDMA_INT_MASK", 0x1008, 0, 0}, + {"PFC_ERR_REG", 0x1010, 0, 0}, + {"PFC_ERR_MASK", 0x1018, 0, 0}, + {"PFC_ERR_ALARM", 0x1020, 0, 0}, + {"TDA_ERR_REG", 0x1028, 0, 0}, + {"TDA_ERR_MASK", 
0x1030, 0, 0}, + {"TDA_ERR_ALARM", 0x1038, 0, 0}, + {"PCC_ERR_REG", 0x1040, 0, 0}, + {"PCC_ERR_MASK", 0x1048, 0, 0}, + {"PCC_ERR_ALARM", 0x1050, 0, 0}, + {"TTI_ERR_REG", 0x1058, 0, 0}, + {"TTI_ERR_MASK", 0x1060, 0, 0}, + {"TTI_ERR_ALARM", 0x1068, 0, 0}, + {"LSO_ERR_REG", 0x1070, 0, 0}, + {"LSO_ERR_MASK", 0x1078, 0, 0}, + {"LSO_ERR_ALARM", 0x1080, 0, 0}, + {"TPA_ERR_REG", 0x1088, 0, 0}, + {"TPA_ERR_MASK", 0x1090, 0, 0}, + {"TPA_ERR_ALARM", 0x1098, 0, 0}, + {"SM_ERR_REG", 0x10A0, 0, 0}, + {"SM_ERR_MASK", 0x10A8, 0, 0}, + {"SM_ERR_ALARM", 0x10B0, 0, 0}, + {"TX_FIFO_PARTITION_0", 0x1108, 0, 0}, + {"TX_FIFO_PARTITION_1", 0x1110, 0, 0}, + {"TX_FIFO_PARTITION_2", 0x1118, 0, 0}, + {"TX_FIFO_PARTITION_3", 0x1120, 0, 0}, + {"TX_W_ROUND_ROBIN_0", 0x1128, 0, 0}, + {"TX_W_ROUND_ROBIN_1", 0x1130, 0, 0}, + {"TX_W_ROUND_ROBIN_2", 0x1138, 0, 0}, + {"TX_W_ROUND_ROBIN_3", 0x1140, 0, 0}, + {"TX_W_ROUND_ROBIN_4", 0x1148, 0, 0}, + {"TTI_COMMAND_MEM", 0x1150, 0, 0}, + {"TTI_DATA1_MEM", 0x1158, 0, 0}, + {"TTI_DATA2_MEM", 0x1160, 0, 0}, + {"TX_PA_CFG", 0x1168, 0, 0}, + {"PCC_ENABLE", 0x1170, 0, 0}, + {"PFC_MONITOR_0", 0x1178, 0, 1}, + {"PFC_MONITOR_1", 0x1180, 0, 1}, + {"PFC_MONITOR_2", 0x1188, 0, 1}, + {"PFC_MONITOR_3", 0x1190, 0, 1}, + {"TXD_OWNERSHIP_CTRL", 0x1198, 0, 1}, + {"PFC_READ_CNTRL", 0x11A0, 0, 1}, + {"PFC_READ_DATA", 0x11A8, 0, 1}, + {"RXDMA_INT_STATUS", 0x1800, 0, 0}, + {"RXDMA_INT_MASK", 0x1808, 0, 0}, + {"RDA_ERR_REG", 0x1810, 0, 0}, + {"RDA_ERR_MASK", 0x1818, 0, 0}, + {"RDA_ERR_ALARM", 0x1820, 0, 0}, + {"RC_ERR_REG", 0x1828, 0, 0}, + {"RC_ERR_MASK", 0x1830, 0, 0}, + {"RC_ERR_ALARM", 0x1838, 0, 0}, + {"PRC_PCIX_ERR_REG", 0x1840, 0, 0}, + {"PRC_PCIX_ERR_MASK", 0x1848, 0, 0}, + {"PRC_PCIX_ERR_ALARM", 0x1850, 0, 0}, + {"RPA_ERR_REG", 0x1858, 0, 0}, + {"RPA_ERR_MASK", 0x1860, 0, 0}, + {"RPA_ERR_ALARM", 0x1868, 0, 0}, + {"RTI_ERR_REG", 0x1870, 0, 0}, + {"RTI_ERR_MASK", 0x1878, 0, 0}, + {"RTI_ERR_ALARM", 0x1880, 0, 0}, + {"RX_QUEUE_PRIORITY", 0x1900, 0, 0}, + {"RX_W_ROUND_ROBIN_0", 0x1908, 0, 0}, + {"RX_W_ROUND_ROBIN_1", 0x1910, 0, 0}, + {"RX_W_ROUND_ROBIN_2", 0x1918, 0, 0}, + {"RX_W_ROUND_ROBIN_3", 0x1920, 0, 0}, + {"RX_W_ROUND_ROBIN_4", 0x1928, 0, 0}, + {"PRC_RXD0_0", 0x1930, 0, 0}, + {"PRC_RXD0_1", 0x1938, 0, 0}, + {"PRC_RXD0_2", 0x1940, 0, 0}, + {"PRC_RXD0_3", 0x1948, 0, 0}, + {"PRC_RXD0_4", 0x1950, 0, 0}, + {"PRC_RXD0_5", 0x1958, 0, 0}, + {"PRC_RXD0_6", 0x1960, 0, 0}, + {"PRC_RXD0_7", 0x1968, 0, 0}, + {"PRC_CTRL_0", 0x1970, 0, 0}, + {"PRC_CTRL_1", 0x1978, 0, 0}, + {"PRC_CTRL_2", 0x1980, 0, 0}, + {"PRC_CTRL_3", 0x1988, 0, 0}, + {"PRC_CTRL_4", 0x1990, 0, 0}, + {"PRC_CTRL_5", 0x1998, 0, 0}, + {"PRC_CTRL_6", 0x19A0, 0, 0}, + {"PRC_CTRL_7", 0x19A8, 0, 0}, + {"PRC_ALARM_ACTION", 0x19B0, 0, 0}, + {"RTI_COMMAND_MEM", 0x19B8, 0, 0}, + {"RTI_DATA1_MEM", 0x19C0, 0, 0}, + {"RTI_DATA2_MEM", 0x19C8, 0, 0}, + {"RX_PA_CFG", 0x19D0, 0, 0}, + {"RING_BUMP_COUNTER1", 0x19E0, 0, 1}, + {"RING_BUMP_COUNTER2", 0x19E8, 0, 1}, + {"MAC_INT_STATUS", 0x2000, 0, 0}, + {"MAC_INT_MASK", 0x2008, 0, 0}, + {"MAC_TMAC_ERR_REG", 0x2010, 0, 0}, + {"MAC_TMAC_ERR_MASK", 0x2018, 0, 0}, + {"MAC_TMAC_ERR_ALARM", 0x2020, 0, 0}, + {"MAC_RMAC_ERR_REG", 0x2028, 0, 0}, + {"MAC_RMAC_ERR_MASK", 0x2030, 0, 0}, + {"MAC_RMAC_ERR_ALARM", 0x2038, 0, 0}, + {"MAC_CFG", 0x2100, 0, 0}, + {"TMAC_AVG_IPG", 0x2108, 0, 0}, + {"RMAC_MAX_PYLD_LEN", 0x2110, 0, 0}, + {"RMAC_ERR_CFG", 0x2118, 0, 0}, + {"RMAC_CFG_KEY", 0x2120, 0, 0}, + {"RMAC_ADDR_CMD_MEM", 0x2128, 0, 0}, + {"RMAC_ADDR_DATA0_MEM", 0x2130, 0, 0}, + {"RMAC_ADDR_DATA1_MEM", 0x2138, 0, 0}, + 
{"TMAC_IPG_CFG", 0x2148, 0, 0}, + {"RMAC_PAUSE_CFG", 0x2150, 0, 0}, + {"RMAC_RED_CFG", 0x2158, 0, 0}, + {"RMAC_RED_RATE_Q0Q3", 0x2160, 0, 0}, + {"RMAC_RED_RATE_Q4Q7", 0x2168, 0, 0}, + {"MAC_LINK_UTIL", 0x2170, 0, 0}, + {"RMAC_INVALID_IPG", 0x2178, 0, 0}, + {"RTS_FRM_LEN_0", 0x2180, 0, 0}, + {"RTS_FRM_LEN_1", 0x2188, 0, 0}, + {"RTS_FRM_LEN_2", 0x2190, 0, 0}, + {"RTS_FRM_LEN_3", 0x2198, 0, 0}, + {"RTS_FRM_LEN_4", 0x21A0, 0, 0}, + {"RTS_FRM_LEN_5", 0x21A8, 0, 0}, + {"RTS_FRM_LEN_6", 0x21B0, 0, 0}, + {"RTS_FRM_LEN_7", 0x21B8, 0, 0}, + {"RTS_QOS_STEERING", 0x21C0, 0, 0}, + {"RTS_DIX_MAP_0", 0x21C8, 0, 0}, + {"RTS_DIX_MAP_1", 0x21D0, 0, 0}, + {"RTS_DIX_MAP_2", 0x21D8, 0, 0}, + {"RTS_DIX_MAP_3", 0x21E0, 0, 0}, + {"RTS_Q_ALTERNATES", 0x21E8, 0, 0}, + {"RTS_DEFAULT_Q", 0x21F0, 0, 1}, + {"RTS_CTRL", 0x21F8, 0, 0}, + {"RTS_PN_CAM_CTRL", 0x2200, 0, 0}, + {"RTS_PN_CAM_DATA", 0x2208, 0, 0}, + {"RTS_DS_MEM_CTRL", 0x2210, 0, 0}, + {"RTS_DS_MEM_DATA", 0x2218, 0, 0}, + {"RTS_VID_MEM_CTRL", 0x2308, 0, 1}, + {"RTS_VID_MEM_DATA", 0x2310, 0, 1}, + {"RTS_P0_P3_MAP", 0x2318, 0, 1}, + {"RTS_P4_P7_MAP", 0x2320, 0, 1}, + {"RTS_P8_P11_MAP", 0x2328, 0, 1}, + {"RTS_P12_P15_MAP", 0x2330, 0, 1}, + {"RTS_DA_CFG", 0x2338, 0, 1}, + {"RTS_RTH_CFG", 0x2380, 0, 1}, + {"RTS_RTH_MAP_MEM_CTRL", 0x2388, 0, 1}, + {"RTS_RTH_MAP_MEM_DATA", 0x2390, 0, 1}, + {"RTS_RTH_SPDM_MEM_CTRL", 0x2398, 0, 1}, + {"RTS_RTH_SPDM_MEM_DATA", 0x23A0, 0, 1}, + {"RTS_RTH_JHASH_CFG", 0x23A8, 0, 1}, + {"RTS_RTH_JHASH_MASK_0", 0x23B0, 0, 1}, + {"RTS_RTH_JHASH_MASK_1", 0x23B8, 0, 1}, + {"RTS_RTH_JHASH_MASK_2", 0x23C0, 0, 1}, + {"RTS_RTH_JHASH_MASK_3", 0x23C8, 0, 1}, + {"RTS_RTH_JHASH_MASK_4", 0x23D0, 0, 1}, + {"RTS_RTH_JHASH_MASK_5", 0x23D8, 0, 1}, + {"RTS_RTH_STATUS", 0x23E0, 0, 1}, + {"RMAC_RED_FINE_Q0Q3", 0x2400, 0, 1}, + {"RMAC_RED_FINE_Q4Q7", 0x2408, 0, 1}, + {"RMAC_PTHRESH_CROSS", 0x2410, 0, 1}, + {"RMAC_RTHRESH_CROSS", 0x2418, 0, 1}, + {"RMAC_PNUM_RANGE_0", 0x2420, 0, 1}, + {"RMAC_PNUM_RANGE_1", 0x2428, 0, 1}, + {"RMAC_PNUM_RANGE_2", 0x2430, 0, 1}, + {"RMAC_PNUM_RANGE_3", 0x2438, 0, 1}, + {"RMAC_PNUM_RANGE_4", 0x2440, 0, 1}, + {"RMAC_PNUM_RANGE_5", 0x2448, 0, 1}, + {"RMAC_PNUM_RANGE_6", 0x2450, 0, 1}, + {"RMAC_PNUM_RANGE_7", 0x2458, 0, 1}, + {"RMAC_PNUM_RANGE_8", 0x2460, 0, 1}, + {"RMAC_PNUM_RANGE_9", 0x2468, 0, 1}, + {"RMAC_PNUM_RANGE_10", 0x2470, 0, 1}, + {"RMAC_PNUM_RANGE_11", 0x2478, 0, 1}, + {"RMAC_PNUM_RANGE_12", 0x2480, 0, 1}, + {"RMAC_PNUM_RANGE_13", 0x2488, 0, 1}, + {"RMAC_PNUM_RANGE_14", 0x2490, 0, 1}, + {"RMAC_PNUM_RANGE_15", 0x2498, 0, 1}, + {"RMAC_PNUM_RANGE_16", 0x24A0, 0, 1}, + {"RMAC_PNUM_RANGE_17", 0x24A8, 0, 1}, + {"RMAC_PNUM_RANGE_18", 0x24B0, 0, 1}, + {"RMAC_PNUM_RANGE_19", 0x24B8, 0, 1}, + {"RMAC_PNUM_RANGE_20", 0x24C0, 0, 1}, + {"RMAC_PNUM_RANGE_21", 0x24C8, 0, 1}, + {"RMAC_PNUM_RANGE_22", 0x24D0, 0, 1}, + {"RMAC_PNUM_RANGE_23", 0x24D8, 0, 1}, + {"RMAC_PNUM_RANGE_24", 0x24E0, 0, 1}, + {"RMAC_PNUM_RANGE_25", 0x24E8, 0, 1}, + {"RMAC_PNUM_RANGE_26", 0x24F0, 0, 1}, + {"RMAC_PNUM_RANGE_27", 0x24F8, 0, 1}, + {"RMAC_PNUM_RANGE_28", 0x2500, 0, 1}, + {"RMAC_PNUM_RANGE_29", 0x2508, 0, 1}, + {"RMAC_PNUM_RANGE_30", 0x2510, 0, 1}, + {"RMAC_PNUM_RANGE_31", 0x2518, 0, 1}, + {"RMAC_MP_CRC_0", 0x2520, 0, 1}, + {"RMAC_MP_MASK_A_0", 0x2528, 0, 1}, + {"RMAC_MP_MASK_B_0", 0x2530, 0, 1}, + {"RMAC_MP_CRC_1", 0x2538, 0, 1}, + {"RMAC_MP_MASK_A_1", 0x2540, 0, 1}, + {"RMAC_MP_MASK_B_1", 0x2548, 0, 1}, + {"RMAC_MP_CRC_2", 0x2550, 0, 1}, + {"RMAC_MP_MASK_A_2", 0x2558, 0, 1}, + {"RMAC_MP_MASK_B_2", 0x2560, 0, 1}, + {"RMAC_MP_CRC_3", 0x2568, 0, 1}, + 
{"RMAC_MP_MASK_A_3", 0x2570, 0, 1}, + {"RMAC_MP_MASK_B_3", 0x2578, 0, 1}, + {"RMAC_MP_CRC_4", 0x2580, 0, 1}, + {"RMAC_MP_MASK_A_4", 0x2588, 0, 1}, + {"RMAC_MP_MASK_B_4", 0x2590, 0, 1}, + {"RMAC_MP_CRC_5", 0x2598, 0, 1}, + {"RMAC_MP_MASK_A_5", 0x25A0, 0, 1}, + {"RMAC_MP_MASK_B_5", 0x25A8, 0, 1}, + {"RMAC_MP_CRC_6", 0x25B0, 0, 1}, + {"RMAC_MP_MASK_A_6", 0x25B8, 0, 1}, + {"RMAC_MP_MASK_B_6", 0x25C0, 0, 1}, + {"RMAC_MP_CRC_7", 0x25C8, 0, 1}, + {"RMAC_MP_MASK_A_7", 0x25D0, 0, 1}, + {"RMAC_MP_MASK_B_7", 0x25D8, 0, 1}, + {"MAC_CTRL", 0x25E0, 0, 1}, + {"ACTIVITY_CONTROL", 0x25E8, 0, 1}, + {"MC_INT_STATUS", 0x2800, 0, 0}, + {"MC_INT_MASK", 0x2808, 0, 0}, + {"MC_ERR_REG", 0x2810, 0, 0}, + {"MC_ERR_MASK", 0x2818, 0, 0}, + {"MC_ERR_ALARM", 0x2820, 0, 0}, + {"RX_QUEUE_CFG", 0x2900, 0, 0}, + {"MC_RLDRAM_MRS", 0x2908, 0, 0}, + {"MC_RLDRAM_INTERLEAVE", 0x2910, 0, 0}, + {"MC_PAUSE_THRESH_Q0Q3", 0x2918, 0, 0}, + {"MC_PAUSE_THRESH_Q4Q7", 0x2920, 0, 0}, + {"MC_RED_THRESH_Q0", 0x2928, 0, 0}, + {"MC_RED_THRESH_Q1", 0x2930, 0, 0}, + {"MC_RED_THRESH_Q2", 0x2938, 0, 0}, + {"MC_RED_THRESH_Q3", 0x2940, 0, 0}, + {"MC_RED_THRESH_Q4", 0x2948, 0, 0}, + {"MC_RED_THRESH_Q5", 0x2950, 0, 0}, + {"MC_RED_THRESH_Q6", 0x2958, 0, 0}, + {"MC_RED_THRESH_Q7", 0x2960, 0, 0}, + {"MC_RLDRAM_REF_PER", 0x2A00, 0, 0}, + {"MC_RLDRAM_TEST_CTRL", 0x2A20, 0, 0}, + {"MC_RLDRAM_TEST_ADD", 0x2A40, 0, 0}, + {"MC_RLDRAM_TEST_D0", 0x2A60, 0, 0}, + {"MC_RLDRAM_TEST_D1", 0x2A80, 0, 0}, + {"MC_RLDRAM_TEST_D2", 0x2B00, 0, 0}, + {"MC_RLDRAM_TEST_READ_D0", 0x2C00, 0, 1}, + {"MC_RLDRAM_TEST_RAED_D1", 0x2C20, 0, 1}, + {"MC_RLDRAM_TEST_READ_D2", 0x2C40, 0, 1}, + {"MC_RLDRAM_TEST_ADD_BKG", 0x2C60, 0, 1}, + {"MC_RLDRAM_TEST_D0_BKG", 0x2C80, 0, 1}, + {"MC_RLDRAM_TEST_D1_BKG", 0x2D00, 0, 1}, + {"MC_RLDRAM_TEST_D2_BKG", 0x2D20, 0, 1}, + {"MC_RLDRAM_TEST_READ_D0_BKG", 0x2D40, 0, 1}, + {"MC_RLDRAM_TEST_READ_D1_BKG", 0x2D60, 0, 1}, + {"MC_RLDRAM_TEST_READ_D2_BKG", 0x2D80, 0, 1}, + {"MC_RLDRAM_GENERATION", 0x2E00, 0, 1}, + {"MC_DRIVER", 0x2E20, 0, 1}, + {"XGXS_INT_STATUS", 0x3000, 0, 0}, + {"XGXS_INT_MASK", 0x3008, 0, 0}, + {"XGXS_TXGXS_ERR_REG", 0x3010, 0, 0}, + {"XGXS_TXGXS_ERR_MASK", 0x3018, 0, 0}, + {"XGXS_TXGXS_ERR_ALARM", 0x3020, 0, 0}, + {"XGXS_RXGXS_ERR_REG", 0x3028, 0, 0}, + {"XGXS_RXGXS_ERR_MASK", 0x3030, 0, 0}, + {"XGXS_RXGXS_ERR_ALARM", 0x3038, 0, 0}, + {"SPI_ERR_REG", 0x3040, 0, 1}, + {"SPI_ERR_MASK", 0x3048, 0, 1}, + {"SPI_ERR_ALARM", 0x3050, 0, 1}, + {"XGXS_CFG", 0x3100, 0, 0}, + {"XGXS_STATUS", 0x3108, 0, 0}, + {"XGXS_CFG_KEY", 0x3110, 0, 0}, + {"XGXS_EFIFO_CFG", 0x3118, 0, 0}, + {"RXGXS_BER_0", 0x3120, 0, 0}, + {"RXGXS_BER_1", 0x3128, 0, 0}, + {"SPI_CONTROL", 0x3130, 0, 1}, + {"SPI_DATA", 0x3138, 0, 1}, + {"SPI_WRITE_PROTECT", 0x3140, 0, 1}, + {"XGXS_CFG_1", 0x3180, 0, 1} }; xge_stats_hw_info_t statsInfo[] = { - /* Tx MAC statistics counters */ - {"tmac_frms", 0x0000, 0x0004, 4, 0}, - {"tmac_data_octets", 0x0004, 0x0000, 4, 0}, - {"tmac_drop_frms", 0x0008, 0x0008, 8, 0}, - {"tmac_mcst_frms", 0x0010, 0x0014, 4, 0}, - {"tmac_bcst_frms", 0x0014, 0x0010, 4, 0}, - {"tmac_pause_ctrl_frms", 0x0018, 0x0018, 8, 0}, - {"tmac_ttl_octets", 0x0020, 0x0024, 4, 0}, - {"tmac_ucst_frms", 0x0024, 0x0020, 4, 0}, - {"tmac_nucst_frms", 0x0028, 0x002C, 4, 0}, - {"tmac_any_err_frms", 0x002C, 0x0028, 4, 0}, - {"tmac_ttl_less_fb_octets", 0x0030, 0x0030, 8, 0}, - {"tmac_vld_ip_octets", 0x0038, 0x0038, 8, 0}, - {"tmac_vld_ip", 0x0040, 0x0044, 4, 0}, - {"tmac_drop_ip", 0x0044, 0x0040, 4, 0}, - {"tmac_icmp", 0x0048, 0x004C, 4, 0}, - {"tmac_rst_tcp", 0x004C, 0x0048, 4, 0}, - 
{"tmac_tcp", 0x0050, 0x0050, 8, 0}, - {"tmac_udp", 0x0058, 0x005C, 4, 0}, - {"reserved_0", 0x005C, 0x0058, 4, 0}, - - /* Rx MAC Statistics counters. */ - {"rmac_vld_frms", 0x0060, 0x0064, 4, 0}, - {"rmac_data_octets", 0x006C, 0x0060, 4, 0}, - {"rmac_fcs_err_frms", 0x0068, 0x0068, 8, 0}, - {"rmac_drop_frms", 0x0070, 0x0070, 8, 0}, - {"rmac_vld_mcst_frms", 0x0078, 0x007C, 4, 0}, - {"rmac_vld_bcst_frms", 0x007C, 0x0078, 4, 0}, - {"rmac_in_rng_len_err_frms", 0x0080, 0x0084, 4, 0}, - {"rmac_out_rng_len_err_frms", 0x0084, 0x0080, 4, 0}, - {"rmac_long_frms", 0x0088, 0x0088, 8, 0}, - {"rmac_pause_ctrl_frms", 0x0090, 0x0090, 8, 0}, - {"rmac_unsup_ctrl_frms", 0x0098, 0x0098, 8, 0}, - {"rmac_ttl_octets", 0x00A0, 0x00A4, 4, 0}, - {"rmac_accepted_ucst_frms", 0x00A4, 0x00A0, 4, 0}, - {"rmac_accepted_nucst_frms", 0x00A8, 0x00AC, 4, 0}, - {"rmac_discarded_frms", 0x00AC, 0x00A8, 4, 0}, - {"rmac_drop_events", 0x00B0, 0x00B4, 4, 0}, - {"reserved_1", 0x00B4, 0x00B0, 4, 0}, - {"rmac_ttl_less_fb_octets", 0x00B8, 0x00B8, 8, 0}, - {"rmac_ttl_frms", 0x00C0, 0x00C0, 8, 0}, - {"reserved_2", 0x00C8, 0x00C8, 8, 0}, - {"reserved_3", 0x00D0, 0x00D4, 4, 0}, - {"rmac_usized_frms", 0x00D4, 0x00D0, 4, 0}, - {"rmac_osized_frms", 0x00D8, 0x00DC, 4, 0}, - {"rmac_frag_frms", 0x00DC, 0x00D8, 4, 0}, - {"rmac_jabber_frms", 0x00E0, 0x00E4, 4, 0}, - {"reserved_4", 0x00E4, 0x00E0, 4, 0}, - {"rmac_ttl_64_frms", 0x00E8, 0x00E8, 8, 0}, - {"rmac_ttl_65_127_frms", 0x00F0, 0x00F0, 8, 0}, - {"reserved_5", 0x00F8, 0x00F8, 8, 0}, - {"rmac_ttl_128_255_frms", 0x0100, 0x0100, 8, 0}, - {"rmac_ttl_256_511_frms", 0x0108, 0x0108, 8, 0}, - {"reserved_6", 0x0110, 0x0110, 8, 0}, - {"rmac_ttl_512_1023_frms", 0x0118, 0x0118, 8, 0}, - {"rmac_ttl_1024_1518_frms", 0x0120, 0x0120, 8, 0}, - {"reserved_7", 0x0128, 0x012C, 4, 0}, - {"rmac_ip", 0x012C, 0x0128, 4, 0}, - {"rmac_ip_octets", 0x0130, 0x0130, 8, 0}, - {"rmac_hdr_err_ip", 0x0138, 0x013C, 4, 0}, - {"rmac_drop_ip", 0x013C, 0x0138, 4, 0}, - {"rmac_icmp", 0x0140, 0x0144, 4, 0}, - {"reserved_8", 0x0144, 0x0140, 4, 0}, - {"rmac_tcp", 0x0148, 0x0148, 8, 0}, - {"rmac_udp", 0x0150, 0x0154, 4, 0}, - {"rmac_err_drp_udp", 0x0154, 0x0150, 4, 0}, - {"rmac_xgmii_err_sym", 0x0158, 0x0158, 8, 0}, - {"rmac_frms_q0", 0x0160, 0x0160, 8, 0}, - {"rmac_frms_q1", 0x0168, 0x0168, 8, 0}, - {"rmac_frms_q2", 0x0170, 0x0170, 8, 0}, - {"rmac_frms_q3", 0x0178, 0x0178, 8, 0}, - {"rmac_frms_q4", 0x0180, 0x0180, 8, 0}, - {"rmac_frms_q5", 0x0188, 0x0188, 8, 0}, - {"rmac_frms_q6", 0x0190, 0x0190, 8, 0}, - {"rmac_frms_q7", 0x0198, 0x0198, 8, 0}, - {"rmac_full_q0", 0x01A0, 0x01A6, 2, 0}, - {"rmac_full_q1", 0x01A2, 0x01A4, 2, 0}, - {"rmac_full_q2", 0x01A4, 0x01A2, 2, 0}, - {"rmac_full_q3", 0x01A6, 0x01A0, 2, 0}, - {"rmac_full_q4", 0x01A8, 0x01AE, 2, 0}, - {"rmac_full_q5", 0x01AA, 0x01AC, 2, 0}, - {"rmac_full_q6", 0x01AB, 0x01AA, 2, 0}, - {"rmac_full_q7", 0x01AC, 0x01A8, 2, 0}, - {"rmac_pause_cnt", 0x01B0, 0x01B4, 4, 0}, - {"reserved_9", 0x01B4, 0x01B0, 4, 0}, - {"rmac_xgmii_data_err_cnt", 0x01B8, 0x01B8, 8, 0}, - {"rmac_xgmii_ctrl_err_cnt", 0x01C0, 0x01C0, 8, 0}, - {"rmac_accepted_ip", 0x01C8, 0x01CC, 4, 0}, - {"rmac_err_tcp", 0x01CC, 0x01C8, 4, 0}, - - /* PCI/PCI-X Read transaction statistics. */ - {"rd_req_cnt", 0x01D0, 0x01D4, 4, 0}, - {"new_rd_req_cnt", 0x01D4, 0x01D0, 4, 0}, - {"new_rd_req_rtry_cnt", 0x01D8, 0x01DC, 4, 0}, - {"rd_rtry_cnt", 0x01DC, 0x01D8, 4, 0}, - {"wr_rtry_rd_ack_cnt", 0x01E0, 0x01E4, 4, 0}, - - /* PCI/PCI-X write transaction statistics. 
*/ - {"wr_req_cnt", 0x01E4, 0x01E0, 4, 0}, - {"new_wr_req_cnt", 0x01E8, 0x01EC, 4, 0}, - {"new_wr_req_rtry_cnt", 0x01EC, 0x01E8, 4, 0}, - {"wr_rtry_cnt", 0x01F0, 0x01F4, 4, 0}, - {"wr_disc_cnt", 0x01F4, 0x01F0, 4, 0}, - {"rd_rtry_wr_ack_cnt", 0x01F8, 0x01FC, 4, 0}, - - /* DMA Transaction statistics. */ - {"txp_wr_cnt", 0x01FC, 0x01F8, 4, 0}, - {"txd_rd_cnt", 0x0200, 0x0204, 4, 0}, - {"txd_wr_cnt", 0x0204, 0x0200, 4, 0}, - {"rxd_rd_cnt", 0x0208, 0x020C, 4, 0}, - {"rxd_wr_cnt", 0x020C, 0x0208, 4, 0}, - {"txf_rd_cnt", 0x0210, 0x0214, 4, 0}, - {"rxf_wr_cnt", 0x0214, 0x0210, 4, 0}, - - /* Enhanced Herc statistics */ - {"tmac_frms_oflow", 0x0218, 0x021C, 4, 0}, - {"tmac_data_octets_oflow", 0x021C, 0x0218, 4, 0}, - {"tmac_mcst_frms_oflow", 0x0220, 0x0224, 4, 0}, - {"tmac_bcst_frms_oflow", 0x0224, 0x0220, 4, 0}, - {"tmac_ttl_octets_oflow", 0x0228, 0x022C, 4, 0}, - {"tmac_ucst_frms_oflow", 0x022C, 0x0228, 4, 0}, - {"tmac_nucst_frms_oflow", 0x0230, 0x0234, 4, 0}, - {"tmac_any_err_frms_oflow", 0x0234, 0x0230, 4, 0}, - {"tmac_vlan_frms", 0x0238, 0x0238, 8, 0}, - {"tmac_vld_ip_oflow", 0x0240, 0x0244, 4, 0}, - {"tmac_drop_ip_oflow", 0x0244, 0x0240, 4, 0}, - {"tmac_icmp_oflow", 0x0248, 0x024C, 4, 0}, - {"tmac_rst_tcp_oflow", 0x024C, 0x0248, 4, 0}, - {"tmac_udp_oflow", 0x0250, 0x0254, 4, 0}, - {"tpa_unknown_protocol", 0x0254, 0x0250, 4, 0}, - {"tpa_parse_failure", 0x0258, 0x025C, 4, 0}, - {"reserved_10", 0x025C, 0x0258, 4, 0}, - {"rmac_vld_frms_oflow", 0x0260, 0x0264, 4, 0}, - {"rmac_data_octets_oflow", 0x0264, 0x0260, 4, 0}, - {"rmac_vld_mcst_frms_oflow", 0x0268, 0x026C, 4, 0}, - {"rmac_vld_bcst_frms_oflow", 0x026C, 0x0268, 4, 0}, - {"rmac_ttl_octets_oflow", 0x0270, 0x0274, 4, 0}, - {"rmac_accepted_ucst_frms_oflow", 0x0274, 0x0270, 4, 0}, - {"rmac_accepted_nucst_frms_oflow", 0x0278, 0x027C, 4, 0}, - {"rmac_discarded_frms_oflow", 0x027C, 0x0278, 4, 0}, - {"rmac_drop_events_oflow", 0x0280, 0x0284, 4, 0}, - {"rmac_usized_frms_oflow", 0x0284, 0x0280, 4, 0}, - {"rmac_osized_frms_oflow", 0x0288, 0x028C, 4, 0}, - {"rmac_frag_frms_oflow", 0x028C, 0x0288, 4, 0}, - {"rmac_jabber_frms_oflow", 0x0290, 0x0294, 4, 0}, - {"rmac_ip_oflow", 0x0294, 0x0290, 4, 0}, - {"rmac_drop_ip_oflow", 0x0298, 0x029C, 4, 0}, - {"rmac_icmp_oflow", 0x029C, 0x0298, 4, 0}, - {"rmac_udp_oflow", 0x02A0, 0x02A4, 4, 0}, - {"rmac_err_drp_udp_oflow", 0x02A4, 0x02A0, 4, 0}, - {"rmac_pause_cnt_oflow", 0x02A8, 0x02AC, 4, 0}, - {"reserved_11", 0x02AC, 0x02A8, 4, 0}, - {"rmac_ttl_1519_4095_frms", 0x02B0, 0x02B0, 8, 0}, - {"rmac_ttl_4096_8191_frms", 0x02B8, 0x02B8, 8, 0}, - {"rmac_ttl_8192_max_frms", 0x02C0, 0x02C0, 8, 0}, - {"rmac_ttl_gt_max_frms", 0x02C8, 0x02C8, 8, 0}, - {"rmac_osized_alt_frms", 0x02D0, 0x02D0, 8, 0}, - {"rmac_jabber_alt_frms", 0x02D8, 0x02D8, 8, 0}, - {"rmac_gt_max_alt_frms", 0x02E0, 0x02E0, 8, 0}, - {"rmac_vlan_frms", 0x02E8, 0x02E8, 8, 0}, - {"rmac_fcs_discard", 0x02F0, 0x02F4, 4, 0}, - {"rmac_len_discard", 0x02F4, 0x02F0, 4, 0}, - {"rmac_da_discard", 0x02F8, 0x02FC, 4, 0}, - {"rmac_pf_discard", 0x02FC, 0x02F8, 4, 0}, - {"rmac_rts_discard", 0x0300, 0x0304, 4, 0}, - {"rmac_wol_discard", 0x0304, 0x0300, 4, 0}, - {"rmac_red_discard", 0x0308, 0x030C, 4, 0}, - {"rmac_ingm_full_discard", 0x030C, 0x0308, 4, 0}, - {"rmac_accepted_ip_oflow", 0x0310, 0x0314, 4, 0}, - {"reserved_12", 0x0314, 0x0310, 4, 0}, - {"link_fault_cnt", 0x0318, 0x031C, 4, 0}, - {"reserved_13", 0x031C, 0x0318, 4, 0} + /* Tx MAC statistics counters */ + {"tmac_frms", 0x0000, 0x0004, 4, 0}, + {"tmac_data_octets", 0x0004, 0x0000, 4, 0}, + {"tmac_drop_frms", 0x0008, 
0x0008, 8, 0}, + {"tmac_mcst_frms", 0x0010, 0x0014, 4, 0}, + {"tmac_bcst_frms", 0x0014, 0x0010, 4, 0}, + {"tmac_pause_ctrl_frms", 0x0018, 0x0018, 8, 0}, + {"tmac_ttl_octets", 0x0020, 0x0024, 4, 0}, + {"tmac_ucst_frms", 0x0024, 0x0020, 4, 0}, + {"tmac_nucst_frms", 0x0028, 0x002C, 4, 0}, + {"tmac_any_err_frms", 0x002C, 0x0028, 4, 0}, + {"tmac_ttl_less_fb_octets", 0x0030, 0x0030, 8, 0}, + {"tmac_vld_ip_octets", 0x0038, 0x0038, 8, 0}, + {"tmac_vld_ip", 0x0040, 0x0044, 4, 0}, + {"tmac_drop_ip", 0x0044, 0x0040, 4, 0}, + {"tmac_icmp", 0x0048, 0x004C, 4, 0}, + {"tmac_rst_tcp", 0x004C, 0x0048, 4, 0}, + {"tmac_tcp", 0x0050, 0x0050, 8, 0}, + {"tmac_udp", 0x0058, 0x005C, 4, 0}, + {"reserved_0", 0x005C, 0x0058, 4, 0}, + + /* Rx MAC Statistics counters. */ + {"rmac_vld_frms", 0x0060, 0x0064, 4, 0}, + {"rmac_data_octets", 0x006C, 0x0060, 4, 0}, + {"rmac_fcs_err_frms", 0x0068, 0x0068, 8, 0}, + {"rmac_drop_frms", 0x0070, 0x0070, 8, 0}, + {"rmac_vld_mcst_frms", 0x0078, 0x007C, 4, 0}, + {"rmac_vld_bcst_frms", 0x007C, 0x0078, 4, 0}, + {"rmac_in_rng_len_err_frms", 0x0080, 0x0084, 4, 0}, + {"rmac_out_rng_len_err_frms", 0x0084, 0x0080, 4, 0}, + {"rmac_long_frms", 0x0088, 0x0088, 8, 0}, + {"rmac_pause_ctrl_frms", 0x0090, 0x0090, 8, 0}, + {"rmac_unsup_ctrl_frms", 0x0098, 0x0098, 8, 0}, + {"rmac_ttl_octets", 0x00A0, 0x00A4, 4, 0}, + {"rmac_accepted_ucst_frms", 0x00A4, 0x00A0, 4, 0}, + {"rmac_accepted_nucst_frms", 0x00A8, 0x00AC, 4, 0}, + {"rmac_discarded_frms", 0x00AC, 0x00A8, 4, 0}, + {"rmac_drop_events", 0x00B0, 0x00B4, 4, 0}, + {"reserved_1", 0x00B4, 0x00B0, 4, 0}, + {"rmac_ttl_less_fb_octets", 0x00B8, 0x00B8, 8, 0}, + {"rmac_ttl_frms", 0x00C0, 0x00C0, 8, 0}, + {"reserved_2", 0x00C8, 0x00C8, 8, 0}, + {"reserved_3", 0x00D0, 0x00D4, 4, 0}, + {"rmac_usized_frms", 0x00D4, 0x00D0, 4, 0}, + {"rmac_osized_frms", 0x00D8, 0x00DC, 4, 0}, + {"rmac_frag_frms", 0x00DC, 0x00D8, 4, 0}, + {"rmac_jabber_frms", 0x00E0, 0x00E4, 4, 0}, + {"reserved_4", 0x00E4, 0x00E0, 4, 0}, + {"rmac_ttl_64_frms", 0x00E8, 0x00E8, 8, 0}, + {"rmac_ttl_65_127_frms", 0x00F0, 0x00F0, 8, 0}, + {"reserved_5", 0x00F8, 0x00F8, 8, 0}, + {"rmac_ttl_128_255_frms", 0x0100, 0x0100, 8, 0}, + {"rmac_ttl_256_511_frms", 0x0108, 0x0108, 8, 0}, + {"reserved_6", 0x0110, 0x0110, 8, 0}, + {"rmac_ttl_512_1023_frms", 0x0118, 0x0118, 8, 0}, + {"rmac_ttl_1024_1518_frms", 0x0120, 0x0120, 8, 0}, + {"reserved_7", 0x0128, 0x012C, 4, 0}, + {"rmac_ip", 0x012C, 0x0128, 4, 0}, + {"rmac_ip_octets", 0x0130, 0x0130, 8, 0}, + {"rmac_hdr_err_ip", 0x0138, 0x013C, 4, 0}, + {"rmac_drop_ip", 0x013C, 0x0138, 4, 0}, + {"rmac_icmp", 0x0140, 0x0144, 4, 0}, + {"reserved_8", 0x0144, 0x0140, 4, 0}, + {"rmac_tcp", 0x0148, 0x0148, 8, 0}, + {"rmac_udp", 0x0150, 0x0154, 4, 0}, + {"rmac_err_drp_udp", 0x0154, 0x0150, 4, 0}, + {"rmac_xgmii_err_sym", 0x0158, 0x0158, 8, 0}, + {"rmac_frms_q0", 0x0160, 0x0160, 8, 0}, + {"rmac_frms_q1", 0x0168, 0x0168, 8, 0}, + {"rmac_frms_q2", 0x0170, 0x0170, 8, 0}, + {"rmac_frms_q3", 0x0178, 0x0178, 8, 0}, + {"rmac_frms_q4", 0x0180, 0x0180, 8, 0}, + {"rmac_frms_q5", 0x0188, 0x0188, 8, 0}, + {"rmac_frms_q6", 0x0190, 0x0190, 8, 0}, + {"rmac_frms_q7", 0x0198, 0x0198, 8, 0}, + {"rmac_full_q0", 0x01A0, 0x01A6, 2, 0}, + {"rmac_full_q1", 0x01A2, 0x01A4, 2, 0}, + {"rmac_full_q2", 0x01A4, 0x01A2, 2, 0}, + {"rmac_full_q3", 0x01A6, 0x01A0, 2, 0}, + {"rmac_full_q4", 0x01A8, 0x01AE, 2, 0}, + {"rmac_full_q5", 0x01AA, 0x01AC, 2, 0}, + {"rmac_full_q6", 0x01AB, 0x01AA, 2, 0}, + {"rmac_full_q7", 0x01AC, 0x01A8, 2, 0}, + {"rmac_pause_cnt", 0x01B0, 0x01B4, 4, 0}, + {"reserved_9", 0x01B4, 
0x01B0, 4, 0}, + {"rmac_xgmii_data_err_cnt", 0x01B8, 0x01B8, 8, 0}, + {"rmac_xgmii_ctrl_err_cnt", 0x01C0, 0x01C0, 8, 0}, + {"rmac_accepted_ip", 0x01C8, 0x01CC, 4, 0}, + {"rmac_err_tcp", 0x01CC, 0x01C8, 4, 0}, + + /* PCI/PCI-X Read transaction statistics. */ + {"rd_req_cnt", 0x01D0, 0x01D4, 4, 0}, + {"new_rd_req_cnt", 0x01D4, 0x01D0, 4, 0}, + {"new_rd_req_rtry_cnt", 0x01D8, 0x01DC, 4, 0}, + {"rd_rtry_cnt", 0x01DC, 0x01D8, 4, 0}, + {"wr_rtry_rd_ack_cnt", 0x01E0, 0x01E4, 4, 0}, + + /* PCI/PCI-X write transaction statistics. */ + {"wr_req_cnt", 0x01E4, 0x01E0, 4, 0}, + {"new_wr_req_cnt", 0x01E8, 0x01EC, 4, 0}, + {"new_wr_req_rtry_cnt", 0x01EC, 0x01E8, 4, 0}, + {"wr_rtry_cnt", 0x01F0, 0x01F4, 4, 0}, + {"wr_disc_cnt", 0x01F4, 0x01F0, 4, 0}, + {"rd_rtry_wr_ack_cnt", 0x01F8, 0x01FC, 4, 0}, + + /* DMA Transaction statistics. */ + {"txp_wr_cnt", 0x01FC, 0x01F8, 4, 0}, + {"txd_rd_cnt", 0x0200, 0x0204, 4, 0}, + {"txd_wr_cnt", 0x0204, 0x0200, 4, 0}, + {"rxd_rd_cnt", 0x0208, 0x020C, 4, 0}, + {"rxd_wr_cnt", 0x020C, 0x0208, 4, 0}, + {"txf_rd_cnt", 0x0210, 0x0214, 4, 0}, + {"rxf_wr_cnt", 0x0214, 0x0210, 4, 0}, + + /* Enhanced Herc statistics */ + {"tmac_frms_oflow", 0x0218, 0x021C, 4, 0}, + {"tmac_data_octets_oflow", 0x021C, 0x0218, 4, 0}, + {"tmac_mcst_frms_oflow", 0x0220, 0x0224, 4, 0}, + {"tmac_bcst_frms_oflow", 0x0224, 0x0220, 4, 0}, + {"tmac_ttl_octets_oflow", 0x0228, 0x022C, 4, 0}, + {"tmac_ucst_frms_oflow", 0x022C, 0x0228, 4, 0}, + {"tmac_nucst_frms_oflow", 0x0230, 0x0234, 4, 0}, + {"tmac_any_err_frms_oflow", 0x0234, 0x0230, 4, 0}, + {"tmac_vlan_frms", 0x0238, 0x0238, 8, 0}, + {"tmac_vld_ip_oflow", 0x0240, 0x0244, 4, 0}, + {"tmac_drop_ip_oflow", 0x0244, 0x0240, 4, 0}, + {"tmac_icmp_oflow", 0x0248, 0x024C, 4, 0}, + {"tmac_rst_tcp_oflow", 0x024C, 0x0248, 4, 0}, + {"tmac_udp_oflow", 0x0250, 0x0254, 4, 0}, + {"tpa_unknown_protocol", 0x0254, 0x0250, 4, 0}, + {"tpa_parse_failure", 0x0258, 0x025C, 4, 0}, + {"reserved_10", 0x025C, 0x0258, 4, 0}, + {"rmac_vld_frms_oflow", 0x0260, 0x0264, 4, 0}, + {"rmac_data_octets_oflow", 0x0264, 0x0260, 4, 0}, + {"rmac_vld_mcst_frms_oflow", 0x0268, 0x026C, 4, 0}, + {"rmac_vld_bcst_frms_oflow", 0x026C, 0x0268, 4, 0}, + {"rmac_ttl_octets_oflow", 0x0270, 0x0274, 4, 0}, + {"rmac_accepted_ucst_frms_oflow", 0x0274, 0x0270, 4, 0}, + {"rmac_accepted_nucst_frms_oflow", 0x0278, 0x027C, 4, 0}, + {"rmac_discarded_frms_oflow", 0x027C, 0x0278, 4, 0}, + {"rmac_drop_events_oflow", 0x0280, 0x0284, 4, 0}, + {"rmac_usized_frms_oflow", 0x0284, 0x0280, 4, 0}, + {"rmac_osized_frms_oflow", 0x0288, 0x028C, 4, 0}, + {"rmac_frag_frms_oflow", 0x028C, 0x0288, 4, 0}, + {"rmac_jabber_frms_oflow", 0x0290, 0x0294, 4, 0}, + {"rmac_ip_oflow", 0x0294, 0x0290, 4, 0}, + {"rmac_drop_ip_oflow", 0x0298, 0x029C, 4, 0}, + {"rmac_icmp_oflow", 0x029C, 0x0298, 4, 0}, + {"rmac_udp_oflow", 0x02A0, 0x02A4, 4, 0}, + {"rmac_err_drp_udp_oflow", 0x02A4, 0x02A0, 4, 0}, + {"rmac_pause_cnt_oflow", 0x02A8, 0x02AC, 4, 0}, + {"reserved_11", 0x02AC, 0x02A8, 4, 0}, + {"rmac_ttl_1519_4095_frms", 0x02B0, 0x02B0, 8, 0}, + {"rmac_ttl_4096_8191_frms", 0x02B8, 0x02B8, 8, 0}, + {"rmac_ttl_8192_max_frms", 0x02C0, 0x02C0, 8, 0}, + {"rmac_ttl_gt_max_frms", 0x02C8, 0x02C8, 8, 0}, + {"rmac_osized_alt_frms", 0x02D0, 0x02D0, 8, 0}, + {"rmac_jabber_alt_frms", 0x02D8, 0x02D8, 8, 0}, + {"rmac_gt_max_alt_frms", 0x02E0, 0x02E0, 8, 0}, + {"rmac_vlan_frms", 0x02E8, 0x02E8, 8, 0}, + {"rmac_fcs_discard", 0x02F0, 0x02F4, 4, 0}, + {"rmac_len_discard", 0x02F4, 0x02F0, 4, 0}, + {"rmac_da_discard", 0x02F8, 0x02FC, 4, 0}, + {"rmac_pf_discard", 0x02FC, 0x02F8, 
4, 0}, + {"rmac_rts_discard", 0x0300, 0x0304, 4, 0}, + {"rmac_wol_discard", 0x0304, 0x0300, 4, 0}, + {"rmac_red_discard", 0x0308, 0x030C, 4, 0}, + {"rmac_ingm_full_discard", 0x030C, 0x0308, 4, 0}, + {"rmac_accepted_ip_oflow", 0x0310, 0x0314, 4, 0}, + {"reserved_12", 0x0314, 0x0310, 4, 0}, + {"link_fault_cnt", 0x0318, 0x031C, 4, 0}, + {"reserved_13", 0x031C, 0x0318, 4, 0} }; -xge_pci_config_t pciconfInfo[] = -{ - {"device_id", 0x0002, 0x0002, 0}, - {"vendor_id", 0x0000, 0x0000, 0}, - {"status", 0x0006, 0x0006, 0}, - {"command", 0x0004, 0x0004, 0}, - {"pciClass", 0x000B, 0x0009, 0}, - {"revision", 0x0008, 0x0008, 0}, - {"bist", 0x000F, 0x000F, 0}, - {"header_type", 0x000E, 0x000E, 0}, - {"latency_timer", 0x000D, 0x000D, 0}, - {"cache_line_size", 0x000C, 0x000C, 0}, - {"base_addr0_lo", 0x0010, 0x0010, 0}, - {"base_addr0_hi", 0x0014, 0x0014, 0}, - {"base_addr1_lo", 0x0018, 0x0018, 0}, - {"base_addr1_hi", 0x001C, 0x001C, 0}, - {"not_Implemented1", 0x0020, 0x0020, 0}, - {"not_Implemented2", 0x0024, 0x0024, 0}, - {"cardbus_cis_pointer", 0x0028, 0x0028, 0}, - {"subsystem_id", 0x002E, 0x002E, 0}, - {"subsystem_vendor_id", 0x002C, 0x002C, 0}, - {"rom_base", 0x0030, 0x0030, 0}, - {"rsvd_35", 0x0035, 0x0035, 0}, - {"capabilities_pointer", 0x0034, 0x0034, 0}, - {"rsvd_38", 0x0038, 0x0038, 0}, - {"max_latency", 0x003F, 0x003F, 0}, - {"min_grant", 0x003E, 0x003E, 0}, - {"interrupt_pin", 0x003D, 0x003D, 0}, - {"interrupt_line", 0x003C, 0x003C, 0}, - {"msi_control", 0x0042, 0x0042, 0}, - {"msi_next_ptr", 0x0041, 0x0041, 0}, - {"msi_cap_id", 0x0040, 0x0040, 0}, - {"msi_lower_address", 0x0044, 0x0044, 0}, - {"msi_higher_address", 0x0048, 0x0048, 0}, - {"msi_unused", 0x004E, 0x004E, 0}, - {"msi_data", 0x004C, 0x004C, 0}, - {"vpd_addr", 0x0052, 0x0052, 0}, - {"vpd_next_cap", 0x0051, 0x0051, 0}, - {"vpd_cap_id", 0x0050, 0x0050, 0}, - {"vpd_data", 0x0054, 0x0054, 0}, - {"rsvd_b0", 0x0058, 0x0058, 0}, - {"pcix_command", 0x0062, 0x0062, 0}, - {"pcix_next_cap", 0x0061, 0x0061, 0}, - {"pcix_cap", 0x0060, 0x0060, 0}, - {"pcix_status", 0x0064, 0x0064, 0} +xge_pci_config_t pciconfInfo[] = { + {"device_id", 0x0002, 0x0002, 0}, + {"vendor_id", 0x0000, 0x0000, 0}, + {"status", 0x0006, 0x0006, 0}, + {"command", 0x0004, 0x0004, 0}, + {"pciClass", 0x000B, 0x0009, 0}, + {"revision", 0x0008, 0x0008, 0}, + {"bist", 0x000F, 0x000F, 0}, + {"header_type", 0x000E, 0x000E, 0}, + {"latency_timer", 0x000D, 0x000D, 0}, + {"cache_line_size", 0x000C, 0x000C, 0}, + {"base_addr0_lo", 0x0010, 0x0010, 0}, + {"base_addr0_hi", 0x0014, 0x0014, 0}, + {"base_addr1_lo", 0x0018, 0x0018, 0}, + {"base_addr1_hi", 0x001C, 0x001C, 0}, + {"not_Implemented1", 0x0020, 0x0020, 0}, + {"not_Implemented2", 0x0024, 0x0024, 0}, + {"cardbus_cis_pointer", 0x0028, 0x0028, 0}, + {"subsystem_id", 0x002E, 0x002E, 0}, + {"subsystem_vendor_id", 0x002C, 0x002C, 0}, + {"rom_base", 0x0030, 0x0030, 0}, + {"rsvd_35", 0x0035, 0x0035, 0}, + {"capabilities_pointer", 0x0034, 0x0034, 0}, + {"rsvd_38", 0x0038, 0x0038, 0}, + {"max_latency", 0x003F, 0x003F, 0}, + {"min_grant", 0x003E, 0x003E, 0}, + {"interrupt_pin", 0x003D, 0x003D, 0}, + {"interrupt_line", 0x003C, 0x003C, 0}, + {"msi_control", 0x0042, 0x0042, 0}, + {"msi_next_ptr", 0x0041, 0x0041, 0}, + {"msi_cap_id", 0x0040, 0x0040, 0}, + {"msi_lower_address", 0x0044, 0x0044, 0}, + {"msi_higher_address", 0x0048, 0x0048, 0}, + {"msi_unused", 0x004E, 0x004E, 0}, + {"msi_data", 0x004C, 0x004C, 0}, + {"vpd_addr", 0x0052, 0x0052, 0}, + {"vpd_next_cap", 0x0051, 0x0051, 0}, + {"vpd_cap_id", 0x0050, 0x0050, 0}, + {"vpd_data", 0x0054, 
0x0054, 0}, + {"rsvd_b0", 0x0058, 0x0058, 0}, + {"pcix_command", 0x0062, 0x0062, 0}, + {"pcix_next_cap", 0x0061, 0x0061, 0}, + {"pcix_cap", 0x0060, 0x0060, 0}, + {"pcix_status", 0x0064, 0x0064, 0} }; xge_device_config_t devconfInfo[] = { - {"mtu", 0}, - {"isr_polling_cnt", 0}, - {"latency_timer", 0}, - {"napi_weight", 0}, - {"max_splits_trans", 0}, - {"mmrb_count", 0}, - {"shared_splits", 0}, - {"stats_refresh_time_sec", 0}, - {"pci_freq_mherz", 0}, - {"intr_mode", 0}, - {"sched_timer_us", 0}, - {"sched_timer_one_shot", 0}, - - {"ring_memblock_size", 0}, - {"ring_scatter_mode", 0}, - {"ring_strip_vlan_tag", 0}, - - {"ring_queue0_max", 0}, - {"ring_queue0_initial", 0}, - {"ring_queue0_buffer_mode", 0}, - {"ring_queue0_dram_size_mb", 0}, - {"ring_queue0_intr_vector", 0}, - {"ring_queue0_backoff_interval_us", 0}, - {"ring_queue0_max_frm_len", 0}, - {"ring_queue0_priority", 0}, - {"ring_queue0_no_snoop_bits", 0}, - {"ring_queue0_indicate_max_pkts", 0}, - {"ring_queue0_configured", 0}, - {"ring_queue0_rts_mac_en", 0}, - {"ring_queue0_rth_en", 0}, - {"ring_queue0_rts_port_en", 0}, - {"ring_queue0_rts_port0_num", 0}, - {"ring_queue0_rts_port0_udp", 0}, - {"ring_queue0_rts_port0_src", 0}, - {"ring_queue0_rts_port1_num", 0}, - {"ring_queue0_rts_port1_udp", 0}, - {"ring_queue0_rts_port1_src", 0}, - {"ring_queue0_rts_port2_num", 0}, - {"ring_queue0_rts_port2_udp", 0}, - {"ring_queue0_rts_port2_src", 0}, - {"ring_queue0_rts_port3_num", 0}, - {"ring_queue0_rts_port3_udp", 0}, - {"ring_queue0_rts_port3_src", 0}, - {"ring_queue0_rts_port4_num", 0}, - {"ring_queue0_rts_port4_udp", 0}, - {"ring_queue0_rts_port4_src", 0}, - {"ring_queue0_rts_port5_num", 0}, - {"ring_queue0_rts_port5_udp", 0}, - {"ring_queue0_rts_port5_src", 0}, - {"ring_queue0_rts_port6_num", 0}, - {"ring_queue0_rts_port6_udp", 0}, - {"ring_queue0_rts_port6_src", 0}, - {"ring_queue0_rts_port7_num", 0}, - {"ring_queue0_rts_port7_udp", 0}, - {"ring_queue0_rts_port7_src", 0}, - {"ring_queue0_rts_port8_num", 0}, - {"ring_queue0_rts_port8_udp", 0}, - {"ring_queue0_rts_port8_src", 0}, - {"ring_queue0_rts_port9_num", 0}, - {"ring_queue0_rts_port9_udp", 0}, - {"ring_queue0_rts_port9_src", 0}, - {"ring_queue0_rts_port10_num", 0}, - {"ring_queue0_rts_port10_udp", 0}, - {"ring_queue0_rts_port10_src", 0}, - {"ring_queue0_rts_port11_num", 0}, - {"ring_queue0_rts_port11_udp", 0}, - {"ring_queue0_rts_port11_src", 0}, - {"ring_queue0_rts_port12_num", 0}, - {"ring_queue0_rts_port12_udp", 0}, - {"ring_queue0_rts_port12_src", 0}, - {"ring_queue0_rts_port13_num", 0}, - {"ring_queue0_rts_port13_udp", 0}, - {"ring_queue0_rts_port13_src", 0}, - {"ring_queue0_rts_port14_num", 0}, - {"ring_queue0_rts_port14_udp", 0}, - {"ring_queue0_rts_port14_src", 0}, - {"ring_queue0_rts_port15_num", 0}, - {"ring_queue0_rts_port15_udp", 0}, - {"ring_queue0_rts_port15_src", 0}, - {"ring_queue0_rts_port16_num", 0}, - {"ring_queue0_rts_port16_udp", 0}, - {"ring_queue0_rts_port16_src", 0}, - {"ring_queue0_rts_port17_num", 0}, - {"ring_queue0_rts_port17_udp", 0}, - {"ring_queue0_rts_port17_src", 0}, - {"ring_queue0_rts_port18_num", 0}, - {"ring_queue0_rts_port18_udp", 0}, - {"ring_queue0_rts_port18_src", 0}, - {"ring_queue0_rts_port19_num", 0}, - {"ring_queue0_rts_port19_udp", 0}, - {"ring_queue0_rts_port19_src", 0}, - {"ring_queue0_rts_port20_num", 0}, - {"ring_queue0_rts_port20_udp", 0}, - {"ring_queue0_rts_port20_src", 0}, - {"ring_queue0_rts_port21_num", 0}, - {"ring_queue0_rts_port21_udp", 0}, - {"ring_queue0_rts_port21_src", 0}, - {"ring_queue0_rts_port22_num", 0}, - 
{"ring_queue0_rts_port22_udp", 0}, - {"ring_queue0_rts_port22_src", 0}, - {"ring_queue0_rts_port23_num", 0}, - {"ring_queue0_rts_port23_udp", 0}, - {"ring_queue0_rts_port23_src", 0}, - {"ring_queue0_rts_port24_num", 0}, - {"ring_queue0_rts_port24_udp", 0}, - {"ring_queue0_rts_port24_src", 0}, - {"ring_queue0_rts_port25_num", 0}, - {"ring_queue0_rts_port25_udp", 0}, - {"ring_queue0_rts_port25_src", 0}, - {"ring_queue0_rts_port26_num", 0}, - {"ring_queue0_rts_port26_udp", 0}, - {"ring_queue0_rts_port26_src", 0}, - {"ring_queue0_rts_port27_num", 0}, - {"ring_queue0_rts_port27_udp", 0}, - {"ring_queue0_rts_port27_src", 0}, - {"ring_queue0_rts_port28_num", 0}, - {"ring_queue0_rts_port28_udp", 0}, - {"ring_queue0_rts_port28_src", 0}, - {"ring_queue0_rts_port29_num", 0}, - {"ring_queue0_rts_port29_udp", 0}, - {"ring_queue0_rts_port29_src", 0}, - {"ring_queue0_rts_port30_num", 0}, - {"ring_queue0_rts_port30_udp", 0}, - {"ring_queue0_rts_port30_src", 0}, - {"ring_queue0_rts_port31_num", 0}, - {"ring_queue0_rts_port31_udp", 0}, - {"ring_queue0_rts_port31_src", 0}, - {"ring_queue0_rti_urange_a", 0}, - {"ring_queue0_rti_ufc_a", 0}, - {"ring_queue0_rti_urange_b", 0}, - {"ring_queue0_rti_ufc_b", 0}, - {"ring_queue0_rti_urange_c", 0}, - {"ring_queue0_rti_ufc_c", 0}, - {"ring_queue0_rti_ufc_d", 0}, - {"ring_queue0_rti_timer_ac_en", 0}, - {"ring_queue0_rti_timer_val_us", 0}, - - {"ring_queue1_max", 0}, - {"ring_queue1_initial", 0}, - {"ring_queue1_buffer_mode", 0}, - {"ring_queue1_dram_size_mb", 0}, - {"ring_queue1_intr_vector", 0}, - {"ring_queue1_backoff_interval_us", 0}, - {"ring_queue1_max_frm_len", 0}, - {"ring_queue1_priority", 0}, - {"ring_queue1_no_snoop_bits", 0}, - {"ring_queue1_indicate_max_pkts", 0}, - {"ring_queue1_configured", 0}, - {"ring_queue1_rts_mac_en", 0}, - {"ring_queue1_rth_en", 0}, - {"ring_queue1_rts_port_en", 0}, - {"ring_queue1_rts_port0_num", 0}, - {"ring_queue1_rts_port0_udp", 0}, - {"ring_queue1_rts_port0_src", 0}, - {"ring_queue1_rts_port1_num", 0}, - {"ring_queue1_rts_port1_udp", 0}, - {"ring_queue1_rts_port1_src", 0}, - {"ring_queue1_rts_port2_num", 0}, - {"ring_queue1_rts_port2_udp", 0}, - {"ring_queue1_rts_port2_src", 0}, - {"ring_queue1_rts_port3_num", 0}, - {"ring_queue1_rts_port3_udp", 0}, - {"ring_queue1_rts_port3_src", 0}, - {"ring_queue1_rts_port4_num", 0}, - {"ring_queue1_rts_port4_udp", 0}, - {"ring_queue1_rts_port4_src", 0}, - {"ring_queue1_rts_port5_num", 0}, - {"ring_queue1_rts_port5_udp", 0}, - {"ring_queue1_rts_port5_src", 0}, - {"ring_queue1_rts_port6_num", 0}, - {"ring_queue1_rts_port6_udp", 0}, - {"ring_queue1_rts_port6_src", 0}, - {"ring_queue1_rts_port7_num", 0}, - {"ring_queue1_rts_port7_udp", 0}, - {"ring_queue1_rts_port7_src", 0}, - {"ring_queue1_rts_port8_num", 0}, - {"ring_queue1_rts_port8_udp", 0}, - {"ring_queue1_rts_port8_src", 0}, - {"ring_queue1_rts_port9_num", 0}, - {"ring_queue1_rts_port9_udp", 0}, - {"ring_queue1_rts_port9_src", 0}, - {"ring_queue1_rts_port10_num", 0}, - {"ring_queue1_rts_port10_udp", 0}, - {"ring_queue1_rts_port10_src", 0}, - {"ring_queue1_rts_port11_num", 0}, - {"ring_queue1_rts_port11_udp", 0}, - {"ring_queue1_rts_port11_src", 0}, - {"ring_queue1_rts_port12_num", 0}, - {"ring_queue1_rts_port12_udp", 0}, - {"ring_queue1_rts_port12_src", 0}, - {"ring_queue1_rts_port13_num", 0}, - {"ring_queue1_rts_port13_udp", 0}, - {"ring_queue1_rts_port13_src", 0}, - {"ring_queue1_rts_port14_num", 0}, - {"ring_queue1_rts_port14_udp", 0}, - {"ring_queue1_rts_port14_src", 0}, - {"ring_queue1_rts_port15_num", 0}, - 
{"ring_queue1_rts_port15_udp", 0}, - {"ring_queue1_rts_port15_src", 0}, - {"ring_queue1_rts_port16_num", 0}, - {"ring_queue1_rts_port16_udp", 0}, - {"ring_queue1_rts_port16_src", 0}, - {"ring_queue1_rts_port17_num", 0}, - {"ring_queue1_rts_port17_udp", 0}, - {"ring_queue1_rts_port17_src", 0}, - {"ring_queue1_rts_port18_num", 0}, - {"ring_queue1_rts_port18_udp", 0}, - {"ring_queue1_rts_port18_src", 0}, - {"ring_queue1_rts_port19_num", 0}, - {"ring_queue1_rts_port19_udp", 0}, - {"ring_queue1_rts_port19_src", 0}, - {"ring_queue1_rts_port20_num", 0}, - {"ring_queue1_rts_port20_udp", 0}, - {"ring_queue1_rts_port20_src", 0}, - {"ring_queue1_rts_port21_num", 0}, - {"ring_queue1_rts_port21_udp", 0}, - {"ring_queue1_rts_port21_src", 0}, - {"ring_queue1_rts_port22_num", 0}, - {"ring_queue1_rts_port22_udp", 0}, - {"ring_queue1_rts_port22_src", 0}, - {"ring_queue1_rts_port23_num", 0}, - {"ring_queue1_rts_port23_udp", 0}, - {"ring_queue1_rts_port23_src", 0}, - {"ring_queue1_rts_port24_num", 0}, - {"ring_queue1_rts_port24_udp", 0}, - {"ring_queue1_rts_port24_src", 0}, - {"ring_queue1_rts_port25_num", 0}, - {"ring_queue1_rts_port25_udp", 0}, - {"ring_queue1_rts_port25_src", 0}, - {"ring_queue1_rts_port26_num", 0}, - {"ring_queue1_rts_port26_udp", 0}, - {"ring_queue1_rts_port26_src", 0}, - {"ring_queue1_rts_port27_num", 0}, - {"ring_queue1_rts_port27_udp", 0}, - {"ring_queue1_rts_port27_src", 0}, - {"ring_queue1_rts_port28_num", 0}, - {"ring_queue1_rts_port28_udp", 0}, - {"ring_queue1_rts_port28_src", 0}, - {"ring_queue1_rts_port29_num", 0}, - {"ring_queue1_rts_port29_udp", 0}, - {"ring_queue1_rts_port29_src", 0}, - {"ring_queue1_rts_port30_num", 0}, - {"ring_queue1_rts_port30_udp", 0}, - {"ring_queue1_rts_port30_src", 0}, - {"ring_queue1_rts_port31_num", 0}, - {"ring_queue1_rts_port31_udp", 0}, - {"ring_queue1_rts_port31_src", 0}, - {"ring_queue1_rti_urange_a", 0}, - {"ring_queue1_rti_ufc_a", 0}, - {"ring_queue1_rti_urange_b", 0}, - {"ring_queue1_rti_ufc_b", 0}, - {"ring_queue1_rti_urange_c", 0}, - {"ring_queue1_rti_ufc_c", 0}, - {"ring_queue1_rti_ufc_d", 0}, - {"ring_queue1_rti_timer_ac_en", 0}, - {"ring_queue1_rti_timer_val_us", 0}, - - {"ring_queue2_max", 0}, - {"ring_queue2_initial", 0}, - {"ring_queue2_buffer_mode", 0}, - {"ring_queue2_dram_size_mb", 0}, - {"ring_queue2_intr_vector", 0}, - {"ring_queue2_backoff_interval_us", 0}, - {"ring_queue2_max_frm_len", 0}, - {"ring_queue2_priority", 0}, - {"ring_queue2_no_snoop_bits", 0}, - {"ring_queue2_indicate_max_pkts", 0}, - {"ring_queue2_configured", 0}, - {"ring_queue2_rts_mac_en", 0}, - {"ring_queue2_rth_en", 0}, - {"ring_queue2_rts_port_en", 0}, - {"ring_queue2_rts_port0_num", 0}, - {"ring_queue2_rts_port0_udp", 0}, - {"ring_queue2_rts_port0_src", 0}, - {"ring_queue2_rts_port1_num", 0}, - {"ring_queue2_rts_port1_udp", 0}, - {"ring_queue2_rts_port1_src", 0}, - {"ring_queue2_rts_port2_num", 0}, - {"ring_queue2_rts_port2_udp", 0}, - {"ring_queue2_rts_port2_src", 0}, - {"ring_queue2_rts_port3_num", 0}, - {"ring_queue2_rts_port3_udp", 0}, - {"ring_queue2_rts_port3_src", 0}, - {"ring_queue2_rts_port4_num", 0}, - {"ring_queue2_rts_port4_udp", 0}, - {"ring_queue2_rts_port4_src", 0}, - {"ring_queue2_rts_port5_num", 0}, - {"ring_queue2_rts_port5_udp", 0}, - {"ring_queue2_rts_port5_src", 0}, - {"ring_queue2_rts_port6_num", 0}, - {"ring_queue2_rts_port6_udp", 0}, - {"ring_queue2_rts_port6_src", 0}, - {"ring_queue2_rts_port7_num", 0}, - {"ring_queue2_rts_port7_udp", 0}, - {"ring_queue2_rts_port7_src", 0}, - {"ring_queue2_rts_port8_num", 0}, - 
{"ring_queue2_rts_port8_udp", 0}, - {"ring_queue2_rts_port8_src", 0}, - {"ring_queue2_rts_port9_num", 0}, - {"ring_queue2_rts_port9_udp", 0}, - {"ring_queue2_rts_port9_src", 0}, - {"ring_queue2_rts_port10_num", 0}, - {"ring_queue2_rts_port10_udp", 0}, - {"ring_queue2_rts_port10_src", 0}, - {"ring_queue2_rts_port11_num", 0}, - {"ring_queue2_rts_port11_udp", 0}, - {"ring_queue2_rts_port11_src", 0}, - {"ring_queue2_rts_port12_num", 0}, - {"ring_queue2_rts_port12_udp", 0}, - {"ring_queue2_rts_port12_src", 0}, - {"ring_queue2_rts_port13_num", 0}, - {"ring_queue2_rts_port13_udp", 0}, - {"ring_queue2_rts_port13_src", 0}, - {"ring_queue2_rts_port14_num", 0}, - {"ring_queue2_rts_port14_udp", 0}, - {"ring_queue2_rts_port14_src", 0}, - {"ring_queue2_rts_port15_num", 0}, - {"ring_queue2_rts_port15_udp", 0}, - {"ring_queue2_rts_port15_src", 0}, - {"ring_queue2_rts_port16_num", 0}, - {"ring_queue2_rts_port16_udp", 0}, - {"ring_queue2_rts_port16_src", 0}, - {"ring_queue2_rts_port17_num", 0}, - {"ring_queue2_rts_port17_udp", 0}, - {"ring_queue2_rts_port17_src", 0}, - {"ring_queue2_rts_port18_num", 0}, - {"ring_queue2_rts_port18_udp", 0}, - {"ring_queue2_rts_port18_src", 0}, - {"ring_queue2_rts_port19_num", 0}, - {"ring_queue2_rts_port19_udp", 0}, - {"ring_queue2_rts_port19_src", 0}, - {"ring_queue2_rts_port20_num", 0}, - {"ring_queue2_rts_port20_udp", 0}, - {"ring_queue2_rts_port20_src", 0}, - {"ring_queue2_rts_port21_num", 0}, - {"ring_queue2_rts_port21_udp", 0}, - {"ring_queue2_rts_port21_src", 0}, - {"ring_queue2_rts_port22_num", 0}, - {"ring_queue2_rts_port22_udp", 0}, - {"ring_queue2_rts_port22_src", 0}, - {"ring_queue2_rts_port23_num", 0}, - {"ring_queue2_rts_port23_udp", 0}, - {"ring_queue2_rts_port23_src", 0}, - {"ring_queue2_rts_port24_num", 0}, - {"ring_queue2_rts_port24_udp", 0}, - {"ring_queue2_rts_port24_src", 0}, - {"ring_queue2_rts_port25_num", 0}, - {"ring_queue2_rts_port25_udp", 0}, - {"ring_queue2_rts_port25_src", 0}, - {"ring_queue2_rts_port26_num", 0}, - {"ring_queue2_rts_port26_udp", 0}, - {"ring_queue2_rts_port26_src", 0}, - {"ring_queue2_rts_port27_num", 0}, - {"ring_queue2_rts_port27_udp", 0}, - {"ring_queue2_rts_port27_src", 0}, - {"ring_queue2_rts_port28_num", 0}, - {"ring_queue2_rts_port28_udp", 0}, - {"ring_queue2_rts_port28_src", 0}, - {"ring_queue2_rts_port29_num", 0}, - {"ring_queue2_rts_port29_udp", 0}, - {"ring_queue2_rts_port29_src", 0}, - {"ring_queue2_rts_port30_num", 0}, - {"ring_queue2_rts_port30_udp", 0}, - {"ring_queue2_rts_port30_src", 0}, - {"ring_queue2_rts_port31_num", 0}, - {"ring_queue2_rts_port31_udp", 0}, - {"ring_queue2_rts_port31_src", 0}, - {"ring_queue2_rti_urange_a", 0}, - {"ring_queue2_rti_ufc_a", 0}, - {"ring_queue2_rti_urange_b", 0}, - {"ring_queue2_rti_ufc_b", 0}, - {"ring_queue2_rti_urange_c", 0}, - {"ring_queue2_rti_ufc_c", 0}, - {"ring_queue2_rti_ufc_d", 0}, - {"ring_queue2_rti_timer_ac_en", 0}, - {"ring_queue2_rti_timer_val_us", 0}, - - {"ring_queue3_max", 0}, - {"ring_queue3_initial", 0}, - {"ring_queue3_buffer_mode", 0}, - {"ring_queue3_dram_size_mb", 0}, - {"ring_queue3_intr_vector", 0}, - {"ring_queue3_backoff_interval_us", 0}, - {"ring_queue3_max_frm_len", 0}, - {"ring_queue3_priority", 0}, - {"ring_queue3_no_snoop_bits", 0}, - {"ring_queue3_indicate_max_pkts", 0}, - {"ring_queue3_configured", 0}, - {"ring_queue3_rts_mac_en", 0}, - {"ring_queue3_rth_en", 0}, - {"ring_queue3_rts_port_en", 0}, - {"ring_queue3_rts_port0_num", 0}, - {"ring_queue3_rts_port0_udp", 0}, - {"ring_queue3_rts_port0_src", 0}, - {"ring_queue3_rts_port1_num", 0}, - 
{"ring_queue3_rts_port1_udp", 0}, - {"ring_queue3_rts_port1_src", 0}, - {"ring_queue3_rts_port2_num", 0}, - {"ring_queue3_rts_port2_udp", 0}, - {"ring_queue3_rts_port2_src", 0}, - {"ring_queue3_rts_port3_num", 0}, - {"ring_queue3_rts_port3_udp", 0}, - {"ring_queue3_rts_port3_src", 0}, - {"ring_queue3_rts_port4_num", 0}, - {"ring_queue3_rts_port4_udp", 0}, - {"ring_queue3_rts_port4_src", 0}, - {"ring_queue3_rts_port5_num", 0}, - {"ring_queue3_rts_port5_udp", 0}, - {"ring_queue3_rts_port5_src", 0}, - {"ring_queue3_rts_port6_num", 0}, - {"ring_queue3_rts_port6_udp", 0}, - {"ring_queue3_rts_port6_src", 0}, - {"ring_queue3_rts_port7_num", 0}, - {"ring_queue3_rts_port7_udp", 0}, - {"ring_queue3_rts_port7_src", 0}, - {"ring_queue3_rts_port8_num", 0}, - {"ring_queue3_rts_port8_udp", 0}, - {"ring_queue3_rts_port8_src", 0}, - {"ring_queue3_rts_port9_num", 0}, - {"ring_queue3_rts_port9_udp", 0}, - {"ring_queue3_rts_port9_src", 0}, - {"ring_queue3_rts_port10_num", 0}, - {"ring_queue3_rts_port10_udp", 0}, - {"ring_queue3_rts_port10_src", 0}, - {"ring_queue3_rts_port11_num", 0}, - {"ring_queue3_rts_port11_udp", 0}, - {"ring_queue3_rts_port11_src", 0}, - {"ring_queue3_rts_port12_num", 0}, - {"ring_queue3_rts_port12_udp", 0}, - {"ring_queue3_rts_port12_src", 0}, - {"ring_queue3_rts_port13_num", 0}, - {"ring_queue3_rts_port13_udp", 0}, - {"ring_queue3_rts_port13_src", 0}, - {"ring_queue3_rts_port14_num", 0}, - {"ring_queue3_rts_port14_udp", 0}, - {"ring_queue3_rts_port14_src", 0}, - {"ring_queue3_rts_port15_num", 0}, - {"ring_queue3_rts_port15_udp", 0}, - {"ring_queue3_rts_port15_src", 0}, - {"ring_queue3_rts_port16_num", 0}, - {"ring_queue3_rts_port16_udp", 0}, - {"ring_queue3_rts_port16_src", 0}, - {"ring_queue3_rts_port17_num", 0}, - {"ring_queue3_rts_port17_udp", 0}, - {"ring_queue3_rts_port17_src", 0}, - {"ring_queue3_rts_port18_num", 0}, - {"ring_queue3_rts_port18_udp", 0}, - {"ring_queue3_rts_port18_src", 0}, - {"ring_queue3_rts_port19_num", 0}, - {"ring_queue3_rts_port19_udp", 0}, - {"ring_queue3_rts_port19_src", 0}, - {"ring_queue3_rts_port20_num", 0}, - {"ring_queue3_rts_port20_udp", 0}, - {"ring_queue3_rts_port20_src", 0}, - {"ring_queue3_rts_port21_num", 0}, - {"ring_queue3_rts_port21_udp", 0}, - {"ring_queue3_rts_port21_src", 0}, - {"ring_queue3_rts_port22_num", 0}, - {"ring_queue3_rts_port22_udp", 0}, - {"ring_queue3_rts_port22_src", 0}, - {"ring_queue3_rts_port23_num", 0}, - {"ring_queue3_rts_port23_udp", 0}, - {"ring_queue3_rts_port23_src", 0}, - {"ring_queue3_rts_port24_num", 0}, - {"ring_queue3_rts_port24_udp", 0}, - {"ring_queue3_rts_port24_src", 0}, - {"ring_queue3_rts_port25_num", 0}, - {"ring_queue3_rts_port25_udp", 0}, - {"ring_queue3_rts_port25_src", 0}, - {"ring_queue3_rts_port26_num", 0}, - {"ring_queue3_rts_port26_udp", 0}, - {"ring_queue3_rts_port26_src", 0}, - {"ring_queue3_rts_port27_num", 0}, - {"ring_queue3_rts_port27_udp", 0}, - {"ring_queue3_rts_port27_src", 0}, - {"ring_queue3_rts_port28_num", 0}, - {"ring_queue3_rts_port28_udp", 0}, - {"ring_queue3_rts_port28_src", 0}, - {"ring_queue3_rts_port29_num", 0}, - {"ring_queue3_rts_port29_udp", 0}, - {"ring_queue3_rts_port29_src", 0}, - {"ring_queue3_rts_port30_num", 0}, - {"ring_queue3_rts_port30_udp", 0}, - {"ring_queue3_rts_port30_src", 0}, - {"ring_queue3_rts_port31_num", 0}, - {"ring_queue3_rts_port31_udp", 0}, - {"ring_queue3_rts_port31_src", 0}, - {"ring_queue3_rti_urange_a", 0}, - {"ring_queue3_rti_ufc_a", 0}, - {"ring_queue3_rti_urange_b", 0}, - {"ring_queue3_rti_ufc_b", 0}, - {"ring_queue3_rti_urange_c", 0}, - 
{"ring_queue3_rti_ufc_c", 0}, - {"ring_queue3_rti_ufc_d", 0}, - {"ring_queue3_rti_timer_ac_en", 0}, - {"ring_queue3_rti_timer_val_us", 0}, - - {"ring_queue4_max", 0}, - {"ring_queue4_initial", 0}, - {"ring_queue4_buffer_mode", 0}, - {"ring_queue4_dram_size_mb", 0}, - {"ring_queue4_intr_vector", 0}, - {"ring_queue4_backoff_interval_us", 0}, - {"ring_queue4_max_frm_len", 0}, - {"ring_queue4_priority", 0}, - {"ring_queue4_no_snoop_bits", 0}, - {"ring_queue4_indicate_max_pkts", 0}, - {"ring_queue4_configured", 0}, - {"ring_queue4_rts_mac_en", 0}, - {"ring_queue4_rth_en", 0}, - {"ring_queue4_rts_port_en", 0}, - {"ring_queue4_rts_port0_num", 0}, - {"ring_queue4_rts_port0_udp", 0}, - {"ring_queue4_rts_port0_src", 0}, - {"ring_queue4_rts_port1_num", 0}, - {"ring_queue4_rts_port1_udp", 0}, - {"ring_queue4_rts_port1_src", 0}, - {"ring_queue4_rts_port2_num", 0}, - {"ring_queue4_rts_port2_udp", 0}, - {"ring_queue4_rts_port2_src", 0}, - {"ring_queue4_rts_port3_num", 0}, - {"ring_queue4_rts_port3_udp", 0}, - {"ring_queue4_rts_port3_src", 0}, - {"ring_queue4_rts_port4_num", 0}, - {"ring_queue4_rts_port4_udp", 0}, - {"ring_queue4_rts_port4_src", 0}, - {"ring_queue4_rts_port5_num", 0}, - {"ring_queue4_rts_port5_udp", 0}, - {"ring_queue4_rts_port5_src", 0}, - {"ring_queue4_rts_port6_num", 0}, - {"ring_queue4_rts_port6_udp", 0}, - {"ring_queue4_rts_port6_src", 0}, - {"ring_queue4_rts_port7_num", 0}, - {"ring_queue4_rts_port7_udp", 0}, - {"ring_queue4_rts_port7_src", 0}, - {"ring_queue4_rts_port8_num", 0}, - {"ring_queue4_rts_port8_udp", 0}, - {"ring_queue4_rts_port8_src", 0}, - {"ring_queue4_rts_port9_num", 0}, - {"ring_queue4_rts_port9_udp", 0}, - {"ring_queue4_rts_port9_src", 0}, - {"ring_queue4_rts_port10_num", 0}, - {"ring_queue4_rts_port10_udp", 0}, - {"ring_queue4_rts_port10_src", 0}, - {"ring_queue4_rts_port11_num", 0}, - {"ring_queue4_rts_port11_udp", 0}, - {"ring_queue4_rts_port11_src", 0}, - {"ring_queue4_rts_port12_num", 0}, - {"ring_queue4_rts_port12_udp", 0}, - {"ring_queue4_rts_port12_src", 0}, - {"ring_queue4_rts_port13_num", 0}, - {"ring_queue4_rts_port13_udp", 0}, - {"ring_queue4_rts_port13_src", 0}, - {"ring_queue4_rts_port14_num", 0}, - {"ring_queue4_rts_port14_udp", 0}, - {"ring_queue4_rts_port14_src", 0}, - {"ring_queue4_rts_port15_num", 0}, - {"ring_queue4_rts_port15_udp", 0}, - {"ring_queue4_rts_port15_src", 0}, - {"ring_queue4_rts_port16_num", 0}, - {"ring_queue4_rts_port16_udp", 0}, - {"ring_queue4_rts_port16_src", 0}, - {"ring_queue4_rts_port17_num", 0}, - {"ring_queue4_rts_port17_udp", 0}, - {"ring_queue4_rts_port17_src", 0}, - {"ring_queue4_rts_port18_num", 0}, - {"ring_queue4_rts_port18_udp", 0}, - {"ring_queue4_rts_port18_src", 0}, - {"ring_queue4_rts_port19_num", 0}, - {"ring_queue4_rts_port19_udp", 0}, - {"ring_queue4_rts_port19_src", 0}, - {"ring_queue4_rts_port20_num", 0}, - {"ring_queue4_rts_port20_udp", 0}, - {"ring_queue4_rts_port20_src", 0}, - {"ring_queue4_rts_port21_num", 0}, - {"ring_queue4_rts_port21_udp", 0}, - {"ring_queue4_rts_port21_src", 0}, - {"ring_queue4_rts_port22_num", 0}, - {"ring_queue4_rts_port22_udp", 0}, - {"ring_queue4_rts_port22_src", 0}, - {"ring_queue4_rts_port23_num", 0}, - {"ring_queue4_rts_port23_udp", 0}, - {"ring_queue4_rts_port23_src", 0}, - {"ring_queue4_rts_port24_num", 0}, - {"ring_queue4_rts_port24_udp", 0}, - {"ring_queue4_rts_port24_src", 0}, - {"ring_queue4_rts_port25_num", 0}, - {"ring_queue4_rts_port25_udp", 0}, - {"ring_queue4_rts_port25_src", 0}, - {"ring_queue4_rts_port26_num", 0}, - {"ring_queue4_rts_port26_udp", 0}, - 
{"ring_queue4_rts_port26_src", 0}, - {"ring_queue4_rts_port27_num", 0}, - {"ring_queue4_rts_port27_udp", 0}, - {"ring_queue4_rts_port27_src", 0}, - {"ring_queue4_rts_port28_num", 0}, - {"ring_queue4_rts_port28_udp", 0}, - {"ring_queue4_rts_port28_src", 0}, - {"ring_queue4_rts_port29_num", 0}, - {"ring_queue4_rts_port29_udp", 0}, - {"ring_queue4_rts_port29_src", 0}, - {"ring_queue4_rts_port30_num", 0}, - {"ring_queue4_rts_port30_udp", 0}, - {"ring_queue4_rts_port30_src", 0}, - {"ring_queue4_rts_port31_num", 0}, - {"ring_queue4_rts_port31_udp", 0}, - {"ring_queue4_rts_port31_src", 0}, - {"ring_queue4_rti_urange_a", 0}, - {"ring_queue4_rti_ufc_a", 0}, - {"ring_queue4_rti_urange_b", 0}, - {"ring_queue4_rti_ufc_b", 0}, - {"ring_queue4_rti_urange_c", 0}, - {"ring_queue4_rti_ufc_c", 0}, - {"ring_queue4_rti_ufc_d", 0}, - {"ring_queue4_rti_timer_ac_en", 0}, - {"ring_queue4_rti_timer_val_us", 0}, - - {"ring_queue5_max", 0}, - {"ring_queue5_initial", 0}, - {"ring_queue5_buffer_mode", 0}, - {"ring_queue5_dram_size_mb", 0}, - {"ring_queue5_intr_vector", 0}, - {"ring_queue5_backoff_interval_us", 0}, - {"ring_queue5_max_frm_len", 0}, - {"ring_queue5_priority", 0}, - {"ring_queue5_no_snoop_bits", 0}, - {"ring_queue5_indicate_max_pkts", 0}, - {"ring_queue5_configured", 0}, - {"ring_queue5_rts_mac_en", 0}, - {"ring_queue5_rth_en", 0}, - {"ring_queue5_rts_port_en", 0}, - {"ring_queue5_rts_port0_num", 0}, - {"ring_queue5_rts_port0_udp", 0}, - {"ring_queue5_rts_port0_src", 0}, - {"ring_queue5_rts_port1_num", 0}, - {"ring_queue5_rts_port1_udp", 0}, - {"ring_queue5_rts_port1_src", 0}, - {"ring_queue5_rts_port2_num", 0}, - {"ring_queue5_rts_port2_udp", 0}, - {"ring_queue5_rts_port2_src", 0}, - {"ring_queue5_rts_port3_num", 0}, - {"ring_queue5_rts_port3_udp", 0}, - {"ring_queue5_rts_port3_src", 0}, - {"ring_queue5_rts_port4_num", 0}, - {"ring_queue5_rts_port4_udp", 0}, - {"ring_queue5_rts_port4_src", 0}, - {"ring_queue5_rts_port5_num", 0}, - {"ring_queue5_rts_port5_udp", 0}, - {"ring_queue5_rts_port5_src", 0}, - {"ring_queue5_rts_port6_num", 0}, - {"ring_queue5_rts_port6_udp", 0}, - {"ring_queue5_rts_port6_src", 0}, - {"ring_queue5_rts_port7_num", 0}, - {"ring_queue5_rts_port7_udp", 0}, - {"ring_queue5_rts_port7_src", 0}, - {"ring_queue5_rts_port8_num", 0}, - {"ring_queue5_rts_port8_udp", 0}, - {"ring_queue5_rts_port8_src", 0}, - {"ring_queue5_rts_port9_num", 0}, - {"ring_queue5_rts_port9_udp", 0}, - {"ring_queue5_rts_port9_src", 0}, - {"ring_queue5_rts_port10_num", 0}, - {"ring_queue5_rts_port10_udp", 0}, - {"ring_queue5_rts_port10_src", 0}, - {"ring_queue5_rts_port11_num", 0}, - {"ring_queue5_rts_port11_udp", 0}, - {"ring_queue5_rts_port11_src", 0}, - {"ring_queue5_rts_port12_num", 0}, - {"ring_queue5_rts_port12_udp", 0}, - {"ring_queue5_rts_port12_src", 0}, - {"ring_queue5_rts_port13_num", 0}, - {"ring_queue5_rts_port13_udp", 0}, - {"ring_queue5_rts_port13_src", 0}, - {"ring_queue5_rts_port14_num", 0}, - {"ring_queue5_rts_port14_udp", 0}, - {"ring_queue5_rts_port14_src", 0}, - {"ring_queue5_rts_port15_num", 0}, - {"ring_queue5_rts_port15_udp", 0}, - {"ring_queue5_rts_port15_src", 0}, - {"ring_queue5_rts_port16_num", 0}, - {"ring_queue5_rts_port16_udp", 0}, - {"ring_queue5_rts_port16_src", 0}, - {"ring_queue5_rts_port17_num", 0}, - {"ring_queue5_rts_port17_udp", 0}, - {"ring_queue5_rts_port17_src", 0}, - {"ring_queue5_rts_port18_num", 0}, - {"ring_queue5_rts_port18_udp", 0}, - {"ring_queue5_rts_port18_src", 0}, - {"ring_queue5_rts_port19_num", 0}, - {"ring_queue5_rts_port19_udp", 0}, - 
{"ring_queue5_rts_port19_src", 0}, - {"ring_queue5_rts_port20_num", 0}, - {"ring_queue5_rts_port20_udp", 0}, - {"ring_queue5_rts_port20_src", 0}, - {"ring_queue5_rts_port21_num", 0}, - {"ring_queue5_rts_port21_udp", 0}, - {"ring_queue5_rts_port21_src", 0}, - {"ring_queue5_rts_port22_num", 0}, - {"ring_queue5_rts_port22_udp", 0}, - {"ring_queue5_rts_port22_src", 0}, - {"ring_queue5_rts_port23_num", 0}, - {"ring_queue5_rts_port23_udp", 0}, - {"ring_queue5_rts_port23_src", 0}, - {"ring_queue5_rts_port24_num", 0}, - {"ring_queue5_rts_port24_udp", 0}, - {"ring_queue5_rts_port24_src", 0}, - {"ring_queue5_rts_port25_num", 0}, - {"ring_queue5_rts_port25_udp", 0}, - {"ring_queue5_rts_port25_src", 0}, - {"ring_queue5_rts_port26_num", 0}, - {"ring_queue5_rts_port26_udp", 0}, - {"ring_queue5_rts_port26_src", 0}, - {"ring_queue5_rts_port27_num", 0}, - {"ring_queue5_rts_port27_udp", 0}, - {"ring_queue5_rts_port27_src", 0}, - {"ring_queue5_rts_port28_num", 0}, - {"ring_queue5_rts_port28_udp", 0}, - {"ring_queue5_rts_port28_src", 0}, - {"ring_queue5_rts_port29_num", 0}, - {"ring_queue5_rts_port29_udp", 0}, - {"ring_queue5_rts_port29_src", 0}, - {"ring_queue5_rts_port30_num", 0}, - {"ring_queue5_rts_port30_udp", 0}, - {"ring_queue5_rts_port30_src", 0}, - {"ring_queue5_rts_port31_num", 0}, - {"ring_queue5_rts_port31_udp", 0}, - {"ring_queue5_rts_port31_src", 0}, - {"ring_queue5_rti_urange_a", 0}, - {"ring_queue5_rti_ufc_a", 0}, - {"ring_queue5_rti_urange_b", 0}, - {"ring_queue5_rti_ufc_b", 0}, - {"ring_queue5_rti_urange_c", 0}, - {"ring_queue5_rti_ufc_c", 0}, - {"ring_queue5_rti_ufc_d", 0}, - {"ring_queue5_rti_timer_ac_en", 0}, - {"ring_queue5_rti_timer_val_us", 0}, - - {"ring_queue6_max", 0}, - {"ring_queue6_initial", 0}, - {"ring_queue6_buffer_mode", 0}, - {"ring_queue6_dram_size_mb", 0}, - {"ring_queue6_intr_vector", 0}, - {"ring_queue6_backoff_interval_us", 0}, - {"ring_queue6_max_frm_len", 0}, - {"ring_queue6_priority", 0}, - {"ring_queue6_no_snoop_bits", 0}, - {"ring_queue6_indicate_max_pkts", 0}, - {"ring_queue6_configured", 0}, - {"ring_queue6_rts_mac_en", 0}, - {"ring_queue6_rth_en", 0}, - {"ring_queue6_rts_port_en", 0}, - {"ring_queue6_rts_port0_num", 0}, - {"ring_queue6_rts_port0_udp", 0}, - {"ring_queue6_rts_port0_src", 0}, - {"ring_queue6_rts_port1_num", 0}, - {"ring_queue6_rts_port1_udp", 0}, - {"ring_queue6_rts_port1_src", 0}, - {"ring_queue6_rts_port2_num", 0}, - {"ring_queue6_rts_port2_udp", 0}, - {"ring_queue6_rts_port2_src", 0}, - {"ring_queue6_rts_port3_num", 0}, - {"ring_queue6_rts_port3_udp", 0}, - {"ring_queue6_rts_port3_src", 0}, - {"ring_queue6_rts_port4_num", 0}, - {"ring_queue6_rts_port4_udp", 0}, - {"ring_queue6_rts_port4_src", 0}, - {"ring_queue6_rts_port5_num", 0}, - {"ring_queue6_rts_port5_udp", 0}, - {"ring_queue6_rts_port5_src", 0}, - {"ring_queue6_rts_port6_num", 0}, - {"ring_queue6_rts_port6_udp", 0}, - {"ring_queue6_rts_port6_src", 0}, - {"ring_queue6_rts_port7_num", 0}, - {"ring_queue6_rts_port7_udp", 0}, - {"ring_queue6_rts_port7_src", 0}, - {"ring_queue6_rts_port8_num", 0}, - {"ring_queue6_rts_port8_udp", 0}, - {"ring_queue6_rts_port8_src", 0}, - {"ring_queue6_rts_port9_num", 0}, - {"ring_queue6_rts_port9_udp", 0}, - {"ring_queue6_rts_port9_src", 0}, - {"ring_queue6_rts_port10_num", 0}, - {"ring_queue6_rts_port10_udp", 0}, - {"ring_queue6_rts_port10_src", 0}, - {"ring_queue6_rts_port11_num", 0}, - {"ring_queue6_rts_port11_udp", 0}, - {"ring_queue6_rts_port11_src", 0}, - {"ring_queue6_rts_port12_num", 0}, - {"ring_queue6_rts_port12_udp", 0}, - 
{"ring_queue6_rts_port12_src", 0}, - {"ring_queue6_rts_port13_num", 0}, - {"ring_queue6_rts_port13_udp", 0}, - {"ring_queue6_rts_port13_src", 0}, - {"ring_queue6_rts_port14_num", 0}, - {"ring_queue6_rts_port14_udp", 0}, - {"ring_queue6_rts_port14_src", 0}, - {"ring_queue6_rts_port15_num", 0}, - {"ring_queue6_rts_port15_udp", 0}, - {"ring_queue6_rts_port15_src", 0}, - {"ring_queue6_rts_port16_num", 0}, - {"ring_queue6_rts_port16_udp", 0}, - {"ring_queue6_rts_port16_src", 0}, - {"ring_queue6_rts_port17_num", 0}, - {"ring_queue6_rts_port17_udp", 0}, - {"ring_queue6_rts_port17_src", 0}, - {"ring_queue6_rts_port18_num", 0}, - {"ring_queue6_rts_port18_udp", 0}, - {"ring_queue6_rts_port18_src", 0}, - {"ring_queue6_rts_port19_num", 0}, - {"ring_queue6_rts_port19_udp", 0}, - {"ring_queue6_rts_port19_src", 0}, - {"ring_queue6_rts_port20_num", 0}, - {"ring_queue6_rts_port20_udp", 0}, - {"ring_queue6_rts_port20_src", 0}, - {"ring_queue6_rts_port21_num", 0}, - {"ring_queue6_rts_port21_udp", 0}, - {"ring_queue6_rts_port21_src", 0}, - {"ring_queue6_rts_port22_num", 0}, - {"ring_queue6_rts_port22_udp", 0}, - {"ring_queue6_rts_port22_src", 0}, - {"ring_queue6_rts_port23_num", 0}, - {"ring_queue6_rts_port23_udp", 0}, - {"ring_queue6_rts_port23_src", 0}, - {"ring_queue6_rts_port24_num", 0}, - {"ring_queue6_rts_port24_udp", 0}, - {"ring_queue6_rts_port24_src", 0}, - {"ring_queue6_rts_port25_num", 0}, - {"ring_queue6_rts_port25_udp", 0}, - {"ring_queue6_rts_port25_src", 0}, - {"ring_queue6_rts_port26_num", 0}, - {"ring_queue6_rts_port26_udp", 0}, - {"ring_queue6_rts_port26_src", 0}, - {"ring_queue6_rts_port27_num", 0}, - {"ring_queue6_rts_port27_udp", 0}, - {"ring_queue6_rts_port27_src", 0}, - {"ring_queue6_rts_port28_num", 0}, - {"ring_queue6_rts_port28_udp", 0}, - {"ring_queue6_rts_port28_src", 0}, - {"ring_queue6_rts_port29_num", 0}, - {"ring_queue6_rts_port29_udp", 0}, - {"ring_queue6_rts_port29_src", 0}, - {"ring_queue6_rts_port30_num", 0}, - {"ring_queue6_rts_port30_udp", 0}, - {"ring_queue6_rts_port30_src", 0}, - {"ring_queue6_rts_port31_num", 0}, - {"ring_queue6_rts_port31_udp", 0}, - {"ring_queue6_rts_port31_src", 0}, - {"ring_queue6_rti_urange_a", 0}, - {"ring_queue6_rti_ufc_a", 0}, - {"ring_queue6_rti_urange_b", 0}, - {"ring_queue6_rti_ufc_b", 0}, - {"ring_queue6_rti_urange_c", 0}, - {"ring_queue6_rti_ufc_c", 0}, - {"ring_queue6_rti_ufc_d", 0}, - {"ring_queue6_rti_timer_ac_en", 0}, - {"ring_queue6_rti_timer_val_us", 0}, - - {"ring_queue7_max", 0}, - {"ring_queue7_initial", 0}, - {"ring_queue7_buffer_mode", 0}, - {"ring_queue7_dram_size_mb", 0}, - {"ring_queue7_intr_vector", 0}, - {"ring_queue7_backoff_interval_us", 0}, - {"ring_queue7_max_frm_len", 0}, - {"ring_queue7_priority", 0}, - {"ring_queue7_no_snoop_bits", 0}, - {"ring_queue7_indicate_max_pkts", 0}, - {"ring_queue7_configured", 0}, - {"ring_queue7_rts_mac_en", 0}, - {"ring_queue7_rth_en", 0}, - {"ring_queue7_rts_port_en", 0}, - {"ring_queue7_rts_port0_num", 0}, - {"ring_queue7_rts_port0_udp", 0}, - {"ring_queue7_rts_port0_src", 0}, - {"ring_queue7_rts_port1_num", 0}, - {"ring_queue7_rts_port1_udp", 0}, - {"ring_queue7_rts_port1_src", 0}, - {"ring_queue7_rts_port2_num", 0}, - {"ring_queue7_rts_port2_udp", 0}, - {"ring_queue7_rts_port2_src", 0}, - {"ring_queue7_rts_port3_num", 0}, - {"ring_queue7_rts_port3_udp", 0}, - {"ring_queue7_rts_port3_src", 0}, - {"ring_queue7_rts_port4_num", 0}, - {"ring_queue7_rts_port4_udp", 0}, - {"ring_queue7_rts_port4_src", 0}, - {"ring_queue7_rts_port5_num", 0}, - {"ring_queue7_rts_port5_udp", 0}, - 
{"ring_queue7_rts_port5_src", 0}, - {"ring_queue7_rts_port6_num", 0}, - {"ring_queue7_rts_port6_udp", 0}, - {"ring_queue7_rts_port6_src", 0}, - {"ring_queue7_rts_port7_num", 0}, - {"ring_queue7_rts_port7_udp", 0}, - {"ring_queue7_rts_port7_src", 0}, - {"ring_queue7_rts_port8_num", 0}, - {"ring_queue7_rts_port8_udp", 0}, - {"ring_queue7_rts_port8_src", 0}, - {"ring_queue7_rts_port9_num", 0}, - {"ring_queue7_rts_port9_udp", 0}, - {"ring_queue7_rts_port9_src", 0}, - {"ring_queue7_rts_port10_num", 0}, - {"ring_queue7_rts_port10_udp", 0}, - {"ring_queue7_rts_port10_src", 0}, - {"ring_queue7_rts_port11_num", 0}, - {"ring_queue7_rts_port11_udp", 0}, - {"ring_queue7_rts_port11_src", 0}, - {"ring_queue7_rts_port12_num", 0}, - {"ring_queue7_rts_port12_udp", 0}, - {"ring_queue7_rts_port12_src", 0}, - {"ring_queue7_rts_port13_num", 0}, - {"ring_queue7_rts_port13_udp", 0}, - {"ring_queue7_rts_port13_src", 0}, - {"ring_queue7_rts_port14_num", 0}, - {"ring_queue7_rts_port14_udp", 0}, - {"ring_queue7_rts_port14_src", 0}, - {"ring_queue7_rts_port15_num", 0}, - {"ring_queue7_rts_port15_udp", 0}, - {"ring_queue7_rts_port15_src", 0}, - {"ring_queue7_rts_port16_num", 0}, - {"ring_queue7_rts_port16_udp", 0}, - {"ring_queue7_rts_port16_src", 0}, - {"ring_queue7_rts_port17_num", 0}, - {"ring_queue7_rts_port17_udp", 0}, - {"ring_queue7_rts_port17_src", 0}, - {"ring_queue7_rts_port18_num", 0}, - {"ring_queue7_rts_port18_udp", 0}, - {"ring_queue7_rts_port18_src", 0}, - {"ring_queue7_rts_port19_num", 0}, - {"ring_queue7_rts_port19_udp", 0}, - {"ring_queue7_rts_port19_src", 0}, - {"ring_queue7_rts_port20_num", 0}, - {"ring_queue7_rts_port20_udp", 0}, - {"ring_queue7_rts_port20_src", 0}, - {"ring_queue7_rts_port21_num", 0}, - {"ring_queue7_rts_port21_udp", 0}, - {"ring_queue7_rts_port21_src", 0}, - {"ring_queue7_rts_port22_num", 0}, - {"ring_queue7_rts_port22_udp", 0}, - {"ring_queue7_rts_port22_src", 0}, - {"ring_queue7_rts_port23_num", 0}, - {"ring_queue7_rts_port23_udp", 0}, - {"ring_queue7_rts_port23_src", 0}, - {"ring_queue7_rts_port24_num", 0}, - {"ring_queue7_rts_port24_udp", 0}, - {"ring_queue7_rts_port24_src", 0}, - {"ring_queue7_rts_port25_num", 0}, - {"ring_queue7_rts_port25_udp", 0}, - {"ring_queue7_rts_port25_src", 0}, - {"ring_queue7_rts_port26_num", 0}, - {"ring_queue7_rts_port26_udp", 0}, - {"ring_queue7_rts_port26_src", 0}, - {"ring_queue7_rts_port27_num", 0}, - {"ring_queue7_rts_port27_udp", 0}, - {"ring_queue7_rts_port27_src", 0}, - {"ring_queue7_rts_port28_num", 0}, - {"ring_queue7_rts_port28_udp", 0}, - {"ring_queue7_rts_port28_src", 0}, - {"ring_queue7_rts_port29_num", 0}, - {"ring_queue7_rts_port29_udp", 0}, - {"ring_queue7_rts_port29_src", 0}, - {"ring_queue7_rts_port30_num", 0}, - {"ring_queue7_rts_port30_udp", 0}, - {"ring_queue7_rts_port30_src", 0}, - {"ring_queue7_rts_port31_num", 0}, - {"ring_queue7_rts_port31_udp", 0}, - {"ring_queue7_rts_port31_src", 0}, - {"ring_queue7_rti_urange_a", 0}, - {"ring_queue7_rti_ufc_a", 0}, - {"ring_queue7_rti_urange_b", 0}, - {"ring_queue7_rti_ufc_b", 0}, - {"ring_queue7_rti_urange_c", 0}, - {"ring_queue7_rti_ufc_c", 0}, - {"ring_queue7_rti_ufc_d", 0}, - {"ring_queue7_rti_timer_ac_en", 0}, - {"ring_queue7_rti_timer_val_us", 0}, - - {"mac_media", 0}, - {"mac_tmac_util_period", 0}, - {"mac_rmac_util_period", 0}, - {"mac_rmac_bcast_en", 0}, - {"mac_rmac_pause_gen_en", 0}, - {"mac_rmac_pause_rcv_en", 0}, - {"mac_rmac_pause_time", 0}, - {"mac_mc_pause_threshold_q0q3", 0}, - {"mac_mc_pause_threshold_q4q7", 0}, - - {"fifo_max_frags", 0}, - 
{"fifo_reserve_threshold", 0}, - {"fifo_memblock_size", 0}, - {"fifo_alignment_size", 0}, - {"fifo_max_aligned_frags", 0}, - - {"fifo_queue0_max", 0}, - {"fifo_queue0_initial", 0}, - {"fifo_queue0_intr", 0}, - {"fifo_queue0_intr_vector", 0}, - {"fifo_queue0_no_snoop_bits", 0}, - {"fifo_queue0_priority", 0}, - {"fifo_queue0_configured", 0}, - - {"fifo_queue0_tti0_enabled", 0}, - {"fifo_queue0_tti0_urange_a", 0}, - {"fifo_queue0_tti0_ufc_a", 0}, - {"fifo_queue0_tti0_urange_b", 0}, - {"fifo_queue0_tti0_ufc_b", 0}, - {"fifo_queue0_tti0_urange_c", 0}, - {"fifo_queue0_tti0_ufc_c", 0}, - {"fifo_queue0_tti0_ufc_d", 0}, - {"fifo_queue0_tti0_timer_val_us", 0}, - {"fifo_queue0_tti0_timer_ac_en", 0}, - {"fifo_queue0_tti0_timer_ci_en", 0}, - - {"fifo_queue0_tti1_enabled", 0}, - {"fifo_queue0_tti1_urange_a", 0}, - {"fifo_queue0_tti1_ufc_a", 0}, - {"fifo_queue0_tti1_urange_b", 0}, - {"fifo_queue0_tti1_ufc_b", 0}, - {"fifo_queue0_tti1_urange_c", 0}, - {"fifo_queue0_tti1_ufc_c", 0}, - {"fifo_queue0_tti1_ufc_d", 0}, - {"fifo_queue0_tti1_timer_val_us", 0}, - {"fifo_queue0_tti1_timer_ac_en", 0}, - {"fifo_queue0_tti1_timer_ci_en", 0}, - - {"fifo_queue0_tti2_enabled", 0}, - {"fifo_queue0_tti2_urange_a", 0}, - {"fifo_queue0_tti2_ufc_a", 0}, - {"fifo_queue0_tti2_urange_b", 0}, - {"fifo_queue0_tti2_ufc_b", 0}, - {"fifo_queue0_tti2_urange_c", 0}, - {"fifo_queue0_tti2_ufc_c", 0}, - {"fifo_queue0_tti2_ufc_d", 0}, - {"fifo_queue0_tti2_timer_val_us", 0}, - {"fifo_queue0_tti2_timer_ac_en", 0}, - {"fifo_queue0_tti2_timer_ci_en", 0}, - - {"fifo_queue0_tti3_enabled", 0}, - {"fifo_queue0_tti3_urange_a", 0}, - {"fifo_queue0_tti3_ufc_a", 0}, - {"fifo_queue0_tti3_urange_b", 0}, - {"fifo_queue0_tti3_ufc_b", 0}, - {"fifo_queue0_tti3_urange_c", 0}, - {"fifo_queue0_tti3_ufc_c", 0}, - {"fifo_queue0_tti3_ufc_d", 0}, - {"fifo_queue0_tti3_timer_val_us", 0}, - {"fifo_queue0_tti3_timer_ac_en", 0}, - {"fifo_queue0_tti3_timer_ci_en", 0}, - - {"fifo_queue0_tti4_enabled", 0}, - {"fifo_queue0_tti4_urange_a", 0}, - {"fifo_queue0_tti4_ufc_a", 0}, - {"fifo_queue0_tti4_urange_b", 0}, - {"fifo_queue0_tti4_ufc_b", 0}, - {"fifo_queue0_tti4_urange_c", 0}, - {"fifo_queue0_tti4_ufc_c", 0}, - {"fifo_queue0_tti4_ufc_d", 0}, - {"fifo_queue0_tti4_timer_val_us", 0}, - {"fifo_queue0_tti4_timer_ac_en", 0}, - {"fifo_queue0_tti4_timer_ci_en", 0}, - - {"fifo_queue0_tti5_enabled", 0}, - {"fifo_queue0_tti5_urange_a", 0}, - {"fifo_queue0_tti5_ufc_a", 0}, - {"fifo_queue0_tti5_urange_b", 0}, - {"fifo_queue0_tti5_ufc_b", 0}, - {"fifo_queue0_tti5_urange_c", 0}, - {"fifo_queue0_tti5_ufc_c", 0}, - {"fifo_queue0_tti5_ufc_d", 0}, - {"fifo_queue0_tti5_timer_val_us", 0}, - {"fifo_queue0_tti5_timer_ac_en", 0}, - {"fifo_queue0_tti5_timer_ci_en", 0}, - - {"fifo_queue0_tti6_enabled", 0}, - {"fifo_queue0_tti6_urange_a", 0}, - {"fifo_queue0_tti6_ufc_a", 0}, - {"fifo_queue0_tti6_urange_b", 0}, - {"fifo_queue0_tti6_ufc_b", 0}, - {"fifo_queue0_tti6_urange_c", 0}, - {"fifo_queue0_tti6_ufc_c", 0}, - {"fifo_queue0_tti6_ufc_d", 0}, - {"fifo_queue0_tti6_timer_val_us", 0}, - {"fifo_queue0_tti6_timer_ac_en", 0}, - {"fifo_queue0_tti6_timer_ci_en", 0}, - - {"fifo_queue1_max", 0}, - {"fifo_queue1_initial", 0}, - {"fifo_queue1_intr", 0}, - {"fifo_queue1_intr_vector", 0}, - {"fifo_queue1_no_snoop_bits", 0}, - {"fifo_queue1_priority", 0}, - {"fifo_queue1_configured", 0}, - - {"fifo_queue1_tti0_enabled", 0}, - {"fifo_queue1_tti0_urange_a", 0}, - {"fifo_queue1_tti0_ufc_a", 0}, - {"fifo_queue1_tti0_urange_b", 0}, - {"fifo_queue1_tti0_ufc_b", 0}, - {"fifo_queue1_tti0_urange_c", 0}, - 
{"fifo_queue1_tti0_ufc_c", 0}, - {"fifo_queue1_tti0_ufc_d", 0}, - {"fifo_queue1_tti0_timer_val_us", 0}, - {"fifo_queue1_tti0_timer_ac_en", 0}, - {"fifo_queue1_tti0_timer_ci_en", 0}, - - {"fifo_queue1_tti1_enabled", 0}, - {"fifo_queue1_tti1_urange_a", 0}, - {"fifo_queue1_tti1_ufc_a", 0}, - {"fifo_queue1_tti1_urange_b", 0}, - {"fifo_queue1_tti1_ufc_b", 0}, - {"fifo_queue1_tti1_urange_c", 0}, - {"fifo_queue1_tti1_ufc_c", 0}, - {"fifo_queue1_tti1_ufc_d", 0}, - {"fifo_queue1_tti1_timer_val_us", 0}, - {"fifo_queue1_tti1_timer_ac_en", 0}, - {"fifo_queue1_tti1_timer_ci_en", 0}, - - {"fifo_queue1_tti2_enabled", 0}, - {"fifo_queue1_tti2_urange_a", 0}, - {"fifo_queue1_tti2_ufc_a", 0}, - {"fifo_queue1_tti2_urange_b", 0}, - {"fifo_queue1_tti2_ufc_b", 0}, - {"fifo_queue1_tti2_urange_c", 0}, - {"fifo_queue1_tti2_ufc_c", 0}, - {"fifo_queue1_tti2_ufc_d", 0}, - {"fifo_queue1_tti2_timer_val_us", 0}, - {"fifo_queue1_tti2_timer_ac_en", 0}, - {"fifo_queue1_tti2_timer_ci_en", 0}, - - {"fifo_queue1_tti3_enabled", 0}, - {"fifo_queue1_tti3_urange_a", 0}, - {"fifo_queue1_tti3_ufc_a", 0}, - {"fifo_queue1_tti3_urange_b", 0}, - {"fifo_queue1_tti3_ufc_b", 0}, - {"fifo_queue1_tti3_urange_c", 0}, - {"fifo_queue1_tti3_ufc_c", 0}, - {"fifo_queue1_tti3_ufc_d", 0}, - {"fifo_queue1_tti3_timer_val_us", 0}, - {"fifo_queue1_tti3_timer_ac_en", 0}, - {"fifo_queue1_tti3_timer_ci_en", 0}, - - {"fifo_queue1_tti4_enabled", 0}, - {"fifo_queue1_tti4_urange_a", 0}, - {"fifo_queue1_tti4_ufc_a", 0}, - {"fifo_queue1_tti4_urange_b", 0}, - {"fifo_queue1_tti4_ufc_b", 0}, - {"fifo_queue1_tti4_urange_c", 0}, - {"fifo_queue1_tti4_ufc_c", 0}, - {"fifo_queue1_tti4_ufc_d", 0}, - {"fifo_queue1_tti4_timer_val_us", 0}, - {"fifo_queue1_tti4_timer_ac_en", 0}, - {"fifo_queue1_tti4_timer_ci_en", 0}, - - {"fifo_queue1_tti5_enabled", 0}, - {"fifo_queue1_tti5_urange_a", 0}, - {"fifo_queue1_tti5_ufc_a", 0}, - {"fifo_queue1_tti5_urange_b", 0}, - {"fifo_queue1_tti5_ufc_b", 0}, - {"fifo_queue1_tti5_urange_c", 0}, - {"fifo_queue1_tti5_ufc_c", 0}, - {"fifo_queue1_tti5_ufc_d", 0}, - {"fifo_queue1_tti5_timer_val_us", 0}, - {"fifo_queue1_tti5_timer_ac_en", 0}, - {"fifo_queue1_tti5_timer_ci_en", 0}, - - {"fifo_queue1_tti6_enabled", 0}, - {"fifo_queue1_tti6_urange_a", 0}, - {"fifo_queue1_tti6_ufc_a", 0}, - {"fifo_queue1_tti6_urange_b", 0}, - {"fifo_queue1_tti6_ufc_b", 0}, - {"fifo_queue1_tti6_urange_c", 0}, - {"fifo_queue1_tti6_ufc_c", 0}, - {"fifo_queue1_tti6_ufc_d", 0}, - {"fifo_queue1_tti6_timer_val_us", 0}, - {"fifo_queue1_tti6_timer_ac_en", 0}, - {"fifo_queue1_tti6_timer_ci_en", 0}, - - {"fifo_queue2_max", 0}, - {"fifo_queue2_initial", 0}, - {"fifo_queue2_intr", 0}, - {"fifo_queue2_intr_vector", 0}, - {"fifo_queue2_no_snoop_bits", 0}, - {"fifo_queue2_priority", 0}, - {"fifo_queue2_configured", 0}, - - {"fifo_queue2_tti0_enabled", 0}, - {"fifo_queue2_tti0_urange_a", 0}, - {"fifo_queue2_tti0_ufc_a", 0}, - {"fifo_queue2_tti0_urange_b", 0}, - {"fifo_queue2_tti0_ufc_b", 0}, - {"fifo_queue2_tti0_urange_c", 0}, - {"fifo_queue2_tti0_ufc_c", 0}, - {"fifo_queue2_tti0_ufc_d", 0}, - {"fifo_queue2_tti0_timer_val_us", 0}, - {"fifo_queue2_tti0_timer_ac_en", 0}, - {"fifo_queue2_tti0_timer_ci_en", 0}, - - {"fifo_queue2_tti1_enabled", 0}, - {"fifo_queue2_tti1_urange_a", 0}, - {"fifo_queue2_tti1_ufc_a", 0}, - {"fifo_queue2_tti1_urange_b", 0}, - {"fifo_queue2_tti1_ufc_b", 0}, - {"fifo_queue2_tti1_urange_c", 0}, - {"fifo_queue2_tti1_ufc_c", 0}, - {"fifo_queue2_tti1_ufc_d", 0}, - {"fifo_queue2_tti1_timer_val_us", 0}, - {"fifo_queue2_tti1_timer_ac_en", 0}, - 
{"fifo_queue2_tti1_timer_ci_en", 0}, - - {"fifo_queue2_tti2_enabled", 0}, - {"fifo_queue2_tti2_urange_a", 0}, - {"fifo_queue2_tti2_ufc_a", 0}, - {"fifo_queue2_tti2_urange_b", 0}, - {"fifo_queue2_tti2_ufc_b", 0}, - {"fifo_queue2_tti2_urange_c", 0}, - {"fifo_queue2_tti2_ufc_c", 0}, - {"fifo_queue2_tti2_ufc_d", 0}, - {"fifo_queue2_tti2_timer_val_us", 0}, - {"fifo_queue2_tti2_timer_ac_en", 0}, - {"fifo_queue2_tti2_timer_ci_en", 0}, - - {"fifo_queue2_tti3_enabled", 0}, - {"fifo_queue2_tti3_urange_a", 0}, - {"fifo_queue2_tti3_ufc_a", 0}, - {"fifo_queue2_tti3_urange_b", 0}, - {"fifo_queue2_tti3_ufc_b", 0}, - {"fifo_queue2_tti3_urange_c", 0}, - {"fifo_queue2_tti3_ufc_c", 0}, - {"fifo_queue2_tti3_ufc_d", 0}, - {"fifo_queue2_tti3_timer_val_us", 0}, - {"fifo_queue2_tti3_timer_ac_en", 0}, - {"fifo_queue2_tti3_timer_ci_en", 0}, - - {"fifo_queue2_tti4_enabled", 0}, - {"fifo_queue2_tti4_urange_a", 0}, - {"fifo_queue2_tti4_ufc_a", 0}, - {"fifo_queue2_tti4_urange_b", 0}, - {"fifo_queue2_tti4_ufc_b", 0}, - {"fifo_queue2_tti4_urange_c", 0}, - {"fifo_queue2_tti4_ufc_c", 0}, - {"fifo_queue2_tti4_ufc_d", 0}, - {"fifo_queue2_tti4_timer_val_us", 0}, - {"fifo_queue2_tti4_timer_ac_en", 0}, - {"fifo_queue2_tti4_timer_ci_en", 0}, - - {"fifo_queue2_tti5_enabled", 0}, - {"fifo_queue2_tti5_urange_a", 0}, - {"fifo_queue2_tti5_ufc_a", 0}, - {"fifo_queue2_tti5_urange_b", 0}, - {"fifo_queue2_tti5_ufc_b", 0}, - {"fifo_queue2_tti5_urange_c", 0}, - {"fifo_queue2_tti5_ufc_c", 0}, - {"fifo_queue2_tti5_ufc_d", 0}, - {"fifo_queue2_tti5_timer_val_us", 0}, - {"fifo_queue2_tti5_timer_ac_en", 0}, - {"fifo_queue2_tti5_timer_ci_en", 0}, - - {"fifo_queue2_tti6_enabled", 0}, - {"fifo_queue2_tti6_urange_a", 0}, - {"fifo_queue2_tti6_ufc_a", 0}, - {"fifo_queue2_tti6_urange_b", 0}, - {"fifo_queue2_tti6_ufc_b", 0}, - {"fifo_queue2_tti6_urange_c", 0}, - {"fifo_queue2_tti6_ufc_c", 0}, - {"fifo_queue2_tti6_ufc_d", 0}, - {"fifo_queue2_tti6_timer_val_us", 0}, - {"fifo_queue2_tti6_timer_ac_en", 0}, - {"fifo_queue2_tti6_timer_ci_en", 0}, - - {"fifo_queue3_max", 0}, - {"fifo_queue3_initial", 0}, - {"fifo_queue3_intr", 0}, - {"fifo_queue3_intr_vector", 0}, - {"fifo_queue3_no_snoop_bits", 0}, - {"fifo_queue3_priority", 0}, - {"fifo_queue3_configured", 0}, - - {"fifo_queue3_tti0_enabled", 0}, - {"fifo_queue3_tti0_urange_a", 0}, - {"fifo_queue3_tti0_ufc_a", 0}, - {"fifo_queue3_tti0_urange_b", 0}, - {"fifo_queue3_tti0_ufc_b", 0}, - {"fifo_queue3_tti0_urange_c", 0}, - {"fifo_queue3_tti0_ufc_c", 0}, - {"fifo_queue3_tti0_ufc_d", 0}, - {"fifo_queue3_tti0_timer_val_us", 0}, - {"fifo_queue3_tti0_timer_ac_en", 0}, - {"fifo_queue3_tti0_timer_ci_en", 0}, - - {"fifo_queue3_tti1_enabled", 0}, - {"fifo_queue3_tti1_urange_a", 0}, - {"fifo_queue3_tti1_ufc_a", 0}, - {"fifo_queue3_tti1_urange_b", 0}, - {"fifo_queue3_tti1_ufc_b", 0}, - {"fifo_queue3_tti1_urange_c", 0}, - {"fifo_queue3_tti1_ufc_c", 0}, - {"fifo_queue3_tti1_ufc_d", 0}, - {"fifo_queue3_tti1_timer_val_us", 0}, - {"fifo_queue3_tti1_timer_ac_en", 0}, - {"fifo_queue3_tti1_timer_ci_en", 0}, - - {"fifo_queue3_tti2_enabled", 0}, - {"fifo_queue3_tti2_urange_a", 0}, - {"fifo_queue3_tti2_ufc_a", 0}, - {"fifo_queue3_tti2_urange_b", 0}, - {"fifo_queue3_tti2_ufc_b", 0}, - {"fifo_queue3_tti2_urange_c", 0}, - {"fifo_queue3_tti2_ufc_c", 0}, - {"fifo_queue3_tti2_ufc_d", 0}, - {"fifo_queue3_tti2_timer_val_us", 0}, - {"fifo_queue3_tti2_timer_ac_en", 0}, - {"fifo_queue3_tti2_timer_ci_en", 0}, - - {"fifo_queue3_tti3_enabled", 0}, - {"fifo_queue3_tti3_urange_a", 0}, - {"fifo_queue3_tti3_ufc_a", 0}, - {"fifo_queue3_tti3_urange_b", 
0}, - {"fifo_queue3_tti3_ufc_b", 0}, - {"fifo_queue3_tti3_urange_c", 0}, - {"fifo_queue3_tti3_ufc_c", 0}, - {"fifo_queue3_tti3_ufc_d", 0}, - {"fifo_queue3_tti3_timer_val_us", 0}, - {"fifo_queue3_tti3_timer_ac_en", 0}, - {"fifo_queue3_tti3_timer_ci_en", 0}, - - {"fifo_queue3_tti4_enabled", 0}, - {"fifo_queue3_tti4_urange_a", 0}, - {"fifo_queue3_tti4_ufc_a", 0}, - {"fifo_queue3_tti4_urange_b", 0}, - {"fifo_queue3_tti4_ufc_b", 0}, - {"fifo_queue3_tti4_urange_c", 0}, - {"fifo_queue3_tti4_ufc_c", 0}, - {"fifo_queue3_tti4_ufc_d", 0}, - {"fifo_queue3_tti4_timer_val_us", 0}, - {"fifo_queue3_tti4_timer_ac_en", 0}, - {"fifo_queue3_tti4_timer_ci_en", 0}, - - {"fifo_queue3_tti5_enabled", 0}, - {"fifo_queue3_tti5_urange_a", 0}, - {"fifo_queue3_tti5_ufc_a", 0}, - {"fifo_queue3_tti5_urange_b", 0}, - {"fifo_queue3_tti5_ufc_b", 0}, - {"fifo_queue3_tti5_urange_c", 0}, - {"fifo_queue3_tti5_ufc_c", 0}, - {"fifo_queue3_tti5_ufc_d", 0}, - {"fifo_queue3_tti5_timer_val_us", 0}, - {"fifo_queue3_tti5_timer_ac_en", 0}, - {"fifo_queue3_tti5_timer_ci_en", 0}, - - {"fifo_queue3_tti6_enabled", 0}, - {"fifo_queue3_tti6_urange_a", 0}, - {"fifo_queue3_tti6_ufc_a", 0}, - {"fifo_queue3_tti6_urange_b", 0}, - {"fifo_queue3_tti6_ufc_b", 0}, - {"fifo_queue3_tti6_urange_c", 0}, - {"fifo_queue3_tti6_ufc_c", 0}, - {"fifo_queue3_tti6_ufc_d", 0}, - {"fifo_queue3_tti6_timer_val_us", 0}, - {"fifo_queue3_tti6_timer_ac_en", 0}, - {"fifo_queue3_tti6_timer_ci_en", 0}, - - {"fifo_queue4_max", 0}, - {"fifo_queue4_initial", 0}, - {"fifo_queue4_intr", 0}, - {"fifo_queue4_intr_vector", 0}, - {"fifo_queue4_no_snoop_bits", 0}, - {"fifo_queue4_priority", 0}, - {"fifo_queue4_configured", 0}, - - {"fifo_queue4_tti0_enabled", 0}, - {"fifo_queue4_tti0_urange_a", 0}, - {"fifo_queue4_tti0_ufc_a", 0}, - {"fifo_queue4_tti0_urange_b", 0}, - {"fifo_queue4_tti0_ufc_b", 0}, - {"fifo_queue4_tti0_urange_c", 0}, - {"fifo_queue4_tti0_ufc_c", 0}, - {"fifo_queue4_tti0_ufc_d", 0}, - {"fifo_queue4_tti0_timer_val_us", 0}, - {"fifo_queue4_tti0_timer_ac_en", 0}, - {"fifo_queue4_tti0_timer_ci_en", 0}, - - {"fifo_queue4_tti1_enabled", 0}, - {"fifo_queue4_tti1_urange_a", 0}, - {"fifo_queue4_tti1_ufc_a", 0}, - {"fifo_queue4_tti1_urange_b", 0}, - {"fifo_queue4_tti1_ufc_b", 0}, - {"fifo_queue4_tti1_urange_c", 0}, - {"fifo_queue4_tti1_ufc_c", 0}, - {"fifo_queue4_tti1_ufc_d", 0}, - {"fifo_queue4_tti1_timer_val_us", 0}, - {"fifo_queue4_tti1_timer_ac_en", 0}, - {"fifo_queue4_tti1_timer_ci_en", 0}, - - {"fifo_queue4_tti2_enabled", 0}, - {"fifo_queue4_tti2_urange_a", 0}, - {"fifo_queue4_tti2_ufc_a", 0}, - {"fifo_queue4_tti2_urange_b", 0}, - {"fifo_queue4_tti2_ufc_b", 0}, - {"fifo_queue4_tti2_urange_c", 0}, - {"fifo_queue4_tti2_ufc_c", 0}, - {"fifo_queue4_tti2_ufc_d", 0}, - {"fifo_queue4_tti2_timer_val_us", 0}, - {"fifo_queue4_tti2_timer_ac_en", 0}, - {"fifo_queue4_tti2_timer_ci_en", 0}, - - {"fifo_queue4_tti3_enabled", 0}, - {"fifo_queue4_tti3_urange_a", 0}, - {"fifo_queue4_tti3_ufc_a", 0}, - {"fifo_queue4_tti3_urange_b", 0}, - {"fifo_queue4_tti3_ufc_b", 0}, - {"fifo_queue4_tti3_urange_c", 0}, - {"fifo_queue4_tti3_ufc_c", 0}, - {"fifo_queue4_tti3_ufc_d", 0}, - {"fifo_queue4_tti3_timer_val_us", 0}, - {"fifo_queue4_tti3_timer_ac_en", 0}, - {"fifo_queue4_tti3_timer_ci_en", 0}, - - {"fifo_queue4_tti4_enabled", 0}, - {"fifo_queue4_tti4_urange_a", 0}, - {"fifo_queue4_tti4_ufc_a", 0}, - {"fifo_queue4_tti4_urange_b", 0}, - {"fifo_queue4_tti4_ufc_b", 0}, - {"fifo_queue4_tti4_urange_c", 0}, - {"fifo_queue4_tti4_ufc_c", 0}, - {"fifo_queue4_tti4_ufc_d", 0}, - {"fifo_queue4_tti4_timer_val_us", 
0}, - {"fifo_queue4_tti4_timer_ac_en", 0}, - {"fifo_queue4_tti4_timer_ci_en", 0}, - - {"fifo_queue4_tti5_enabled", 0}, - {"fifo_queue4_tti5_urange_a", 0}, - {"fifo_queue4_tti5_ufc_a", 0}, - {"fifo_queue4_tti5_urange_b", 0}, - {"fifo_queue4_tti5_ufc_b", 0}, - {"fifo_queue4_tti5_urange_c", 0}, - {"fifo_queue4_tti5_ufc_c", 0}, - {"fifo_queue4_tti5_ufc_d", 0}, - {"fifo_queue4_tti5_timer_val_us", 0}, - {"fifo_queue4_tti5_timer_ac_en", 0}, - {"fifo_queue4_tti5_timer_ci_en", 0}, - - {"fifo_queue4_tti6_enabled", 0}, - {"fifo_queue4_tti6_urange_a", 0}, - {"fifo_queue4_tti6_ufc_a", 0}, - {"fifo_queue4_tti6_urange_b", 0}, - {"fifo_queue4_tti6_ufc_b", 0}, - {"fifo_queue4_tti6_urange_c", 0}, - {"fifo_queue4_tti6_ufc_c", 0}, - {"fifo_queue4_tti6_ufc_d", 0}, - {"fifo_queue4_tti6_timer_val_us", 0}, - {"fifo_queue4_tti6_timer_ac_en", 0}, - {"fifo_queue4_tti6_timer_ci_en", 0}, - - {"fifo_queue5_max", 0}, - {"fifo_queue5_initial", 0}, - {"fifo_queue5_intr", 0}, - {"fifo_queue5_intr_vector", 0}, - {"fifo_queue5_no_snoop_bits", 0}, - {"fifo_queue5_priority", 0}, - {"fifo_queue5_configured", 0}, - - {"fifo_queue5_tti0_enabled", 0}, - {"fifo_queue5_tti0_urange_a", 0}, - {"fifo_queue5_tti0_ufc_a", 0}, - {"fifo_queue5_tti0_urange_b", 0}, - {"fifo_queue5_tti0_ufc_b", 0}, - {"fifo_queue5_tti0_urange_c", 0}, - {"fifo_queue5_tti0_ufc_c", 0}, - {"fifo_queue5_tti0_ufc_d", 0}, - {"fifo_queue5_tti0_timer_val_us", 0}, - {"fifo_queue5_tti0_timer_ac_en", 0}, - {"fifo_queue5_tti0_timer_ci_en", 0}, - - {"fifo_queue5_tti1_enabled", 0}, - {"fifo_queue5_tti1_urange_a", 0}, - {"fifo_queue5_tti1_ufc_a", 0}, - {"fifo_queue5_tti1_urange_b", 0}, - {"fifo_queue5_tti1_ufc_b", 0}, - {"fifo_queue5_tti1_urange_c", 0}, - {"fifo_queue5_tti1_ufc_c", 0}, - {"fifo_queue5_tti1_ufc_d", 0}, - {"fifo_queue5_tti1_timer_val_us", 0}, - {"fifo_queue5_tti1_timer_ac_en", 0}, - {"fifo_queue5_tti1_timer_ci_en", 0}, - - {"fifo_queue5_tti2_enabled", 0}, - {"fifo_queue5_tti2_urange_a", 0}, - {"fifo_queue5_tti2_ufc_a", 0}, - {"fifo_queue5_tti2_urange_b", 0}, - {"fifo_queue5_tti2_ufc_b", 0}, - {"fifo_queue5_tti2_urange_c", 0}, - {"fifo_queue5_tti2_ufc_c", 0}, - {"fifo_queue5_tti2_ufc_d", 0}, - {"fifo_queue5_tti2_timer_val_us", 0}, - {"fifo_queue5_tti2_timer_ac_en", 0}, - {"fifo_queue5_tti2_timer_ci_en", 0}, - - {"fifo_queue5_tti3_enabled", 0}, - {"fifo_queue5_tti3_urange_a", 0}, - {"fifo_queue5_tti3_ufc_a", 0}, - {"fifo_queue5_tti3_urange_b", 0}, - {"fifo_queue5_tti3_ufc_b", 0}, - {"fifo_queue5_tti3_urange_c", 0}, - {"fifo_queue5_tti3_ufc_c", 0}, - {"fifo_queue5_tti3_ufc_d", 0}, - {"fifo_queue5_tti3_timer_val_us", 0}, - {"fifo_queue5_tti3_timer_ac_en", 0}, - {"fifo_queue5_tti3_timer_ci_en", 0}, - - {"fifo_queue5_tti4_enabled", 0}, - {"fifo_queue5_tti4_urange_a", 0}, - {"fifo_queue5_tti4_ufc_a", 0}, - {"fifo_queue5_tti4_urange_b", 0}, - {"fifo_queue5_tti4_fc_b", 0}, - {"fifo_queue5_tti4_urange_c", 0}, - {"fifo_queue5_tti4_ufc_c", 0}, - {"fifo_queue5_tti4_ufc_d", 0}, - {"fifo_queue5_tti4_timer_val_us", 0}, - {"fifo_queue5_tti4_timer_ac_en", 0}, - {"fifo_queue5_tti4_timer_ci_en", 0}, - - {"fifo_queue5_tti5_enabled", 0}, - {"fifo_queue5_tti5_urange_a", 0}, - {"fifo_queue5_tti5_ufc_a", 0}, - {"fifo_queue5_tti5_urange_b", 0}, - {"fifo_queue5_tti5_ufc_b", 0}, - {"fifo_queue5_tti5_urange_c", 0}, - {"fifo_queue5_tti5_ufc_c", 0}, - {"fifo_queue5_tti5_ufc_d", 0}, - {"fifo_queue5_tti5_timer_val_us", 0}, - {"fifo_queue5_tti5_timer_ac_en", 0}, - {"fifo_queue5_tti5_timer_ci_en", 0}, - - {"fifo_queue5_tti6_enabled", 0}, - {"fifo_queue5_tti6_urange_a", 0}, - 
{"fifo_queue5_tti6_ufc_a", 0}, - {"fifo_queue5_tti6_urange_b", 0}, - {"fifo_queue5_tti6_ufc_b", 0}, - {"fifo_queue5_tti6_urange_c", 0}, - {"fifo_queue5_tti6_ufc_c", 0}, - {"fifo_queue5_tti6_ufc_d", 0}, - {"fifo_queue5_tti6_timer_val_us", 0}, - {"fifo_queue5_tti6_timer_ac_en", 0}, - {"fifo_queue5_tti6_timer_ci_en", 0}, - - {"fifo_queue6_max", 0}, - {"fifo_queue6_initial", 0}, - {"fifo_queue6_intr", 0}, - {"fifo_queue6_intr_vector", 0}, - {"fifo_queue6_no_snoop_bits", 0}, - {"fifo_queue6_priority", 0}, - {"fifo_queue6_configured", 0}, - - {"fifo_queue6_tti0_enabled", 0}, - {"fifo_queue6_tti0_urange_a", 0}, - {"fifo_queue6_tti0_ufc_a", 0}, - {"fifo_queue6_tti0_urange_b", 0}, - {"fifo_queue6_tti0_ufc_b", 0}, - {"fifo_queue6_tti0_urange_c", 0}, - {"fifo_queue6_tti0_ufc_c", 0}, - {"fifo_queue6_tti0_ufc_d", 0}, - {"fifo_queue6_tti0_timer_val_us", 0}, - {"fifo_queue6_tti0_timer_ac_en", 0}, - {"fifo_queue6_tti0_timer_ci_en", 0}, - - {"fifo_queue6_tti1_enabled", 0}, - {"fifo_queue6_tti1_urange_a", 0}, - {"fifo_queue6_tti1_ufc_a", 0}, - {"fifo_queue6_tti1_urange_b", 0}, - {"fifo_queue6_tti1_ufc_b", 0}, - {"fifo_queue6_tti1_urange_c", 0}, - {"fifo_queue6_tti1_ufc_c", 0}, - {"fifo_queue6_tti1_ufc_d", 0}, - {"fifo_queue6_tti1_timer_val_us", 0}, - {"fifo_queue6_tti1_timer_ac_en", 0}, - {"fifo_queue6_tti1_timer_ci_en", 0}, - - {"fifo_queue6_tti2_enabled", 0}, - {"fifo_queue6_tti2_urange_a", 0}, - {"fifo_queue6_tti2_ufc_a", 0}, - {"fifo_queue6_tti2_urange_b", 0}, - {"fifo_queue6_tti2_ufc_b", 0}, - {"fifo_queue6_tti2_urange_c", 0}, - {"fifo_queue6_tti2_ufc_c", 0}, - {"fifo_queue6_tti2_ufc_d", 0}, - {"fifo_queue6_tti2_timer_val_us", 0}, - {"fifo_queue6_tti2_timer_ac_en", 0}, - {"fifo_queue6_tti2_timer_ci_en", 0}, - - {"fifo_queue6_tti3_enabled", 0}, - {"fifo_queue6_tti3_urange_a", 0}, - {"fifo_queue6_tti3_ufc_a", 0}, - {"fifo_queue6_tti3_urange_b", 0}, - {"fifo_queue6_tti3_ufc_b", 0}, - {"fifo_queue6_tti3_urange_c", 0}, - {"fifo_queue6_tti3_ufc_c", 0}, - {"fifo_queue6_tti3_ufc_d", 0}, - {"fifo_queue6_tti3_timer_val_us", 0}, - {"fifo_queue6_tti3_timer_ac_en", 0}, - {"fifo_queue6_tti3_timer_ci_en", 0}, - - {"fifo_queue6_tti4_enabled", 0}, - {"fifo_queue6_tti4_urange_a", 0}, - {"fifo_queue6_tti4_ufc_a", 0}, - {"fifo_queue6_tti4_urange_b", 0}, - {"fifo_queue6_tti4_ufc_b", 0}, - {"fifo_queue6_tti4_urange_c", 0}, - {"fifo_queue6_tti4_ufc_c", 0}, - {"fifo_queue6_tti4_ufc_d", 0}, - {"fifo_queue6_tti4_timer_val_us", 0}, - {"fifo_queue6_tti4_timer_ac_en", 0}, - {"fifo_queue6_tti4_timer_ci_en", 0}, - - {"fifo_queue6_tti5_enabled", 0}, - {"fifo_queue6_tti5_urange_a", 0}, - {"fifo_queue6_tti5_ufc_a", 0}, - {"fifo_queue6_tti5_urange_b", 0}, - {"fifo_queue6_tti5_ufc_b", 0}, - {"fifo_queue6_tti5_urange_c", 0}, - {"fifo_queue6_tti5_ufc_c", 0}, - {"fifo_queue6_tti5_ufc_d", 0}, - {"fifo_queue6_tti5_timer_val_us", 0}, - {"fifo_queue6_tti5_timer_ac_en", 0}, - {"fifo_queue6_tti5_timer_ci_en", 0}, - - {"fifo_queue6_tti6_enabled", 0}, - {"fifo_queue6_tti6_urange_a", 0}, - {"fifo_queue6_tti6_ufc_a", 0}, - {"fifo_queue6_tti6_urange_b", 0}, - {"fifo_queue6_tti6_ufc_b", 0}, - {"fifo_queue6_tti6_urange_c", 0}, - {"fifo_queue6_tti6_ufc_c", 0}, - {"fifo_queue6_tti6_ufc_d", 0}, - {"fifo_queue6_tti6_timer_val_us", 0}, - {"fifo_queue6_tti6_timer_ac_en", 0}, - {"fifo_queue6_tti6_timer_ci_en", 0}, - - {"fifo_queue7_max", 0}, - {"fifo_queue7_initial", 0}, - {"fifo_queue7_intr", 0}, - {"fifo_queue7_intr_vector", 0}, - {"fifo_queue7_no_snoop_bits", 0}, - {"fifo_queue7_priority", 0}, - {"fifo_queue7_configured", 0}, - - 
{"fifo_queue7_tti0_enabled", 0}, - {"fifo_queue7_tti0_urange_a", 0}, - {"fifo_queue7_tti0_ufc_a", 0}, - {"fifo_queue7_tti0_urange_b", 0}, - {"fifo_queue7_tti0_ufc_b", 0}, - {"fifo_queue7_tti0_urange_c", 0}, - {"fifo_queue7_tti0_ufc_c", 0}, - {"fifo_queue7_tti0_ufc_d", 0}, - {"fifo_queue7_tti0_timer_val_us", 0}, - {"fifo_queue7_tti0_timer_ac_en", 0}, - {"fifo_queue7_tti0_timer_ci_en", 0}, - - {"fifo_queue7_tti1_enabled", 0}, - {"fifo_queue7_tti1_urange_a", 0}, - {"fifo_queue7_tti1_ufc_a", 0}, - {"fifo_queue7_tti1_urange_b", 0}, - {"fifo_queue7_tti1_ufc_b", 0}, - {"fifo_queue7_tti1_urange_c", 0}, - {"fifo_queue7_tti1_ufc_c", 0}, - {"fifo_queue7_tti1_ufc_d", 0}, - {"fifo_queue7_tti1_timer_val_us", 0}, - {"fifo_queue7_tti1_timer_ac_en", 0}, - {"fifo_queue7_tti1_timer_ci_en", 0}, - - {"fifo_queue7_tti2_enabled", 0}, - {"fifo_queue7_tti2_urange_a", 0}, - {"fifo_queue7_tti2_ufc_a", 0}, - {"fifo_queue7_tti2_urange_b", 0}, - {"fifo_queue7_tti2_ufc_b", 0}, - {"fifo_queue7_tti2_urange_c", 0}, - {"fifo_queue7_tti2_ufc_c", 0}, - {"fifo_queue7_tti2_ufc_d", 0}, - {"fifo_queue7_tti2_timer_val_us", 0}, - {"fifo_queue7_tti2_timer_ac_en", 0}, - {"fifo_queue7_tti2_timer_ci_en", 0}, - - {"fifo_queue7_tti3_enabled", 0}, - {"fifo_queue7_tti3_urange_a", 0}, - {"fifo_queue7_tti3_ufc_a", 0}, - {"fifo_queue7_tti3_urange_b", 0}, - {"fifo_queue7_tti3_ufc_b", 0}, - {"fifo_queue7_tti3_urange_c", 0}, - {"fifo_queue7_tti3_ufc_c", 0}, - {"fifo_queue7_tti3_ufc_d", 0}, - {"fifo_queue7_tti3_timer_val_us", 0}, - {"fifo_queue7_tti3_timer_ac_en", 0}, - {"fifo_queue7_tti3_timer_ci_en", 0}, - - {"fifo_queue7_tti4_enabled", 0}, - {"fifo_queue7_tti4_urange_a", 0}, - {"fifo_queue7_tti4_ufc_a", 0}, - {"fifo_queue7_tti4_urange_b", 0}, - {"fifo_queue7_tti4_ufc_b", 0}, - {"fifo_queue7_tti4_urange_c", 0}, - {"fifo_queue7_tti4_ufc_c", 0}, - {"fifo_queue7_tti4_ufc_d", 0}, - {"fifo_queue7_tti4_timer_val_us", 0}, - {"fifo_queue7_tti4_timer_ac_en", 0}, - {"fifo_queue7_tti4_timer_ci_en", 0}, - - {"fifo_queue7_tti5_enabled", 0}, - {"fifo_queue7_tti5_urange_a", 0}, - {"fifo_queue7_tti5_ufc_a", 0}, - {"fifo_queue7_tti5_urange_b", 0}, - {"fifo_queue7_tti5_ufc_b", 0}, - {"fifo_queue7_tti5_urange_c", 0}, - {"fifo_queue7_tti5_ufc_c", 0}, - {"fifo_queue7_tti5_ufc_d", 0}, - {"fifo_queue7_tti5_timer_val_us", 0}, - {"fifo_queue7_tti5_timer_ac_en", 0}, - {"fifo_queue7_tti5_timer_ci_en", 0}, - - {"fifo_queue7_tti6_enabled", 0}, - {"fifo_queue7_tti6_urange_a", 0}, - {"fifo_queue7_tti6_ufc_a", 0}, - {"fifo_queue7_tti6_urange_b", 0}, - {"fifo_queue7_tti6_ufc_b", 0}, - {"fifo_queue7_tti6_urange_c", 0}, - {"fifo_queue7_tti6_ufc_c", 0}, - {"fifo_queue7_tti6_ufc_d", 0}, - {"fifo_queue7_tti6_timer_val_us", 0}, - {"fifo_queue7_tti6_timer_ac_en", 0}, - {"fifo_queue7_tti6_timer_ci_en", 0}, - - {"dump_on_serr", 0}, - {"dump_on_eccerr", 0}, - {"dump_on_parityerr", 0}, - {"rth_en", 0}, - {"rth_bucket_size", 0}, - {"rth_spdm_en", 0}, - {"rth_spdm_use_l4", 0}, - {"rxufca_intr_thres", 0}, - {"rxufca_lo_lim", 0}, - {"rxufca_hi_lim", 0}, - {"rxufca_lbolt_period", 0}, - {"link_valid_cnt", 0}, - {"link_retry_cnt", 0}, - {"link_stability_period", 0}, - {"device_poll_millis", 0}, - {"no_isr_events", 0}, - {"lro_sg_size", 0}, - {"lro_frm_len", 0}, - {"bimodal_interrupts", 0}, - {"bimodal_timer_lo_us", 0}, - {"bimodal_timer_hi_us", 0}, - {"rts_mac_en", 0}, - {"rts_qos_en", 0}, - {"rts_port_en", 0}, + {"mtu", 0}, + {"isr_polling_cnt", 0}, + {"latency_timer", 0}, + {"napi_weight", 0}, + {"max_splits_trans", 0}, + {"mmrb_count", 0}, + {"shared_splits", 0}, + {"stats_refresh_time_sec", 
0}, + {"pci_freq_mherz", 0}, + {"intr_mode", 0}, + {"sched_timer_us", 0}, + {"sched_timer_one_shot", 0}, + + {"ring_memblock_size", 0}, + {"ring_scatter_mode", 0}, + {"ring_strip_vlan_tag", 0}, + + {"ring_queue0_max", 0}, + {"ring_queue0_initial", 0}, + {"ring_queue0_buffer_mode", 0}, + {"ring_queue0_dram_size_mb", 0}, + {"ring_queue0_intr_vector", 0}, + {"ring_queue0_backoff_interval_us", 0}, + {"ring_queue0_max_frm_len", 0}, + {"ring_queue0_priority", 0}, + {"ring_queue0_no_snoop_bits", 0}, + {"ring_queue0_indicate_max_pkts", 0}, + {"ring_queue0_configured", 0}, + {"ring_queue0_rts_mac_en", 0}, + {"ring_queue0_rth_en", 0}, + {"ring_queue0_rts_port_en", 0}, + {"ring_queue0_rts_port0_num", 0}, + {"ring_queue0_rts_port0_udp", 0}, + {"ring_queue0_rts_port0_src", 0}, + {"ring_queue0_rts_port1_num", 0}, + {"ring_queue0_rts_port1_udp", 0}, + {"ring_queue0_rts_port1_src", 0}, + {"ring_queue0_rts_port2_num", 0}, + {"ring_queue0_rts_port2_udp", 0}, + {"ring_queue0_rts_port2_src", 0}, + {"ring_queue0_rts_port3_num", 0}, + {"ring_queue0_rts_port3_udp", 0}, + {"ring_queue0_rts_port3_src", 0}, + {"ring_queue0_rts_port4_num", 0}, + {"ring_queue0_rts_port4_udp", 0}, + {"ring_queue0_rts_port4_src", 0}, + {"ring_queue0_rts_port5_num", 0}, + {"ring_queue0_rts_port5_udp", 0}, + {"ring_queue0_rts_port5_src", 0}, + {"ring_queue0_rts_port6_num", 0}, + {"ring_queue0_rts_port6_udp", 0}, + {"ring_queue0_rts_port6_src", 0}, + {"ring_queue0_rts_port7_num", 0}, + {"ring_queue0_rts_port7_udp", 0}, + {"ring_queue0_rts_port7_src", 0}, + {"ring_queue0_rts_port8_num", 0}, + {"ring_queue0_rts_port8_udp", 0}, + {"ring_queue0_rts_port8_src", 0}, + {"ring_queue0_rts_port9_num", 0}, + {"ring_queue0_rts_port9_udp", 0}, + {"ring_queue0_rts_port9_src", 0}, + {"ring_queue0_rts_port10_num", 0}, + {"ring_queue0_rts_port10_udp", 0}, + {"ring_queue0_rts_port10_src", 0}, + {"ring_queue0_rts_port11_num", 0}, + {"ring_queue0_rts_port11_udp", 0}, + {"ring_queue0_rts_port11_src", 0}, + {"ring_queue0_rts_port12_num", 0}, + {"ring_queue0_rts_port12_udp", 0}, + {"ring_queue0_rts_port12_src", 0}, + {"ring_queue0_rts_port13_num", 0}, + {"ring_queue0_rts_port13_udp", 0}, + {"ring_queue0_rts_port13_src", 0}, + {"ring_queue0_rts_port14_num", 0}, + {"ring_queue0_rts_port14_udp", 0}, + {"ring_queue0_rts_port14_src", 0}, + {"ring_queue0_rts_port15_num", 0}, + {"ring_queue0_rts_port15_udp", 0}, + {"ring_queue0_rts_port15_src", 0}, + {"ring_queue0_rts_port16_num", 0}, + {"ring_queue0_rts_port16_udp", 0}, + {"ring_queue0_rts_port16_src", 0}, + {"ring_queue0_rts_port17_num", 0}, + {"ring_queue0_rts_port17_udp", 0}, + {"ring_queue0_rts_port17_src", 0}, + {"ring_queue0_rts_port18_num", 0}, + {"ring_queue0_rts_port18_udp", 0}, + {"ring_queue0_rts_port18_src", 0}, + {"ring_queue0_rts_port19_num", 0}, + {"ring_queue0_rts_port19_udp", 0}, + {"ring_queue0_rts_port19_src", 0}, + {"ring_queue0_rts_port20_num", 0}, + {"ring_queue0_rts_port20_udp", 0}, + {"ring_queue0_rts_port20_src", 0}, + {"ring_queue0_rts_port21_num", 0}, + {"ring_queue0_rts_port21_udp", 0}, + {"ring_queue0_rts_port21_src", 0}, + {"ring_queue0_rts_port22_num", 0}, + {"ring_queue0_rts_port22_udp", 0}, + {"ring_queue0_rts_port22_src", 0}, + {"ring_queue0_rts_port23_num", 0}, + {"ring_queue0_rts_port23_udp", 0}, + {"ring_queue0_rts_port23_src", 0}, + {"ring_queue0_rts_port24_num", 0}, + {"ring_queue0_rts_port24_udp", 0}, + {"ring_queue0_rts_port24_src", 0}, + {"ring_queue0_rts_port25_num", 0}, + {"ring_queue0_rts_port25_udp", 0}, + {"ring_queue0_rts_port25_src", 0}, + 
{"ring_queue0_rts_port26_num", 0}, + {"ring_queue0_rts_port26_udp", 0}, + {"ring_queue0_rts_port26_src", 0}, + {"ring_queue0_rts_port27_num", 0}, + {"ring_queue0_rts_port27_udp", 0}, + {"ring_queue0_rts_port27_src", 0}, + {"ring_queue0_rts_port28_num", 0}, + {"ring_queue0_rts_port28_udp", 0}, + {"ring_queue0_rts_port28_src", 0}, + {"ring_queue0_rts_port29_num", 0}, + {"ring_queue0_rts_port29_udp", 0}, + {"ring_queue0_rts_port29_src", 0}, + {"ring_queue0_rts_port30_num", 0}, + {"ring_queue0_rts_port30_udp", 0}, + {"ring_queue0_rts_port30_src", 0}, + {"ring_queue0_rts_port31_num", 0}, + {"ring_queue0_rts_port31_udp", 0}, + {"ring_queue0_rts_port31_src", 0}, + {"ring_queue0_rti_urange_a", 0}, + {"ring_queue0_rti_ufc_a", 0}, + {"ring_queue0_rti_urange_b", 0}, + {"ring_queue0_rti_ufc_b", 0}, + {"ring_queue0_rti_urange_c", 0}, + {"ring_queue0_rti_ufc_c", 0}, + {"ring_queue0_rti_ufc_d", 0}, + {"ring_queue0_rti_timer_ac_en", 0}, + {"ring_queue0_rti_timer_val_us", 0}, + + {"ring_queue1_max", 0}, + {"ring_queue1_initial", 0}, + {"ring_queue1_buffer_mode", 0}, + {"ring_queue1_dram_size_mb", 0}, + {"ring_queue1_intr_vector", 0}, + {"ring_queue1_backoff_interval_us", 0}, + {"ring_queue1_max_frm_len", 0}, + {"ring_queue1_priority", 0}, + {"ring_queue1_no_snoop_bits", 0}, + {"ring_queue1_indicate_max_pkts", 0}, + {"ring_queue1_configured", 0}, + {"ring_queue1_rts_mac_en", 0}, + {"ring_queue1_rth_en", 0}, + {"ring_queue1_rts_port_en", 0}, + {"ring_queue1_rts_port0_num", 0}, + {"ring_queue1_rts_port0_udp", 0}, + {"ring_queue1_rts_port0_src", 0}, + {"ring_queue1_rts_port1_num", 0}, + {"ring_queue1_rts_port1_udp", 0}, + {"ring_queue1_rts_port1_src", 0}, + {"ring_queue1_rts_port2_num", 0}, + {"ring_queue1_rts_port2_udp", 0}, + {"ring_queue1_rts_port2_src", 0}, + {"ring_queue1_rts_port3_num", 0}, + {"ring_queue1_rts_port3_udp", 0}, + {"ring_queue1_rts_port3_src", 0}, + {"ring_queue1_rts_port4_num", 0}, + {"ring_queue1_rts_port4_udp", 0}, + {"ring_queue1_rts_port4_src", 0}, + {"ring_queue1_rts_port5_num", 0}, + {"ring_queue1_rts_port5_udp", 0}, + {"ring_queue1_rts_port5_src", 0}, + {"ring_queue1_rts_port6_num", 0}, + {"ring_queue1_rts_port6_udp", 0}, + {"ring_queue1_rts_port6_src", 0}, + {"ring_queue1_rts_port7_num", 0}, + {"ring_queue1_rts_port7_udp", 0}, + {"ring_queue1_rts_port7_src", 0}, + {"ring_queue1_rts_port8_num", 0}, + {"ring_queue1_rts_port8_udp", 0}, + {"ring_queue1_rts_port8_src", 0}, + {"ring_queue1_rts_port9_num", 0}, + {"ring_queue1_rts_port9_udp", 0}, + {"ring_queue1_rts_port9_src", 0}, + {"ring_queue1_rts_port10_num", 0}, + {"ring_queue1_rts_port10_udp", 0}, + {"ring_queue1_rts_port10_src", 0}, + {"ring_queue1_rts_port11_num", 0}, + {"ring_queue1_rts_port11_udp", 0}, + {"ring_queue1_rts_port11_src", 0}, + {"ring_queue1_rts_port12_num", 0}, + {"ring_queue1_rts_port12_udp", 0}, + {"ring_queue1_rts_port12_src", 0}, + {"ring_queue1_rts_port13_num", 0}, + {"ring_queue1_rts_port13_udp", 0}, + {"ring_queue1_rts_port13_src", 0}, + {"ring_queue1_rts_port14_num", 0}, + {"ring_queue1_rts_port14_udp", 0}, + {"ring_queue1_rts_port14_src", 0}, + {"ring_queue1_rts_port15_num", 0}, + {"ring_queue1_rts_port15_udp", 0}, + {"ring_queue1_rts_port15_src", 0}, + {"ring_queue1_rts_port16_num", 0}, + {"ring_queue1_rts_port16_udp", 0}, + {"ring_queue1_rts_port16_src", 0}, + {"ring_queue1_rts_port17_num", 0}, + {"ring_queue1_rts_port17_udp", 0}, + {"ring_queue1_rts_port17_src", 0}, + {"ring_queue1_rts_port18_num", 0}, + {"ring_queue1_rts_port18_udp", 0}, + {"ring_queue1_rts_port18_src", 0}, + 
{"ring_queue1_rts_port19_num", 0}, + {"ring_queue1_rts_port19_udp", 0}, + {"ring_queue1_rts_port19_src", 0}, + {"ring_queue1_rts_port20_num", 0}, + {"ring_queue1_rts_port20_udp", 0}, + {"ring_queue1_rts_port20_src", 0}, + {"ring_queue1_rts_port21_num", 0}, + {"ring_queue1_rts_port21_udp", 0}, + {"ring_queue1_rts_port21_src", 0}, + {"ring_queue1_rts_port22_num", 0}, + {"ring_queue1_rts_port22_udp", 0}, + {"ring_queue1_rts_port22_src", 0}, + {"ring_queue1_rts_port23_num", 0}, + {"ring_queue1_rts_port23_udp", 0}, + {"ring_queue1_rts_port23_src", 0}, + {"ring_queue1_rts_port24_num", 0}, + {"ring_queue1_rts_port24_udp", 0}, + {"ring_queue1_rts_port24_src", 0}, + {"ring_queue1_rts_port25_num", 0}, + {"ring_queue1_rts_port25_udp", 0}, + {"ring_queue1_rts_port25_src", 0}, + {"ring_queue1_rts_port26_num", 0}, + {"ring_queue1_rts_port26_udp", 0}, + {"ring_queue1_rts_port26_src", 0}, + {"ring_queue1_rts_port27_num", 0}, + {"ring_queue1_rts_port27_udp", 0}, + {"ring_queue1_rts_port27_src", 0}, + {"ring_queue1_rts_port28_num", 0}, + {"ring_queue1_rts_port28_udp", 0}, + {"ring_queue1_rts_port28_src", 0}, + {"ring_queue1_rts_port29_num", 0}, + {"ring_queue1_rts_port29_udp", 0}, + {"ring_queue1_rts_port29_src", 0}, + {"ring_queue1_rts_port30_num", 0}, + {"ring_queue1_rts_port30_udp", 0}, + {"ring_queue1_rts_port30_src", 0}, + {"ring_queue1_rts_port31_num", 0}, + {"ring_queue1_rts_port31_udp", 0}, + {"ring_queue1_rts_port31_src", 0}, + {"ring_queue1_rti_urange_a", 0}, + {"ring_queue1_rti_ufc_a", 0}, + {"ring_queue1_rti_urange_b", 0}, + {"ring_queue1_rti_ufc_b", 0}, + {"ring_queue1_rti_urange_c", 0}, + {"ring_queue1_rti_ufc_c", 0}, + {"ring_queue1_rti_ufc_d", 0}, + {"ring_queue1_rti_timer_ac_en", 0}, + {"ring_queue1_rti_timer_val_us", 0}, + + {"ring_queue2_max", 0}, + {"ring_queue2_initial", 0}, + {"ring_queue2_buffer_mode", 0}, + {"ring_queue2_dram_size_mb", 0}, + {"ring_queue2_intr_vector", 0}, + {"ring_queue2_backoff_interval_us", 0}, + {"ring_queue2_max_frm_len", 0}, + {"ring_queue2_priority", 0}, + {"ring_queue2_no_snoop_bits", 0}, + {"ring_queue2_indicate_max_pkts", 0}, + {"ring_queue2_configured", 0}, + {"ring_queue2_rts_mac_en", 0}, + {"ring_queue2_rth_en", 0}, + {"ring_queue2_rts_port_en", 0}, + {"ring_queue2_rts_port0_num", 0}, + {"ring_queue2_rts_port0_udp", 0}, + {"ring_queue2_rts_port0_src", 0}, + {"ring_queue2_rts_port1_num", 0}, + {"ring_queue2_rts_port1_udp", 0}, + {"ring_queue2_rts_port1_src", 0}, + {"ring_queue2_rts_port2_num", 0}, + {"ring_queue2_rts_port2_udp", 0}, + {"ring_queue2_rts_port2_src", 0}, + {"ring_queue2_rts_port3_num", 0}, + {"ring_queue2_rts_port3_udp", 0}, + {"ring_queue2_rts_port3_src", 0}, + {"ring_queue2_rts_port4_num", 0}, + {"ring_queue2_rts_port4_udp", 0}, + {"ring_queue2_rts_port4_src", 0}, + {"ring_queue2_rts_port5_num", 0}, + {"ring_queue2_rts_port5_udp", 0}, + {"ring_queue2_rts_port5_src", 0}, + {"ring_queue2_rts_port6_num", 0}, + {"ring_queue2_rts_port6_udp", 0}, + {"ring_queue2_rts_port6_src", 0}, + {"ring_queue2_rts_port7_num", 0}, + {"ring_queue2_rts_port7_udp", 0}, + {"ring_queue2_rts_port7_src", 0}, + {"ring_queue2_rts_port8_num", 0}, + {"ring_queue2_rts_port8_udp", 0}, + {"ring_queue2_rts_port8_src", 0}, + {"ring_queue2_rts_port9_num", 0}, + {"ring_queue2_rts_port9_udp", 0}, + {"ring_queue2_rts_port9_src", 0}, + {"ring_queue2_rts_port10_num", 0}, + {"ring_queue2_rts_port10_udp", 0}, + {"ring_queue2_rts_port10_src", 0}, + {"ring_queue2_rts_port11_num", 0}, + {"ring_queue2_rts_port11_udp", 0}, + {"ring_queue2_rts_port11_src", 0}, + 
{"ring_queue2_rts_port12_num", 0}, + {"ring_queue2_rts_port12_udp", 0}, + {"ring_queue2_rts_port12_src", 0}, + {"ring_queue2_rts_port13_num", 0}, + {"ring_queue2_rts_port13_udp", 0}, + {"ring_queue2_rts_port13_src", 0}, + {"ring_queue2_rts_port14_num", 0}, + {"ring_queue2_rts_port14_udp", 0}, + {"ring_queue2_rts_port14_src", 0}, + {"ring_queue2_rts_port15_num", 0}, + {"ring_queue2_rts_port15_udp", 0}, + {"ring_queue2_rts_port15_src", 0}, + {"ring_queue2_rts_port16_num", 0}, + {"ring_queue2_rts_port16_udp", 0}, + {"ring_queue2_rts_port16_src", 0}, + {"ring_queue2_rts_port17_num", 0}, + {"ring_queue2_rts_port17_udp", 0}, + {"ring_queue2_rts_port17_src", 0}, + {"ring_queue2_rts_port18_num", 0}, + {"ring_queue2_rts_port18_udp", 0}, + {"ring_queue2_rts_port18_src", 0}, + {"ring_queue2_rts_port19_num", 0}, + {"ring_queue2_rts_port19_udp", 0}, + {"ring_queue2_rts_port19_src", 0}, + {"ring_queue2_rts_port20_num", 0}, + {"ring_queue2_rts_port20_udp", 0}, + {"ring_queue2_rts_port20_src", 0}, + {"ring_queue2_rts_port21_num", 0}, + {"ring_queue2_rts_port21_udp", 0}, + {"ring_queue2_rts_port21_src", 0}, + {"ring_queue2_rts_port22_num", 0}, + {"ring_queue2_rts_port22_udp", 0}, + {"ring_queue2_rts_port22_src", 0}, + {"ring_queue2_rts_port23_num", 0}, + {"ring_queue2_rts_port23_udp", 0}, + {"ring_queue2_rts_port23_src", 0}, + {"ring_queue2_rts_port24_num", 0}, + {"ring_queue2_rts_port24_udp", 0}, + {"ring_queue2_rts_port24_src", 0}, + {"ring_queue2_rts_port25_num", 0}, + {"ring_queue2_rts_port25_udp", 0}, + {"ring_queue2_rts_port25_src", 0}, + {"ring_queue2_rts_port26_num", 0}, + {"ring_queue2_rts_port26_udp", 0}, + {"ring_queue2_rts_port26_src", 0}, + {"ring_queue2_rts_port27_num", 0}, + {"ring_queue2_rts_port27_udp", 0}, + {"ring_queue2_rts_port27_src", 0}, + {"ring_queue2_rts_port28_num", 0}, + {"ring_queue2_rts_port28_udp", 0}, + {"ring_queue2_rts_port28_src", 0}, + {"ring_queue2_rts_port29_num", 0}, + {"ring_queue2_rts_port29_udp", 0}, + {"ring_queue2_rts_port29_src", 0}, + {"ring_queue2_rts_port30_num", 0}, + {"ring_queue2_rts_port30_udp", 0}, + {"ring_queue2_rts_port30_src", 0}, + {"ring_queue2_rts_port31_num", 0}, + {"ring_queue2_rts_port31_udp", 0}, + {"ring_queue2_rts_port31_src", 0}, + {"ring_queue2_rti_urange_a", 0}, + {"ring_queue2_rti_ufc_a", 0}, + {"ring_queue2_rti_urange_b", 0}, + {"ring_queue2_rti_ufc_b", 0}, + {"ring_queue2_rti_urange_c", 0}, + {"ring_queue2_rti_ufc_c", 0}, + {"ring_queue2_rti_ufc_d", 0}, + {"ring_queue2_rti_timer_ac_en", 0}, + {"ring_queue2_rti_timer_val_us", 0}, + + {"ring_queue3_max", 0}, + {"ring_queue3_initial", 0}, + {"ring_queue3_buffer_mode", 0}, + {"ring_queue3_dram_size_mb", 0}, + {"ring_queue3_intr_vector", 0}, + {"ring_queue3_backoff_interval_us", 0}, + {"ring_queue3_max_frm_len", 0}, + {"ring_queue3_priority", 0}, + {"ring_queue3_no_snoop_bits", 0}, + {"ring_queue3_indicate_max_pkts", 0}, + {"ring_queue3_configured", 0}, + {"ring_queue3_rts_mac_en", 0}, + {"ring_queue3_rth_en", 0}, + {"ring_queue3_rts_port_en", 0}, + {"ring_queue3_rts_port0_num", 0}, + {"ring_queue3_rts_port0_udp", 0}, + {"ring_queue3_rts_port0_src", 0}, + {"ring_queue3_rts_port1_num", 0}, + {"ring_queue3_rts_port1_udp", 0}, + {"ring_queue3_rts_port1_src", 0}, + {"ring_queue3_rts_port2_num", 0}, + {"ring_queue3_rts_port2_udp", 0}, + {"ring_queue3_rts_port2_src", 0}, + {"ring_queue3_rts_port3_num", 0}, + {"ring_queue3_rts_port3_udp", 0}, + {"ring_queue3_rts_port3_src", 0}, + {"ring_queue3_rts_port4_num", 0}, + {"ring_queue3_rts_port4_udp", 0}, + {"ring_queue3_rts_port4_src", 0}, + 
{"ring_queue3_rts_port5_num", 0}, + {"ring_queue3_rts_port5_udp", 0}, + {"ring_queue3_rts_port5_src", 0}, + {"ring_queue3_rts_port6_num", 0}, + {"ring_queue3_rts_port6_udp", 0}, + {"ring_queue3_rts_port6_src", 0}, + {"ring_queue3_rts_port7_num", 0}, + {"ring_queue3_rts_port7_udp", 0}, + {"ring_queue3_rts_port7_src", 0}, + {"ring_queue3_rts_port8_num", 0}, + {"ring_queue3_rts_port8_udp", 0}, + {"ring_queue3_rts_port8_src", 0}, + {"ring_queue3_rts_port9_num", 0}, + {"ring_queue3_rts_port9_udp", 0}, + {"ring_queue3_rts_port9_src", 0}, + {"ring_queue3_rts_port10_num", 0}, + {"ring_queue3_rts_port10_udp", 0}, + {"ring_queue3_rts_port10_src", 0}, + {"ring_queue3_rts_port11_num", 0}, + {"ring_queue3_rts_port11_udp", 0}, + {"ring_queue3_rts_port11_src", 0}, + {"ring_queue3_rts_port12_num", 0}, + {"ring_queue3_rts_port12_udp", 0}, + {"ring_queue3_rts_port12_src", 0}, + {"ring_queue3_rts_port13_num", 0}, + {"ring_queue3_rts_port13_udp", 0}, + {"ring_queue3_rts_port13_src", 0}, + {"ring_queue3_rts_port14_num", 0}, + {"ring_queue3_rts_port14_udp", 0}, + {"ring_queue3_rts_port14_src", 0}, + {"ring_queue3_rts_port15_num", 0}, + {"ring_queue3_rts_port15_udp", 0}, + {"ring_queue3_rts_port15_src", 0}, + {"ring_queue3_rts_port16_num", 0}, + {"ring_queue3_rts_port16_udp", 0}, + {"ring_queue3_rts_port16_src", 0}, + {"ring_queue3_rts_port17_num", 0}, + {"ring_queue3_rts_port17_udp", 0}, + {"ring_queue3_rts_port17_src", 0}, + {"ring_queue3_rts_port18_num", 0}, + {"ring_queue3_rts_port18_udp", 0}, + {"ring_queue3_rts_port18_src", 0}, + {"ring_queue3_rts_port19_num", 0}, + {"ring_queue3_rts_port19_udp", 0}, + {"ring_queue3_rts_port19_src", 0}, + {"ring_queue3_rts_port20_num", 0}, + {"ring_queue3_rts_port20_udp", 0}, + {"ring_queue3_rts_port20_src", 0}, + {"ring_queue3_rts_port21_num", 0}, + {"ring_queue3_rts_port21_udp", 0}, + {"ring_queue3_rts_port21_src", 0}, + {"ring_queue3_rts_port22_num", 0}, + {"ring_queue3_rts_port22_udp", 0}, + {"ring_queue3_rts_port22_src", 0}, + {"ring_queue3_rts_port23_num", 0}, + {"ring_queue3_rts_port23_udp", 0}, + {"ring_queue3_rts_port23_src", 0}, + {"ring_queue3_rts_port24_num", 0}, + {"ring_queue3_rts_port24_udp", 0}, + {"ring_queue3_rts_port24_src", 0}, + {"ring_queue3_rts_port25_num", 0}, + {"ring_queue3_rts_port25_udp", 0}, + {"ring_queue3_rts_port25_src", 0}, + {"ring_queue3_rts_port26_num", 0}, + {"ring_queue3_rts_port26_udp", 0}, + {"ring_queue3_rts_port26_src", 0}, + {"ring_queue3_rts_port27_num", 0}, + {"ring_queue3_rts_port27_udp", 0}, + {"ring_queue3_rts_port27_src", 0}, + {"ring_queue3_rts_port28_num", 0}, + {"ring_queue3_rts_port28_udp", 0}, + {"ring_queue3_rts_port28_src", 0}, + {"ring_queue3_rts_port29_num", 0}, + {"ring_queue3_rts_port29_udp", 0}, + {"ring_queue3_rts_port29_src", 0}, + {"ring_queue3_rts_port30_num", 0}, + {"ring_queue3_rts_port30_udp", 0}, + {"ring_queue3_rts_port30_src", 0}, + {"ring_queue3_rts_port31_num", 0}, + {"ring_queue3_rts_port31_udp", 0}, + {"ring_queue3_rts_port31_src", 0}, + {"ring_queue3_rti_urange_a", 0}, + {"ring_queue3_rti_ufc_a", 0}, + {"ring_queue3_rti_urange_b", 0}, + {"ring_queue3_rti_ufc_b", 0}, + {"ring_queue3_rti_urange_c", 0}, + {"ring_queue3_rti_ufc_c", 0}, + {"ring_queue3_rti_ufc_d", 0}, + {"ring_queue3_rti_timer_ac_en", 0}, + {"ring_queue3_rti_timer_val_us", 0}, + + {"ring_queue4_max", 0}, + {"ring_queue4_initial", 0}, + {"ring_queue4_buffer_mode", 0}, + {"ring_queue4_dram_size_mb", 0}, + {"ring_queue4_intr_vector", 0}, + {"ring_queue4_backoff_interval_us", 0}, + {"ring_queue4_max_frm_len", 0}, + {"ring_queue4_priority", 
0}, + {"ring_queue4_no_snoop_bits", 0}, + {"ring_queue4_indicate_max_pkts", 0}, + {"ring_queue4_configured", 0}, + {"ring_queue4_rts_mac_en", 0}, + {"ring_queue4_rth_en", 0}, + {"ring_queue4_rts_port_en", 0}, + {"ring_queue4_rts_port0_num", 0}, + {"ring_queue4_rts_port0_udp", 0}, + {"ring_queue4_rts_port0_src", 0}, + {"ring_queue4_rts_port1_num", 0}, + {"ring_queue4_rts_port1_udp", 0}, + {"ring_queue4_rts_port1_src", 0}, + {"ring_queue4_rts_port2_num", 0}, + {"ring_queue4_rts_port2_udp", 0}, + {"ring_queue4_rts_port2_src", 0}, + {"ring_queue4_rts_port3_num", 0}, + {"ring_queue4_rts_port3_udp", 0}, + {"ring_queue4_rts_port3_src", 0}, + {"ring_queue4_rts_port4_num", 0}, + {"ring_queue4_rts_port4_udp", 0}, + {"ring_queue4_rts_port4_src", 0}, + {"ring_queue4_rts_port5_num", 0}, + {"ring_queue4_rts_port5_udp", 0}, + {"ring_queue4_rts_port5_src", 0}, + {"ring_queue4_rts_port6_num", 0}, + {"ring_queue4_rts_port6_udp", 0}, + {"ring_queue4_rts_port6_src", 0}, + {"ring_queue4_rts_port7_num", 0}, + {"ring_queue4_rts_port7_udp", 0}, + {"ring_queue4_rts_port7_src", 0}, + {"ring_queue4_rts_port8_num", 0}, + {"ring_queue4_rts_port8_udp", 0}, + {"ring_queue4_rts_port8_src", 0}, + {"ring_queue4_rts_port9_num", 0}, + {"ring_queue4_rts_port9_udp", 0}, + {"ring_queue4_rts_port9_src", 0}, + {"ring_queue4_rts_port10_num", 0}, + {"ring_queue4_rts_port10_udp", 0}, + {"ring_queue4_rts_port10_src", 0}, + {"ring_queue4_rts_port11_num", 0}, + {"ring_queue4_rts_port11_udp", 0}, + {"ring_queue4_rts_port11_src", 0}, + {"ring_queue4_rts_port12_num", 0}, + {"ring_queue4_rts_port12_udp", 0}, + {"ring_queue4_rts_port12_src", 0}, + {"ring_queue4_rts_port13_num", 0}, + {"ring_queue4_rts_port13_udp", 0}, + {"ring_queue4_rts_port13_src", 0}, + {"ring_queue4_rts_port14_num", 0}, + {"ring_queue4_rts_port14_udp", 0}, + {"ring_queue4_rts_port14_src", 0}, + {"ring_queue4_rts_port15_num", 0}, + {"ring_queue4_rts_port15_udp", 0}, + {"ring_queue4_rts_port15_src", 0}, + {"ring_queue4_rts_port16_num", 0}, + {"ring_queue4_rts_port16_udp", 0}, + {"ring_queue4_rts_port16_src", 0}, + {"ring_queue4_rts_port17_num", 0}, + {"ring_queue4_rts_port17_udp", 0}, + {"ring_queue4_rts_port17_src", 0}, + {"ring_queue4_rts_port18_num", 0}, + {"ring_queue4_rts_port18_udp", 0}, + {"ring_queue4_rts_port18_src", 0}, + {"ring_queue4_rts_port19_num", 0}, + {"ring_queue4_rts_port19_udp", 0}, + {"ring_queue4_rts_port19_src", 0}, + {"ring_queue4_rts_port20_num", 0}, + {"ring_queue4_rts_port20_udp", 0}, + {"ring_queue4_rts_port20_src", 0}, + {"ring_queue4_rts_port21_num", 0}, + {"ring_queue4_rts_port21_udp", 0}, + {"ring_queue4_rts_port21_src", 0}, + {"ring_queue4_rts_port22_num", 0}, + {"ring_queue4_rts_port22_udp", 0}, + {"ring_queue4_rts_port22_src", 0}, + {"ring_queue4_rts_port23_num", 0}, + {"ring_queue4_rts_port23_udp", 0}, + {"ring_queue4_rts_port23_src", 0}, + {"ring_queue4_rts_port24_num", 0}, + {"ring_queue4_rts_port24_udp", 0}, + {"ring_queue4_rts_port24_src", 0}, + {"ring_queue4_rts_port25_num", 0}, + {"ring_queue4_rts_port25_udp", 0}, + {"ring_queue4_rts_port25_src", 0}, + {"ring_queue4_rts_port26_num", 0}, + {"ring_queue4_rts_port26_udp", 0}, + {"ring_queue4_rts_port26_src", 0}, + {"ring_queue4_rts_port27_num", 0}, + {"ring_queue4_rts_port27_udp", 0}, + {"ring_queue4_rts_port27_src", 0}, + {"ring_queue4_rts_port28_num", 0}, + {"ring_queue4_rts_port28_udp", 0}, + {"ring_queue4_rts_port28_src", 0}, + {"ring_queue4_rts_port29_num", 0}, + {"ring_queue4_rts_port29_udp", 0}, + {"ring_queue4_rts_port29_src", 0}, + {"ring_queue4_rts_port30_num", 0}, + 
{"ring_queue4_rts_port30_udp", 0}, + {"ring_queue4_rts_port30_src", 0}, + {"ring_queue4_rts_port31_num", 0}, + {"ring_queue4_rts_port31_udp", 0}, + {"ring_queue4_rts_port31_src", 0}, + {"ring_queue4_rti_urange_a", 0}, + {"ring_queue4_rti_ufc_a", 0}, + {"ring_queue4_rti_urange_b", 0}, + {"ring_queue4_rti_ufc_b", 0}, + {"ring_queue4_rti_urange_c", 0}, + {"ring_queue4_rti_ufc_c", 0}, + {"ring_queue4_rti_ufc_d", 0}, + {"ring_queue4_rti_timer_ac_en", 0}, + {"ring_queue4_rti_timer_val_us", 0}, + + {"ring_queue5_max", 0}, + {"ring_queue5_initial", 0}, + {"ring_queue5_buffer_mode", 0}, + {"ring_queue5_dram_size_mb", 0}, + {"ring_queue5_intr_vector", 0}, + {"ring_queue5_backoff_interval_us", 0}, + {"ring_queue5_max_frm_len", 0}, + {"ring_queue5_priority", 0}, + {"ring_queue5_no_snoop_bits", 0}, + {"ring_queue5_indicate_max_pkts", 0}, + {"ring_queue5_configured", 0}, + {"ring_queue5_rts_mac_en", 0}, + {"ring_queue5_rth_en", 0}, + {"ring_queue5_rts_port_en", 0}, + {"ring_queue5_rts_port0_num", 0}, + {"ring_queue5_rts_port0_udp", 0}, + {"ring_queue5_rts_port0_src", 0}, + {"ring_queue5_rts_port1_num", 0}, + {"ring_queue5_rts_port1_udp", 0}, + {"ring_queue5_rts_port1_src", 0}, + {"ring_queue5_rts_port2_num", 0}, + {"ring_queue5_rts_port2_udp", 0}, + {"ring_queue5_rts_port2_src", 0}, + {"ring_queue5_rts_port3_num", 0}, + {"ring_queue5_rts_port3_udp", 0}, + {"ring_queue5_rts_port3_src", 0}, + {"ring_queue5_rts_port4_num", 0}, + {"ring_queue5_rts_port4_udp", 0}, + {"ring_queue5_rts_port4_src", 0}, + {"ring_queue5_rts_port5_num", 0}, + {"ring_queue5_rts_port5_udp", 0}, + {"ring_queue5_rts_port5_src", 0}, + {"ring_queue5_rts_port6_num", 0}, + {"ring_queue5_rts_port6_udp", 0}, + {"ring_queue5_rts_port6_src", 0}, + {"ring_queue5_rts_port7_num", 0}, + {"ring_queue5_rts_port7_udp", 0}, + {"ring_queue5_rts_port7_src", 0}, + {"ring_queue5_rts_port8_num", 0}, + {"ring_queue5_rts_port8_udp", 0}, + {"ring_queue5_rts_port8_src", 0}, + {"ring_queue5_rts_port9_num", 0}, + {"ring_queue5_rts_port9_udp", 0}, + {"ring_queue5_rts_port9_src", 0}, + {"ring_queue5_rts_port10_num", 0}, + {"ring_queue5_rts_port10_udp", 0}, + {"ring_queue5_rts_port10_src", 0}, + {"ring_queue5_rts_port11_num", 0}, + {"ring_queue5_rts_port11_udp", 0}, + {"ring_queue5_rts_port11_src", 0}, + {"ring_queue5_rts_port12_num", 0}, + {"ring_queue5_rts_port12_udp", 0}, + {"ring_queue5_rts_port12_src", 0}, + {"ring_queue5_rts_port13_num", 0}, + {"ring_queue5_rts_port13_udp", 0}, + {"ring_queue5_rts_port13_src", 0}, + {"ring_queue5_rts_port14_num", 0}, + {"ring_queue5_rts_port14_udp", 0}, + {"ring_queue5_rts_port14_src", 0}, + {"ring_queue5_rts_port15_num", 0}, + {"ring_queue5_rts_port15_udp", 0}, + {"ring_queue5_rts_port15_src", 0}, + {"ring_queue5_rts_port16_num", 0}, + {"ring_queue5_rts_port16_udp", 0}, + {"ring_queue5_rts_port16_src", 0}, + {"ring_queue5_rts_port17_num", 0}, + {"ring_queue5_rts_port17_udp", 0}, + {"ring_queue5_rts_port17_src", 0}, + {"ring_queue5_rts_port18_num", 0}, + {"ring_queue5_rts_port18_udp", 0}, + {"ring_queue5_rts_port18_src", 0}, + {"ring_queue5_rts_port19_num", 0}, + {"ring_queue5_rts_port19_udp", 0}, + {"ring_queue5_rts_port19_src", 0}, + {"ring_queue5_rts_port20_num", 0}, + {"ring_queue5_rts_port20_udp", 0}, + {"ring_queue5_rts_port20_src", 0}, + {"ring_queue5_rts_port21_num", 0}, + {"ring_queue5_rts_port21_udp", 0}, + {"ring_queue5_rts_port21_src", 0}, + {"ring_queue5_rts_port22_num", 0}, + {"ring_queue5_rts_port22_udp", 0}, + {"ring_queue5_rts_port22_src", 0}, + {"ring_queue5_rts_port23_num", 0}, + 
{"ring_queue5_rts_port23_udp", 0}, + {"ring_queue5_rts_port23_src", 0}, + {"ring_queue5_rts_port24_num", 0}, + {"ring_queue5_rts_port24_udp", 0}, + {"ring_queue5_rts_port24_src", 0}, + {"ring_queue5_rts_port25_num", 0}, + {"ring_queue5_rts_port25_udp", 0}, + {"ring_queue5_rts_port25_src", 0}, + {"ring_queue5_rts_port26_num", 0}, + {"ring_queue5_rts_port26_udp", 0}, + {"ring_queue5_rts_port26_src", 0}, + {"ring_queue5_rts_port27_num", 0}, + {"ring_queue5_rts_port27_udp", 0}, + {"ring_queue5_rts_port27_src", 0}, + {"ring_queue5_rts_port28_num", 0}, + {"ring_queue5_rts_port28_udp", 0}, + {"ring_queue5_rts_port28_src", 0}, + {"ring_queue5_rts_port29_num", 0}, + {"ring_queue5_rts_port29_udp", 0}, + {"ring_queue5_rts_port29_src", 0}, + {"ring_queue5_rts_port30_num", 0}, + {"ring_queue5_rts_port30_udp", 0}, + {"ring_queue5_rts_port30_src", 0}, + {"ring_queue5_rts_port31_num", 0}, + {"ring_queue5_rts_port31_udp", 0}, + {"ring_queue5_rts_port31_src", 0}, + {"ring_queue5_rti_urange_a", 0}, + {"ring_queue5_rti_ufc_a", 0}, + {"ring_queue5_rti_urange_b", 0}, + {"ring_queue5_rti_ufc_b", 0}, + {"ring_queue5_rti_urange_c", 0}, + {"ring_queue5_rti_ufc_c", 0}, + {"ring_queue5_rti_ufc_d", 0}, + {"ring_queue5_rti_timer_ac_en", 0}, + {"ring_queue5_rti_timer_val_us", 0}, + + {"ring_queue6_max", 0}, + {"ring_queue6_initial", 0}, + {"ring_queue6_buffer_mode", 0}, + {"ring_queue6_dram_size_mb", 0}, + {"ring_queue6_intr_vector", 0}, + {"ring_queue6_backoff_interval_us", 0}, + {"ring_queue6_max_frm_len", 0}, + {"ring_queue6_priority", 0}, + {"ring_queue6_no_snoop_bits", 0}, + {"ring_queue6_indicate_max_pkts", 0}, + {"ring_queue6_configured", 0}, + {"ring_queue6_rts_mac_en", 0}, + {"ring_queue6_rth_en", 0}, + {"ring_queue6_rts_port_en", 0}, + {"ring_queue6_rts_port0_num", 0}, + {"ring_queue6_rts_port0_udp", 0}, + {"ring_queue6_rts_port0_src", 0}, + {"ring_queue6_rts_port1_num", 0}, + {"ring_queue6_rts_port1_udp", 0}, + {"ring_queue6_rts_port1_src", 0}, + {"ring_queue6_rts_port2_num", 0}, + {"ring_queue6_rts_port2_udp", 0}, + {"ring_queue6_rts_port2_src", 0}, + {"ring_queue6_rts_port3_num", 0}, + {"ring_queue6_rts_port3_udp", 0}, + {"ring_queue6_rts_port3_src", 0}, + {"ring_queue6_rts_port4_num", 0}, + {"ring_queue6_rts_port4_udp", 0}, + {"ring_queue6_rts_port4_src", 0}, + {"ring_queue6_rts_port5_num", 0}, + {"ring_queue6_rts_port5_udp", 0}, + {"ring_queue6_rts_port5_src", 0}, + {"ring_queue6_rts_port6_num", 0}, + {"ring_queue6_rts_port6_udp", 0}, + {"ring_queue6_rts_port6_src", 0}, + {"ring_queue6_rts_port7_num", 0}, + {"ring_queue6_rts_port7_udp", 0}, + {"ring_queue6_rts_port7_src", 0}, + {"ring_queue6_rts_port8_num", 0}, + {"ring_queue6_rts_port8_udp", 0}, + {"ring_queue6_rts_port8_src", 0}, + {"ring_queue6_rts_port9_num", 0}, + {"ring_queue6_rts_port9_udp", 0}, + {"ring_queue6_rts_port9_src", 0}, + {"ring_queue6_rts_port10_num", 0}, + {"ring_queue6_rts_port10_udp", 0}, + {"ring_queue6_rts_port10_src", 0}, + {"ring_queue6_rts_port11_num", 0}, + {"ring_queue6_rts_port11_udp", 0}, + {"ring_queue6_rts_port11_src", 0}, + {"ring_queue6_rts_port12_num", 0}, + {"ring_queue6_rts_port12_udp", 0}, + {"ring_queue6_rts_port12_src", 0}, + {"ring_queue6_rts_port13_num", 0}, + {"ring_queue6_rts_port13_udp", 0}, + {"ring_queue6_rts_port13_src", 0}, + {"ring_queue6_rts_port14_num", 0}, + {"ring_queue6_rts_port14_udp", 0}, + {"ring_queue6_rts_port14_src", 0}, + {"ring_queue6_rts_port15_num", 0}, + {"ring_queue6_rts_port15_udp", 0}, + {"ring_queue6_rts_port15_src", 0}, + {"ring_queue6_rts_port16_num", 0}, + 
{"ring_queue6_rts_port16_udp", 0}, + {"ring_queue6_rts_port16_src", 0}, + {"ring_queue6_rts_port17_num", 0}, + {"ring_queue6_rts_port17_udp", 0}, + {"ring_queue6_rts_port17_src", 0}, + {"ring_queue6_rts_port18_num", 0}, + {"ring_queue6_rts_port18_udp", 0}, + {"ring_queue6_rts_port18_src", 0}, + {"ring_queue6_rts_port19_num", 0}, + {"ring_queue6_rts_port19_udp", 0}, + {"ring_queue6_rts_port19_src", 0}, + {"ring_queue6_rts_port20_num", 0}, + {"ring_queue6_rts_port20_udp", 0}, + {"ring_queue6_rts_port20_src", 0}, + {"ring_queue6_rts_port21_num", 0}, + {"ring_queue6_rts_port21_udp", 0}, + {"ring_queue6_rts_port21_src", 0}, + {"ring_queue6_rts_port22_num", 0}, + {"ring_queue6_rts_port22_udp", 0}, + {"ring_queue6_rts_port22_src", 0}, + {"ring_queue6_rts_port23_num", 0}, + {"ring_queue6_rts_port23_udp", 0}, + {"ring_queue6_rts_port23_src", 0}, + {"ring_queue6_rts_port24_num", 0}, + {"ring_queue6_rts_port24_udp", 0}, + {"ring_queue6_rts_port24_src", 0}, + {"ring_queue6_rts_port25_num", 0}, + {"ring_queue6_rts_port25_udp", 0}, + {"ring_queue6_rts_port25_src", 0}, + {"ring_queue6_rts_port26_num", 0}, + {"ring_queue6_rts_port26_udp", 0}, + {"ring_queue6_rts_port26_src", 0}, + {"ring_queue6_rts_port27_num", 0}, + {"ring_queue6_rts_port27_udp", 0}, + {"ring_queue6_rts_port27_src", 0}, + {"ring_queue6_rts_port28_num", 0}, + {"ring_queue6_rts_port28_udp", 0}, + {"ring_queue6_rts_port28_src", 0}, + {"ring_queue6_rts_port29_num", 0}, + {"ring_queue6_rts_port29_udp", 0}, + {"ring_queue6_rts_port29_src", 0}, + {"ring_queue6_rts_port30_num", 0}, + {"ring_queue6_rts_port30_udp", 0}, + {"ring_queue6_rts_port30_src", 0}, + {"ring_queue6_rts_port31_num", 0}, + {"ring_queue6_rts_port31_udp", 0}, + {"ring_queue6_rts_port31_src", 0}, + {"ring_queue6_rti_urange_a", 0}, + {"ring_queue6_rti_ufc_a", 0}, + {"ring_queue6_rti_urange_b", 0}, + {"ring_queue6_rti_ufc_b", 0}, + {"ring_queue6_rti_urange_c", 0}, + {"ring_queue6_rti_ufc_c", 0}, + {"ring_queue6_rti_ufc_d", 0}, + {"ring_queue6_rti_timer_ac_en", 0}, + {"ring_queue6_rti_timer_val_us", 0}, + + {"ring_queue7_max", 0}, + {"ring_queue7_initial", 0}, + {"ring_queue7_buffer_mode", 0}, + {"ring_queue7_dram_size_mb", 0}, + {"ring_queue7_intr_vector", 0}, + {"ring_queue7_backoff_interval_us", 0}, + {"ring_queue7_max_frm_len", 0}, + {"ring_queue7_priority", 0}, + {"ring_queue7_no_snoop_bits", 0}, + {"ring_queue7_indicate_max_pkts", 0}, + {"ring_queue7_configured", 0}, + {"ring_queue7_rts_mac_en", 0}, + {"ring_queue7_rth_en", 0}, + {"ring_queue7_rts_port_en", 0}, + {"ring_queue7_rts_port0_num", 0}, + {"ring_queue7_rts_port0_udp", 0}, + {"ring_queue7_rts_port0_src", 0}, + {"ring_queue7_rts_port1_num", 0}, + {"ring_queue7_rts_port1_udp", 0}, + {"ring_queue7_rts_port1_src", 0}, + {"ring_queue7_rts_port2_num", 0}, + {"ring_queue7_rts_port2_udp", 0}, + {"ring_queue7_rts_port2_src", 0}, + {"ring_queue7_rts_port3_num", 0}, + {"ring_queue7_rts_port3_udp", 0}, + {"ring_queue7_rts_port3_src", 0}, + {"ring_queue7_rts_port4_num", 0}, + {"ring_queue7_rts_port4_udp", 0}, + {"ring_queue7_rts_port4_src", 0}, + {"ring_queue7_rts_port5_num", 0}, + {"ring_queue7_rts_port5_udp", 0}, + {"ring_queue7_rts_port5_src", 0}, + {"ring_queue7_rts_port6_num", 0}, + {"ring_queue7_rts_port6_udp", 0}, + {"ring_queue7_rts_port6_src", 0}, + {"ring_queue7_rts_port7_num", 0}, + {"ring_queue7_rts_port7_udp", 0}, + {"ring_queue7_rts_port7_src", 0}, + {"ring_queue7_rts_port8_num", 0}, + {"ring_queue7_rts_port8_udp", 0}, + {"ring_queue7_rts_port8_src", 0}, + {"ring_queue7_rts_port9_num", 0}, + 
{"ring_queue7_rts_port9_udp", 0}, + {"ring_queue7_rts_port9_src", 0}, + {"ring_queue7_rts_port10_num", 0}, + {"ring_queue7_rts_port10_udp", 0}, + {"ring_queue7_rts_port10_src", 0}, + {"ring_queue7_rts_port11_num", 0}, + {"ring_queue7_rts_port11_udp", 0}, + {"ring_queue7_rts_port11_src", 0}, + {"ring_queue7_rts_port12_num", 0}, + {"ring_queue7_rts_port12_udp", 0}, + {"ring_queue7_rts_port12_src", 0}, + {"ring_queue7_rts_port13_num", 0}, + {"ring_queue7_rts_port13_udp", 0}, + {"ring_queue7_rts_port13_src", 0}, + {"ring_queue7_rts_port14_num", 0}, + {"ring_queue7_rts_port14_udp", 0}, + {"ring_queue7_rts_port14_src", 0}, + {"ring_queue7_rts_port15_num", 0}, + {"ring_queue7_rts_port15_udp", 0}, + {"ring_queue7_rts_port15_src", 0}, + {"ring_queue7_rts_port16_num", 0}, + {"ring_queue7_rts_port16_udp", 0}, + {"ring_queue7_rts_port16_src", 0}, + {"ring_queue7_rts_port17_num", 0}, + {"ring_queue7_rts_port17_udp", 0}, + {"ring_queue7_rts_port17_src", 0}, + {"ring_queue7_rts_port18_num", 0}, + {"ring_queue7_rts_port18_udp", 0}, + {"ring_queue7_rts_port18_src", 0}, + {"ring_queue7_rts_port19_num", 0}, + {"ring_queue7_rts_port19_udp", 0}, + {"ring_queue7_rts_port19_src", 0}, + {"ring_queue7_rts_port20_num", 0}, + {"ring_queue7_rts_port20_udp", 0}, + {"ring_queue7_rts_port20_src", 0}, + {"ring_queue7_rts_port21_num", 0}, + {"ring_queue7_rts_port21_udp", 0}, + {"ring_queue7_rts_port21_src", 0}, + {"ring_queue7_rts_port22_num", 0}, + {"ring_queue7_rts_port22_udp", 0}, + {"ring_queue7_rts_port22_src", 0}, + {"ring_queue7_rts_port23_num", 0}, + {"ring_queue7_rts_port23_udp", 0}, + {"ring_queue7_rts_port23_src", 0}, + {"ring_queue7_rts_port24_num", 0}, + {"ring_queue7_rts_port24_udp", 0}, + {"ring_queue7_rts_port24_src", 0}, + {"ring_queue7_rts_port25_num", 0}, + {"ring_queue7_rts_port25_udp", 0}, + {"ring_queue7_rts_port25_src", 0}, + {"ring_queue7_rts_port26_num", 0}, + {"ring_queue7_rts_port26_udp", 0}, + {"ring_queue7_rts_port26_src", 0}, + {"ring_queue7_rts_port27_num", 0}, + {"ring_queue7_rts_port27_udp", 0}, + {"ring_queue7_rts_port27_src", 0}, + {"ring_queue7_rts_port28_num", 0}, + {"ring_queue7_rts_port28_udp", 0}, + {"ring_queue7_rts_port28_src", 0}, + {"ring_queue7_rts_port29_num", 0}, + {"ring_queue7_rts_port29_udp", 0}, + {"ring_queue7_rts_port29_src", 0}, + {"ring_queue7_rts_port30_num", 0}, + {"ring_queue7_rts_port30_udp", 0}, + {"ring_queue7_rts_port30_src", 0}, + {"ring_queue7_rts_port31_num", 0}, + {"ring_queue7_rts_port31_udp", 0}, + {"ring_queue7_rts_port31_src", 0}, + {"ring_queue7_rti_urange_a", 0}, + {"ring_queue7_rti_ufc_a", 0}, + {"ring_queue7_rti_urange_b", 0}, + {"ring_queue7_rti_ufc_b", 0}, + {"ring_queue7_rti_urange_c", 0}, + {"ring_queue7_rti_ufc_c", 0}, + {"ring_queue7_rti_ufc_d", 0}, + {"ring_queue7_rti_timer_ac_en", 0}, + {"ring_queue7_rti_timer_val_us", 0}, + + {"mac_media", 0}, + {"mac_tmac_util_period", 0}, + {"mac_rmac_util_period", 0}, + {"mac_rmac_bcast_en", 0}, + {"mac_rmac_pause_gen_en", 0}, + {"mac_rmac_pause_rcv_en", 0}, + {"mac_rmac_pause_time", 0}, + {"mac_mc_pause_threshold_q0q3", 0}, + {"mac_mc_pause_threshold_q4q7", 0}, + + {"fifo_max_frags", 0}, + {"fifo_reserve_threshold", 0}, + {"fifo_memblock_size", 0}, + {"fifo_alignment_size", 0}, + {"fifo_max_aligned_frags", 0}, + + {"fifo_queue0_max", 0}, + {"fifo_queue0_initial", 0}, + {"fifo_queue0_intr", 0}, + {"fifo_queue0_intr_vector", 0}, + {"fifo_queue0_no_snoop_bits", 0}, + {"fifo_queue0_priority", 0}, + {"fifo_queue0_configured", 0}, + + {"fifo_queue0_tti0_enabled", 0}, + {"fifo_queue0_tti0_urange_a", 0}, + 
{"fifo_queue0_tti0_ufc_a", 0}, + {"fifo_queue0_tti0_urange_b", 0}, + {"fifo_queue0_tti0_ufc_b", 0}, + {"fifo_queue0_tti0_urange_c", 0}, + {"fifo_queue0_tti0_ufc_c", 0}, + {"fifo_queue0_tti0_ufc_d", 0}, + {"fifo_queue0_tti0_timer_val_us", 0}, + {"fifo_queue0_tti0_timer_ac_en", 0}, + {"fifo_queue0_tti0_timer_ci_en", 0}, + + {"fifo_queue0_tti1_enabled", 0}, + {"fifo_queue0_tti1_urange_a", 0}, + {"fifo_queue0_tti1_ufc_a", 0}, + {"fifo_queue0_tti1_urange_b", 0}, + {"fifo_queue0_tti1_ufc_b", 0}, + {"fifo_queue0_tti1_urange_c", 0}, + {"fifo_queue0_tti1_ufc_c", 0}, + {"fifo_queue0_tti1_ufc_d", 0}, + {"fifo_queue0_tti1_timer_val_us", 0}, + {"fifo_queue0_tti1_timer_ac_en", 0}, + {"fifo_queue0_tti1_timer_ci_en", 0}, + + {"fifo_queue0_tti2_enabled", 0}, + {"fifo_queue0_tti2_urange_a", 0}, + {"fifo_queue0_tti2_ufc_a", 0}, + {"fifo_queue0_tti2_urange_b", 0}, + {"fifo_queue0_tti2_ufc_b", 0}, + {"fifo_queue0_tti2_urange_c", 0}, + {"fifo_queue0_tti2_ufc_c", 0}, + {"fifo_queue0_tti2_ufc_d", 0}, + {"fifo_queue0_tti2_timer_val_us", 0}, + {"fifo_queue0_tti2_timer_ac_en", 0}, + {"fifo_queue0_tti2_timer_ci_en", 0}, + + {"fifo_queue0_tti3_enabled", 0}, + {"fifo_queue0_tti3_urange_a", 0}, + {"fifo_queue0_tti3_ufc_a", 0}, + {"fifo_queue0_tti3_urange_b", 0}, + {"fifo_queue0_tti3_ufc_b", 0}, + {"fifo_queue0_tti3_urange_c", 0}, + {"fifo_queue0_tti3_ufc_c", 0}, + {"fifo_queue0_tti3_ufc_d", 0}, + {"fifo_queue0_tti3_timer_val_us", 0}, + {"fifo_queue0_tti3_timer_ac_en", 0}, + {"fifo_queue0_tti3_timer_ci_en", 0}, + + {"fifo_queue0_tti4_enabled", 0}, + {"fifo_queue0_tti4_urange_a", 0}, + {"fifo_queue0_tti4_ufc_a", 0}, + {"fifo_queue0_tti4_urange_b", 0}, + {"fifo_queue0_tti4_ufc_b", 0}, + {"fifo_queue0_tti4_urange_c", 0}, + {"fifo_queue0_tti4_ufc_c", 0}, + {"fifo_queue0_tti4_ufc_d", 0}, + {"fifo_queue0_tti4_timer_val_us", 0}, + {"fifo_queue0_tti4_timer_ac_en", 0}, + {"fifo_queue0_tti4_timer_ci_en", 0}, + + {"fifo_queue0_tti5_enabled", 0}, + {"fifo_queue0_tti5_urange_a", 0}, + {"fifo_queue0_tti5_ufc_a", 0}, + {"fifo_queue0_tti5_urange_b", 0}, + {"fifo_queue0_tti5_ufc_b", 0}, + {"fifo_queue0_tti5_urange_c", 0}, + {"fifo_queue0_tti5_ufc_c", 0}, + {"fifo_queue0_tti5_ufc_d", 0}, + {"fifo_queue0_tti5_timer_val_us", 0}, + {"fifo_queue0_tti5_timer_ac_en", 0}, + {"fifo_queue0_tti5_timer_ci_en", 0}, + + {"fifo_queue0_tti6_enabled", 0}, + {"fifo_queue0_tti6_urange_a", 0}, + {"fifo_queue0_tti6_ufc_a", 0}, + {"fifo_queue0_tti6_urange_b", 0}, + {"fifo_queue0_tti6_ufc_b", 0}, + {"fifo_queue0_tti6_urange_c", 0}, + {"fifo_queue0_tti6_ufc_c", 0}, + {"fifo_queue0_tti6_ufc_d", 0}, + {"fifo_queue0_tti6_timer_val_us", 0}, + {"fifo_queue0_tti6_timer_ac_en", 0}, + {"fifo_queue0_tti6_timer_ci_en", 0}, + + {"fifo_queue1_max", 0}, + {"fifo_queue1_initial", 0}, + {"fifo_queue1_intr", 0}, + {"fifo_queue1_intr_vector", 0}, + {"fifo_queue1_no_snoop_bits", 0}, + {"fifo_queue1_priority", 0}, + {"fifo_queue1_configured", 0}, + + {"fifo_queue1_tti0_enabled", 0}, + {"fifo_queue1_tti0_urange_a", 0}, + {"fifo_queue1_tti0_ufc_a", 0}, + {"fifo_queue1_tti0_urange_b", 0}, + {"fifo_queue1_tti0_ufc_b", 0}, + {"fifo_queue1_tti0_urange_c", 0}, + {"fifo_queue1_tti0_ufc_c", 0}, + {"fifo_queue1_tti0_ufc_d", 0}, + {"fifo_queue1_tti0_timer_val_us", 0}, + {"fifo_queue1_tti0_timer_ac_en", 0}, + {"fifo_queue1_tti0_timer_ci_en", 0}, + + {"fifo_queue1_tti1_enabled", 0}, + {"fifo_queue1_tti1_urange_a", 0}, + {"fifo_queue1_tti1_ufc_a", 0}, + {"fifo_queue1_tti1_urange_b", 0}, + {"fifo_queue1_tti1_ufc_b", 0}, + {"fifo_queue1_tti1_urange_c", 0}, + {"fifo_queue1_tti1_ufc_c", 0}, + 
{"fifo_queue1_tti1_ufc_d", 0}, + {"fifo_queue1_tti1_timer_val_us", 0}, + {"fifo_queue1_tti1_timer_ac_en", 0}, + {"fifo_queue1_tti1_timer_ci_en", 0}, + + {"fifo_queue1_tti2_enabled", 0}, + {"fifo_queue1_tti2_urange_a", 0}, + {"fifo_queue1_tti2_ufc_a", 0}, + {"fifo_queue1_tti2_urange_b", 0}, + {"fifo_queue1_tti2_ufc_b", 0}, + {"fifo_queue1_tti2_urange_c", 0}, + {"fifo_queue1_tti2_ufc_c", 0}, + {"fifo_queue1_tti2_ufc_d", 0}, + {"fifo_queue1_tti2_timer_val_us", 0}, + {"fifo_queue1_tti2_timer_ac_en", 0}, + {"fifo_queue1_tti2_timer_ci_en", 0}, + + {"fifo_queue1_tti3_enabled", 0}, + {"fifo_queue1_tti3_urange_a", 0}, + {"fifo_queue1_tti3_ufc_a", 0}, + {"fifo_queue1_tti3_urange_b", 0}, + {"fifo_queue1_tti3_ufc_b", 0}, + {"fifo_queue1_tti3_urange_c", 0}, + {"fifo_queue1_tti3_ufc_c", 0}, + {"fifo_queue1_tti3_ufc_d", 0}, + {"fifo_queue1_tti3_timer_val_us", 0}, + {"fifo_queue1_tti3_timer_ac_en", 0}, + {"fifo_queue1_tti3_timer_ci_en", 0}, + + {"fifo_queue1_tti4_enabled", 0}, + {"fifo_queue1_tti4_urange_a", 0}, + {"fifo_queue1_tti4_ufc_a", 0}, + {"fifo_queue1_tti4_urange_b", 0}, + {"fifo_queue1_tti4_ufc_b", 0}, + {"fifo_queue1_tti4_urange_c", 0}, + {"fifo_queue1_tti4_ufc_c", 0}, + {"fifo_queue1_tti4_ufc_d", 0}, + {"fifo_queue1_tti4_timer_val_us", 0}, + {"fifo_queue1_tti4_timer_ac_en", 0}, + {"fifo_queue1_tti4_timer_ci_en", 0}, + + {"fifo_queue1_tti5_enabled", 0}, + {"fifo_queue1_tti5_urange_a", 0}, + {"fifo_queue1_tti5_ufc_a", 0}, + {"fifo_queue1_tti5_urange_b", 0}, + {"fifo_queue1_tti5_ufc_b", 0}, + {"fifo_queue1_tti5_urange_c", 0}, + {"fifo_queue1_tti5_ufc_c", 0}, + {"fifo_queue1_tti5_ufc_d", 0}, + {"fifo_queue1_tti5_timer_val_us", 0}, + {"fifo_queue1_tti5_timer_ac_en", 0}, + {"fifo_queue1_tti5_timer_ci_en", 0}, + + {"fifo_queue1_tti6_enabled", 0}, + {"fifo_queue1_tti6_urange_a", 0}, + {"fifo_queue1_tti6_ufc_a", 0}, + {"fifo_queue1_tti6_urange_b", 0}, + {"fifo_queue1_tti6_ufc_b", 0}, + {"fifo_queue1_tti6_urange_c", 0}, + {"fifo_queue1_tti6_ufc_c", 0}, + {"fifo_queue1_tti6_ufc_d", 0}, + {"fifo_queue1_tti6_timer_val_us", 0}, + {"fifo_queue1_tti6_timer_ac_en", 0}, + {"fifo_queue1_tti6_timer_ci_en", 0}, + + {"fifo_queue2_max", 0}, + {"fifo_queue2_initial", 0}, + {"fifo_queue2_intr", 0}, + {"fifo_queue2_intr_vector", 0}, + {"fifo_queue2_no_snoop_bits", 0}, + {"fifo_queue2_priority", 0}, + {"fifo_queue2_configured", 0}, + + {"fifo_queue2_tti0_enabled", 0}, + {"fifo_queue2_tti0_urange_a", 0}, + {"fifo_queue2_tti0_ufc_a", 0}, + {"fifo_queue2_tti0_urange_b", 0}, + {"fifo_queue2_tti0_ufc_b", 0}, + {"fifo_queue2_tti0_urange_c", 0}, + {"fifo_queue2_tti0_ufc_c", 0}, + {"fifo_queue2_tti0_ufc_d", 0}, + {"fifo_queue2_tti0_timer_val_us", 0}, + {"fifo_queue2_tti0_timer_ac_en", 0}, + {"fifo_queue2_tti0_timer_ci_en", 0}, + + {"fifo_queue2_tti1_enabled", 0}, + {"fifo_queue2_tti1_urange_a", 0}, + {"fifo_queue2_tti1_ufc_a", 0}, + {"fifo_queue2_tti1_urange_b", 0}, + {"fifo_queue2_tti1_ufc_b", 0}, + {"fifo_queue2_tti1_urange_c", 0}, + {"fifo_queue2_tti1_ufc_c", 0}, + {"fifo_queue2_tti1_ufc_d", 0}, + {"fifo_queue2_tti1_timer_val_us", 0}, + {"fifo_queue2_tti1_timer_ac_en", 0}, + {"fifo_queue2_tti1_timer_ci_en", 0}, + + {"fifo_queue2_tti2_enabled", 0}, + {"fifo_queue2_tti2_urange_a", 0}, + {"fifo_queue2_tti2_ufc_a", 0}, + {"fifo_queue2_tti2_urange_b", 0}, + {"fifo_queue2_tti2_ufc_b", 0}, + {"fifo_queue2_tti2_urange_c", 0}, + {"fifo_queue2_tti2_ufc_c", 0}, + {"fifo_queue2_tti2_ufc_d", 0}, + {"fifo_queue2_tti2_timer_val_us", 0}, + {"fifo_queue2_tti2_timer_ac_en", 0}, + {"fifo_queue2_tti2_timer_ci_en", 0}, + + 
{"fifo_queue2_tti3_enabled", 0}, + {"fifo_queue2_tti3_urange_a", 0}, + {"fifo_queue2_tti3_ufc_a", 0}, + {"fifo_queue2_tti3_urange_b", 0}, + {"fifo_queue2_tti3_ufc_b", 0}, + {"fifo_queue2_tti3_urange_c", 0}, + {"fifo_queue2_tti3_ufc_c", 0}, + {"fifo_queue2_tti3_ufc_d", 0}, + {"fifo_queue2_tti3_timer_val_us", 0}, + {"fifo_queue2_tti3_timer_ac_en", 0}, + {"fifo_queue2_tti3_timer_ci_en", 0}, + + {"fifo_queue2_tti4_enabled", 0}, + {"fifo_queue2_tti4_urange_a", 0}, + {"fifo_queue2_tti4_ufc_a", 0}, + {"fifo_queue2_tti4_urange_b", 0}, + {"fifo_queue2_tti4_ufc_b", 0}, + {"fifo_queue2_tti4_urange_c", 0}, + {"fifo_queue2_tti4_ufc_c", 0}, + {"fifo_queue2_tti4_ufc_d", 0}, + {"fifo_queue2_tti4_timer_val_us", 0}, + {"fifo_queue2_tti4_timer_ac_en", 0}, + {"fifo_queue2_tti4_timer_ci_en", 0}, + + {"fifo_queue2_tti5_enabled", 0}, + {"fifo_queue2_tti5_urange_a", 0}, + {"fifo_queue2_tti5_ufc_a", 0}, + {"fifo_queue2_tti5_urange_b", 0}, + {"fifo_queue2_tti5_ufc_b", 0}, + {"fifo_queue2_tti5_urange_c", 0}, + {"fifo_queue2_tti5_ufc_c", 0}, + {"fifo_queue2_tti5_ufc_d", 0}, + {"fifo_queue2_tti5_timer_val_us", 0}, + {"fifo_queue2_tti5_timer_ac_en", 0}, + {"fifo_queue2_tti5_timer_ci_en", 0}, + + {"fifo_queue2_tti6_enabled", 0}, + {"fifo_queue2_tti6_urange_a", 0}, + {"fifo_queue2_tti6_ufc_a", 0}, + {"fifo_queue2_tti6_urange_b", 0}, + {"fifo_queue2_tti6_ufc_b", 0}, + {"fifo_queue2_tti6_urange_c", 0}, + {"fifo_queue2_tti6_ufc_c", 0}, + {"fifo_queue2_tti6_ufc_d", 0}, + {"fifo_queue2_tti6_timer_val_us", 0}, + {"fifo_queue2_tti6_timer_ac_en", 0}, + {"fifo_queue2_tti6_timer_ci_en", 0}, + + {"fifo_queue3_max", 0}, + {"fifo_queue3_initial", 0}, + {"fifo_queue3_intr", 0}, + {"fifo_queue3_intr_vector", 0}, + {"fifo_queue3_no_snoop_bits", 0}, + {"fifo_queue3_priority", 0}, + {"fifo_queue3_configured", 0}, + + {"fifo_queue3_tti0_enabled", 0}, + {"fifo_queue3_tti0_urange_a", 0}, + {"fifo_queue3_tti0_ufc_a", 0}, + {"fifo_queue3_tti0_urange_b", 0}, + {"fifo_queue3_tti0_ufc_b", 0}, + {"fifo_queue3_tti0_urange_c", 0}, + {"fifo_queue3_tti0_ufc_c", 0}, + {"fifo_queue3_tti0_ufc_d", 0}, + {"fifo_queue3_tti0_timer_val_us", 0}, + {"fifo_queue3_tti0_timer_ac_en", 0}, + {"fifo_queue3_tti0_timer_ci_en", 0}, + + {"fifo_queue3_tti1_enabled", 0}, + {"fifo_queue3_tti1_urange_a", 0}, + {"fifo_queue3_tti1_ufc_a", 0}, + {"fifo_queue3_tti1_urange_b", 0}, + {"fifo_queue3_tti1_ufc_b", 0}, + {"fifo_queue3_tti1_urange_c", 0}, + {"fifo_queue3_tti1_ufc_c", 0}, + {"fifo_queue3_tti1_ufc_d", 0}, + {"fifo_queue3_tti1_timer_val_us", 0}, + {"fifo_queue3_tti1_timer_ac_en", 0}, + {"fifo_queue3_tti1_timer_ci_en", 0}, + + {"fifo_queue3_tti2_enabled", 0}, + {"fifo_queue3_tti2_urange_a", 0}, + {"fifo_queue3_tti2_ufc_a", 0}, + {"fifo_queue3_tti2_urange_b", 0}, + {"fifo_queue3_tti2_ufc_b", 0}, + {"fifo_queue3_tti2_urange_c", 0}, + {"fifo_queue3_tti2_ufc_c", 0}, + {"fifo_queue3_tti2_ufc_d", 0}, + {"fifo_queue3_tti2_timer_val_us", 0}, + {"fifo_queue3_tti2_timer_ac_en", 0}, + {"fifo_queue3_tti2_timer_ci_en", 0}, + + {"fifo_queue3_tti3_enabled", 0}, + {"fifo_queue3_tti3_urange_a", 0}, + {"fifo_queue3_tti3_ufc_a", 0}, + {"fifo_queue3_tti3_urange_b", 0}, + {"fifo_queue3_tti3_ufc_b", 0}, + {"fifo_queue3_tti3_urange_c", 0}, + {"fifo_queue3_tti3_ufc_c", 0}, + {"fifo_queue3_tti3_ufc_d", 0}, + {"fifo_queue3_tti3_timer_val_us", 0}, + {"fifo_queue3_tti3_timer_ac_en", 0}, + {"fifo_queue3_tti3_timer_ci_en", 0}, + + {"fifo_queue3_tti4_enabled", 0}, + {"fifo_queue3_tti4_urange_a", 0}, + {"fifo_queue3_tti4_ufc_a", 0}, + {"fifo_queue3_tti4_urange_b", 0}, + {"fifo_queue3_tti4_ufc_b", 0}, + 
{"fifo_queue3_tti4_urange_c", 0}, + {"fifo_queue3_tti4_ufc_c", 0}, + {"fifo_queue3_tti4_ufc_d", 0}, + {"fifo_queue3_tti4_timer_val_us", 0}, + {"fifo_queue3_tti4_timer_ac_en", 0}, + {"fifo_queue3_tti4_timer_ci_en", 0}, + + {"fifo_queue3_tti5_enabled", 0}, + {"fifo_queue3_tti5_urange_a", 0}, + {"fifo_queue3_tti5_ufc_a", 0}, + {"fifo_queue3_tti5_urange_b", 0}, + {"fifo_queue3_tti5_ufc_b", 0}, + {"fifo_queue3_tti5_urange_c", 0}, + {"fifo_queue3_tti5_ufc_c", 0}, + {"fifo_queue3_tti5_ufc_d", 0}, + {"fifo_queue3_tti5_timer_val_us", 0}, + {"fifo_queue3_tti5_timer_ac_en", 0}, + {"fifo_queue3_tti5_timer_ci_en", 0}, + + {"fifo_queue3_tti6_enabled", 0}, + {"fifo_queue3_tti6_urange_a", 0}, + {"fifo_queue3_tti6_ufc_a", 0}, + {"fifo_queue3_tti6_urange_b", 0}, + {"fifo_queue3_tti6_ufc_b", 0}, + {"fifo_queue3_tti6_urange_c", 0}, + {"fifo_queue3_tti6_ufc_c", 0}, + {"fifo_queue3_tti6_ufc_d", 0}, + {"fifo_queue3_tti6_timer_val_us", 0}, + {"fifo_queue3_tti6_timer_ac_en", 0}, + {"fifo_queue3_tti6_timer_ci_en", 0}, + + {"fifo_queue4_max", 0}, + {"fifo_queue4_initial", 0}, + {"fifo_queue4_intr", 0}, + {"fifo_queue4_intr_vector", 0}, + {"fifo_queue4_no_snoop_bits", 0}, + {"fifo_queue4_priority", 0}, + {"fifo_queue4_configured", 0}, + + {"fifo_queue4_tti0_enabled", 0}, + {"fifo_queue4_tti0_urange_a", 0}, + {"fifo_queue4_tti0_ufc_a", 0}, + {"fifo_queue4_tti0_urange_b", 0}, + {"fifo_queue4_tti0_ufc_b", 0}, + {"fifo_queue4_tti0_urange_c", 0}, + {"fifo_queue4_tti0_ufc_c", 0}, + {"fifo_queue4_tti0_ufc_d", 0}, + {"fifo_queue4_tti0_timer_val_us", 0}, + {"fifo_queue4_tti0_timer_ac_en", 0}, + {"fifo_queue4_tti0_timer_ci_en", 0}, + + {"fifo_queue4_tti1_enabled", 0}, + {"fifo_queue4_tti1_urange_a", 0}, + {"fifo_queue4_tti1_ufc_a", 0}, + {"fifo_queue4_tti1_urange_b", 0}, + {"fifo_queue4_tti1_ufc_b", 0}, + {"fifo_queue4_tti1_urange_c", 0}, + {"fifo_queue4_tti1_ufc_c", 0}, + {"fifo_queue4_tti1_ufc_d", 0}, + {"fifo_queue4_tti1_timer_val_us", 0}, + {"fifo_queue4_tti1_timer_ac_en", 0}, + {"fifo_queue4_tti1_timer_ci_en", 0}, + + {"fifo_queue4_tti2_enabled", 0}, + {"fifo_queue4_tti2_urange_a", 0}, + {"fifo_queue4_tti2_ufc_a", 0}, + {"fifo_queue4_tti2_urange_b", 0}, + {"fifo_queue4_tti2_ufc_b", 0}, + {"fifo_queue4_tti2_urange_c", 0}, + {"fifo_queue4_tti2_ufc_c", 0}, + {"fifo_queue4_tti2_ufc_d", 0}, + {"fifo_queue4_tti2_timer_val_us", 0}, + {"fifo_queue4_tti2_timer_ac_en", 0}, + {"fifo_queue4_tti2_timer_ci_en", 0}, + + {"fifo_queue4_tti3_enabled", 0}, + {"fifo_queue4_tti3_urange_a", 0}, + {"fifo_queue4_tti3_ufc_a", 0}, + {"fifo_queue4_tti3_urange_b", 0}, + {"fifo_queue4_tti3_ufc_b", 0}, + {"fifo_queue4_tti3_urange_c", 0}, + {"fifo_queue4_tti3_ufc_c", 0}, + {"fifo_queue4_tti3_ufc_d", 0}, + {"fifo_queue4_tti3_timer_val_us", 0}, + {"fifo_queue4_tti3_timer_ac_en", 0}, + {"fifo_queue4_tti3_timer_ci_en", 0}, + + {"fifo_queue4_tti4_enabled", 0}, + {"fifo_queue4_tti4_urange_a", 0}, + {"fifo_queue4_tti4_ufc_a", 0}, + {"fifo_queue4_tti4_urange_b", 0}, + {"fifo_queue4_tti4_ufc_b", 0}, + {"fifo_queue4_tti4_urange_c", 0}, + {"fifo_queue4_tti4_ufc_c", 0}, + {"fifo_queue4_tti4_ufc_d", 0}, + {"fifo_queue4_tti4_timer_val_us", 0}, + {"fifo_queue4_tti4_timer_ac_en", 0}, + {"fifo_queue4_tti4_timer_ci_en", 0}, + + {"fifo_queue4_tti5_enabled", 0}, + {"fifo_queue4_tti5_urange_a", 0}, + {"fifo_queue4_tti5_ufc_a", 0}, + {"fifo_queue4_tti5_urange_b", 0}, + {"fifo_queue4_tti5_ufc_b", 0}, + {"fifo_queue4_tti5_urange_c", 0}, + {"fifo_queue4_tti5_ufc_c", 0}, + {"fifo_queue4_tti5_ufc_d", 0}, + {"fifo_queue4_tti5_timer_val_us", 0}, + {"fifo_queue4_tti5_timer_ac_en", 
0},
+	{"fifo_queue4_tti5_timer_ci_en", 0},
+
+	{"fifo_queue4_tti6_enabled", 0},
+	{"fifo_queue4_tti6_urange_a", 0},
+	{"fifo_queue4_tti6_ufc_a", 0},
+	{"fifo_queue4_tti6_urange_b", 0},
+	{"fifo_queue4_tti6_ufc_b", 0},
+	{"fifo_queue4_tti6_urange_c", 0},
+	{"fifo_queue4_tti6_ufc_c", 0},
+	{"fifo_queue4_tti6_ufc_d", 0},
+	{"fifo_queue4_tti6_timer_val_us", 0},
+	{"fifo_queue4_tti6_timer_ac_en", 0},
+	{"fifo_queue4_tti6_timer_ci_en", 0},
+
+	{"fifo_queue5_max", 0},
+	{"fifo_queue5_initial", 0},
+	{"fifo_queue5_intr", 0},
+	{"fifo_queue5_intr_vector", 0},
+	{"fifo_queue5_no_snoop_bits", 0},
+	{"fifo_queue5_priority", 0},
+	{"fifo_queue5_configured", 0},
+
+	{"fifo_queue5_tti0_enabled", 0},
+	{"fifo_queue5_tti0_urange_a", 0},
+	{"fifo_queue5_tti0_ufc_a", 0},
+	{"fifo_queue5_tti0_urange_b", 0},
+	{"fifo_queue5_tti0_ufc_b", 0},
+	{"fifo_queue5_tti0_urange_c", 0},
+	{"fifo_queue5_tti0_ufc_c", 0},
+	{"fifo_queue5_tti0_ufc_d", 0},
+	{"fifo_queue5_tti0_timer_val_us", 0},
+	{"fifo_queue5_tti0_timer_ac_en", 0},
+	{"fifo_queue5_tti0_timer_ci_en", 0},
+
+	{"fifo_queue5_tti1_enabled", 0},
+	{"fifo_queue5_tti1_urange_a", 0},
+	{"fifo_queue5_tti1_ufc_a", 0},
+	{"fifo_queue5_tti1_urange_b", 0},
+	{"fifo_queue5_tti1_ufc_b", 0},
+	{"fifo_queue5_tti1_urange_c", 0},
+	{"fifo_que5_tti1_ufc_c", 0},
+	{"fifo_queue5_tti1_ufc_d", 0},
+	{"fifo_queue5_tti1_timer_val_us", 0},
+	{"fifo_queue5_tti1_timer_ac_en", 0},
+	{"fifo_queue5_tti1_timer_ci_en", 0},
+
+	{"fifo_queue5_tti2_enabled", 0},
+	{"fifo_queue5_tti2_urange_a", 0},
+	{"fifo_queue5_tti2_ufc_a", 0},
+	{"fifo_queue5_tti2_urange_b", 0},
+	{"fifo_queue5_tti2_ufc_b", 0},
+	{"fifo_queue5_tti2_urange_c", 0},
+	{"fifo_queue5_tti2_ufc_c", 0},
+	{"fifo_queue5_tti2_ufc_d", 0},
+	{"fifo_queue5_tti2_timer_val_us", 0},
+	{"fifo_queue5_tti2_timer_ac_en", 0},
+	{"fifo_queue5_tti2_timer_ci_en", 0},
+
+	{"fifo_queue5_tti3_enabled", 0},
+	{"fifo_queue5_tti3_urange_a", 0},
+	{"fifo_queue5_tti3_ufc_a", 0},
+	{"fifo_queue5_tti3_urange_b", 0},
+	{"fifo_queue5_tti3_ufc_b", 0},
+	{"fifo_queue5_tti3_urange_c", 0},
+	{"fifo_queue5_tti3_ufc_c", 0},
+	{"fifo_queue5_tti3_ufc_d", 0},
+	{"fifo_queue5_tti3_timer_val_us", 0},
+	{"fifo_queue5_tti3_timer_ac_en", 0},
+	{"fifo_queue5_tti3_timer_ci_en", 0},
+
+	{"fifo_queue5_tti4_enabled", 0},
+	{"fifo_queue5_tti4_urange_a", 0},
+	{"fifo_queue5_tti4_ufc_a", 0},
+	{"fifo_queue5_tti4_urange_b", 0},
+	{"fifo_queue5_tti4_ufc_b", 0},
+	{"fifo_queue5_tti4_urange_c", 0},
+	{"fifo_queue5_tti4_ufc_c", 0},
+	{"fifo_queue5_tti4_ufc_d", 0},
+	{"fifo_queue5_tti4_timer_val_us", 0},
+	{"fifo_queue5_tti4_timer_ac_en", 0},
+	{"fifo_queue5_tti4_timer_ci_en", 0},
+
+	{"fifo_queue5_tti5_enabled", 0},
+	{"fifo_queue5_tti5_urange_a", 0},
+	{"fifo_queue5_tti5_ufc_a", 0},
+	{"fifo_queue5_tti5_urange_b", 0},
+	{"fifo_queue5_tti5_ufc_b", 0},
+	{"fifo_queue5_tti5_urange_c", 0},
+	{"fifo_queue5_tti5_ufc_c", 0},
+	{"fifo_queue5_tti5_ufc_d", 0},
+	{"fifo_queue5_tti5_timer_val_us", 0},
+	{"fifo_queue5_tti5_timer_ac_en", 0},
+	{"fifo_queue5_tti5_timer_ci_en", 0},
+
+	{"fifo_queue5_tti6_enabled", 0},
+	{"fifo_queue5_tti6_urange_a", 0},
+	{"fifo_queue5_tti6_ufc_a", 0},
+	{"fifo_queue5_tti6_urange_b", 0},
+	{"fifo_queue5_tti6_ufc_b", 0},
+	{"fifo_queue5_tti6_urange_c", 0},
+	{"fifo_queue5_tti6_ufc_c", 0},
+	{"fifo_queue5_tti6_ufc_d", 0},
+	{"fifo_queue5_tti6_timer_val_us", 0},
+	{"fifo_queue5_tti6_timer_ac_en", 0},
+	{"fifo_queue5_tti6_timer_ci_en", 0},
+
+	{"fifo_queue6_max", 0},
+	{"fifo_queue6_initial", 0},
+	{"fifo_queue6_intr", 0},
+	{"fifo_queue6_intr_vector", 0},
+
{"fifo_queue6_no_snoop_bits", 0}, + {"fifo_queue6_priority", 0}, + {"fifo_queue6_configured", 0}, + + {"fifo_queue6_tti0_enabled", 0}, + {"fifo_queue6_tti0_urange_a", 0}, + {"fifo_queue6_tti0_ufc_a", 0}, + {"fifo_queue6_tti0_urange_b", 0}, + {"fifo_queue6_tti0_ufc_b", 0}, + {"fifo_queue6_tti0_urange_c", 0}, + {"fifo_queue6_tti0_ufc_c", 0}, + {"fifo_queue6_tti0_ufc_d", 0}, + {"fifo_queue6_tti0_timer_val_us", 0}, + {"fifo_queue6_tti0_timer_ac_en", 0}, + {"fifo_queue6_tti0_timer_ci_en", 0}, + + {"fifo_queue6_tti1_enabled", 0}, + {"fifo_queue6_tti1_urange_a", 0}, + {"fifo_queue6_tti1_ufc_a", 0}, + {"fifo_queue6_tti1_urange_b", 0}, + {"fifo_queue6_tti1_ufc_b", 0}, + {"fifo_queue6_tti1_urange_c", 0}, + {"fifo_queue6_tti1_ufc_c", 0}, + {"fifo_queue6_tti1_ufc_d", 0}, + {"fifo_queue6_tti1_timer_val_us", 0}, + {"fifo_queue6_tti1_timer_ac_en", 0}, + {"fifo_queue6_tti1_timer_ci_en", 0}, + + {"fifo_queue6_tti2_enabled", 0}, + {"fifo_queue6_tti2_urange_a", 0}, + {"fifo_queue6_tti2_ufc_a", 0}, + {"fifo_queue6_tti2_urange_b", 0}, + {"fifo_queue6_tti2_ufc_b", 0}, + {"fifo_queue6_tti2_urange_c", 0}, + {"fifo_queue6_tti2_ufc_c", 0}, + {"fifo_queue6_tti2_ufc_d", 0}, + {"fifo_queue6_tti2_timer_val_us", 0}, + {"fifo_queue6_tti2_timer_ac_en", 0}, + {"fifo_queue6_tti2_timer_ci_en", 0}, + + {"fifo_queue6_tti3_enabled", 0}, + {"fifo_queue6_tti3_urange_a", 0}, + {"fifo_queue6_tti3_ufc_a", 0}, + {"fifo_queue6_tti3_urange_b", 0}, + {"fifo_queue6_tti3_ufc_b", 0}, + {"fifo_queue6_tti3_urange_c", 0}, + {"fifo_queue6_tti3_ufc_c", 0}, + {"fifo_queue6_tti3_ufc_d", 0}, + {"fifo_queue6_tti3_timer_val_us", 0}, + {"fifo_queue6_tti3_timer_ac_en", 0}, + {"fifo_queue6_tti3_timer_ci_en", 0}, + + {"fifo_queue6_tti4_enabled", 0}, + {"fifo_queue6_tti4_urange_a", 0}, + {"fifo_queue6_tti4_ufc_a", 0}, + {"fifo_queue6_tti4_urange_b", 0}, + {"fifo_queue6_tti4_ufc_b", 0}, + {"fifo_queue6_tti4_urange_c", 0}, + {"fifo_queue6_tti4_ufc_c", 0}, + {"fifo_queue6_tti4_ufc_d", 0}, + {"fifo_queue6_tti4_timer_val_us", 0}, + {"fifo_queue6_tti4_timer_ac_en", 0}, + {"fifo_queue6_tti4_timer_ci_en", 0}, + + {"fifo_queue6_tti5_enabled", 0}, + {"fifo_queue6_tti5_urange_a", 0}, + {"fifo_queue6_tti5_ufc_a", 0}, + {"fifo_queue6_tti5_urange_b", 0}, + {"fifo_queue6_tti5_ufc_b", 0}, + {"fifo_queue6_tti5_urange_c", 0}, + {"fifo_queue6_tti5_ufc_c", 0}, + {"fifo_queue6_tti5_ufc_d", 0}, + {"fifo_queue6_tti5_timer_val_us", 0}, + {"fifo_queue6_tti5_timer_ac_en", 0}, + {"fifo_queue6_tti5_timer_ci_en", 0}, + + {"fifo_queue6_tti6_enabled", 0}, + {"fifo_queue6_tti6_urange_a", 0}, + {"fifo_queue6_tti6_ufc_a", 0}, + {"fifo_queue6_tti6_urange_b", 0}, + {"fifo_queue6_tti6_ufc_b", 0}, + {"fifo_queue6_tti6_urange_c", 0}, + {"fifo_queue6_tti6_ufc_c", 0}, + {"fifo_queue6_tti6_ufc_d", 0}, + {"fifo_queue6_tti6_timer_val_us", 0}, + {"fifo_queue6_tti6_timer_ac_en", 0}, + {"fifo_queue6_tti6_timer_ci_en", 0}, + + {"fifo_queue7_max", 0}, + {"fifo_queue7_initial", 0}, + {"fifo_queue7_intr", 0}, + {"fifo_queue7_intr_vector", 0}, + {"fifo_queue7_no_snoop_bits", 0}, + {"fifo_queue7_priority", 0}, + {"fifo_queue7_configured", 0}, + + {"fifo_queue7_tti0_enabled", 0}, + {"fifo_queue7_tti0_urange_a", 0}, + {"fifo_queue7_tti0_ufc_a", 0}, + {"fifo_queue7_tti0_urange_b", 0}, + {"fifo_queue7_tti0_ufc_b", 0}, + {"fifo_queue7_tti0_urange_c", 0}, + {"fifo_queue7_tti0_ufc_c", 0}, + {"fifo_queue7_tti0_ufc_d", 0}, + {"fifo_queue7_tti0_timer_val_us", 0}, + {"fifo_queue7_tti0_timer_ac_en", 0}, + {"fifo_queue7_tti0_timer_ci_en", 0}, + + {"fifo_queue7_tti1_enabled", 0}, + {"fifo_queue7_tti1_urange_a", 0}, + 
{"fifo_queue7_tti1_ufc_a", 0}, + {"fifo_queue7_tti1_urange_b", 0}, + {"fifo_queue7_tti1_ufc_b", 0}, + {"fifo_queue7_tti1_urange_c", 0}, + {"fifo_queue7_tti1_ufc_c", 0}, + {"fifo_queue7_tti1_ufc_d", 0}, + {"fifo_queue7_tti1_timer_val_us", 0}, + {"fifo_queue7_tti1_timer_ac_en", 0}, + {"fifo_queue7_tti1_timer_ci_en", 0}, + + {"fifo_queue7_tti2_enabled", 0}, + {"fifo_queue7_tti2_urange_a", 0}, + {"fifo_queue7_tti2_ufc_a", 0}, + {"fifo_queue7_tti2_urange_b", 0}, + {"fifo_queue7_tti2_ufc_b", 0}, + {"fifo_queue7_tti2_urange_c", 0}, + {"fifo_queue7_tti2_ufc_c", 0}, + {"fifo_queue7_tti2_ufc_d", 0}, + {"fifo_queue7_tti2_timer_val_us", 0}, + {"fifo_queue7_tti2_timer_ac_en", 0}, + {"fifo_queue7_tti2_timer_ci_en", 0}, + + {"fifo_queue7_tti3_enabled", 0}, + {"fifo_queue7_tti3_urange_a", 0}, + {"fifo_queue7_tti3_ufc_a", 0}, + {"fifo_queue7_tti3_urange_b", 0}, + {"fifo_queue7_tti3_ufc_b", 0}, + {"fifo_queue7_tti3_urange_c", 0}, + {"fifo_queue7_tti3_ufc_c", 0}, + {"fifo_queue7_tti3_ufc_d", 0}, + {"fifo_queue7_tti3_timer_val_us", 0}, + {"fifo_queue7_tti3_timer_ac_en", 0}, + {"fifo_queue7_tti3_timer_ci_en", 0}, + + {"fifo_queue7_tti4_enabled", 0}, + {"fifo_queue7_tti4_urange_a", 0}, + {"fifo_queue7_tti4_ufc_a", 0}, + {"fifo_queue7_tti4_urange_b", 0}, + {"fifo_queue7_tti4_ufc_b", 0}, + {"fifo_queue7_tti4_urange_c", 0}, + {"fifo_queue7_tti4_ufc_c", 0}, + {"fifo_queue7_tti4_ufc_d", 0}, + {"fifo_queue7_tti4_timer_val_us", 0}, + {"fifo_queue7_tti4_timer_ac_en", 0}, + {"fifo_queue7_tti4_timer_ci_en", 0}, + + {"fifo_queue7_tti5_enabled", 0}, + {"fifo_queue7_tti5_urange_a", 0}, + {"fifo_queue7_tti5_ufc_a", 0}, + {"fifo_queue7_tti5_urange_b", 0}, + {"fifo_queue7_tti5_ufc_b", 0}, + {"fifo_queue7_tti5_urange_c", 0}, + {"fifo_queue7_tti5_ufc_c", 0}, + {"fifo_queue7_tti5_ufc_d", 0}, + {"fifo_queue7_tti5_timer_val_us", 0}, + {"fifo_queue7_tti5_timer_ac_en", 0}, + {"fifo_queue7_tti5_timer_ci_en", 0}, + + {"fifo_queue7_tti6_enabled", 0}, + {"fifo_queue7_tti6_urange_a", 0}, + {"fifo_queue7_tti6_ufc_a", 0}, + {"fifo_queue7_tti6_urange_b", 0}, + {"fifo_queue7_tti6_ufc_b", 0}, + {"fifo_queue7_tti6_urange_c", 0}, + {"fifo_queue7_tti6_ufc_c", 0}, + {"fifo_queue7_tti6_ufc_d", 0}, + {"fifo_queue7_tti6_timer_val_us", 0}, + {"fifo_queue7_tti6_timer_ac_en", 0}, + {"fifo_queue7_tti6_timer_ci_en", 0}, + + {"dump_on_serr", 0}, + {"dump_on_eccerr", 0}, + {"dump_on_parityerr", 0}, + {"rth_en", 0}, + {"rth_bucket_size", 0}, + {"rth_spdm_en", 0}, + {"rth_spdm_use_l4", 0}, + {"rxufca_intr_thres", 0}, + {"rxufca_lo_lim", 0}, + {"rxufca_hi_lim", 0}, + {"rxufca_lbolt_period", 0}, + {"link_valid_cnt", 0}, + {"link_retry_cnt", 0}, + {"link_stability_period", 0}, + {"device_poll_millis", 0}, + {"no_isr_events", 0}, + {"lro_sg_size", 0}, + {"lro_frm_len", 0}, + {"bimodal_interrupts", 0}, + {"bimodal_timer_lo_us", 0}, + {"bimodal_timer_hi_us", 0}, + {"rts_mac_en", 0}, + {"rts_qos_en", 0}, + {"rts_port_en", 0}, }; -xge_stats_intr_info_t intrInfo[] = -{ - {"rx_traffic_intr_cnt", 0}, - {"tx_traffic_intr_cnt", 0}, - {"txpic_intr_cnt", 0}, - {"txdma_intr_cnt", 0}, - {"txmac_intr_cnt", 0}, - {"txxgxs_intr_cnt", 0}, - {"rxpic_intr_cnt", 0}, - {"rxdma_intr_cnt", 0}, - {"rxmac_intr_cnt", 0}, - {"rxxgxs_intr_cnt", 0}, - {"mc_intr_cnt", 0}, - {"not_traffic_intr_cnt", 0}, - {"not_xge_intr_cnt", 0}, - {"traffic_intr_cnt", 0}, - {"total_intr_cnt", 0}, - {"soft_reset_cnt", 0}, - {"rxufca_hi_adjust_cnt", 0}, - {"rxufca_lo_adjust_cnt", 0}, - {"bimodal_hi_adjust_cnt", 0}, - {"bimodal_lo_adjust_cnt", 0}, +xge_stats_intr_info_t intrInfo[] = { + {"rx_traffic_intr_cnt", 
0}, + {"tx_traffic_intr_cnt", 0}, + {"txpic_intr_cnt", 0}, + {"txdma_intr_cnt", 0}, + {"txmac_intr_cnt", 0}, + {"txxgxs_intr_cnt", 0}, + {"rxpic_intr_cnt", 0}, + {"rxdma_intr_cnt", 0}, + {"rxmac_intr_cnt", 0}, + {"rxxgxs_intr_cnt", 0}, + {"mc_intr_cnt", 0}, + {"not_traffic_intr_cnt", 0}, + {"not_xge_intr_cnt", 0}, + {"traffic_intr_cnt", 0}, + {"total_intr_cnt", 0}, + {"soft_reset_cnt", 0}, + {"rxufca_hi_adjust_cnt", 0}, + {"rxufca_lo_adjust_cnt", 0}, + {"bimodal_hi_adjust_cnt", 0}, + {"bimodal_lo_adjust_cnt", 0}, #ifdef CONFIG_LRO - {"tot_frms_lroised", 0}, - {"tot_lro_sessions", 0}, - {"lro_frm_len_exceed_cnt", 0}, - {"lro_sg_exceed_cnt", 0}, - {"lro_out_of_seq_pkt_cnt", 0}, - {"lro_dup_pkt_cnt", 0} + {"tot_frms_lroised", 0}, + {"tot_lro_sessions", 0}, + {"lro_frm_len_exceed_cnt", 0}, + {"lro_sg_exceed_cnt", 0}, + {"lro_out_of_seq_pkt_cnt", 0}, + {"lro_dup_pkt_cnt", 0} #endif }; -xge_stats_tcode_info_t tcodeInfo[] = -{ - {"sm_err_cnt", 0, 4, 0}, - {"single_ecc_err_cnt", 0, 4, 0}, - {"double_ecc_err_cnt", 0, 4, 0}, - {"ecc_err_cnt", 0, 4, 0}, - {"parity_err_cnt", 0, 4, 0}, - {"serr_cnt", 0, 4, 0}, - {"rxd_t_code_transfer_ok", 0, 4, 1}, - {"rxd_t_code_parity", 0, 4, 0}, - {"rxd_t_code_abort", 0, 4, 0}, - {"rxd_t_code_parity_abort", 0, 4, 0}, - {"rxd_t_code_rda_failure", 0, 4, 0}, - {"rxd_t_code_unknown_proto", 0, 4, 0}, - {"rxd_t_code_bad_fcs", 0, 4, 0}, - {"rxd_t_code_buff_size", 0, 4, 0}, - {"rxd_t_code_bad_ecc", 0, 4, 0}, - {"rxd_t_code_unused_9", 0, 4, 1}, - {"rxd_t_code_unused_a", 0, 4, 1}, - {"rxd_t_code_unused_b", 0, 4, 1}, - {"rxd_t_code_unused_c", 0, 4, 1}, - {"rxd_t_code_unused_d", 0, 4, 1}, - {"rxd_t_code_unused_e", 0, 4, 1}, - {"rxd_t_code_unknown", 0, 4, 0}, - {"txd_t_code_transfer_ok", 0, 4, 1}, - {"txd_t_code_unused_1", 0, 4, 1}, - {"txd_t_code_abort_buffer", 0, 4, 0}, - {"txd_t_code_abort_dtor", 0, 4, 0}, - {"txd_t_code_unused_4", 0, 4, 1}, - {"txd_t_code_unused_5", 0, 4, 1}, - {"txd_t_code_unused_6", 0, 4, 1}, - {"txd_t_code_parity", 0, 4, 0}, - {"txd_t_code_unused_8", 0, 4, 1}, - {"txd_t_code_unused_9", 0, 4, 1}, - {"txd_t_code_loss_of_link", 0, 4, 0}, - {"txd_t_code_unused_a", 0, 4, 1}, - {"txd_t_code_unused_b", 0, 4, 1}, - {"txd_t_code_unused_c", 0, 4, 1}, - {"txd_t_code_unused_d", 0, 4, 1}, - {"txd_t_code_general_err", 0, 4, 0}, - {"alarm_transceiver_temp_high", 0, 4, 0}, - {"alarm_transceiver_temp_low", 0, 4, 0}, - {"alarm_laser_bias_current_high", 0, 4, 0}, - {"alarm_laser_bias_current_low", 0, 4, 0}, - {"alarm_laser_output_power_high", 0, 4, 0}, - {"alarm_laser_output_power_low", 0, 4, 0}, - {"warn_transceiver_temp_high", 0, 4, 0}, - {"warn_transceiver_temp_low", 0, 4, 0}, - {"warn_laser_bias_current_high", 0, 4, 0}, - {"warn_laser_bias_current_low", 0, 4, 0}, - {"warn_laser_output_power_high", 0, 4, 0}, - {"warn_laser_output_power_low", 0, 4, 0}, - {"excess_temp", 0, 2, 0}, - {"excess_bias_current", 0, 2, 0}, - {"excess_laser_output", 0, 2, 0}, - {"tick_period", 0, 2, 0} +xge_stats_tcode_info_t tcodeInfo[] = { + {"sm_err_cnt", 0, 4, 0}, + {"single_ecc_err_cnt", 0, 4, 0}, + {"double_ecc_err_cnt", 0, 4, 0}, + {"ecc_err_cnt", 0, 4, 0}, + {"parity_err_cnt", 0, 4, 0}, + {"serr_cnt", 0, 4, 0}, + {"rxd_t_code_transfer_ok", 0, 4, 1}, + {"rxd_t_code_parity", 0, 4, 0}, + {"rxd_t_code_abort", 0, 4, 0}, + {"rxd_t_code_parity_abort", 0, 4, 0}, + {"rxd_t_code_rda_failure", 0, 4, 0}, + {"rxd_t_code_unknown_proto", 0, 4, 0}, + {"rxd_t_code_bad_fcs", 0, 4, 0}, + {"rxd_t_code_buff_size", 0, 4, 0}, + {"rxd_t_code_bad_ecc", 0, 4, 0}, + {"rxd_t_code_unused_9", 0, 4, 1}, + 
{"rxd_t_code_unused_a", 0, 4, 1}, + {"rxd_t_code_unused_b", 0, 4, 1}, + {"rxd_t_code_unused_c", 0, 4, 1}, + {"rxd_t_code_unused_d", 0, 4, 1}, + {"rxd_t_code_unused_e", 0, 4, 1}, + {"rxd_t_code_unknown", 0, 4, 0}, + {"txd_t_code_transfer_ok", 0, 4, 1}, + {"txd_t_code_unused_1", 0, 4, 1}, + {"txd_t_code_abort_buffer", 0, 4, 0}, + {"txd_t_code_abort_dtor", 0, 4, 0}, + {"txd_t_code_unused_4", 0, 4, 1}, + {"txd_t_code_unused_5", 0, 4, 1}, + {"txd_t_code_unused_6", 0, 4, 1}, + {"txd_t_code_parity", 0, 4, 0}, + {"txd_t_code_unused_8", 0, 4, 1}, + {"txd_t_code_unused_9", 0, 4, 1}, + {"txd_t_code_loss_of_link", 0, 4, 0}, + {"txd_t_code_unused_a", 0, 4, 1}, + {"txd_t_code_unused_b", 0, 4, 1}, + {"txd_t_code_unused_c", 0, 4, 1}, + {"txd_t_code_unused_d", 0, 4, 1}, + {"txd_t_code_general_err", 0, 4, 0}, + {"alarm_transceiver_temp_high", 0, 4, 0}, + {"alarm_transceiver_temp_low", 0, 4, 0}, + {"alarm_laser_bias_current_high", 0, 4, 0}, + {"alarm_laser_bias_current_low", 0, 4, 0}, + {"alarm_laser_output_power_high", 0, 4, 0}, + {"alarm_laser_output_power_low", 0, 4, 0}, + {"warn_transceiver_temp_high", 0, 4, 0}, + {"warn_transceiver_temp_low", 0, 4, 0}, + {"warn_laser_bias_current_high", 0, 4, 0}, + {"warn_laser_bias_current_low", 0, 4, 0}, + {"warn_laser_output_power_high", 0, 4, 0}, + {"warn_laser_output_power_low", 0, 4, 0}, + {"excess_temp", 0, 2, 0}, + {"excess_bias_current", 0, 2, 0}, + {"excess_laser_output", 0, 2, 0}, + {"tick_period", 0, 2, 0} +}; + +xge_stats_driver_info_t driverInfo[] = { + {"isr_filter", 0}, + {"isr_line", 0}, + {"isr_msi", 0}, + {"tx_calls", 0}, + {"tx_completions", 0}, + {"tx_desc_compl", 0}, + {"tx_tcode", 0}, + {"tx_defrag", 0}, + {"tx_no_txd", 0}, + {"tx_map_fail", 0}, + {"tx_max_frags", 0}, + {"tx_tso", 0}, + {"tx_posted", 0}, + {"tx_again", 0}, + {"rx_completions", 0}, + {"rx_desc_compl", 0}, + {"rx_tcode", 0}, + {"rx_no_buf", 0}, + {"rx_map_fail", 0}, + {"lro_uncapable", 0}, + {"lro_begin", 0}, + {"lro_end1", 0}, + {"lro_end2", 0}, + {"lro_end3", 0}, + {"lro_append", 0}, + {"lro_session_exceeded", 0}, + {"lro_close", 0} }; |