authorcperciva <cperciva@FreeBSD.org>2017-07-07 00:34:51 +0000
committercperciva <cperciva@FreeBSD.org>2017-07-07 00:34:51 +0000
commit8df37be70f94a016be894dad5569593b1c6757a4 (patch)
tree03b4364763ac17e083b6d205939c114b59cbb71b
parent26b466cddb56ff375b07488c2b9248a090f1530a (diff)
downloadFreeBSD-src-8df37be70f94a016be894dad5569593b1c6757a4.zip
FreeBSD-src-8df37be70f94a016be894dad5569593b1c6757a4.tar.gz
MF11 r320731,320749,320759: Add Amazon Elastic Network Adapter driver
and turn it on in EC2 AMI builds

Approved by:	re (gjb)
Relnotes:	FreeBSD now supports "next generation" Enhanced Networking in the Amazon EC2 cloud
Sponsored by:	Amazon.com Inc. (original work)
-rw-r--r--	release/Makefile.ec2	2
-rw-r--r--	release/tools/ec2.conf	3
-rw-r--r--	share/man/man4/ena.4	255
-rw-r--r--	sys/conf/files	6
-rw-r--r--	sys/contrib/ena-com/ena_admin_defs.h	1412
-rw-r--r--	sys/contrib/ena-com/ena_com.c	2761
-rw-r--r--	sys/contrib/ena-com/ena_com.h	1054
-rw-r--r--	sys/contrib/ena-com/ena_common_defs.h	50
-rw-r--r--	sys/contrib/ena-com/ena_eth_com.c	509
-rw-r--r--	sys/contrib/ena-com/ena_eth_com.h	167
-rw-r--r--	sys/contrib/ena-com/ena_eth_io_defs.h	960
-rw-r--r--	sys/contrib/ena-com/ena_plat.h	376
-rw-r--r--	sys/contrib/ena-com/ena_regs_defs.h	137
-rw-r--r--	sys/dev/ena/ena.c	3848
-rw-r--r--	sys/dev/ena/ena.h	440
-rw-r--r--	sys/dev/ena/ena_sysctl.c	251
-rw-r--r--	sys/dev/ena/ena_sysctl.h	44
-rw-r--r--	sys/modules/Makefile	2
-rw-r--r--	sys/modules/ena/Makefile	41
19 files changed, 12317 insertions, 1 deletions
diff --git a/release/Makefile.ec2 b/release/Makefile.ec2
index 45a08e4..b413ea5 100644
--- a/release/Makefile.ec2
+++ b/release/Makefile.ec2
@@ -51,7 +51,7 @@ ec2ami: cw-ec2 ${CW_EC2_PORTINSTALL}
@echo "--------------------------------------------------------------"
@false
.endif
- /usr/local/bin/bsdec2-image-upload ${PUBLISH} --sriov \
+ /usr/local/bin/bsdec2-image-upload ${PUBLISH} --sriov --ena \
${.OBJDIR}/ec2.raw \
"${TYPE} ${REVISION}-${BRANCH}-${TARGET}${AMINAMESUFFIX}" \
"${TYPE} ${REVISION}-${BRANCH}-${TARGET}" \
diff --git a/release/tools/ec2.conf b/release/tools/ec2.conf
index 4581788..4a7a1e8 100644
--- a/release/tools/ec2.conf
+++ b/release/tools/ec2.conf
@@ -78,6 +78,9 @@ vm_extra_pre_umount() {
# nodes, but apply the workaround just in case.
echo 'hw.broken_txfifo="1"' >> ${DESTDIR}/boot/loader.conf
+ # Load the kernel module for the Amazon "Elastic Network Adapter"
+ echo 'if_ena_load="YES"' >> ${DESTDIR}/boot/loader.conf
+
# The first time the AMI boots, the installed "first boot" scripts
# should be allowed to run:
# * ec2_configinit (download and process EC2 user-data)
diff --git a/share/man/man4/ena.4 b/share/man/man4/ena.4
new file mode 100644
index 0000000..90e5839
--- /dev/null
+++ b/share/man/man4/ena.4
@@ -0,0 +1,255 @@
+.\" Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in
+.\" the documentation and/or other materials provided with the
+.\" distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+.\" OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+.\" SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+.\" LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+.\" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd May 04, 2017
+.Dt ENA 4
+.Os
+.Sh NAME
+.Nm ena
+.Nd "FreeBSD kernel driver for Elastic Network Adapter (ENA) family"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following line in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device ena"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_ena_load="YES"
+.Ed
+.Sh DESCRIPTION
+The ENA is a networking interface designed to make good use of modern CPU
+features and system architectures.
+.Pp
+The ENA device exposes a lightweight management interface with a
+minimal set of memory mapped registers and extendable command set
+through an Admin Queue.
+.Pp
+The driver supports a range of ENA devices, is link-speed independent
+(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has
+a negotiated and extendable feature set.
+.Pp
+Some ENA devices support SR-IOV. This driver is used for both the
+SR-IOV Physical Function (PF) and Virtual Function (VF) devices.
+.Pp
+The ENA devices enable high speed and low overhead network traffic
+processing by providing multiple Tx/Rx queue pairs (the maximum number
+is advertised by the device via the Admin Queue), a dedicated MSI-X
+interrupt vector per Tx/Rx queue pair, and CPU cacheline optimized
+data placement.
+.Pp
+The
+.Nm
+driver supports industry standard TCP/IP offload features such
+as checksum offload and TCP transmit segmentation offload (TSO).
+Receive-side scaling (RSS) is supported for multi-core scaling.
+.Pp
+The
+.Nm
+driver and its corresponding devices implement health
+monitoring mechanisms such as watchdog, enabling the device and driver
+to recover in a manner transparent to the application, as well as
+debug logs.
+.Pp
+Some ENA devices support a working mode called Low-latency
+Queue (LLQ), which saves several additional microseconds.
+This feature will be implemented in the driver in future releases.
+.Sh HARDWARE
+Supported PCI vendor ID/device IDs:
+.Pp
+.Bl -bullet -compact
+.It
+1d0f:0ec2 - ENA PF
+.It
+1d0f:1ec2 - ENA PF with LLQ support
+.It
+1d0f:ec20 - ENA VF
+.It
+1d0f:ec21 - ENA VF with LLQ support
+.El
+.Sh DIAGNOSTICS
+.Ss Device initialization phase:
+.Bl -diag
+.It ena%d: failed to init mmio read less
+.Pp
+An error occurred during initialization of the MMIO register read request.
+.It ena%d: Can not reset device
+.Pp
+The device could not be reset; it may not be responding or a reset may
+already be in progress.
+.It ena%d: device version is too low
+.Pp
+The controller version is too low and is not supported by the driver.
+.It ena%d: Invalid dma width value %d
+.Pp
+The controller can request the DMA transaction width.
+The device stopped responding or requested an invalid value.
+.It ena%d: Can not initialize ena admin queue with device
+.Pp
+Initialization of the Admin Queue failed; the device may not be responding or
+there was a problem with initialization of the resources.
+.It ena%d: Cannot get attribute for ena device rc: %d
+.Pp
+Failed to get attributes of the device from the controller.
+.It ena%d: Cannot configure aenq groups rc: %d
+.Pp
+An error occurred when trying to configure the AENQ groups.
+.El
+.Ss Driver initialisation/shutdown phase:
+.Bl -diag
+.It ena%d: PCI resource allocation failed!
+.It ena%d: allocating ena_dev failed
+.It ena%d: failed to pmap registers bar
+.It ena%d: Error while setting up bufring
+.It ena%d: Error with initialization of IO rings
+.It ena%d: can not allocate ifnet structure
+.It ena%d: Error with network interface setup
+.It ena%d: Failed to enable and set the admin interrupts
+.It ena%d: Failed to allocate %d, vectors %d
+.It ena%d: Failed to enable MSIX, vectors %d rc %d
+.It ena%d: Error with MSI-X enablement
+.It ena%d: could not allocate irq vector: %d
+.It ena%d: Unable to allocate bus resource: registers
+.Pp
+Resource allocation failed when initializing the device; the driver will not
+be attached.
+.It ena%d: ENA device init failed (err: %d)
+.Pp
+Device initialization failed; the driver will not be attached.
+.It ena%d: could not activate irq vector: %d
+.Pp
+An error occurred when trying to activate the interrupt vectors for the
+Admin Queue.
+.It ena%d: failed to register interrupt handler for irq %ju: %d
+.Pp
+An error occurred when trying to register the Admin Queue interrupt handler.
+.It ena%d: Cannot setup mgmnt queue intr
+.Pp
+An error occurred during configuration of the Admin Queue interrupts.
+.It ena%d: Enable MSI-X failed
+.Pp
+Configuration of MSI-X for the Admin Queue failed; there may be a lack of
+resources, or the interrupts could not be configured; the driver will not
+be attached.
+.It ena%d: VLAN is in use, detach first
+.Pp
+VLANs are in use while trying to detach the driver; the VLANs should be
+detached first, and then the detach routine should be called again.
+.It ena%d: Unmapped RX DMA tag associations
+.It ena%d: Unmapped TX DMA tag associations
+.Pp
+An error occurred when trying to destroy the RX/TX DMA tag.
+.It ena%d: Cannot init RSS
+.It ena%d: Cannot fill indirect table
+.It ena%d: Cannot fill indirect table
+.It ena%d: Cannot fill hash function
+.It ena%d: Cannot fill hash control
+.It ena%d: WARNING: RSS was not properly initialized, it will affect bandwidth
+.Pp
+An error occurred during initialization of one of the RSS resources; the
+device will still work, but performance will suffer because all RX packets
+will be passed to queue 0 and no hash information will be available.
+.It ena%d: failed to tear down irq: %d
+.It ena%d: dev has no parent while releasing res for irq: %d
+.Pp
+Release of the interrupts failed.
+.El
+.Ss Additional diagnostic:
+.Bl -diag
+.It ena%d: Cannot get attribute for ena device
+.Pp
+This message appears when trying to change the MTU and the driver is unable
+to get attributes from the device.
+.It ena%d: Invalid MTU setting. new_mtu: %d
+.Pp
+Requested MTU value is not supported and will not be set.
+.It ena%d: keep alive watchdog timeout
+.Pp
+Device stopped responding and will be reset.
+.It ena%d: Found a Tx that wasn't completed on time, qid %d, index %d.
+.Pp
+A packet was pushed to the NIC but not sent within the given time limit; this
+may be caused by a hung IO queue.
+.It ena%d: The number of lost tx completion is aboce the threshold (%d > %d). Reset the device
+.Pp
+If too many Tx packets were not completed on time, the device will be reset;
+this may be caused by a hung queue or device.
+.It ena%d: trigger reset is on
+.Pp
+The device will be reset; the reset is triggered either by the watchdog or
+when too many TX packets were not completed on time.
+.It ena%d: invalid value recvd
+.Pp
+Link status received from the device in the AENQ handler is invalid.
+.It ena%d: Allocation for Tx Queue %u failed
+.It ena%d: Allocation for Rx Queue %u failed
+.It ena%d: Unable to create Rx DMA map for buffer %d
+.It ena%d: Failed to create io TX queue #%d rc: %d
+.It ena%d: Failed to get TX queue handlers. TX queue num %d rc: %d
+.It ena%d: Failed to create io RX queue[%d] rc: %d
+.It ena%d: Failed to get RX queue handlers. RX queue num %d rc: %d
+.It ena%d: failed to request irq
+.It ena%d: could not allocate irq vector: %d
+.It ena%d: failed to register interrupt handler for irq %ju: %d
+.Pp
+IO resources initialization failed; the interface will not be brought up.
+.It ena%d: LRO[%d] Initialization failed!
+.Pp
+Initialization of the LRO for the RX ring failed.
+.It ena%d: failed to alloc buffer for rx queue
+.It ena%d: failed to add buffer for rx queue %d
+.It ena%d: refilled rx queue %d with %d pages only
+.Pp
+Allocation of resources used on the RX path failed; if this happened during
+initialization of the IO queue, the interface will not be brought up.
+.It ena%d: ioctl promisc/allmulti
+.Pp
+IOCTL request for the device to work in promiscuous/allmulti mode; see
+.Xr ifconfig 8
+for more details.
+.It ena%d: too many fragments. Last fragment: %d!
+.Pp
+A packet with an unsupported number of segments was queued for sending to the
+device; the packet will be dropped.
+.El
+.Sh SUPPORT
+If an issue is identified with the released source code on a supported adapter,
+email the specific information related to the issue to
+.Aq Mt mk@semihalf.com
+and
+.Aq Mt mw@semihalf.com .
+.Sh SEE ALSO
+.Xr vlan 4 ,
+.Xr ifconfig 8
+.Sh AUTHORS
+The
+.Nm
+driver was written by
+.An Semihalf.
diff --git a/sys/conf/files b/sys/conf/files
index b24a187..e4da191 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1547,6 +1547,12 @@ dev/e1000/e1000_mbx.c optional em | igb \
dev/e1000/e1000_osdep.c optional em | igb \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/et/if_et.c optional et
+dev/ena/ena.c optional ena \
+ compile-with "${NORMAL_C} -I$S/contrib"
+dev/ena/ena_sysctl.c optional ena \
+ compile-with "${NORMAL_C} -I$S/contrib"
+contrib/ena-com/ena_com.c optional ena
+contrib/ena-com/ena_eth_com.c optional ena
dev/en/if_en_pci.c optional en pci
dev/en/midway.c optional en
dev/ep/if_ep.c optional ep
diff --git a/sys/contrib/ena-com/ena_admin_defs.h b/sys/contrib/ena-com/ena_admin_defs.h
new file mode 100644
index 0000000..1d845c0
--- /dev/null
+++ b/sys/contrib/ena-com/ena_admin_defs.h
@@ -0,0 +1,1412 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+enum ena_admin_aq_completion_status {
+ ENA_ADMIN_SUCCESS = 0,
+
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+enum ena_admin_aq_feature_id {
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ ENA_ADMIN_HW_HINTS = 3,
+
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ ENA_ADMIN_MTU = 14,
+
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers are in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+struct ena_admin_aq_common_desc {
+ /* 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command_id;
+
+ /* as appears in ena_admin_aq_opcode */
+ uint8_t opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+};
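The packed sub-fields above (command_id in the low 12 bits, the phase and control-data bits in flags) are manipulated through the mask/shift helpers defined at the end of this header. A minimal sketch of filling the common descriptor; the command id value is illustrative only:

    struct ena_admin_aq_common_desc desc = {0};

    /* 12-bit command identifier, echoed back in the completion entry */
    set_ena_admin_aq_common_desc_command_id(&desc, 42);

    /* opcode as defined in enum ena_admin_aq_opcode */
    desc.opcode = ENA_ADMIN_GET_FEATURE;

    /* phase bit, compared by the consumer against its expected phase */
    set_ena_admin_aq_common_desc_phase(&desc, 1);

    /* the command carries an out-of-line control buffer */
    set_ena_admin_aq_common_desc_ctrl_data(&desc, 1);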
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ uint32_t length;
+
+ struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+ uint16_t sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved1;
+};
+
+struct ena_admin_aq_entry {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ uint32_t inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command;
+
+ uint8_t status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ uint16_t extended_status;
+
+ /* serves as a hint what AQ entries can be revoked */
+ uint16_t sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ uint32_t response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved8_w1;
+
+ /* 3:0 : placement_policy - Describing where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describing what policy
+ * to use for generating a completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ uint8_t sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Describes whether the
+ * queue ring memory is allocated in physically
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ uint8_t sq_caps_3;
+
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ uint16_t cq_idx;
+
+ /* submission queue depth in entries */
+ uint16_t sq_depth;
+
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ uint32_t reserved0_w7;
+
+ uint32_t reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
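As an illustration of how the caps bytes of the create-SQ command are composed, a rough sketch using the accessor helpers defined later in this header; the queue depth and CQ index are placeholders:

    struct ena_admin_aq_create_sq_cmd cmd = {0};

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
    set_ena_admin_aq_create_sq_cmd_sq_direction(&cmd, ENA_ADMIN_SQ_DIRECTION_TX);

    /* descriptors and headers kept in host (OS) memory */
    set_ena_admin_aq_create_sq_cmd_placement_policy(&cmd,
        ENA_ADMIN_PLACEMENT_POLICY_HOST);

    /* one completion entry generated per submitted descriptor */
    set_ena_admin_aq_create_sq_cmd_completion_policy(&cmd,
        ENA_ADMIN_COMPLETION_POLICY_DESC);

    /* ring allocated as one physically contiguous block */
    set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(&cmd, 1);

    cmd.cq_idx = 0;       /* the CQ must already exist */
    cmd.sq_depth = 1024;  /* placeholder depth, in entries */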
+
+struct ena_admin_acq_create_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t sq_idx;
+
+ uint16_t reserved;
+
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+ uint32_t sq_doorbell_offset;
+
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ uint32_t llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ uint8_t cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ uint8_t cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ uint16_t cq_depth;
+
+ /* msix vector assigned to this cq */
+ uint32_t msix_vector;
+
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t cq_idx;
+
+ /* actual cq depth in number of entries */
+ uint16_t cq_actual_depth;
+
+ uint32_t numa_node_register_offset;
+
+ uint32_t cq_head_db_register_offset;
+
+ uint32_t cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ uint16_t cq_idx;
+
+ uint16_t reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ uint8_t type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ uint8_t scope;
+
+ uint16_t reserved3;
+
+ /* queue id. used when scope is specific_queue */
+ uint16_t queue_idx;
+
+ /* device id, value 0xFFFF means mine. only privileged device can get
+ * stats of other device
+ */
+ uint16_t device_id;
+};
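A short sketch of requesting the basic (inline) statistics of the local device with this command; only fields declared above are touched, and submission of the descriptor is handled elsewhere:

    struct ena_admin_aq_get_stats_cmd cmd = {0};

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
    cmd.type = ENA_ADMIN_GET_STATS_TYPE_BASIC;  /* returned inline in the ACQ entry */
    cmd.scope = ENA_ADMIN_ETH_TRAFFIC;          /* whole-device traffic counters */
    cmd.device_id = 0xFFFF;                     /* 0xFFFF means "my own device" */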
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+ uint32_t tx_bytes_low;
+
+ uint32_t tx_bytes_high;
+
+ uint32_t tx_pkts_low;
+
+ uint32_t tx_pkts_high;
+
+ uint32_t rx_bytes_low;
+
+ uint32_t rx_bytes_high;
+
+ uint32_t rx_pkts_low;
+
+ uint32_t rx_pkts_high;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+
+ /* as appears in ena_admin_aq_feature_id */
+ uint8_t feature_id;
+
+ uint16_t reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+ uint32_t impl_id;
+
+ uint32_t device_version;
+
+ /* bitmap of ena_admin_aq_feature_id */
+ uint32_t supported_features;
+
+ uint32_t reserved3;
+
+ /* Indicates how many bits are used for physical address access. */
+ uint32_t phys_addr_width;
+
+ /* Indicates how many bits are used for virtual address access. */
+ uint32_t virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ uint8_t mac_addr[6];
+
+ uint8_t reserved7[2];
+
+ uint32_t max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+ /* including LLQs */
+ uint32_t max_sq_num;
+
+ uint32_t max_sq_depth;
+
+ uint32_t max_cq_num;
+
+ uint32_t max_cq_depth;
+
+ uint32_t max_llq_num;
+
+ uint32_t max_llq_depth;
+
+ uint32_t max_header_size;
+
+ /* Maximum Descriptors number, including meta descriptor, allowed for
+ * a single Tx packet
+ */
+ uint16_t max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ uint16_t max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+ /* exclude L2 */
+ uint32_t mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* debug area size */
+ uint32_t debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+ /* interrupt delay granularity in usec */
+ uint16_t intr_delay_resolution;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+ /* Link speed in Mb */
+ uint32_t speed;
+
+ /* bit field of enum ena_admin_link types */
+ uint32_t supported;
+
+ /* 0 : autoneg
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ uint32_t flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ uint32_t supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ uint32_t enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
+ */
+ uint32_t tx;
+
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ uint32_t rx_supported;
+
+ uint32_t rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ ENA_ADMIN_CRC32 = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+ uint32_t keys_num;
+
+ uint32_t reserved;
+
+ uint32_t key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+ uint32_t supported_func;
+
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
+ */
+ uint32_t selected_func;
+
+ /* initial value */
+ uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
+};
+
+struct ena_admin_proto_input {
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ uint16_t fields;
+
+ uint16_t reserved2;
+};
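For example, a conventional 4-tuple hash over source/destination addresses and ports would be expressed by OR-ing the corresponding field bits into a proto input; a sketch, not a statement of the driver's default configuration:

    /* hash TCP/IPv4 flows over both IP addresses and both L4 ports */
    struct ena_admin_proto_input tcp4_input = {
        .fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                  ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP,
    };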
+
+struct ena_admin_feature_rss_hash_control {
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+ /* supported hash input sorting
+ * 1 : L3_sort - support swap L3 addresses if DA is
+ * smaller than SA
+ * 2 : L4_sort - support swap L4 ports if DP smaller
+ * SP
+ */
+ uint16_t supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swap L3 addresses if
+ * DA smaller than SA
+ * 2 : enable_L4_sort - enable swap L4 ports if DP
+ * smaller than SP
+ */
+ uint16_t enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+ ENA_ADMIN_OS_LINUX = 1,
+
+ ENA_ADMIN_OS_WIN = 2,
+
+ ENA_ADMIN_OS_DPDK = 3,
+
+ ENA_ADMIN_OS_FREEBSD = 4,
+
+ ENA_ADMIN_OS_IPXE = 5,
+};
+
+struct ena_admin_host_info {
+ /* defined in enum ena_admin_os_type */
+ uint32_t os_type;
+
+ /* os distribution string format */
+ uint8_t os_dist_str[128];
+
+ /* OS distribution numeric format */
+ uint32_t os_dist;
+
+ /* kernel version string format */
+ uint8_t kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ uint32_t kernel_ver;
+
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
+ */
+ uint32_t driver_version;
+
+ /* features bitmap */
+ uint32_t supported_network_features[4];
+};
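The driver version is packed into the single driver_version word; a sketch using the accessor helpers defined later in this header, with placeholder version numbers:

    struct ena_admin_host_info info = {0};

    info.os_type = ENA_ADMIN_OS_FREEBSD;
    set_ena_admin_host_info_major(&info, 0);      /* bits 7:0   */
    set_ena_admin_host_info_minor(&info, 7);      /* bits 15:8  */
    set_ena_admin_host_info_sub_minor(&info, 0);  /* bits 23:16 */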
+
+struct ena_admin_rss_ind_table_entry {
+ uint16_t cq_idx;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+ /* min supported table size (2^min_size) */
+ uint16_t min_size;
+
+ /* max supported table size (2^max_size) */
+ uint16_t max_size;
+
+ /* table size (2^size) */
+ uint16_t size;
+
+ uint16_t reserved;
+
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
+ uint32_t inline_index;
+
+ /* used for updating single entry, ignored when setting the entire
+ * table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
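The inline entry allows a single indirection slot to be rewritten without resending the whole table; a sketch with placeholder slot and queue numbers:

    struct ena_admin_feature_rss_ind_table tbl = {0};

    tbl.inline_index = 5;         /* rewrite indirection slot 5 only */
    tbl.inline_entry.cq_idx = 2;  /* point the slot at RX queue 2 */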
+
+/* When hint value is 0, the driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ uint16_t mmio_read_timeout;
+
+ /* value in ms */
+ uint16_t driver_watchdog_timeout;
+
+ /* Per packet tx completion timeout. value in ms */
+ uint16_t missing_tx_completion_timeout;
+
+ uint16_t missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ uint16_t admin_completion_tx_timeout;
+
+ uint16_t netdev_wd_timeout;
+
+ uint16_t max_tx_sgl_size;
+
+ uint16_t max_rx_sgl_size;
+
+ uint16_t reserved[8];
+};
+
+struct ena_admin_get_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ uint32_t raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ uint32_t raw[14];
+
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ struct ena_admin_queue_feature_desc max_queue;
+
+ struct ena_admin_feature_aenq_desc aenq;
+
+ struct ena_admin_get_feature_link_desc link;
+
+ struct ena_admin_feature_offload_desc offload;
+
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
+ } u;
+};
+
+struct ena_admin_set_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ union {
+ uint32_t raw[11];
+
+ /* mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
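As a sketch of how a feature is programmed through this structure, setting the MTU needs only the common descriptor, the feature id, and the inline mtu member; the value is a placeholder, and submission of the command is handled by ena_com.c:

    struct ena_admin_set_feat_cmd cmd = {0};

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
    cmd.feat_common.feature_id = ENA_ADMIN_MTU;
    cmd.u.mtu.mtu = 9000;  /* placeholder; the value excludes the L2 header */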
+
+struct ena_admin_set_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ uint32_t raw[14];
+ } u;
+};
+
+struct ena_admin_aenq_common_desc {
+ uint16_t group;
+
+ uint16_t syndrom;
+
+ /* 0 : phase */
+ uint8_t flags;
+
+ uint8_t reserved1[3];
+
+ uint32_t timestamp_low;
+
+ uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+
+ ENA_ADMIN_UPDATE_HINTS = 2,
+};
+
+struct ena_admin_aenq_entry {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* 0 : link_status */
+ uint32_t flags;
+};
+
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+ uint16_t req_id;
+
+ uint16_t reg_off;
+
+ /* value is valid when poll is cleared */
+ uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
+{
+ return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
+{
+ p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
+{
+ return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
+{
+ p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+ p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+ p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
+{
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+ p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+ p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+ return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
+
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /* _ENA_ADMIN_H_ */
diff --git a/sys/contrib/ena-com/ena_com.c b/sys/contrib/ena-com/ena_com.c
new file mode 100644
index 0000000..c17ac24
--- /dev/null
+++ b/sys/contrib/ena-com/ena_com.c
@@ -0,0 +1,2761 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ena_com.h"
+#ifdef ENA_INTERNAL
+#include "ena_gen_info.h"
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (3000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 16
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ ena_wait_event_t wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+		ena_trc_err("dma address has more bits than the device supports\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high = (u64)addr >> 32;
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ sq->mem_handle);
+
+ if (!sq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
+
+ sq->db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ cq->mem_handle);
+
+ if (!cq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ cq->head = 0;
+ cq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ struct ena_com_aenq *aenq = &dev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ aenq->entries,
+ aenq->dma_addr,
+ aenq->mem_handle);
+
+ if (!aenq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = 0;
+ aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+ ena_trc_err("aenq handlers pointer is NULL\n");
+ return ENA_COM_INVAL;
+ }
+
+ aenq->aenq_handlers = aenq_handlers;
+
+ return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ struct ena_comp_ctx *comp_ctx)
+{
+ comp_ctx->occupied = false;
+ ATOMIC32_DEC(&queue->outstanding_cmds);
+}
+
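+/* Look up the completion context associated with a command id. When capture
+ * is set the context is marked occupied and the outstanding command counter
+ * is incremented; the context is later released via comp_ctxt_release().
+ */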
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+{
+ if (unlikely(command_id >= queue->q_depth)) {
+ ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+ return NULL;
+ }
+
+ if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ ena_trc_err("Completion context is occupied\n");
+ return NULL;
+ }
+
+ if (capture) {
+ ATOMIC32_INC(&queue->outstanding_cmds);
+ queue->comp_ctx[command_id].occupied = true;
+ }
+
+ return &queue->comp_ctx[command_id];
+}
+
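+/* Submit a single admin command: reject the command if the SQ is full,
+ * stamp it with the current phase bit and command id, copy it into the SQ,
+ * flip the phase on wrap-around and ring the SQ doorbell. Called with the
+ * admin queue lock held.
+ */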
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 tail_masked, cmd_id;
+ u16 queue_size_mask;
+ u16 cnt;
+
+ queue_size_mask = admin_queue->q_depth - 1;
+
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+ cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ if (cnt >= admin_queue->q_depth) {
+ ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
+ admin_queue->sq.tail,
+ admin_queue->sq.head,
+ admin_queue->q_depth);
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(ENA_COM_NO_SPACE);
+ }
+
+ cmd_id = admin_queue->curr_cmd_id;
+
+ cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+ ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= cmd_id &
+ ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(ENA_COM_INVAL);
+
+ comp_ctx->status = ENA_CMD_SUBMITTED;
+ comp_ctx->comp_size = (u32)comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
+
+ memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+ admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+ queue_size_mask;
+
+ admin_queue->sq.tail++;
+ admin_queue->stats.submitted_cmd++;
+
+ if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+ admin_queue->sq.phase = !admin_queue->sq.phase;
+
+ ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+ admin_queue->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+ size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
+ if (unlikely(!queue->comp_ctx)) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ for (i = 0; i < queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(queue, i, false);
+ if (comp_ctx)
+ ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
+ }
+
+ return 0;
+}
+
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ unsigned long flags;
+ struct ena_comp_ctx *comp_ctx;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ if (unlikely(!admin_queue->running_state)) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ return ERR_PTR(ENA_COM_NO_DEVICE);
+ }
+ comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+ cmd_size_in_bytes,
+ comp,
+ comp_size_in_bytes);
+ if (unlikely(IS_ERR(comp_ctx)))
+ admin_queue->running_state = false;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_sq *io_sq)
+{
+ size_t size;
+ int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+ sizeof(struct ena_eth_io_rx_desc);
+
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+ io_sq->bus = ena_dev->bus;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ }
+ } else {
+ ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_sq->tail = 0;
+ io_sq->next_to_comp = 0;
+ io_sq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+ int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+
+ /* Use the basic completion descriptor for Rx */
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+ io_cq->bus = ena_dev->bus;
+
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+ }
+
+ if (!io_cq->cdesc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_cq->phase = 1;
+ io_cq->head = 0;
+
+ return 0;
+}
+
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+ if (unlikely(!comp_ctx)) {
+ ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ admin_queue->running_state = false;
+ return;
+ }
+
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+ if (!admin_queue->polling)
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+}
+
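+/* Process all pending admin completions: entries are consumed while their
+ * phase bit matches the expected phase, which is toggled each time the
+ * (power of two) queue wraps around. Head counters and stats are updated
+ * once the loop terminates.
+ */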
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+ struct ena_admin_acq_entry *cqe = NULL;
+ u16 comp_num = 0;
+ u16 head_masked;
+ u8 phase;
+
+ head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+ phase = admin_queue->cq.phase;
+
+ cqe = &admin_queue->cq.entries[head_masked];
+
+ /* Go over all the completions */
+ while ((cqe->acq_common_descriptor.flags &
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+		/* Do not read the rest of the completion entry before the
+		 * phase bit has been validated
+		 */
+ rmb();
+ ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+ head_masked++;
+ comp_num++;
+ if (unlikely(head_masked == admin_queue->q_depth)) {
+ head_masked = 0;
+ phase = !phase;
+ }
+
+ cqe = &admin_queue->cq.entries[head_masked];
+ }
+
+ admin_queue->cq.head += comp_num;
+ admin_queue->cq.phase = phase;
+ admin_queue->sq.head += comp_num;
+ admin_queue->stats.completed_cmd += comp_num;
+}
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+ if (unlikely(comp_status != 0))
+ ena_trc_err("admin command failed[%u]\n", comp_status);
+
+ if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+ return ENA_COM_INVAL;
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+ return 0;
+ case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return ENA_COM_NO_MEM;
+ case ENA_ADMIN_UNSUPPORTED_OPCODE:
+ return ENA_COM_PERMISSION;
+ case ENA_ADMIN_BAD_OPCODE:
+ case ENA_ADMIN_MALFORMED_REQUEST:
+ case ENA_ADMIN_ILLEGAL_PARAMETER:
+ case ENA_ADMIN_UNKNOWN_ERROR:
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
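+/* Polling mode: repeatedly drain the admin CQ (sleeping 100 msec between
+ * iterations) until the command leaves the SUBMITTED state or the
+ * completion timeout expires.
+ */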
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags, timeout;
+ int ret;
+
+ timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+ while (1) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
+
+ if (ENA_TIME_EXPIRE(timeout)) {
+ ena_trc_err("Wait for completion (polling) timeout\n");
+ /* ENA didn't have any completion */
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ENA_MSLEEP(100);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ ena_trc_err("Command was aborted\n");
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ret = ENA_COM_NO_DEVICE;
+ goto err;
+ }
+
+ ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ int ret;
+
+ ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
+ admin_queue->completion_timeout);
+
+	/* In case the command wasn't completed, find out the root cause.
+	 * There might be 2 kinds of errors:
+	 * 1) No completion (timeout reached)
+	 * 2) There is a completion but the driver didn't receive the MSI-X
+	 *    interrupt.
+	 */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+		if (comp_ctx->status == ENA_CMD_COMPLETED)
+			ena_trc_err("The ena device has sent a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+				    comp_ctx->cmd_opcode);
+		else
+			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
+				    comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method reads a hardware device register by posting a read request
+ * and waiting for the response.
+ * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, timeout, ret;
+ unsigned long flags;
+ int i;
+
+ ENA_MIGHT_SLEEP();
+
+ timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
+
+ ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+	/* make sure read_resp->req_id gets updated before the hw can write
+	 * to it
+	 */
+ wmb();
+
+ ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < timeout; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ ENA_UDELAY(1);
+ }
+
+ if (unlikely(i == timeout)) {
+		ena_trc_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num,
+ offset,
+ read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+ ena_trc_err("Read failure: wrong offset provided");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+ }
+err:
+ ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
+
+ return ret;
+}
+
+/* There are two ways to wait for completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on wait queue until the completion is ready
+ * (or the timeout expired).
+ * It is expected that the IRQ handler calls ena_com_handle_admin_completion
+ * to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ else
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
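+/* Poll the device status register until the RESET_IN_PROGRESS bit matches
+ * the expected state or the given timeout (in 100 msec units) expires.
+ */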
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ /* The resolution of the timeout is 100ms */
+ ENA_MSLEEP(100);
+ }
+
+ return ENA_COM_TIMER_EXPIRED;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+	/* Device attributes are always supported */
+ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+ !(ena_dev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_get_feat_cmd get_cmd;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+ ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ else
+ get_cmd.aq_common_descriptor.flags = 0;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.control_buffer.address,
+ control_buf_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ get_cmd.control_buffer.length = control_buff_size;
+
+ get_cmd.feat_common.feature_id = feature_id;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct ena_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
+
+ return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ return ena_com_get_feature_ex(ena_dev,
+ get_resp,
+ feature_id,
+ 0,
+ 0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+
+ if (unlikely(!rss->hash_key))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+ rss->hash_key = NULL;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+
+ if (unlikely(!rss->hash_ctrl))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+ rss->hash_ctrl = NULL;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ u16 log_size)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ size_t tbl_size;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ if (unlikely(ret))
+ return ret;
+
+ if ((get_resp.u.ind_table.min_size > log_size) ||
+ (get_resp.u.ind_table.max_size < log_size)) {
+		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
+ 1 << log_size,
+ 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
+ return ENA_COM_INVAL;
+ }
+
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+ rss->host_rss_ind_tbl =
+ ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+ rss->tbl_log_size = log_size;
+
+ return 0;
+
+mem_err2:
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+mem_err1:
+ rss->tbl_log_size = 0;
+ return ENA_COM_NO_MEM;
+}
+
+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ size_t tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ if (rss->rss_ind_tbl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+
+ if (rss->host_rss_ind_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ rss->host_rss_ind_tbl = NULL;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_sq_cmd create_cmd;
+ struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+ u8 direction;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ create_cmd.sq_identity |= (direction <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+ create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+ create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+ create_cmd.sq_caps_3 |=
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+ create_cmd.cq_idx = cq_idx;
+ create_cmd.sq_depth = io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_sq->idx = cmd_completion.sq_idx;
+
+ io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+ + cmd_completion.llq_headers_offset);
+
+ io_sq->desc_addr.pbuf_dev_addr =
+ (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+ cmd_completion.llq_descriptors_offset);
+ }
+
+ ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+}
+
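+/* Translate the host RSS indirection table (host queue ids) into the
+ * device representation (device queue indices of the corresponding Rx
+ * IO queues).
+ */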
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_com_io_sq *io_sq;
+ u16 qid;
+ int i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ qid = rss->host_rss_ind_tbl[i];
+ if (qid >= ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+
+ if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+ return ENA_COM_INVAL;
+
+ rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+ }
+
+ return 0;
+}
+
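+/* Inverse of the conversion above: build a map from device queue index
+ * back to host queue id and use it to reconstruct the host indirection
+ * table.
+ */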
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+ u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+ struct ena_rss *rss = &ena_dev->rss;
+ u8 idx;
+ u16 i;
+
+ for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+ dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+ idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+
+ if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+ }
+
+ return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ size_t size;
+
+ size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+ ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ if (!ena_dev->intr_moder_tbl)
+ return ENA_COM_NO_MEM;
+
+ ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+ return 0;
+}
+
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int i;
+
+ if (!intr_delay_resolution) {
+ ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ intr_delay_resolution = 1;
+ }
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+ /* update Rx */
+ for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+ intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+ /* update Tx */
+ ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/******************************* API ******************************/
+/*****************************************************************************/
+
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct ena_comp_ctx *comp_ctx;
+ int ret;
+
+ comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+ comp, comp_size);
+ if (unlikely(IS_ERR(comp_ctx))) {
+ if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+ ena_trc_dbg("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
+ return PTR_ERR(comp_ctx);
+ }
+
+ ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+ if (unlikely(ret)) {
+ if (admin_queue->running_state)
+ ena_trc_err("Failed to process command. ret = %d\n",
+ ret);
+ else
+ ena_trc_dbg("Failed to process command. ret = %d\n",
+ ret);
+ }
+ return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_cq_cmd create_cmd;
+ struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_caps_1 |=
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+ create_cmd.msix_vector = io_cq->msix_vector;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register_offset);
+
+ if (cmd_completion.cq_head_db_register_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_register_offset);
+
+ if (cmd_completion.numa_node_register_offset)
+ io_cq->numa_node_cfg_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+ ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq)
+{
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Invalid queue number %d but the max is %d\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ *io_sq = &ena_dev->io_sq_queues[qid];
+ *io_cq = &ena_dev->io_cq_queues[qid];
+
+ return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ if (!admin_queue->comp_ctx)
+ return;
+
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
+ if (unlikely(!comp_ctx))
+ break;
+
+ comp_ctx->status = ENA_CMD_ABORTED;
+
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+ }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(20);
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ }
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+ u16 depth = ena_dev->aenq.q_depth;
+
+ ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+
+ /* Init head_db to mark that all entries in the queue
+ * are initially available
+ */
+ ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ if (ret) {
+ ena_trc_info("Can't get aenq configuration\n");
+ return ret;
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+ ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ get_resp.u.aenq.supported_groups,
+ groups_flag);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+ cmd.u.aenq.enabled_groups = groups_flag;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+ u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ int width;
+
+ if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ ena_trc_dbg("ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+ ena_trc_err("DMA width illegal value: %d\n", width);
+ return ENA_COM_INVAL;
+ }
+
+ ena_dev->dma_addr_bits = width;
+
+ return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+ u32 ver;
+ u32 ctrl_ver;
+ u32 ctrl_ver_masked;
+
+	/* Make sure the ENA version and the controller version are at least
+	 * as recent as the driver expects
+	 */
+ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+ (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ ena_trc_info("ena device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_ENA_VER) {
+ ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
+ return -1;
+ }
+
+ ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+ ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
+
+ ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
+
+ ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
+
+ if (admin_queue->comp_ctx)
+ ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+
+ admin_queue->comp_ctx = NULL;
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr, sq->mem_handle);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr, cq->mem_handle);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr, aenq->mem_handle);
+ aenq->entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+ ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_SPINLOCK_INIT(mmio_read->lock);
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+ if (unlikely(!mmio_read->read_resp))
+ return ENA_COM_NO_MEM;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ mmio_read->read_resp->req_id = 0x0;
+ mmio_read->seq_num = 0x0;
+ mmio_read->readless_supported = true;
+
+ return 0;
+}
+
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+
+ mmio_read->read_resp = NULL;
+
+ ENA_SPINLOCK_DESTROY(mmio_read->lock);
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ u32 addr_low, addr_high;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
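+/* Bring up the admin queue: verify the device is ready, allocate the
+ * completion contexts, SQ and CQ, program their base addresses and
+ * capability registers, and initialize the AENQ with the supplied handlers.
+ */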
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+ int ret;
+
+#ifdef ENA_INTERNAL
+ ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
+ ENA_GEN_COMMIT, ENA_GEN_DATE);
+#endif
+ dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+ ena_trc_err("Device isn't ready, abort com init\n");
+ return ENA_COM_NO_DEVICE;
+ }
+
+ admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+ admin_queue->bus = ena_dev->bus;
+ admin_queue->q_dmadev = ena_dev->dmadev;
+ admin_queue->polling = false;
+ admin_queue->curr_cmd_id = 0;
+
+ ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
+
+ if (init_spinlock)
+ ENA_SPINLOCK_INIT(admin_queue->q_lock);
+
+ ret = ena_com_init_comp_ctxt(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_sq(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_cq(admin_queue);
+ if (ret)
+ goto error;
+
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
+
+ aq_caps = 0;
+ aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ acq_caps = 0;
+ acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
+ ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+ if (ret)
+ goto error;
+
+ admin_queue->running_state = true;
+
+ return 0;
+error:
+ ena_com_admin_destroy(ena_dev);
+
+ return ret;
+}
+
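+/* Create an IO queue pair: the CQ is created first so that the SQ can be
+ * bound to it; if SQ creation fails the CQ is destroyed and the host
+ * memory is freed.
+ */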
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[ctx->qid];
+ io_cq = &ena_dev->io_cq_queues[ctx->qid];
+
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
+
+ /* Init CQ */
+ io_cq->q_depth = ctx->queue_size;
+ io_cq->direction = ctx->direction;
+ io_cq->qid = ctx->qid;
+
+ io_cq->msix_vector = ctx->msix_vector;
+
+ io_sq->q_depth = ctx->queue_size;
+ io_sq->direction = ctx->direction;
+ io_sq->qid = ctx->qid;
+
+ io_sq->mem_queue_type = ctx->mem_queue_type;
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+ io_sq->tx_max_header_size =
+ ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+ goto error;
+ ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+ if (ret)
+ goto destroy_io_cq;
+
+ return 0;
+
+destroy_io_cq:
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+ return ret;
+}
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ ena_com_destroy_io_sq(ena_dev, io_sq);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_DEVICE_ATTRIBUTES);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+ sizeof(get_resp.u.dev_attr));
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_AENQ_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+ sizeof(get_resp.u.aenq));
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+ sizeof(get_resp.u.offload));
+
+	/* The driver hints admin command isn't mandatory, so in case the
+	 * command isn't supported, set driver hints to 0
+	 */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == ENA_COM_PERMISSION)
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
+ return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+ ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_com_get_specific_aenq_cb:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+ u16 group)
+{
+ struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+ if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_com_aenq_intr_handler:
+ * handles the incoming AENQ events.
+ * pop events from the queue and apply the specific handler
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
+ ena_aenq_handler handler_cb;
+ u16 masked_head, processed = 0;
+ u8 phase;
+
+ masked_head = aenq->head & (aenq->q_depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+ phase) {
+		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%jus]\n",
+ aenq_common->group,
+ aenq_common->syndrom,
+ (u64)aenq_common->timestamp_low +
+ ((u64)aenq_common->timestamp_high << 32));
+
+ /* Handle specific event*/
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler*/
+
+ /* Get next event entry */
+ masked_head++;
+ processed++;
+
+ if (unlikely(masked_head == aenq->q_depth)) {
+ masked_head = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[masked_head];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->head += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+ ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
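+/* Reset handshake: assert the reset bit, wait for RESET_IN_PROGRESS to be
+ * set, deassert the bit and wait for it to clear. The admin completion
+ * timeout is then derived from the capabilities register (100 msec units),
+ * falling back to ADMIN_CMD_TIMEOUT_US.
+ */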
+int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+{
+ u32 stat, timeout, cap, reset_val;
+ int rc;
+
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+ (cap == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read32 timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+ ena_trc_err("Device isn't ready, can't reset device\n");
+ return ENA_COM_INVAL;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (timeout == 0) {
+ ena_trc_err("Invalid timeout value\n");
+ return ENA_COM_INVAL;
+ }
+
+ /* start reset */
+ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+
+ /* Write again the MMIO read request address */
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn on\n");
+ return rc;
+ }
+
+ /* reset done */
+ ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn off\n");
+ return rc;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
+
+ return ret;
+}
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+ ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+ cmd.u.mtu.mtu = mtu;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload)
+{
+ int ret;
+ struct ena_admin_get_feat_resp resp;
+
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+ memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+ return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_PERMISSION;
+ }
+
+ /* Validate hash function is supported */
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ if (unlikely(ret))
+ return ret;
+
+	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
+ ena_trc_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+ cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+ cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_key_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+	/* Make sure the key size is a multiple of DWORDs */
+ if (unlikely(key_len & 0x3))
+ return ENA_COM_INVAL;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ ena_trc_err("Flow hash function %d isn't supported\n", func);
+ return ENA_COM_PERMISSION;
+ }
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+ if (key_len > sizeof(hash_key->key)) {
+ ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return ENA_COM_INVAL;
+ }
+
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+ break;
+ default:
+ ena_trc_err("Invalid hash function (%d)\n", func);
+ return ENA_COM_INVAL;
+ }
+
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
+ if (unlikely(rc))
+ ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+ return rc;
+}
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ if (func)
+ *func = rss->hash_func;
+
+ if (key)
+ memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+
+ return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_INPUT,
+ rss->hash_ctrl_dma_addr,
+ sizeof(*rss->hash_ctrl));
+ if (unlikely(rc))
+ return rc;
+
+ if (fields)
+ *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+ return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_INPUT)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+ cmd.u.flow_hash_input.enabled_input_sort =
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_ctrl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+ ena_trc_err("Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl =
+ rss->hash_ctrl;
+ u16 available_fields = 0;
+ int rc, i;
+
+ /* Get the supported hash input */
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+ ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+ for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+ available_fields = hash_ctrl->selected_fields[i].fields &
+ hash_ctrl->supported_fields[i].fields;
+ if (available_fields != hash_ctrl->selected_fields[i].fields) {
+			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
+ return ENA_COM_PERMISSION;
+ }
+ }
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ u16 supported_fields;
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+ ena_trc_err("Invalid proto num (%u)\n", proto);
+ return ENA_COM_INVAL;
+ }
+
+ /* Get the ctrl table */
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ /* Make sure all the fields are supported */
+ supported_fields = hash_ctrl->supported_fields[proto].fields;
+	if ((hash_fields & supported_fields) != hash_fields) {
+		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+			    proto, hash_fields, supported_fields);
+		return ENA_COM_PERMISSION;
+	}
+
+ hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+	return rc;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+ return ENA_COM_INVAL;
+
+ if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+ return 0;
+}
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_PERMISSION;
+ }
+
+ ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+ if (ret) {
+ ena_trc_err("Failed to convert host indirection table to device table\n");
+ return ret;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.u.ind_table.size = rss->tbl_log_size;
+ cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->rss_ind_tbl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 tbl_size;
+ int i, rc;
+
+ tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ rss->rss_ind_tbl_dma_addr,
+ tbl_size);
+ if (unlikely(rc))
+ return rc;
+
+ if (!ind_tbl)
+ return 0;
+
+ rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+ if (unlikely(rc))
+ return rc;
+
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+ return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+ int rc;
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
+ rc = ena_com_hash_key_allocate(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_key;
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+
+ return 0;
+
+err_hash_ctrl:
+ ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+ ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+ return rc;
+}
+
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+ ena_com_indirect_table_destroy(ena_dev);
+ ena_com_hash_key_destroy(ena_dev);
+ ena_com_hash_ctrl_destroy(ena_dev);
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+}
+
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ if (unlikely(!host_attr->host_info))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return ENA_COM_NO_MEM;
+ }
+
+ host_attr->debug_area_size = debug_area_size;
+
+ return 0;
+}
+
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->host_info) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ host_attr->host_info = NULL;
+ }
+}
+
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->debug_area_virt_addr) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ host_attr->debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+
+ int ret;
+
+ /* Host attribute config is called before ena_com_get_dev_attr_feat
+ * so ena_com can't check if the feature is supported.
+ */
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.debug_ba,
+ host_attr->debug_area_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.os_info_ba,
+ host_attr->host_info_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set host attributes: %d\n", ret);
+
+ return ret;
+}
+
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+ ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ /* We use LOWEST entry of moderation table for storing
+ * nonadaptive interrupt coalescing values
+ */
+ ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ if (ena_dev->intr_moder_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+ ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ u16 delay_resolution;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+
+ if (rc) {
+ if (rc == ENA_COM_PERMISSION) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+ ena_com_disable_adaptive_moderation(ena_dev);
+ return rc;
+ }
+
+ rc = ena_com_init_interrupt_moderation_table(ena_dev);
+ if (rc)
+ goto err;
+
+ /* if moderation is supported by device we set adaptive moderation */
+ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+ ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ return 0;
+err:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ return rc;
+}
+
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (!intr_moder_tbl)
+ return;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ ENA_INTR_LOWEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+ ENA_INTR_LOWEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+ ENA_INTR_LOWEST_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+ ENA_INTR_LOW_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+ ENA_INTR_LOW_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+ ENA_INTR_LOW_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+ ENA_INTR_MID_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+ ENA_INTR_MID_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+ ENA_INTR_MID_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+ ENA_INTR_HIGH_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+ ENA_INTR_HIGH_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+ ENA_INTR_HIGH_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+ ENA_INTR_HIGHEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+ ENA_INTR_HIGHEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+ ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (intr_moder_tbl)
+ return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+ return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ intr_moder_tbl[level].intr_moder_interval /=
+ ena_dev->intr_delay_resolution;
+ intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+ entry->pkts_per_interval =
+ intr_moder_tbl[level].pkts_per_interval;
+ entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
diff --git a/sys/contrib/ena-com/ena_com.h b/sys/contrib/ena-com/ena_com.h
new file mode 100644
index 0000000..3d8bf0e
--- /dev/null
+++ b/sys/contrib/ena-com/ena_com.h
@@ -0,0 +1,1054 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#ifndef ENA_INTERNAL
+#include "ena_plat.h"
+#else
+#include "ena_plat.h"
+#include "ena_includes.h"
+#endif
+
+#define ENA_MAX_NUM_IO_QUEUES 128U
+/* We need two queues for each IO (one for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS (0)
+#define ENA_INTR_LOWEST_PKTS (3)
+#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+
+#define ENA_INTR_LOW_USECS (32)
+#define ENA_INTR_LOW_PKTS (12)
+#define ENA_INTR_LOW_BYTES (16 * 1024)
+
+#define ENA_INTR_MID_USECS (80)
+#define ENA_INTR_MID_PKTS (48)
+#define ENA_INTR_MID_BYTES (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS (128)
+#define ENA_INTR_HIGH_PKTS (96)
+#define ENA_INTR_HIGH_BYTES (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS (192)
+#define ENA_INTR_HIGHEST_PKTS (128)
+#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
+enum ena_intr_moder_level {
+ ENA_INTR_MODER_LOWEST = 0,
+ ENA_INTR_MODER_LOW,
+ ENA_INTR_MODER_MID,
+ ENA_INTR_MODER_HIGH,
+ ENA_INTR_MODER_HIGHEST,
+ ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+ unsigned int intr_moder_interval;
+ unsigned int pkts_per_interval;
+ unsigned int bytes_per_interval;
+};
+
+enum queue_direction {
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+ dma_addr_t paddr; /**< Buffer physical address */
+ u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+ u16 len;
+ u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
+ dma_addr_t phys_addr;
+ ena_mem_handle_t mem_handle;
+};
+
+struct ena_com_tx_meta {
+ u16 mss;
+ u16 l3_hdr_len;
+ u16 l3_hdr_offset;
+ u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_io_cq {
+ struct ena_com_io_desc_addr cdesc_addr;
+ void *bus;
+
+ /* Interrupt unmask register */
+ u32 __iomem *unmask_reg;
+
+ /* The completion queue head doorbell register */
+ u32 __iomem *cq_head_db_reg;
+
+ /* numa configuration register (for TPH) */
+ u32 __iomem *numa_node_cfg_reg;
+
+ /* The value to write to the above register to unmask
+ * the interrupt of this queue
+ */
+ u32 msix_vector;
+
+ enum queue_direction direction;
+
+ /* holds the number of cdesc of the current packet */
+ u16 cur_rx_pkt_cdesc_count;
+	/* save the first cdesc idx of the current packet */
+ u16 cur_rx_pkt_cdesc_start_idx;
+
+ u16 q_depth;
+ /* Caller qid */
+ u16 qid;
+
+ /* Device queue index */
+ u16 idx;
+ u16 head;
+ u16 last_head_update;
+ u8 phase;
+ u8 cdesc_entry_size_in_bytes;
+
+} ____cacheline_aligned;
+
+struct ena_com_io_sq {
+ struct ena_com_io_desc_addr desc_addr;
+ void *bus;
+
+ u32 __iomem *db_addr;
+ u8 __iomem *header_addr;
+
+ enum queue_direction direction;
+ enum ena_admin_placement_policy_type mem_queue_type;
+
+ u32 msix_vector;
+ struct ena_com_tx_meta cached_tx_meta;
+
+ u16 q_depth;
+ u16 qid;
+
+ u16 idx;
+ u16 tail;
+ u16 next_to_comp;
+ u32 tx_max_header_size;
+ u8 phase;
+ u8 desc_entry_size;
+ u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+ struct ena_admin_acq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u16 head;
+ u8 phase;
+};
+
+struct ena_com_admin_sq {
+ struct ena_admin_aq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u32 __iomem *db_addr;
+
+ u16 head;
+ u16 tail;
+ u8 phase;
+
+};
+
+struct ena_com_stats_admin {
+ u32 aborted_cmd;
+ u32 submitted_cmd;
+ u32 completed_cmd;
+ u32 out_of_space;
+ u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+ void *q_dmadev;
+ void *bus;
+ ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
+ struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
+ u16 q_depth;
+ struct ena_com_admin_cq cq;
+ struct ena_com_admin_sq sq;
+
+ /* Indicate if the admin queue should poll for completion */
+ bool polling;
+
+ u16 curr_cmd_id;
+
+ /* Indicate that the ena was initialized and can
+ * process new admin commands
+ */
+ bool running_state;
+
+ /* Count the number of outstanding admin commands */
+ ena_atomic32_t outstanding_cmds;
+
+ struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+ u16 head;
+ u8 phase;
+ struct ena_admin_aenq_entry *entries;
+ dma_addr_t dma_addr;
+ ena_mem_handle_t mem_handle;
+ u16 q_depth;
+ struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+ struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ ena_mem_handle_t read_resp_mem_handle;
+ u32 reg_read_to; /* in us */
+ u16 seq_num;
+ bool readless_supported;
+ /* spin lock to ensure a single outstanding read */
+ ena_spinlock_t lock;
+};
+
+struct ena_rss {
+ /* Indirect table */
+ u16 *host_rss_ind_tbl;
+ struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_dma_addr;
+ ena_mem_handle_t rss_ind_tbl_mem_handle;
+ u16 tbl_log_size;
+
+ /* Hash key */
+ enum ena_admin_hash_functions hash_func;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ dma_addr_t hash_key_dma_addr;
+ ena_mem_handle_t hash_key_mem_handle;
+ u32 hash_init_val;
+
+ /* Flow Control */
+ struct ena_admin_feature_rss_hash_control *hash_ctrl;
+ dma_addr_t hash_ctrl_dma_addr;
+ ena_mem_handle_t hash_ctrl_mem_handle;
+
+};
+
+struct ena_host_attribute {
+ /* Debug area */
+ u8 *debug_area_virt_addr;
+ dma_addr_t debug_area_dma_addr;
+ ena_mem_handle_t debug_area_dma_handle;
+ u32 debug_area_size;
+
+ /* Host information */
+ struct ena_admin_host_info *host_info;
+ dma_addr_t host_info_dma_addr;
+ ena_mem_handle_t host_info_dma_handle;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+ struct ena_com_admin_queue admin_queue;
+ struct ena_com_aenq aenq;
+ struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+ struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+ u8 __iomem *reg_bar;
+ void __iomem *mem_bar;
+ void *dmadev;
+ void *bus;
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ u32 tx_max_header_size;
+ u16 stats_func; /* Selected function for extended statistic dump */
+ u16 stats_queue; /* Selected queue for extended statistic dump */
+
+ struct ena_com_mmio_read mmio_read;
+
+ struct ena_rss rss;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+ bool adaptive_coalescing;
+ u16 intr_delay_resolution;
+ u32 intr_moder_tx_interval;
+ struct ena_intr_moder_entry *intr_moder_tbl;
+};
+
+struct ena_com_dev_get_features_ctx {
+ struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_aenq_desc aenq;
+ struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
+};
+
+struct ena_com_create_io_ctx {
+ enum ena_admin_placement_policy_type mem_queue_type;
+ enum queue_direction direction;
+ int numa_node;
+ u32 msix_vector;
+ u16 queue_size;
+ u16 qid;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+ struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+ ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+ ena_aenq_handler unimplemented_handler;
+};
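+
+/*
+ * Illustrative sketch of providing AENQ handlers (not part of the driver
+ * sources). The table is indexed by AENQ event group; the group constant used
+ * below (ENA_ADMIN_LINK_CHANGE) is assumed to come from ena_admin_defs.h.
+ *
+ *	static void my_link_change_handler(void *data,
+ *	    struct ena_admin_aenq_entry *aenq_e)
+ *	{
+ *		/~ react to the link change notification ~/
+ *	}
+ *
+ *	static void my_unimplemented_handler(void *data,
+ *	    struct ena_admin_aenq_entry *aenq_e)
+ *	{
+ *		/~ events without a registered handler end up here ~/
+ *	}
+ *
+ *	struct ena_aenq_handlers my_handlers = {
+ *		.handlers = {
+ *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
+ *		},
+ *		.unimplemented_handler = my_unimplemented_handler,
+ *	};
+ */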
+
+/*****************************************************************************/
+/*****************************************************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+ bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
+ * value physical address.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: handlers to be called upon AENQ events.
+ * @init_spinlock: Indicates whether this method should init the admin spinlock
+ * or whether the spinlock was initialized before (for example, in the case of
+ * an FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock);
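+
+/*
+ * Illustrative initialization-order sketch (not part of the driver sources;
+ * error handling and the "handlers" definition are omitted): the register
+ * read mechanism must be set up before the admin queue is initialized.
+ *
+ *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ *	if (rc == 0) {
+ *		ena_com_set_mmio_read_mode(ena_dev, true);
+ *		rc = ena_com_admin_init(ena_dev, &handlers, true);
+ *	}
+ */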
+
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/aenq.
+ * To achieve that, a FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform device FLR to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @ctx - create context structure
+ *
+ * Create the submission and the completion queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx);
+
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq);
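+
+/*
+ * Illustrative sketch of creating an IO queue and fetching its handlers (not
+ * part of the driver sources; "vector", "qid" and the queue size are
+ * placeholders, and the placement policy constant is assumed to come from
+ * ena_admin_defs.h):
+ *
+ *	struct ena_com_create_io_ctx ctx = {
+ *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
+ *		.numa_node = 0,
+ *		.msix_vector = vector,
+ *		.queue_size = 1024,
+ *		.qid = qid,
+ *	};
+ *	struct ena_com_io_sq *io_sq;
+ *	struct ena_com_io_cq *io_cq;
+ *
+ *	rc = ena_com_create_io_queue(ena_dev, &ctx);
+ *	if (rc == 0)
+ *		rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
+ */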
+
+/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method is called, AENQ events can be received via the AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current admin queue running state (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will perform a
+ * polling on the admin completion queue for the commands completion,
+ * otherwise it will wait on wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up all the
+ * pending threads that are waiting on the commands wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the async event notification queue and calls the
+ * proper aenq handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands are completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validates that the device parameters are the same as the
+ * parameters saved in ena_dev.
+ * This method is useful after device reset, to validate the device mac address
+ * and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve physical dma address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups_flag: bit field of flags from enum ena_admin_aenq_group.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contains the get features result.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offload: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller can then configure RSS using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ */
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desired hash function, hash key, key_len
+ * and key initial value (if needed by the hash function).
+ * To flush the key into the device the caller should call
+ * ena_com_set_hash_function; a usage sketch follows that prototype below.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and its dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and its dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
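+
+/*
+ * Illustrative RSS hash-function setup sketch (not part of the driver sources;
+ * error handling omitted, "log_size" is a placeholder and "toeplitz_key" is a
+ * caller-supplied 40-byte key, i.e. the 10 DWORD maximum):
+ *
+ *	rc = ena_com_rss_init(ena_dev, log_size);
+ *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
+ *	    toeplitz_key, 40, 0xFFFFFFFF);
+ *	rc = ena_com_set_hash_function(ena_dev);
+ *
+ * The key length must be a multiple of 4 bytes and no longer than 10 DWORDs.
+ */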
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desired hash control (the Ethernet
+ * fields that take part in the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the Ethernet fields that take part in the hash).
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
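+
+/*
+ * Illustrative hash-control sketch (not part of the driver sources): select
+ * the TCP/IPv4 fields that feed the hash and flush the selection. The field
+ * masks below are the ones this layer uses in ena_com_set_default_hash_ctrl().
+ *
+ *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
+ *	    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ *	    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
+ *	if (rc == 0)
+ *		rc = ena_com_set_hash_ctrl(ena_dev);
+ */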
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the caller should call
+ * ena_com_indirect_table_set; a usage sketch follows that prototype below.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection hash control to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
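+
+/*
+ * Illustrative indirection-table sketch (not part of the driver sources):
+ * spread the (1 << log_size) table entries across the available queues and
+ * flush the table. "log_size" and "num_queues" are placeholders, and the
+ * simple modulo mapping is only an example of an entry value.
+ *
+ *	for (i = 0; i < (1 << log_size); i++) {
+ *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ *		    i % num_queues);
+ *		if (rc != 0)
+ *			break;
+ *	}
+ *	if (rc == 0)
+ *		rc = ena_com_indirect_table_set(ena_dev);
+ */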
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't
+ * flush it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
+
+/* ena_com_allocate_host_info - Allocate host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_allocate_debug_area - Allocate debug area.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size - debug area size.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size);
+
+/* ena_com_delete_debug_area - Free the debug area resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated debug area.
+ */
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_host_info - Free the host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated host info.
+ */
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes (debug area and host info) base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
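+
+/*
+ * Illustrative host-attributes sketch (not part of the driver sources; error
+ * handling omitted and the debug area size is a placeholder):
+ *
+ *	rc = ena_com_allocate_host_info(ena_dev);
+ *	rc = ena_com_allocate_debug_area(ena_dev, 1024);
+ *	(fill ena_dev->host_attr.host_info with OS and driver details here)
+ *	rc = ena_com_set_host_attributes(ena_dev);
+ */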
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+ *
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *cmd_comp,
+ size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
+ * capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
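+
+/*
+ * Illustrative non-adaptive moderation sketch (not part of the driver
+ * sources): after ena_com_init_interrupt_moderation() succeeds, a caller that
+ * prefers static coalescing can disable the adaptive scheme and program fixed
+ * intervals (the values below are placeholders, in microseconds):
+ *
+ *	ena_com_disable_adaptive_moderation(ena_dev);
+ *	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
+ *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 32);
+ */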
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->adaptive_coalescing;
+}
+
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = true;
+}
+
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input, updated to the new level on
+ * return.
+ */
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
+{
+ enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+ struct ena_intr_moder_entry *curr_moder_entry;
+ struct ena_intr_moder_entry *pred_moder_entry;
+ struct ena_intr_moder_entry *new_moder_entry;
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int interval;
+
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ if (!pkts || !bytes)
+ /* Tx interrupt, or spurious interrupt,
+ * in both cases we just use same delay values
+ */
+ return;
+
+ curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+ if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+ ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
+ return;
+ }
+
+ curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+ new_moder_idx = curr_moder_idx;
+
+ if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+ if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ } else {
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+ if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+ (bytes <= pred_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+ else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval)) {
+ if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ }
+ }
+ new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+ interval = new_moder_entry->intr_moder_interval;
+ *smoothed_interval = (
+ (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+ 10;
+
+ *moder_tbl_idx = new_moder_idx;
+}
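+
+/*
+ * Numeric sketch of the smoothing above (illustrative only): with the default
+ * weights (old = 6, new = 4), a previous smoothed interval of 32 usec and a
+ * new table interval of 80 usec (the MID level default) give
+ * (80 * 4 + 6 * 32 + 5) / 10 = 517 / 10 = 51 usec, i.e. a rounded weighted
+ * average biased toward the previous value.
+ */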
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+ u32 rx_delay_interval,
+ u32 tx_delay_interval,
+ bool unmask)
+{
+ intr_reg->intr_control = 0;
+ intr_reg->intr_control |= rx_delay_interval &
+ ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+ intr_reg->intr_control |=
+ (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+ if (unmask)
+ intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
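+
+/*
+ * Illustrative sketch of preparing the interrupt register before unmasking a
+ * queue interrupt (not part of the driver sources; the table level used for
+ * the lookup is a placeholder):
+ *
+ *	struct ena_eth_io_intr_reg intr_reg;
+ *	struct ena_intr_moder_entry entry;
+ *
+ *	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_LOWEST,
+ *	    &entry);
+ *	ena_com_update_intr_reg(&intr_reg, entry.intr_moder_interval,
+ *	    ena_com_get_nonadaptive_moderation_interval_tx(ena_dev), true);
+ */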
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+#endif /* !(ENA_COM) */
diff --git a/sys/contrib/ena-com/ena_common_defs.h b/sys/contrib/ena-com/ena_common_defs.h
new file mode 100644
index 0000000..f2d8189
--- /dev/null
+++ b/sys/contrib/ena-com/ena_common_defs.h
@@ -0,0 +1,50 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ uint32_t mem_addr_low;
+
+ uint16_t mem_addr_high;
+
+ /* MBZ */
+ uint16_t reserved16;
+};
+
+#endif /*_ENA_COMMON_H_ */
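A brief illustration (not part of the imported file) of how a 48-bit bus address maps onto this structure:

#include <assert.h>
#include <stdint.h>

struct mem_addr { uint32_t lo; uint16_t hi; };	/* mirrors ena_common_mem_addr */

int main(void)
{
	uint64_t paddr = 0x0000123456789abcULL;		/* hypothetical 48-bit address */
	struct mem_addr a;

	a.lo = (uint32_t)paddr;				/* bits 31:0  */
	a.hi = (uint16_t)(paddr >> 32);			/* bits 47:32 */

	assert((((uint64_t)a.hi << 32) | a.lo) == paddr);
	return 0;
}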
diff --git a/sys/contrib/ena-com/ena_eth_com.c b/sys/contrib/ena-com/ena_eth_com.c
new file mode 100644
index 0000000..fbf561e
--- /dev/null
+++ b/sys/contrib/ena-com/ena_eth_com.c
@@ -0,0 +1,509 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ena_eth_com.h"
+
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 expected_phase, head_masked;
+ u16 desc_phase;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+ return NULL;
+
+ return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked;
+ u32 offset;
+
+ tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+ offset = tail_masked * io_sq->desc_entry_size;
+
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
+}
+
+static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u32 offset = tail_masked * io_sq->desc_entry_size;
+
+	/* In case this queue isn't an LLQ */
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
+ io_sq->desc_entry_size);
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
+}
+
+static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
+ u8 *head_src, u16 header_len)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u8 __iomem *dev_head_addr =
+ io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (unlikely(!io_sq->header_addr)) {
+ ena_trc_err("Push buffer header ptr is NULL\n");
+ return ENA_COM_INVAL;
+ }
+
+ memcpy_toio(dev_head_addr, head_src, header_len);
+
+ return 0;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+ ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+ idx &= (io_cq->q_depth - 1);
+ return (struct ena_eth_io_rx_cdesc_base *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 count = 0, head_masked;
+ u32 last = 0;
+
+ do {
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (!cdesc)
+ break;
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+ if (last) {
+ *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+ count += io_cq->cur_rx_pkt_cdesc_count;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+ io_cq->cur_rx_pkt_cdesc_count = 0;
+ io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+ ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
+ } else {
+ io_cq->cur_rx_pkt_cdesc_count += count;
+ count = 0;
+ }
+
+ return count;
+}
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ int rc;
+
+ if (ena_tx_ctx->meta_valid) {
+ rc = memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ if (unlikely(rc != 0))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ meta_desc = get_sq_desc(io_sq);
+ memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+ /* bits 0-9 of the mss */
+ meta_desc->word2 |= (ena_meta->mss <<
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ /* bits 10-13 of the mss */
+ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+
+ /* Extended meta desc */
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ meta_desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->word2 |= ena_meta->l3_hdr_len &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+ meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+ meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+	/* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+}
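A hedged sketch of the MSS split performed above: the low 10 bits land in word2 via MSS_LO (bits 31:22) and bits 10-13 land in len_ctrl via MSS_HI (bits 19:16). The masks below mirror the ENA_ETH_IO_TX_META_DESC_* values and the MSS itself is a hypothetical example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mss = 9000;				/* hypothetical jumbo-frame MSS */
	uint32_t word2 = 0, len_ctrl = 0;

	word2 |= (mss << 22) & 0xffc00000;		/* MSS_LO: bits 31:22 */
	len_ctrl |= ((mss >> 10) << 16) & 0x000f0000;	/* MSS_HI: bits 19:16 */

	uint32_t lo = (word2 >> 22) & 0x3ff;
	uint32_t hi = (len_ctrl >> 16) & 0xf;
	assert(((hi << 10) | lo) == 9000);		/* round trip recovers the MSS */
	return 0;
}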
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ ena_rx_ctx->l3_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ ena_rx_ctx->l4_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->frag =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+ ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ ena_rx_ctx->l3_proto,
+ ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash,
+ ena_rx_ctx->frag,
+ cdesc->status);
+}
+
+/*****************************************************************************/
+/***************************** API **********************************/
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc)
+{
+ struct ena_eth_io_tx_desc *desc = NULL;
+ struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+ void *push_header = ena_tx_ctx->push_header;
+ u16 header_len = ena_tx_ctx->header_len;
+ u16 num_bufs = ena_tx_ctx->num_bufs;
+ int total_desc, i, rc;
+ bool have_meta;
+ u64 addr_hi;
+
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
+
+ /* num_bufs +1 for potential meta desc */
+ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ ena_trc_err("Not enough space in the tx queue\n");
+ return ENA_COM_NO_MEM;
+ }
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ ena_trc_err("header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
+ return ENA_COM_INVAL;
+ }
+
+ /* start with pushing the header (if needed) */
+ rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(rc))
+ return rc;
+
+ have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+ ena_tx_ctx);
+ if (have_meta)
+ ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+	/* If the caller doesn't want to send packets */
+ if (unlikely(!num_bufs && !header_len)) {
+ *nb_hw_desc = have_meta ? 0 : 1;
+ return 0;
+ }
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ /* Set first desc when we don't have meta descriptor */
+ if (!have_meta)
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+ desc->buff_addr_hi_hdr_sz |= (header_len <<
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+ /* Bits 0-9 */
+ desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+ desc->meta_ctrl |= (ena_tx_ctx->df <<
+ ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+ ENA_ETH_IO_TX_DESC_DF_MASK;
+
+ /* Bits 10-15 */
+ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+ if (ena_tx_ctx->meta_valid) {
+ desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+ ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+ ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+		/* The first desc shares the same desc as the header */
+ if (likely(i != 0)) {
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ }
+
+ desc->len_ctrl |= ena_bufs->len &
+ ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+ addr_hi = ((ena_bufs->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ desc->buff_addr_lo = (u32)ena_bufs->paddr;
+ desc->buff_addr_hi_hdr_sz |= addr_hi &
+ ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+ ena_bufs++;
+ }
+
+ /* set the last desc indicator */
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+ ena_com_sq_update_tail(io_sq);
+
+ total_desc = ENA_MAX16(num_bufs, 1);
+ total_desc += have_meta ? 1 : 0;
+
+ *nb_hw_desc = total_desc;
+ return 0;
+}
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 cdesc_idx = 0;
+ u16 nb_hw_desc;
+ u16 i;
+
+ ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ if (nb_hw_desc == 0) {
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+ }
+
+ ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
+ io_cq->qid, nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
+ nb_hw_desc, ena_rx_ctx->max_bufs);
+ return ENA_COM_NO_SPACE;
+ }
+
+ for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ ena_buf->len = cdesc->length;
+ ena_buf->req_id = cdesc->req_id;
+ ena_buf++;
+ }
+
+ /* Update SQ head ptr */
+ io_sq->next_to_comp += nb_hw_desc;
+
+ ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ io_sq->qid, io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id)
+{
+ struct ena_eth_io_rx_desc *desc;
+
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ return ENA_COM_NO_SPACE;
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+ desc->length = ena_buf->len;
+
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+ desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+ desc->req_id = req_id;
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ ena_com_sq_update_tail(io_sq);
+
+ return 0;
+}
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+	/* When the current completion descriptor phase isn't the same as the
+	 * expected one, it means that the device has not yet updated
+	 * this completion.
+	 */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return ENA_COM_TRY_AGAIN;
+
+ ena_com_cq_inc_head(io_cq);
+
+ *req_id = READ_ONCE(cdesc->req_id);
+
+ return 0;
+}
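The phase check above is the same ownership protocol used by ena_com_get_next_rx_cdesc(): a descriptor belongs to the consumer only while its phase bit matches the expected phase, which flips every time the head wraps around the queue. A small standalone sketch:

#include <assert.h>
#include <stdint.h>

#define QDEPTH 4	/* power of two, as the driver assumes */

int main(void)
{
	uint8_t desc_phase[QDEPTH] = { 1, 1, 0, 0 };	/* device completed entries 0 and 1 */
	uint16_t head = 0;
	uint8_t expected = 1;
	int completed = 0;

	while (desc_phase[head & (QDEPTH - 1)] == expected) {
		completed++;
		head++;
		if ((head & (QDEPTH - 1)) == 0)
			expected ^= 1;		/* wrap-around flips the expected phase */
	}
	assert(completed == 2);			/* entries 2 and 3 still belong to the device */
	return 0;
}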
diff --git a/sys/contrib/ena-com/ena_eth_com.h b/sys/contrib/ena-com/ena_eth_com.h
new file mode 100644
index 0000000..ec32d77
--- /dev/null
+++ b/sys/contrib/ena-com/ena_eth_com.h
@@ -0,0 +1,167 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
+
+struct ena_com_tx_ctx {
+ struct ena_com_tx_meta ena_meta;
+ struct ena_com_buf *ena_bufs;
+ /* For LLQ, header buffer - pushed to the device mem space */
+ void *push_header;
+
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ u16 num_bufs;
+ u16 req_id;
+	/* For a regular queue, indicates the size of the header
+	 * For LLQ, indicates the size of the pushed buffer
+ */
+ u16 header_len;
+
+ u8 meta_valid;
+ u8 tso_enable;
+ u8 l3_csum_enable;
+ u8 l4_csum_enable;
+ u8 l4_csum_partial;
+ u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+ struct ena_com_rx_buf_info *ena_bufs;
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ bool l3_csum_err;
+ bool l4_csum_err;
+ /* fragmented packet */
+ bool frag;
+ u32 hash;
+ u16 descs;
+ int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+ struct ena_eth_io_intr_reg *intr_reg)
+{
+ ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+{
+ u16 tail, next_to_comp, cnt;
+
+ next_to_comp = io_sq->next_to_comp;
+ tail = io_sq->tail;
+ cnt = tail - next_to_comp;
+
+ return io_sq->q_depth - 1 - cnt;
+}
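The free-space computation relies on 16-bit unsigned wrap-around of tail and next_to_comp; a small standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

static int sq_empty_space(uint16_t tail, uint16_t next_to_comp, uint16_t q_depth)
{
	uint16_t cnt = tail - next_to_comp;	/* correct even after the counters wrap */
	return q_depth - 1 - cnt;
}

int main(void)
{
	assert(sq_empty_space(10, 4, 1024) == 1017);	/* 6 descriptors in flight */
	assert(sq_empty_space(3, 65533, 1024) == 1017);	/* both counters wrapped, still 6 in flight */
	return 0;
}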
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail;
+
+ tail = io_sq->tail;
+
+ ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
+
+ ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
+
+ return 0;
+}
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+ u16 unreported_comp, head;
+ bool need_update;
+
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (io_cq->cq_head_db_reg && need_update) {
+ ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
+
+ return 0;
+}
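In other words, the CQ head doorbell is written only after more than q_depth / ENA_COMP_HEAD_THRESH completions have accumulated since the last update; a brief worked example with hypothetical values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define COMP_HEAD_THRESH 4	/* mirrors ENA_COMP_HEAD_THRESH above */

int main(void)
{
	uint16_t q_depth = 1024, last_head_update = 100;

	bool need_update = (uint16_t)(300 - last_head_update) > (q_depth / COMP_HEAD_THRESH);
	assert(!need_update);	/* 200 completions: below the 256-entry threshold */

	need_update = (uint16_t)(400 - last_head_update) > (q_depth / COMP_HEAD_THRESH);
	assert(need_update);	/* 300 completions: the doorbell would be written */
	return 0;
}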
+
+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
+ u8 numa_node)
+{
+ struct ena_eth_io_numa_node_cfg_reg numa_cfg;
+
+ if (!io_cq->numa_node_cfg_reg)
+ return;
+
+ numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
+ | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+
+ ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+ io_sq->next_to_comp += elem;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* ENA_ETH_COM_H_ */
diff --git a/sys/contrib/ena-com/ena_eth_io_defs.h b/sys/contrib/ena-com/ena_eth_io_defs.h
new file mode 100644
index 0000000..c16fed8
--- /dev/null
+++ b/sys/contrib/ena-com/ena_eth_io_defs.h
@@ -0,0 +1,960 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+struct ena_eth_io_tx_desc {
+	/* 15:0 : length - Buffer length in bytes, must
+	 * include any packet trailers that the ENA is
+	 * supposed to update, like End-to-End CRC,
+	 * Authentication GMAC, etc. This length must not
+	 * include the 'Push_Buffer' length. This length
+	 * must not include the 4 bytes added at the end
+	 * for the 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+	/* 3:0 : l3_proto_idx - L3 protocol. This field is
+	 * required when l3_csum_en, l3_csum or tso_en are set.
+	 * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4
+	 * and the DF flag of the IPv4 header is 0. Otherwise
+	 * must be set to 1
+	 * 6:5 : reserved5
+	 * 7 : tso_en - Enable TSO, for TCP only.
+	 * 12:8 : l4_proto_idx - L4 protocol. This field needs
+	 * to be set when l4_csum_en or tso_en are set.
+	 * 13 : l3_csum_en - enable IPv4 header checksum.
+	 * 14 : l4_csum_en - enable TCP/UDP checksum.
+	 * 15 : ethernet_fcs_dis - when set, the controller
+	 * will not append the 802.3 Ethernet Frame Check
+	 * Sequence to the packet
+	 * 16 : reserved16
+	 * 17 : l4_csum_partial - L4 partial checksum. When
+	 * set to 0, the ENA calculates the L4 checksum,
+	 * where the Destination Address required for the
+	 * TCP/UDP pseudo-header is taken from the actual
+	 * packet L3 header. When set to 1, the ENA doesn't
+	 * calculate the pseudo-header sum; the checksum
+	 * field of the L4 header is used instead. When TSO
+	 * is enabled, the checksum of the pseudo-header
+	 * must not include the TCP length field. L4 partial
+	 * checksum should be used for IPv6 packets that
+	 * contain Routing Headers.
+ * 20:18 : reserved18 - MBZ
+ * 21 : reserved21 - MBZ
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ uint32_t meta_ctrl;
+
+ uint32_t buff_addr_lo;
+
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+	 * Latency Queues, this field indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if packet is TCP or UDP, and longer
+ * than max_header_size, then this field should be
+ * set to the sum of L4 header offset and L4 header
+	 * size (without options); otherwise, this field
+ * should be set to 0. For both modes, this field
+ * must not exceed the max_header_size.
+ * max_header_size value is reported by the Max
+ * Queues Feature descriptor
+ */
+ uint32_t buff_addr_hi_hdr_sz;
+};
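A hedged illustration of the Word-3 packing described in the last field comment above (Buffer Pointer[47:32] in bits 15:0, header_length in bits 31:24), using hypothetical values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t paddr = 0x0000123456789abcULL;		/* hypothetical 48-bit buffer address */
	uint32_t header_len = 54;			/* e.g. Ethernet + IPv4 + TCP headers */

	uint32_t buff_addr_hi_hdr_sz = 0;
	buff_addr_hi_hdr_sz |= (uint32_t)(paddr >> 32) & 0xffff;	/* addr_hi, bits 15:0 */
	buff_addr_hi_hdr_sz |= (header_len << 24) & 0xff000000;		/* header_length, bits 31:24 */

	assert((buff_addr_hi_hdr_sz & 0xffff) == 0x1234);
	assert((buff_addr_hi_hdr_sz >> 24) == 54);
	return 0;
}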
+
+struct ena_eth_io_tx_meta_desc {
+ /* 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : reserved10 - MBZ
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+	 * 14 : ext_valid - if set, the offset fields in
+	 * Word2 are valid, as well as MSS High in Word 0
+	 * and bits [31:24] in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ uint32_t word1;
+
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
+	 * 21:16 : l4_hdr_len_in_words - counts the L4 header
+	 * length in words. There is an explicit assumption
+	 * that the L4 header appears right after the L3
+	 * header and the L4 offset is based on
+	 * l3_hdr_off+l3_hdr_len
+ * 31:22 : mss_lo
+ */
+ uint32_t word2;
+
+ uint32_t reserved;
+};
+
+struct ena_eth_io_tx_cdesc {
+ /* Request ID[15:0] */
+ uint16_t req_id;
+
+ uint8_t status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ uint16_t sub_qid;
+
+ uint16_t sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+ /* In bytes. 0 means 64KB */
+ uint16_t length;
+
+ /* MBZ */
+ uint8_t reserved2;
+
+ /* 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ uint8_t ctrl;
+
+ uint16_t req_id;
+
+ /* MBZ */
+ uint16_t reserved6;
+
+ uint32_t buff_addr_lo;
+
+ uint16_t buff_addr_hi;
+
+ /* MBZ */
+ uint16_t reserved16_w3;
+};
+
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
+ * 7 : reserved7 - MBZ
+ * 12:8 : l4_proto_idx
+	 * 13 : l3_csum_err - when set, either an L3
+	 * checksum error was detected or the controller
+	 * didn't validate the checksum. This bit is valid
+	 * only when l3_proto_idx indicates an IPv4 packet
+	 * 14 : l4_csum_err - when set, either an L4
+	 * checksum error was detected or the controller
+	 * didn't validate the checksum. This bit is valid
+	 * only when l4_proto_idx indicates a TCP/UDP
+	 * packet and ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 23:16 : reserved16
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 29:28 : reserved28
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ uint32_t status;
+
+ uint16_t length;
+
+ uint16_t req_id;
+
+ /* 32-bit hash result */
+ uint32_t hash;
+
+ uint16_t sub_qid;
+
+ uint16_t reserved;
+};
+
+/* 8-word format */
+struct ena_eth_io_rx_cdesc_ext {
+ struct ena_eth_io_rx_cdesc_base base;
+
+ uint32_t buff_addr_lo;
+
+ uint16_t buff_addr_hi;
+
+ uint16_t reserved16;
+
+ uint32_t reserved_w6;
+
+ uint32_t reserved_w7;
+};
+
+struct ena_eth_io_intr_reg {
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
+ * 31 : reserved
+ */
+ uint32_t intr_control;
+};
+
+struct ena_eth_io_numa_node_cfg_reg {
+ /* 7:0 : numa
+ * 30:8 : reserved
+ * 31 : enabled
+ */
+ uint32_t numa_cfg;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+/* numa_node_cfg_reg */
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+ return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
+{
+ return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
+{
+ return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
+{
+ return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+ return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+ return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+ p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+ return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+ p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ETH_IO_H_ */
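The accessor pairs above all follow the same shift-and-mask pattern over a descriptor status word. As an illustration only (not part of the patch), the hedged sketch below shows how a consumer might use the phase accessor to decide whether an RX completion descriptor was freshly written by the device; the function name is hypothetical.

	static inline int
	example_rx_cdesc_is_new(const struct ena_eth_io_rx_cdesc_base *cdesc,
	    uint32_t expected_phase)
	{
		/* The getter isolates the PHASE bit via its mask and shift. */
		return (get_ena_eth_io_rx_cdesc_base_phase(cdesc) == expected_phase);
	}
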
diff --git a/sys/contrib/ena-com/ena_plat.h b/sys/contrib/ena-com/ena_plat.h
new file mode 100644
index 0000000..481f7aa
--- /dev/null
+++ b/sys/contrib/ena-com/ena_plat.h
@@ -0,0 +1,376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ENA_PLAT_H_
+#define ENA_PLAT_H_
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/bus.h>
+#include <sys/condvar.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/eventhandler.h>
+#include <sys/types.h>
+#include <sys/timetc.h>
+#include <sys/cdefs.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/in_cksum.h>
+#include <machine/pcpu.h>
+#include <machine/resource.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <dev/led/led.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+extern struct ena_bus_space ebs;
+
+/* Levels */
+#define ENA_ALERT (1 << 0) /* Alerts provide more detailed error info. */
+#define ENA_WARNING (1 << 1) /* Driver output for error-prone conditions. */
+#define ENA_INFO (1 << 2) /* Provides additional driver info. */
+#define ENA_DBG (1 << 3) /* Driver output for debugging. */
+/* Detailed info that will be printed with the ENA_INFO or ENA_DBG flag. */
+#define ENA_TXPTH (1 << 4) /* Allows TX path tracing. */
+#define ENA_RXPTH (1 << 5) /* Allows RX path tracing. */
+#define ENA_RSC (1 << 6) /* Goes with TXPTH or RXPTH, free/alloc res. */
+#define ENA_IOQ (1 << 7) /* Detailed info about IO queues. */
+#define ENA_ADMQ (1 << 8) /* Detailed info about admin queue. */
+
+#ifndef ENA_DEBUG_LEVEL
+#define ENA_DEBUG_LEVEL (ENA_ALERT | ENA_WARNING)
+#endif
+
+#ifdef ENA_TRACE
+#define ena_trace_raw(level, fmt, args...) \
+ do { \
+ if (((level) & ENA_DEBUG_LEVEL) != (level)) \
+ break; \
+ printf(fmt, ##args); \
+ } while (0)
+
+#define ena_trace(level, fmt, args...) \
+ ena_trace_raw(level, "%s() [TID:%d]: " \
+ fmt " \n", __func__, curthread->td_tid, ##args)
+
+#else /* ENA_TRACE */
+#define ena_trace_raw(...)
+#define ena_trace(...)
+#endif /* ENA_TRACE */
+
+#define ena_trc_dbg(format, arg...) ena_trace(ENA_DBG, format, ##arg)
+#define ena_trc_info(format, arg...) ena_trace(ENA_INFO, format, ##arg)
+#define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg)
+#define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg)
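A note on the tracing macros, as a hedged illustration only: ena_trace() prints nothing unless ENA_TRACE is defined at build time and every bit of the requested level is enabled in ENA_DEBUG_LEVEL, so a combined level such as ENA_DBG | ENA_RXPTH requires both flags to be set. A minimal sketch with a hypothetical caller:

	static void
	example_trace_usage(int qid)
	{
		/*
		 * Prints the function name, thread ID and message when both
		 * ENA_DBG and ENA_RXPTH are set in ENA_DEBUG_LEVEL; otherwise
		 * the call compiles away entirely.
		 */
		ena_trace(ENA_DBG | ENA_RXPTH, "cleaning RX queue %d", qid);
	}
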
+
+#define unlikely(x) __predict_false(x)
+#define likely(x) __predict_true(x)
+
+#define __iomem
+#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
+
+#define MAX_ERRNO 4095
+#define IS_ERR_VALUE(x) unlikely((x) <= (unsigned long)MAX_ERRNO)
+
+#define WARN_ON(condition) \
+ do { \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ printf("%s %s", __FUNCTION__, __FILE__); \
+ unlikely(__ret_warn_on); \
+ } while (0)
+
+#define ENA_ASSERT(cond, format, arg...) \
+ do { \
+ if (unlikely(!(cond))) { \
+ ena_trc_err( \
+ "Assert failed on %s:%s:%d:" format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ WARN_ON(cond); \
+ } \
+ } while (0)
+
+#define ENA_WARN(cond, format, arg...) \
+ do { \
+ if (unlikely((cond))) { \
+ ena_trc_warn(format, ##arg); \
+ } \
+ } while (0)
+
+static inline long IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline void *ERR_PTR(long error)
+{
+ return (void *)error;
+}
+
+static inline long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+#define GENMASK(h, l) (((1U << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (64 - 1 - (h))))
+#define BIT(x) (1 << (x))
+
+#define ENA_ABORT() BUG()
+#define BUG() panic("ENA BUG")
+
+#define SZ_256 (256)
+#define SZ_4K (4096)
+
+#define ENA_COM_OK 0
+#define ENA_COM_FAULT EFAULT
+#define ENA_COM_INVAL EINVAL
+#define ENA_COM_NO_MEM ENOMEM
+#define ENA_COM_NO_SPACE ENOSPC
+#define ENA_COM_TRY_AGAIN -1
+#define ENA_COM_NO_DEVICE ENODEV
+#define ENA_COM_PERMISSION EPERM
+#define ENA_COM_TIMER_EXPIRED ETIMEDOUT
+
+#define ENA_MSLEEP(x) pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0)
+#define ENA_UDELAY(x) DELAY(x)
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+ ((long)cputick2usec(cpu_ticks()) + (timeout_us))
+#define ENA_TIME_EXPIRE(timeout) ((timeout) < (long)cputick2usec(cpu_ticks()))
+#define ENA_MIGHT_SLEEP()
+
+#define min_t(type, _x, _y) ((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y))
+#define max_t(type, _x, _y) ((type)(_x) > (type)(_y) ? (type)(_x) : (type)(_y))
+
+#define ENA_MIN32(x,y) MIN(x, y)
+#define ENA_MIN16(x,y) MIN(x, y)
+#define ENA_MIN8(x,y) MIN(x, y)
+
+#define ENA_MAX32(x,y) MAX(x, y)
+#define ENA_MAX16(x,y) MAX(x, y)
+#define ENA_MAX8(x,y) MAX(x, y)
+
+/* Spinlock related methods */
+#define ena_spinlock_t struct mtx
+#define ENA_SPINLOCK_INIT(spinlock) \
+ mtx_init(&(spinlock), "ena_spin", NULL, MTX_SPIN)
+#define ENA_SPINLOCK_DESTROY(spinlock) \
+ do { \
+ if (mtx_initialized(&(spinlock))) \
+ mtx_destroy(&(spinlock)); \
+ } while (0)
+#define ENA_SPINLOCK_LOCK(spinlock, flags) \
+ do { \
+ (void)(flags); \
+ mtx_lock_spin(&(spinlock)); \
+ } while (0)
+#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \
+ do { \
+ (void)(flags); \
+ mtx_unlock_spin(&(spinlock)); \
+ } while (0)
+
+
+/* Wait queue related methods */
+#define ena_wait_event_t struct { struct cv wq; struct mtx mtx; }
+#define ENA_WAIT_EVENT_INIT(waitqueue) \
+ do { \
+ cv_init(&((waitqueue).wq), "cv"); \
+ mtx_init(&((waitqueue).mtx), "wq", NULL, MTX_DEF); \
+ } while (0)
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) \
+ do { \
+ cv_destroy(&((waitqueue).wq)); \
+ mtx_destroy(&((waitqueue).mtx)); \
+ } while (0)
+#define ENA_WAIT_EVENT_CLEAR(waitqueue) \
+ cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description)
+#define ENA_WAIT_EVENT_WAIT(waitqueue, timeout_us) \
+ do { \
+ mtx_lock(&((waitqueue).mtx)); \
+ cv_timedwait(&((waitqueue).wq), &((waitqueue).mtx), \
+ timeout_us * hz / 1000 / 1000 ); \
+ mtx_unlock(&((waitqueue).mtx)); \
+ } while (0)
+#define ENA_WAIT_EVENT_SIGNAL(waitqueue) cv_broadcast(&((waitqueue).wq))
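The wait-event macros wrap a condition variable and mutex pair. A hedged sketch of the intended life cycle follows; all example_* names are hypothetical and the 1000 us timeout is arbitrary.

	static ena_wait_event_t example_event;

	static void
	example_waiter(void)
	{
		ENA_WAIT_EVENT_INIT(example_event);
		/* Sleep on the condition variable for at most 1000 microseconds. */
		ENA_WAIT_EVENT_WAIT(example_event, 1000);
		ENA_WAIT_EVENT_DESTROY(example_event);
	}

	static void
	example_completer(void)
	{
		/* Wake every thread currently blocked in ENA_WAIT_EVENT_WAIT(). */
		ENA_WAIT_EVENT_SIGNAL(example_event);
	}
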
+
+#define dma_addr_t bus_addr_t
+#define u8 uint8_t
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+typedef struct {
+ bus_addr_t paddr;
+ caddr_t vaddr;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ int nseg;
+} ena_mem_handle_t;
+
+struct ena_bus {
+ bus_space_handle_t reg_bar_h;
+ bus_space_tag_t reg_bar_t;
+ bus_space_handle_t mem_bar_h;
+ bus_space_tag_t mem_bar_t;
+};
+
+typedef uint32_t ena_atomic32_t;
+
+void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error);
+int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
+ int mapflags);
+
+#define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO)
+#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) (virt = NULL)
+#define ENA_MEM_FREE(dmadev, ptr) free(ptr, M_DEVBUF)
+#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle, node, \
+ dev_node) \
+ do { \
+ ((virt) = NULL); \
+ (void)(dev_node); \
+ } while (0)
+
+#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, dma) \
+ do { \
+ ena_dma_alloc((dmadev), (size), &(dma), 0); \
+ (virt) = (void *)(dma).vaddr; \
+ (phys) = (dma).paddr; \
+ } while (0)
+
+#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, dma) \
+ do { \
+ (void)size; \
+ bus_dmamap_unload((dma).tag, (dma).map); \
+ bus_dmamem_free((dma).tag, (virt), (dma).map); \
+ bus_dma_tag_destroy((dma).tag); \
+ (dma).tag = NULL; \
+ (virt) = NULL; \
+ } while (0)
+
+/* Register R/W methods */
+#define ENA_REG_WRITE32(bus, value, offset) \
+ bus_space_write_4( \
+ ((struct ena_bus*)bus)->reg_bar_t, \
+ ((struct ena_bus*)bus)->reg_bar_h, \
+ (bus_size_t)(offset), (value))
+
+#define ENA_REG_READ32(bus, offset) \
+ bus_space_read_4( \
+ ((struct ena_bus*)bus)->reg_bar_t, \
+ ((struct ena_bus*)bus)->reg_bar_h, \
+ (bus_size_t)(offset))
+
+#define time_after(a,b) ((long)((unsigned long)(b) - (unsigned long)(a)) < 0)
+
+#define VLAN_HLEN sizeof(struct ether_vlan_header)
+#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
+
+#if defined(__i386__) || defined(__amd64__)
+static __inline
+void prefetch(void *x)
+{
+ __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+#else
+#define prefetch(x)
+#endif
+
+/* DMA buffers access */
+#define dma_unmap_addr(p, name) ((p)->dma->name)
+#define dma_unmap_addr_set(p, name, v) (((p)->dma->name) = (v))
+#define dma_unmap_len(p, name) ((p)->name)
+#define dma_unmap_len_set(p, name, v) (((p)->name) = (v))
+
+#define memcpy_toio memcpy
+
+#define ATOMIC32_INC(I32_PTR) atomic_add_int(I32_PTR, 1)
+#define ATOMIC32_DEC(I32_PTR) atomic_add_int(I32_PTR, -1)
+#define ATOMIC32_READ(I32_PTR) atomic_load_acq_int(I32_PTR)
+#define ATOMIC32_SET(I32_PTR, VAL) atomic_store_rel_int(I32_PTR, VAL)
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
+#define READ_ONCE(x) ({ \
+ __typeof(x) __var; \
+ barrier(); \
+ __var = ACCESS_ONCE(x); \
+ barrier(); \
+ __var; \
+ })
+
+#include "ena_common_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_regs_defs.h"
+
+#endif /* ENA_PLAT_H_ */
diff --git a/sys/contrib/ena-com/ena_regs_defs.h b/sys/contrib/ena-com/ena_regs_defs.h
new file mode 100644
index 0000000..1438fb8
--- /dev/null
+++ b/sys/contrib/ena-com/ena_regs_defs.h
@@ -0,0 +1,137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /*_ENA_REGS_H_ */
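These offsets and mask/shift pairs are meant to be combined with the ENA_REG_READ32()/ENA_REG_WRITE32() macros from ena_plat.h. A hedged sketch, assuming "bus" already points at an initialized struct ena_bus, decoding the version register; the function name is illustrative.

	static void
	example_print_ena_version(struct ena_bus *bus)
	{
		uint32_t ver = ENA_REG_READ32(bus, ENA_REGS_VERSION_OFF);
		uint32_t major = (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		    ENA_REGS_VERSION_MAJOR_VERSION_SHIFT;
		uint32_t minor = ver & ENA_REGS_VERSION_MINOR_VERSION_MASK;

		printf("ena device version: %u.%u\n", major, minor);
	}
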
diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c
new file mode 100644
index 0000000..a726e3c
--- /dev/null
+++ b/sys/dev/ena/ena.c
@@ -0,0 +1,3848 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/time.h>
+#include <sys/eventhandler.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/in_cksum.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/rss_config.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_rss.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "ena.h"
+#include "ena_sysctl.h"
+
+/*********************************************************
+ * Function prototypes
+ *********************************************************/
+static int ena_probe(device_t);
+static void ena_intr_msix_mgmnt(void *);
+static int ena_allocate_pci_resources(struct ena_adapter*);
+static void ena_free_pci_resources(struct ena_adapter *);
+static int ena_change_mtu(if_t, int);
+static inline void ena_alloc_counters(counter_u64_t *, int);
+static inline void ena_free_counters(counter_u64_t *, int);
+static inline void ena_reset_counters(counter_u64_t *, int);
+static void ena_init_io_rings_common(struct ena_adapter *,
+ struct ena_ring *, uint16_t);
+static int ena_init_io_rings(struct ena_adapter *);
+static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
+static void ena_free_all_io_rings_resources(struct ena_adapter *);
+static int ena_setup_tx_dma_tag(struct ena_adapter *);
+static int ena_free_tx_dma_tag(struct ena_adapter *);
+static int ena_setup_rx_dma_tag(struct ena_adapter *);
+static int ena_free_rx_dma_tag(struct ena_adapter *);
+static int ena_setup_tx_resources(struct ena_adapter *, int);
+static void ena_free_tx_resources(struct ena_adapter *, int);
+static int ena_setup_all_tx_resources(struct ena_adapter *);
+static void ena_free_all_tx_resources(struct ena_adapter *);
+static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
+static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
+static int ena_setup_all_rx_resources(struct ena_adapter *);
+static void ena_free_all_rx_resources(struct ena_adapter *);
+static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
+ struct ena_rx_buffer *);
+static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
+ struct ena_rx_buffer *);
+static int ena_refill_rx_bufs(struct ena_ring *, uint32_t);
+static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
+static void ena_refill_all_rx_bufs(struct ena_adapter *);
+static void ena_free_all_rx_bufs(struct ena_adapter *);
+static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
+static void ena_free_all_tx_bufs(struct ena_adapter *);
+static void ena_destroy_all_tx_queues(struct ena_adapter *);
+static void ena_destroy_all_rx_queues(struct ena_adapter *);
+static void ena_destroy_all_io_queues(struct ena_adapter *);
+static int ena_create_io_queues(struct ena_adapter *);
+static int ena_tx_cleanup(struct ena_ring *);
+static int ena_rx_cleanup(struct ena_ring *);
+static int validate_tx_req_id(struct ena_ring *, uint16_t);
+static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
+ struct mbuf *);
+static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
+ struct ena_com_rx_ctx *, uint16_t *);
+static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
+ struct mbuf *);
+static void ena_handle_msix(void *);
+static int ena_enable_msix(struct ena_adapter *);
+static void ena_setup_mgmnt_intr(struct ena_adapter *);
+static void ena_setup_io_intr(struct ena_adapter *);
+static int ena_request_mgmnt_irq(struct ena_adapter *);
+static int ena_request_io_irq(struct ena_adapter *);
+static void ena_free_mgmnt_irq(struct ena_adapter *);
+static void ena_free_io_irq(struct ena_adapter *);
+static void ena_free_irqs(struct ena_adapter*);
+static void ena_disable_msix(struct ena_adapter *);
+static void ena_unmask_all_io_irqs(struct ena_adapter *);
+static int ena_rss_configure(struct ena_adapter *);
+static void ena_update_hw_stats(void *, int);
+static int ena_up_complete(struct ena_adapter *);
+static int ena_up(struct ena_adapter *);
+static void ena_down(struct ena_adapter *);
+static uint64_t ena_get_counter(if_t, ift_counter);
+static int ena_media_change(if_t);
+static void ena_media_status(if_t, struct ifmediareq *);
+static void ena_init(void *);
+static int ena_ioctl(if_t, u_long, caddr_t);
+static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
+static void ena_update_host_info(struct ena_admin_host_info *, if_t);
+static void ena_update_hwassist(struct ena_adapter *);
+static int ena_setup_ifnet(device_t, struct ena_adapter *,
+ struct ena_com_dev_get_features_ctx *);
+static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
+static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
+ struct mbuf **mbuf);
+static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
+static void ena_start_xmit(struct ena_ring *);
+static int ena_mq_start(if_t, struct mbuf *);
+static void ena_deferred_mq_start(void *, int);
+static void ena_qflush(if_t);
+static int ena_calc_io_queue_num(struct ena_adapter *,
+ struct ena_com_dev_get_features_ctx *);
+static int ena_calc_queue_size(struct ena_adapter *, uint16_t *,
+ uint16_t *, struct ena_com_dev_get_features_ctx *);
+static int ena_rss_init_default(struct ena_adapter *);
+static void ena_rss_init_default_deferred(void *);
+static void ena_config_host_info(struct ena_com_dev *);
+static int ena_attach(device_t);
+static int ena_detach(device_t);
+static int ena_device_init(struct ena_adapter *, device_t,
+ struct ena_com_dev_get_features_ctx *, int *);
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
+ int);
+static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
+static void unimplemented_aenq_handler(void *,
+ struct ena_admin_aenq_entry *);
+static void ena_timer_service(void *);
+
+static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
+
+static SYSCTL_NODE(_hw, OID_AUTO, ena, CTLFLAG_RD, 0, "ENA driver parameters");
+
+/*
+ * Tuneable number of buffers in the buf-ring (drbr)
+ */
+static int ena_buf_ring_size = 4096;
+SYSCTL_INT(_hw_ena, OID_AUTO, buf_ring_size, CTLFLAG_RWTUN,
+ &ena_buf_ring_size, 0, "Size of the bufring");
+
+
+static ena_vendor_info_t ena_vendor_info_array[] = {
+ { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
+ { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
+ { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
+ { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
+ /* Last entry */
+ { 0, 0, 0 }
+};
+
+/*
+ * Contains pointers to event handlers, e.g. link state change.
+ */
+static struct ena_aenq_handlers aenq_handlers;
+
+void
+ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs[0].ds_addr;
+ return;
+}
+
+int
+ena_dma_alloc(device_t dmadev, bus_size_t size,
+ ena_mem_handle_t *dma , int mapflags)
+{
+ struct ena_adapter* adapter = device_get_softc(dmadev);
+ uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
+ uint64_t dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
+ int error;
+
+ if (dma_space_addr == 0)
+ dma_space_addr = BUS_SPACE_MAXADDR;
+ error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
+ 8, 0, /* alignment, bounds */
+ dma_space_addr, /* lowaddr of exclusion window */
+ BUS_SPACE_MAXADDR,/* highaddr of exclusion window */
+ NULL, NULL, /* filter, filterarg */
+ maxsize, /* maxsize */
+ 1, /* nsegments */
+ maxsize, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &dma->tag);
+ if (error) {
+ device_printf(dmadev,
+ "%s: bus_dma_tag_create failed: %d\n",
+ __func__, error);
+ goto fail_tag;
+ }
+
+ error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
+ BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
+ if (error) {
+ device_printf(dmadev,
+ "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+ __func__, (uintmax_t)size, error);
+ goto fail_map_create;
+ }
+
+ dma->paddr = 0;
+ error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
+ size, ena_dmamap_callback, &dma->paddr, mapflags);
+ if (error || dma->paddr == 0) {
+ device_printf(dmadev,
+ "%s: bus_dmamap_load failed: %d\n",
+ __func__, error);
+ goto fail_map_load;
+ }
+
+ return (0);
+
+fail_map_load:
+ bus_dmamap_unload(dma->tag, dma->map);
+fail_map_create:
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+ bus_dma_tag_destroy(dma->tag);
+fail_tag:
+ dma->tag = NULL;
+
+ return (error);
+}
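ena_dma_alloc() is the primitive behind the ENA_MEM_ALLOC_COHERENT() macro in ena_plat.h. A hedged sketch of a direct round trip: the function name and the 4 KiB size are illustrative, the device_t must be the ENA device whose softc is a struct ena_adapter, and the release sequence mirrors ENA_MEM_FREE_COHERENT().

	static int
	example_dma_roundtrip(device_t dev)
	{
		ena_mem_handle_t dma;
		int rc;

		rc = ena_dma_alloc(dev, 4096, &dma, 0);
		if (rc != 0)
			return (rc);

		/* dma.vaddr / dma.paddr are now valid for CPU and device access. */

		bus_dmamap_unload(dma.tag, dma.map);
		bus_dmamem_free(dma.tag, dma.vaddr, dma.map);
		bus_dma_tag_destroy(dma.tag);
		return (0);
	}
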
+
+static int
+ena_allocate_pci_resources(struct ena_adapter* adapter)
+{
+ device_t pdev = adapter->pdev;
+ int rid;
+
+ rid = PCIR_BAR(ENA_REG_BAR);
+ adapter->memory = NULL;
+ adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+ if (adapter->registers == NULL) {
+ device_printf(pdev, "Unable to allocate bus resource: "
+ "registers\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static void
+ena_free_pci_resources(struct ena_adapter *adapter)
+{
+ device_t pdev = adapter->pdev;
+
+ if (adapter->memory != NULL) {
+ bus_release_resource(pdev, SYS_RES_MEMORY,
+ PCIR_BAR(ENA_MEM_BAR), adapter->memory);
+ }
+
+ if (adapter->registers != NULL) {
+ bus_release_resource(pdev, SYS_RES_MEMORY,
+ PCIR_BAR(ENA_REG_BAR), adapter->registers);
+ }
+
+ return;
+}
+
+static int
+ena_probe(device_t dev)
+{
+ ena_vendor_info_t *ent;
+ char adapter_name[60];
+ uint16_t pci_vendor_id = 0;
+ uint16_t pci_device_id = 0;
+
+ pci_vendor_id = pci_get_vendor(dev);
+ pci_device_id = pci_get_device(dev);
+
+ ent = ena_vendor_info_array;
+ while (ent->vendor_id != 0) {
+ if ((pci_vendor_id == ent->vendor_id) &&
+ (pci_device_id == ent->device_id)) {
+ ena_trace(ENA_DBG, "vendor=%x device=%x ",
+ pci_vendor_id, pci_device_id);
+
+ sprintf(adapter_name, DEVICE_DESC);
+ device_set_desc_copy(dev, adapter_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ ent++;
+
+ }
+
+ return (ENXIO);
+}
+
+static int
+ena_change_mtu(if_t ifp, int new_mtu)
+{
+ struct ena_adapter *adapter = if_getsoftc(ifp);
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ int rc, old_mtu, max_frame;
+
+ rc = ena_com_get_dev_attr_feat(adapter->ena_dev, &get_feat_ctx);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Cannot get attribute for ena device\n");
+ return (ENXIO);
+ }
+
+ /* Save the old MTU in case of failure */
+ old_mtu = if_getmtu(ifp);
+
+ /* Change MTU and calculate max frame */
+ if_setmtu(ifp, new_mtu);
+ max_frame = ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1);
+
+ if ((new_mtu < ENA_MIN_FRAME_LEN) ||
+ (new_mtu > get_feat_ctx.dev_attr.max_mtu) ||
+ (max_frame > ENA_MAX_FRAME_LEN)) {
+ device_printf(adapter->pdev, "Invalid MTU setting. "
+ "new_mtu: %d\n", new_mtu);
+ goto error;
+ }
+
+ rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
+ if (rc != 0)
+ goto error;
+
+ return (0);
+error:
+ if_setmtu(ifp, old_mtu);
+ return (EINVAL);
+}
+
+static inline void
+ena_alloc_counters(counter_u64_t *begin, int size)
+{
+ counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
+
+ for (; begin < end; ++begin)
+ *begin = counter_u64_alloc(M_WAITOK);
+}
+
+static inline void
+ena_free_counters(counter_u64_t *begin, int size)
+{
+ counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
+
+ for (; begin < end; ++begin)
+ counter_u64_free(*begin);
+}
+
+static inline void
+ena_reset_counters(counter_u64_t *begin, int size)
+{
+ counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
+
+ for (; begin < end; ++begin)
+ counter_u64_zero(*begin);
+}
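The three helpers above walk a statistics structure as a contiguous array of counter_u64_t, which is why the real stats structs in ena.h contain nothing but counters. A hedged sketch with a hypothetical two-counter struct:

	struct example_stats {
		counter_u64_t ok;
		counter_u64_t dropped;
	};

	static void
	example_stats_usage(struct example_stats *st)
	{
		/* Allocate, bump, zero and release every counter in the struct. */
		ena_alloc_counters((counter_u64_t *)st, sizeof(*st));
		counter_u64_add(st->dropped, 1);
		ena_reset_counters((counter_u64_t *)st, sizeof(*st));
		ena_free_counters((counter_u64_t *)st, sizeof(*st));
	}
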
+
+static void
+ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
+ uint16_t qid)
+{
+
+ ring->qid = qid;
+ ring->adapter = adapter;
+ ring->ena_dev = adapter->ena_dev;
+}
+
+static int
+ena_init_io_rings(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *txr, *rxr;
+ struct ena_que *que;
+ int i;
+ int rc;
+
+ ena_dev = adapter->ena_dev;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ txr = &adapter->tx_ring[i];
+ rxr = &adapter->rx_ring[i];
+
+ /* TX/RX common ring state */
+ ena_init_io_rings_common(adapter, txr, i);
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* TX specific ring state */
+ txr->ring_size = adapter->tx_ring_size;
+ txr->tx_max_header_size = ena_dev->tx_max_header_size;
+ txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+ txr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
+
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
+ M_WAITOK, &txr->ring_mtx);
+ if (txr->br == NULL) {
+ device_printf(adapter->pdev,
+ "Error while setting up bufring\n");
+ rc = ENOMEM;
+ goto err_bufr_free;
+ }
+
+ /* Alloc TX statistics. */
+ ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
+ sizeof(txr->tx_stats));
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->rx_ring_size;
+ rxr->rx_small_copy_len = adapter->small_copy_len;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+
+ /* Alloc RX statistics. */
+ ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
+ sizeof(rxr->rx_stats));
+
+ /* Initialize locks */
+ snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(adapter->pdev), i);
+ snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(adapter->pdev), i);
+
+ mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+ que = &adapter->que[i];
+ que->adapter = adapter;
+ que->id = i;
+ que->tx_ring = txr;
+ que->rx_ring = rxr;
+
+ txr->que = que;
+ rxr->que = que;
+ }
+
+ return 0;
+
+err_bufr_free:
+ while (i--)
+ ena_free_io_ring_resources(adapter, i);
+
+ return (rc);
+}
+
+static void
+ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
+{
+ struct ena_ring *txr = &adapter->tx_ring[qid];
+ struct ena_ring *rxr = &adapter->rx_ring[qid];
+
+ ena_free_counters((counter_u64_t *)&txr->tx_stats,
+ sizeof(txr->tx_stats));
+ ena_free_counters((counter_u64_t *)&rxr->rx_stats,
+ sizeof(rxr->rx_stats));
+
+ mtx_destroy(&txr->ring_mtx);
+
+ drbr_free(txr->br, M_DEVBUF);
+
+}
+
+static void
+ena_free_all_io_rings_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_io_ring_resources(adapter, i);
+
+}
+
+static int
+ena_setup_tx_dma_tag(struct ena_adapter *adapter)
+{
+ int ret;
+
+ /* Create DMA tag for Tx buffers */
+ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
+ 1, 0, /* alignment, bounds */
+ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
+ BUS_SPACE_MAXADDR, /* highaddr of excl window */
+ NULL, NULL, /* filter, filterarg */
+ ENA_TSO_MAXSIZE, /* maxsize */
+ adapter->max_tx_sgl_size - 1, /* nsegments */
+ ENA_TSO_MAXSIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &adapter->tx_buf_tag);
+
+ if (ret != 0)
+ device_printf(adapter->pdev, "Unable to create Tx DMA tag\n");
+
+ return (ret);
+}
+
+static int
+ena_free_tx_dma_tag(struct ena_adapter *adapter)
+{
+ int ret;
+
+ ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
+
+ if (ret == 0)
+ adapter->tx_buf_tag = NULL;
+
+ return (ret);
+}
+
+static int
+ena_setup_rx_dma_tag(struct ena_adapter *adapter)
+{
+ int ret;
+
+ /* Create DMA tag for Rx buffers*/
+ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
+ 1, 0, /* alignment, bounds */
+ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
+ BUS_SPACE_MAXADDR, /* highaddr of excl window */
+ NULL, NULL, /* filter, filterarg */
+ MJUM16BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM16BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &adapter->rx_buf_tag);
+
+ if (ret != 0)
+ device_printf(adapter->pdev, "Unable to create Rx DMA tag\n");
+
+ return (ret);
+}
+
+static int
+ena_free_rx_dma_tag(struct ena_adapter *adapter)
+{
+ int ret;
+
+ ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
+
+ if (ret == 0)
+ adapter->rx_buf_tag = NULL;
+
+ return (ret);
+}
+
+
+/**
+ * ena_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Returns 0 on success, or an error code on failure.
+ **/
+static int
+ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_que *que = &adapter->que[qid];
+ struct ena_ring *tx_ring = que->tx_ring;
+ int size, i, err;
+#ifdef RSS
+ cpuset_t cpu_mask;
+#endif
+
+ size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
+
+ tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!tx_ring->tx_buffer_info)
+ goto err_tx_buffer_info;
+
+ size = sizeof(uint16_t) * tx_ring->ring_size;
+ tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!tx_ring->free_tx_ids)
+ goto err_tx_reqs;
+
+ /* Req id stack for TX OOO completions */
+ for (i = 0; i < tx_ring->ring_size; i++)
+ tx_ring->free_tx_ids[i] = i;
+
+ /* Reset TX statistics. */
+ ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
+ sizeof(tx_ring->tx_stats));
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ /* Make sure that drbr is empty */
+ ENA_RING_MTX_LOCK(tx_ring);
+ drbr_flush(adapter->ifp, tx_ring->br);
+ ENA_RING_MTX_UNLOCK(tx_ring);
+
+ /* ... and create the buffer DMA maps */
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ err = bus_dmamap_create(adapter->tx_buf_tag, 0,
+ &tx_ring->tx_buffer_info[i].map);
+ if (err != 0) {
+ device_printf(adapter->pdev,
+ "Unable to create Tx DMA map for buffer %d\n", i);
+ goto err_tx_map;
+ }
+ }
+
+ /* Allocate taskqueues */
+ TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
+ tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
+ taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
+ if (tx_ring->enqueue_tq == NULL) {
+ device_printf(adapter->pdev,
+ "Unable to create taskqueue for enqueue task\n");
+ i = tx_ring->ring_size;
+ goto err_tx_map;
+ }
+
+ /* RSS set cpu for thread */
+#ifdef RSS
+ CPU_SETOF(que->cpu, &cpu_mask);
+ taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
+ &cpu_mask, "%s tx_ring enq (bucket %d)",
+ device_get_nameunit(adapter->pdev), que->cpu);
+#else /* RSS */
+ taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
+ "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
+#endif /* RSS */
+
+ return (0);
+
+err_tx_map:
+ while (i--) {
+ bus_dmamap_destroy(adapter->tx_buf_tag,
+ tx_ring->tx_buffer_info[i].map);
+ }
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->free_tx_ids);
+err_tx_reqs:
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->tx_buffer_info);
+err_tx_buffer_info:
+ return (ENOMEM);
+}
+
+/**
+ * ena_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all transmit software resources
+ **/
+static void
+ena_free_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+
+ while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
+ NULL))
+ taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
+
+ taskqueue_free(tx_ring->enqueue_tq);
+
+ ENA_RING_MTX_LOCK(tx_ring);
+ /* Flush buffer ring, */
+ drbr_flush(adapter->ifp, tx_ring->br);
+
+ /* Free buffer DMA maps, */
+ for (int i = 0; i < tx_ring->ring_size; i++) {
+ m_freem(tx_ring->tx_buffer_info[i].mbuf);
+ tx_ring->tx_buffer_info[i].mbuf = NULL;
+ bus_dmamap_unload(adapter->tx_buf_tag,
+ tx_ring->tx_buffer_info[i].map);
+ bus_dmamap_destroy(adapter->tx_buf_tag,
+ tx_ring->tx_buffer_info[i].map);
+ }
+ ENA_RING_MTX_UNLOCK(tx_ring);
+
+ /* And free allocated memory. */
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->free_tx_ids);
+ tx_ring->free_tx_ids = NULL;
+}
+
+/**
+ * ena_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: network interface device structure
+ *
+ * Returns 0 on success, or an error code on failure.
+ **/
+static int
+ena_setup_all_tx_resources(struct ena_adapter *adapter)
+{
+ int i, rc;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_tx_resources(adapter, i);
+ if (!rc)
+ continue;
+
+ device_printf(adapter->pdev,
+ "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ return (0);
+
+err_setup_tx:
+ /* Rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_tx_resources(adapter, i);
+ return (rc);
+}
+
+/**
+ * ena_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: network interface device structure
+ *
+ * Free all transmit software resources
+ **/
+static void
+ena_free_all_tx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_tx_resources(adapter, i);
+
+ return;
+}
+
+/**
+ * ena_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Returns 0 on success, or an error code on failure.
+ **/
+static int
+ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
+{
+ struct ena_que *que = &adapter->que[qid];
+ struct ena_ring *rx_ring = que->rx_ring;
+ int size, err, i;
+#ifdef RSS
+ cpuset_t cpu_mask;
+#endif
+
+ size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
+
+ /*
+ * Alloc an extra element so that the rx path
+ * can always prefetch rx_info + 1
+ */
+ size += sizeof(struct ena_rx_buffer);
+
+ rx_ring->rx_buffer_info = ENA_MEM_ALLOC(adapter->ena_dev->dmadev, size);
+ if (!rx_ring->rx_buffer_info)
+ return (ENOMEM);
+
+ /* Reset RX statistics. */
+ ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
+ sizeof(rx_ring->rx_stats));
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ /* ... and create the buffer DMA maps */
+ for (i = 0; i < rx_ring->ring_size; i++) {
+ err = bus_dmamap_create(adapter->rx_buf_tag, 0,
+ &(rx_ring->rx_buffer_info[i].map));
+ if (err != 0) {
+ device_printf(adapter->pdev,
+ "Unable to create Rx DMA map for buffer %d\n", i);
+ goto err_rx_dma;
+ }
+ }
+
+ /* Create LRO for the ring */
+ if (adapter->ifp->if_capenable & IFCAP_LRO) {
+ int err = tcp_lro_init(&rx_ring->lro);
+ if (err) {
+ device_printf(adapter->pdev,
+ "LRO[%d] Initialization failed!\n", qid);
+ } else {
+ ena_trace(ENA_INFO,
+ "RX Soft LRO[%d] Initialized\n", qid);
+ rx_ring->lro.ifp = adapter->ifp;
+ }
+ }
+
+ return (0);
+
+err_rx_dma:
+ while (i--) {
+ bus_dmamap_destroy(adapter->rx_buf_tag,
+ rx_ring->rx_buffer_info[i].map);
+ }
+
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ ena_trace(ENA_ALERT, "RX resource allocation fail");
+ return (ENOMEM);
+}
+
+/**
+ * ena_free_rx_resources - Free Rx Resources
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all receive software resources
+ **/
+static void
+ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+
+ ena_trace(ENA_INFO, "%s qid %d\n", __func__, qid);
+
+ /* Free buffer DMA maps, */
+ for (int i = 0; i < rx_ring->ring_size; i++) {
+ m_freem(rx_ring->rx_buffer_info[i].mbuf);
+ rx_ring->rx_buffer_info[i].mbuf = NULL;
+ bus_dmamap_unload(adapter->rx_buf_tag,
+ rx_ring->rx_buffer_info[i].map);
+ bus_dmamap_destroy(adapter->rx_buf_tag,
+ rx_ring->rx_buffer_info[i].map);
+ }
+
+ /* free LRO resources, */
+ tcp_lro_free(&rx_ring->lro);
+
+ /* free allocated memory */
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ return;
+}
+
+/**
+ * ena_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: network interface device structure
+ *
+ * Returns 0 on success, or an error code on failure.
+ **/
+static int
+ena_setup_all_rx_resources(struct ena_adapter *adapter)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_rx_resources(adapter, i);
+ if (!rc)
+ continue;
+
+ device_printf(adapter->pdev,
+ "Allocation for Rx Queue %u failed\n", i);
+ goto err_setup_rx;
+ }
+ return (0);
+
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_rx_resources(adapter, i);
+ return (rc);
+}
+
+/**
+ * ena_free_all_rx_resources - Free Rx resources for all queues
+ * @adapter: network interface device structure
+ *
+ * Free all receive software resources
+ **/
+static void
+ena_free_all_rx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_resources(adapter, i);
+
+ return;
+}
+
+static inline int
+ena_alloc_rx_mbuf(struct ena_adapter *adapter,
+ struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
+{
+ struct ena_com_buf *ena_buf;
+ bus_dma_segment_t segs[1];
+ int nsegs, error;
+
+ /* if the previously allocated frag is not used */
+ if (rx_info->mbuf != NULL)
+ return (0);
+
+ /* Get mbuf using UMA allocator */
+ rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);
+
+ if (!rx_info->mbuf) {
+ counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
+ return (ENOMEM);
+ }
+ /* Set mbuf length*/
+ rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = MJUM16BYTES;
+
+ /* Map packets for DMA */
+ ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
+ "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
+ adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len);
+ error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
+ rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error || (nsegs != 1)) {
+ device_printf(adapter->pdev, "failed to map mbuf, error: %d, "
+ "nsegs: %d\n", error, nsegs);
+ counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
+ goto exit;
+
+ }
+
+ bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
+
+ ena_buf = &rx_info->ena_buf;
+ ena_buf->paddr = segs[0].ds_addr;
+ ena_buf->len = MJUM16BYTES;
+
+ ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
+ "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
+ rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr);
+
+ return (0);
+
+exit:
+ m_freem(rx_info->mbuf);
+ rx_info->mbuf = NULL;
+ return (EFAULT);
+}
+
+static void
+ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+
+ if (!rx_info->mbuf)
+ return;
+
+ bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
+ m_freem(rx_info->mbuf);
+ rx_info->mbuf = NULL;
+
+ return;
+}
+
+
+/**
+ * ena_refill_rx_bufs - Refills ring with descriptors
+ * @rx_ring: the ring which we want to feed with free descriptors
+ * @num: number of descriptors to refill
+ * Refills the ring with newly allocated DMA-mapped mbufs for receiving
+ **/
+static int
+ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
+{
+ struct ena_adapter *adapter = rx_ring->adapter;
+ uint16_t next_to_use;
+ uint32_t i;
+ int rc;
+
+ ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
+ rx_ring->qid);
+
+ next_to_use = rx_ring->next_to_use;
+
+ for (i = 0; i < num; i++) {
+ ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
+ "RX buffer - next to use: %d", next_to_use);
+
+ struct ena_rx_buffer *rx_info =
+ &rx_ring->rx_buffer_info[next_to_use];
+
+ rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
+ if (rc < 0) {
+ device_printf(adapter->pdev,
+ "failed to alloc buffer for rx queue\n");
+ break;
+ }
+ rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
+ &rx_info->ena_buf, next_to_use);
+ if (unlikely(rc)) {
+ device_printf(adapter->pdev,
+ "failed to add buffer for rx queue %d\n",
+ rx_ring->qid);
+ break;
+ }
+ next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
+ rx_ring->ring_size);
+ }
+
+ if (i < num) {
+ counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
+ device_printf(adapter->pdev,
+ "refilled rx queue %d with %d pages only\n",
+ rx_ring->qid, i);
+ }
+
+ if (i != 0) {
+ wmb();
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
+ }
+ rx_ring->next_to_use = next_to_use;
+ return (i);
+}
+
+static void
+ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->ring_size; i++) {
+ struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
+
+ if (rx_info->mbuf)
+ ena_free_rx_mbuf(adapter, rx_ring, rx_info);
+ }
+
+ return;
+}
+
+/**
+ * ena_refill_all_rx_bufs - allocate all queues Rx buffers
+ * @adapter: network interface device structure
+ *
+ */
+static void
+ena_refill_all_rx_bufs(struct ena_adapter *adapter)
+{
+ struct ena_ring *rx_ring;
+ int i, rc, bufs_num;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ bufs_num = rx_ring->ring_size - 1;
+ rc = ena_refill_rx_bufs(rx_ring, bufs_num);
+
+ if (unlikely(rc != bufs_num))
+ device_printf(adapter->pdev,
+ "refilling Queue %d failed. allocated %d buffers"
+ " from: %d\n", i, rc, bufs_num);
+ }
+}
+
+static void
+ena_free_all_rx_bufs(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_bufs(adapter, i);
+ return;
+}
+
+/**
+ * ena_free_tx_bufs - Free Tx Buffers per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ **/
+static void
+ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+
+ ENA_RING_MTX_LOCK(tx_ring);
+ for (int i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+
+ if (tx_info->mbuf == NULL)
+ continue;
+
+ ena_trace(ENA_DBG | ENA_TXPTH | ENA_RSC,
+ "free uncompleted Tx mbufs qid[%d] idx: 0x%x", qid, i);
+
+ bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
+ m_free(tx_info->mbuf);
+ tx_info->mbuf = NULL;
+ }
+ ENA_RING_MTX_UNLOCK(tx_ring);
+
+ return;
+}
+
+static void
+ena_free_all_tx_bufs(struct ena_adapter *adapter)
+{
+
+ for (int i = 0; i < adapter->num_queues; i++)
+ ena_free_tx_bufs(adapter, i);
+
+ return;
+}
+
+static void
+ena_destroy_all_tx_queues(struct ena_adapter *adapter)
+{
+ uint16_t ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void
+ena_destroy_all_rx_queues(struct ena_adapter *adapter)
+{
+ uint16_t ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_RXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void
+ena_destroy_all_io_queues(struct ena_adapter *adapter)
+{
+ ena_destroy_all_tx_queues(adapter);
+ ena_destroy_all_rx_queues(adapter);
+}
+
+static int
+validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (tx_info->mbuf)
+ return 0;
+ }
+
+ counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
+
+ return (EFAULT);
+}
+
+static int
+ena_create_io_queues(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_com_create_io_ctx ctx;
+ struct ena_ring *ring;
+ uint16_t ena_qid;
+ uint32_t msix_vector;
+ int rc, i;
+
+ /* Create TX queues */
+ for (i = 0; i < adapter->num_queues; i++) {
+ msix_vector = ENA_IO_IRQ_IDX(i);
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+ ctx.queue_size = adapter->tx_ring_size;
+ ctx.msix_vector = msix_vector;
+ ctx.qid = ena_qid;
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Failed to create io TX queue #%d rc: %d\n", i, rc);
+ goto err_tx;
+ }
+ ring = &adapter->tx_ring[i];
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &ring->ena_com_io_sq,
+ &ring->ena_com_io_cq);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Failed to get TX queue handlers. TX queue num"
+ " %d rc: %d\n", i, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ goto err_tx;
+ }
+ }
+
+ /* Create RX queues */
+ for (i = 0; i < adapter->num_queues; i++) {
+ msix_vector = ENA_IO_IRQ_IDX(i);
+ ena_qid = ENA_IO_RXQ_IDX(i);
+ ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+ ctx.queue_size = adapter->rx_ring_size;
+ ctx.msix_vector = msix_vector;
+ ctx.qid = ena_qid;
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Failed to create io RX queue[%d] rc: %d\n", i, rc);
+ goto err_rx;
+ }
+
+ ring = &adapter->rx_ring[i];
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &ring->ena_com_io_sq,
+ &ring->ena_com_io_cq);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Failed to get RX queue handlers. RX queue num"
+ " %d rc: %d\n", i, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ goto err_rx;
+ }
+ }
+
+ return (0);
+
+err_rx:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
+ i = adapter->num_queues;
+err_tx:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
+
+ return (ENXIO);
+}
+
+/**
+ * ena_tx_cleanup - clear sent packets and corresponding descriptors
+ * @tx_ring: ring for which we want to clean packets
+ *
+ * Once packets are sent, we ask the device in a loop for descriptors that
+ * are no longer in use. We find the related mbuf chain in a map (index in
+ * an array) and free it, then update the ring state.
+ * This is performed in an "endless" loop, updating the ring pointers every
+ * TX_COMMIT descriptors. The first check for free descriptors is performed
+ * before the actual loop, then repeated at the loop end.
+ **/
+static int
+ena_tx_cleanup(struct ena_ring *tx_ring)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_io_cq* io_cq;
+ uint16_t next_to_clean;
+ uint16_t req_id;
+ uint16_t ena_qid;
+ unsigned int total_done = 0;
+ int rc;
+ int commit = TX_COMMIT;
+ int budget = TX_BUDGET;
+ int work_done;
+
+ adapter = tx_ring->que->adapter;
+ ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
+ io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
+ next_to_clean = tx_ring->next_to_clean;
+
+ do {
+ struct ena_tx_buffer *tx_info;
+ struct mbuf *mbuf;
+
+ rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
+ if (rc != 0)
+ break;
+
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ mbuf = tx_info->mbuf;
+
+ tx_info->mbuf = NULL;
+ bintime_clear(&tx_info->timestamp);
+
+ if (tx_info->num_of_bufs != 0) {
+ /* Map is no longer required */
+ bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
+ }
+
+ m_freem(mbuf);
+
+ total_done += tx_info->tx_descs;
+
+ tx_ring->free_tx_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+
+ if (--commit == 0) {
+ commit = TX_COMMIT;
+ /* update ring state every TX_COMMIT descriptor */
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], total_done);
+ ena_com_update_dev_comp_head(io_cq);
+ total_done = 0;
+ }
+ } while (--budget);
+
+ work_done = TX_BUDGET - budget;
+
+ /* If there is still something to commit update ring state */
+ if (commit != TX_COMMIT) {
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], total_done);
+ ena_com_update_dev_comp_head(io_cq);
+ }
+
+ taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
+
+ return (work_done);
+}
+
+static void
+ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
+ struct mbuf *mbuf)
+{
+ struct ena_adapter *adapter = rx_ring->adapter;
+
+ if (adapter->rss_support) {
+ mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
+
+ if (ena_rx_ctx->frag &&
+ ena_rx_ctx->l3_proto != ENA_ETH_IO_L4_PROTO_UNKNOWN) {
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
+ return;
+ }
+
+ switch (ena_rx_ctx->l3_proto) {
+ case ENA_ETH_IO_L3_PROTO_IPV4:
+ switch (ena_rx_ctx->l4_proto) {
+ case ENA_ETH_IO_L4_PROTO_TCP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
+ break;
+ case ENA_ETH_IO_L4_PROTO_UDP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
+ }
+ break;
+ case ENA_ETH_IO_L3_PROTO_IPV6:
+ switch (ena_rx_ctx->l4_proto) {
+ case ENA_ETH_IO_L4_PROTO_TCP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
+ break;
+ case ENA_ETH_IO_L4_PROTO_UDP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
+ }
+ break;
+ case ENA_ETH_IO_L3_PROTO_UNKNOWN:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
+ }
+ } else {
+ mbuf->m_pkthdr.flowid = rx_ring->qid;
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
+ }
+}
+
+/**
+ * ena_rx_mbuf - assemble mbuf from descriptors
+ * @rx_ring: ring for which we want to clean packets
+ * @ena_bufs: buffer info
+ * @ena_rx_ctx: metadata for the received packet
+ * @next_to_clean: ring pointer
+ *
+ **/
+static struct mbuf*
+ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
+ struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
+{
+ struct mbuf *mbuf;
+ struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
+ unsigned int len, buf = 0;
+ unsigned int descs = ena_rx_ctx->descs;
+
+ adapter = rx_ring->adapter;
+ rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
+
+ ENA_ASSERT(rx_info->mbuf, "Invalid alloc frag buffer\n");
+
+ len = ena_bufs[0].len;
+ ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx",
+ rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
+
+ mbuf = rx_info->mbuf;
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = len;
+ mbuf->m_len = len;
+ mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
+
+	/* Fill the mbuf with the hash key and its interpretation for optimization */
+ ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
+
+ ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d",
+ mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);
+
+ /* DMA address is not needed anymore, unmap it */
+ bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
+
+ rx_info->mbuf = NULL;
+ *next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
+ rx_ring->ring_size);
+
+ /*
+	 * While the received packet spans more than one descriptor, append
+	 * the remaining mbufs to the main one
+ */
+ while (--descs) {
+ rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
+ len = ena_bufs[++buf].len;
+
+ if (!m_append(mbuf, len, rx_info->mbuf->m_data)) {
+ counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
+ ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p",
+ mbuf);
+ }
+		/* Free the already appended mbuf; it is no longer needed */
+ bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
+ m_freem(rx_info->mbuf);
+ rx_info->mbuf = NULL;
+
+ *next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
+ rx_ring->ring_size);
+ }
+
+ return (mbuf);
+}
+
+/**
+ * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
+ **/
+static inline void
+ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
+ struct mbuf *mbuf)
+{
+
+ /* if IP and error */
+ if ((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
+ (ena_rx_ctx->l3_csum_err)) {
+ /* ipv4 checksum error */
+ mbuf->m_pkthdr.csum_flags = 0;
+ counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
+ return;
+ }
+
+ /* if TCP/UDP */
+ if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
+ (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
+ if (ena_rx_ctx->l4_csum_err) {
+ /* TCP/UDP checksum error */
+ mbuf->m_pkthdr.csum_flags = 0;
+ counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
+ } else {
+ mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+ }
+
+ return;
+}
+
+/**
+ * ena_rx_cleanup - handle rx irq
+ * @arg: ring for which irq is being handled
+ * @rx_ring: ring for which the Rx interrupt is being handled
+static int
+ena_rx_cleanup(struct ena_ring *rx_ring)
+{
+ struct ena_adapter *adapter;
+ struct mbuf *mbuf;
+ struct ena_com_rx_ctx ena_rx_ctx;
+ struct ena_com_io_cq* io_cq;
+ struct ena_com_io_sq* io_sq;
+ if_t ifp;
+ uint16_t ena_qid;
+ uint16_t next_to_clean;
+ uint32_t refill_required;
+ uint32_t refill_threshold;
+ uint32_t do_if_input = 0;
+ unsigned int qid;
+ int rc;
+ int budget = RX_BUDGET;
+
+ adapter = rx_ring->que->adapter;
+ ifp = adapter->ifp;
+ qid = rx_ring->que->id;
+ ena_qid = ENA_IO_RXQ_IDX(qid);
+ io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
+ io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
+ next_to_clean = rx_ring->next_to_clean;
+
+ do {
+ ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
+ ena_rx_ctx.descs = 0;
+ rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
+
+ if (unlikely(rc))
+ goto error;
+
+ if (unlikely(ena_rx_ctx.descs == 0))
+ break;
+
+ /* Receive mbuf from the ring */
+ mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
+ &ena_rx_ctx, &next_to_clean);
+
+ /* Exit if we failed to retrieve a buffer */
+ if (unlikely(!mbuf)) {
+ next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
+ ena_rx_ctx.descs, rx_ring->ring_size);
+ break;
+ }
+ ena_trace(ENA_DBG | ENA_RXPTH, "Rx: %d bytes",
+ mbuf->m_pkthdr.len);
+
+ if ((ifp->if_capenable & IFCAP_RXCSUM) ||
+ (ifp->if_capenable & IFCAP_RXCSUM_IPV6)) {
+ ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
+ }
+
+ counter_u64_add(rx_ring->rx_stats.bytes, mbuf->m_pkthdr.len);
+ /*
+		 * LRO is only used for IP/TCP packets whose TCP checksum has
+		 * been validated by the hardware.
+ */
+ do_if_input = 1;
+ if ((ifp->if_capenable & IFCAP_LRO) &&
+ (mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) &&
+ ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP) {
+ /*
+ * Send to the stack if:
+ * - LRO not enabled, or
+ * - no LRO resources, or
+ * - lro enqueue fails
+ */
+ if (rx_ring->lro.lro_cnt != 0 &&
+ tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
+ do_if_input = 0;
+ }
+ if (do_if_input) {
+ ena_trace(ENA_DBG | ENA_RXPTH, "calling if_input() with mbuf %p",
+ mbuf);
+ (*ifp->if_input)(ifp, mbuf);
+ }
+
+ counter_u64_add(rx_ring->rx_stats.cnt, 1);
+ } while (--budget);
+
+ rx_ring->next_to_clean = next_to_clean;
+
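+	/*
+	 * Refill the Rx ring only once the number of empty entries in the
+	 * submission queue exceeds the refill threshold.
+	 */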
+ refill_required = ena_com_sq_empty_space(io_sq);
+ refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DEVIDER;
+
+ if (refill_required > refill_threshold) {
+ ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ ena_refill_rx_bufs(rx_ring, refill_required);
+ }
+
+ tcp_lro_flush_all(&rx_ring->lro);
+
+ return (RX_BUDGET - budget);
+
+error:
+ counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
+ return (RX_BUDGET - budget);
+}
+
+/*********************************************************************
+ *
+ * MSIX & Interrupt Service routine
+ *
+ **********************************************************************/
+
+/**
+ * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue
+ * @arg: pointer to the ena_adapter structure
+ **/
+static void
+ena_intr_msix_mgmnt(void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+
+ ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
+ if (likely(adapter->running))
+ ena_com_aenq_intr_handler(adapter->ena_dev, arg);
+}
+
+/**
+ * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx
+ * @arg: pointer to the queue structure (struct ena_que)
+ **/
+static void
+ena_handle_msix(void *arg)
+{
+ struct ena_que *que = arg;
+ struct ena_adapter *adapter = que->adapter;
+ if_t ifp = adapter->ifp;
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+ struct ena_com_io_cq* io_cq;
+ struct ena_eth_io_intr_reg intr_reg;
+ int qid, ena_qid;
+ int txc, rxc, i;
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ ena_trace(ENA_DBG, "MSI-X TX/RX routine");
+
+ tx_ring = que->tx_ring;
+ rx_ring = que->rx_ring;
+ qid = que->id;
+ ena_qid = ENA_IO_TXQ_IDX(qid);
+ io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
+
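+	/*
+	 * Clean Tx and Rx repeatedly; stop once neither ring consumes its
+	 * full budget in an iteration, or after CLEAN_BUDGET iterations.
+	 */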
+ for (i = 0; i < CLEAN_BUDGET; ++i) {
+ rxc = ena_rx_cleanup(rx_ring);
+
+		/* Prevent ena_tx_cleanup() from racing with ena_start_xmit() */
+ ENA_RING_MTX_LOCK(tx_ring);
+ txc = ena_tx_cleanup(tx_ring);
+ ENA_RING_MTX_UNLOCK(tx_ring);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ if (txc != TX_BUDGET && rxc != RX_BUDGET)
+ break;
+ }
+
+ /* Signal that work is done and unmask interrupt */
+ ena_com_update_intr_reg(&intr_reg,
+ RX_IRQ_INTERVAL,
+ TX_IRQ_INTERVAL,
+ true);
+ ena_com_unmask_intr(io_cq, &intr_reg);
+}
+
+static int
+ena_enable_msix(struct ena_adapter *adapter)
+{
+ device_t dev = adapter->pdev;
+ int i, msix_vecs, rc = 0;
+
+	/* Reserve the maximum number of MSI-X vectors we might need */
+ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues);
+
+ adapter->msix_entries = ENA_MEM_ALLOC(adapter->ena_dev->dmadev,
+ msix_vecs * sizeof(struct msix_entry));
+ if (!adapter->msix_entries) {
+ device_printf(dev,
+ "Failed to allocate msix_entries, vectors %d\n", msix_vecs);
+ rc = ENOMEM;
+ goto error;
+ }
+ device_printf(dev, "Allocated msix_entries, vectors (cnt: %d)\n",
+ msix_vecs);
+
+ for (i = 0; i < msix_vecs; i++) {
+ adapter->msix_entries[i].entry = i;
+ /* Vectors must start from 1 */
+ adapter->msix_entries[i].vector = i + 1;
+ }
+
+ rc = pci_alloc_msix(dev, &msix_vecs);
+ if (rc != 0) {
+ device_printf(dev,
+ "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ rc = ENOSPC;
+ goto error;
+ }
+
+ adapter->msix_vecs = msix_vecs;
+ adapter->msix_enabled = true;
+
+error:
+ return (rc);
+}
+
+static void
+ena_setup_mgmnt_intr(struct ena_adapter *adapter)
+{
+
+ snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
+ ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
+ device_get_nameunit(adapter->pdev));
+ /*
+	 * The handler is NULL on purpose; it will be set
+	 * when the mgmnt interrupt is requested
+ */
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
+ adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+
+ return;
+}
+
+static void
+ena_setup_io_intr(struct ena_adapter *adapter)
+{
+ static int last_bind_cpu = -1;
+ int irq_idx;
+ ena_trace(ENA_DBG, "enter");
+
+ for (int i = 0; i < adapter->num_queues; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+
+ snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
+ "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
+ adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
+ adapter->irq_tbl[irq_idx].data = &adapter->que[i];
+ adapter->irq_tbl[irq_idx].vector =
+ adapter->msix_entries[irq_idx].vector;
+ ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
+ adapter->msix_entries[irq_idx].vector);
+#ifdef RSS
+ adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
+ rss_getcpu(i % rss_getnumbuckets());
+#else
+ /*
+ * We still want to bind rings to the corresponding cpu
+ * using something similar to the RSS round-robin technique.
+ */
+ if (last_bind_cpu < 0)
+ last_bind_cpu = CPU_FIRST();
+ adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
+ last_bind_cpu;
+ last_bind_cpu = CPU_NEXT(last_bind_cpu);
+#endif
+ }
+
+ return;
+}
+
+static int
+ena_request_mgmnt_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ unsigned long flags;
+ int rc, rcc;
+
+ flags = RF_ACTIVE | RF_SHAREABLE;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
+ &irq->vector, flags);
+
+ if (irq->res == NULL) {
+ device_printf(adapter->pdev, "could not allocate "
+ "irq vector: %d\n", irq->vector);
+ rc = ENXIO;
+ goto exit_res;
+ }
+
+ if ((rc = bus_activate_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
+ irq->res)) != 0) {
+ device_printf(adapter->pdev, "could not activate "
+ "irq vector: %d\n", irq->vector);
+ goto exit_intr;
+ }
+
+ if ((rc = bus_setup_intr(adapter->pdev, irq->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ena_intr_msix_mgmnt, irq->data, &irq->cookie)) != 0) {
+ device_printf(adapter->pdev, "failed to register "
+ "interrupt handler for irq %ju: %d\n",
+ rman_get_start(irq->res), rc);
+ goto exit_intr;
+ }
+ irq->requested = true;
+
+ return (rc);
+
+exit_intr:
+ device_printf(adapter->pdev, "exit_intr: releasing resource"
+ " for irq %d\n", irq->vector);
+ rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
+ irq->vector, irq->res);
+ if (rcc)
+ device_printf(adapter->pdev, "dev has no parent while "
+ "releasing res for irq: %d\n", irq->vector);
+ irq->res = NULL;
+
+exit_res:
+ return (rc);
+}
+
+static int
+ena_request_io_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ unsigned long flags = 0;
+ int rc = 0, i, rcc;
+
+ if (!adapter->msix_enabled) {
+ device_printf(adapter->pdev, "failed to request irq\n");
+ return (EINVAL);
+ } else {
+ flags = RF_ACTIVE | RF_SHAREABLE;
+ }
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+
+ if (irq->requested)
+ continue;
+
+ irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
+ &irq->vector, flags);
+ if (irq->res == NULL) {
+ device_printf(adapter->pdev, "could not allocate "
+ "irq vector: %d\n", irq->vector);
+ goto err;
+ }
+
+ if ((rc = bus_setup_intr(adapter->pdev, irq->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, irq->handler,
+ irq->data, &irq->cookie)) != 0) {
+ device_printf(adapter->pdev, "failed to register "
+ "interrupt handler for irq %ju: %d\n",
+ rman_get_start(irq->res), rc);
+ goto err;
+ }
+ irq->requested = true;
+
+#ifdef RSS
+ device_printf(adapter->pdev, "queue %d - RSS bucket %d\n",
+ i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
+#else
+ device_printf(adapter->pdev, "queue %d - cpu %d\n",
+ i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
+#endif
+ }
+
+ return (rc);
+
+err:
+
+ for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
+ irq = &adapter->irq_tbl[i];
+ rcc = 0;
+
+		/* Once we enter the err: section and irq->requested is true, we
+		   free both the interrupt and its resources */
+ if (irq->requested == true)
+ rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
+ if (rcc)
+ device_printf(adapter->pdev, "could not release"
+ " irq: %d, error: %d\n", irq->vector, rcc);
+
+		/* If we entered the err: section without irq->requested set, we
+		   know it was bus_alloc_resource_any() that needs cleanup,
+		   provided res is not NULL. If res is NULL, no work is needed
+		   in this iteration */
+ rcc = 0;
+ if (irq->res != NULL) {
+ rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
+ irq->vector, irq->res);
+ }
+ if (rcc)
+ device_printf(adapter->pdev, "dev has no parent while "
+ "releasing res for irq: %d\n", irq->vector);
+ irq->requested = false;
+ irq->res = NULL;
+ }
+
+ return (rc);
+}
+
+static void
+ena_free_mgmnt_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ int rc;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ if (irq->requested) {
+ ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
+ irq->vector);
+ rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
+ if (rc)
+ device_printf(adapter->pdev, "failed to tear "
+ "down irq: %d\n", irq->vector);
+ irq->requested = 0;
+ }
+
+ if (irq->res != NULL) {
+ ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
+ irq->vector);
+ rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
+ irq->vector, irq->res);
+ irq->res = NULL;
+ if (rc)
+ device_printf(adapter->pdev, "dev has no parent while "
+ "releasing res for irq: %d\n", irq->vector);
+ }
+
+ return;
+}
+
+static void
+ena_free_io_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ int rc;
+
+ for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+ if (irq->requested) {
+ ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
+ irq->vector);
+ rc = bus_teardown_intr(adapter->pdev, irq->res,
+ irq->cookie);
+ if (rc) {
+ device_printf(adapter->pdev, "failed to tear "
+ "down irq: %d\n", irq->vector);
+ }
+ irq->requested = 0;
+ }
+
+ if (irq->res != NULL) {
+ ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
+ irq->vector);
+ rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
+ irq->vector, irq->res);
+ irq->res = NULL;
+ if (rc) {
+ device_printf(adapter->pdev, "dev has no parent"
+ " while releasing res for irq: %d\n",
+ irq->vector);
+ }
+ }
+ }
+
+ return;
+}
+
+static void
+ena_free_irqs(struct ena_adapter* adapter)
+{
+
+ ena_free_io_irq(adapter);
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+}
+
+static void
+ena_disable_msix(struct ena_adapter *adapter)
+{
+
+ pci_release_msi(adapter->pdev);
+
+ adapter->msix_vecs = 0;
+ ENA_MEM_FREE(adapter->ena_dev->dmadev, adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+static void
+ena_unmask_all_io_irqs(struct ena_adapter *adapter)
+{
+ struct ena_com_io_cq* io_cq;
+ struct ena_eth_io_intr_reg intr_reg;
+ uint16_t ena_qid;
+ int i;
+
+ /* Unmask interrupts for all queues */
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
+ ena_com_update_intr_reg(&intr_reg, 0, 0, true);
+ ena_com_unmask_intr(io_cq, &intr_reg);
+ }
+}
+
+/* Configure the Rx forwarding */
+static int ena_rss_configure(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc;
+
+ /* Set indirect table */
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != EPERM))
+ return rc;
+
+ /* Configure hash function (if supported) */
+ rc = ena_com_set_hash_function(ena_dev);
+ if (unlikely(rc && (rc != EPERM)))
+ return rc;
+
+ /* Configure hash inputs (if supported) */
+ rc = ena_com_set_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != EPERM)))
+ return rc;
+
+ return 0;
+}
+
+static void
+ena_update_hw_stats(void *arg, int pending)
+{
+ struct ena_adapter *adapter = arg;
+ int rc;
+
+ for (;;) {
+ if (!adapter->up)
+ return;
+
+ rc = ena_update_stats_counters(adapter);
+ if (rc)
+ ena_trace(ENA_WARNING,
+ "Error updating stats counters, rc = %d", rc);
+
+ pause("ena update hw stats", hz);
+ }
+}
+
+static int
+ena_up_complete(struct ena_adapter *adapter)
+{
+ int rc;
+
+ if (adapter->rss_support) {
+ rc = ena_rss_configure(adapter);
+ if (rc)
+ return (rc);
+ }
+
+ ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
+ ena_refill_all_rx_bufs(adapter);
+
+ return (0);
+}
+
+static int
+ena_up(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ if (!device_is_attached(adapter->pdev)) {
+ device_printf(adapter->pdev, "device is not attached!\n");
+ return (ENXIO);
+ }
+
+ if (!adapter->running) {
+ device_printf(adapter->pdev, "device is not running!\n");
+ return (ENXIO);
+ }
+
+ if (!adapter->up) {
+ device_printf(adapter->pdev, "device is going UP\n");
+
+ /* setup interrupts for IO queues */
+ ena_setup_io_intr(adapter);
+ rc = ena_request_io_irq(adapter);
+ if (rc) {
+ ena_trace(ENA_ALERT, "err_req_irq");
+ goto err_req_irq;
+ }
+
+ /* allocate transmit descriptors */
+ rc = ena_setup_all_tx_resources(adapter);
+ if (rc) {
+ ena_trace(ENA_ALERT, "err_setup_tx");
+ goto err_setup_tx;
+ }
+
+ /* allocate receive descriptors */
+ rc = ena_setup_all_rx_resources(adapter);
+ if (rc) {
+ ena_trace(ENA_ALERT, "err_setup_rx");
+ goto err_setup_rx;
+ }
+
+ /* create IO queues for Rx & Tx */
+ rc = ena_create_io_queues(adapter);
+ if (rc) {
+ ena_trace(ENA_ALERT,
+ "create IO queues failed");
+ goto err_io_que;
+ }
+
+ if (adapter->link_status)
+ if_link_state_change(adapter->ifp, LINK_STATE_UP);
+
+ rc = ena_up_complete(adapter);
+ if (rc)
+ goto err_up_complete;
+
+ counter_u64_add(adapter->dev_stats.interface_up, 1);
+
+ ena_update_hwassist(adapter);
+
+ if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
+ IFF_DRV_OACTIVE);
+
+ callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
+ ena_timer_service, (void *)adapter, 0);
+
+ taskqueue_enqueue(adapter->stats_tq, &adapter->stats_task);
+
+ adapter->up = true;
+
+ ena_unmask_all_io_irqs(adapter);
+ }
+
+ return (0);
+
+err_up_complete:
+ ena_destroy_all_io_queues(adapter);
+err_io_que:
+ ena_free_all_rx_resources(adapter);
+err_setup_rx:
+ ena_free_all_tx_resources(adapter);
+err_setup_tx:
+ ena_free_io_irq(adapter);
+err_req_irq:
+ return (rc);
+}
+
+int
+ena_update_stats_counters(struct ena_adapter *adapter)
+{
+ struct ena_admin_basic_stats ena_stats;
+ struct ena_hw_stats *stats = &adapter->hw_stats;
+ int rc = 0;
+
+ if (!adapter->up)
+ return (rc);
+
+ rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
+ if (rc)
+ return (rc);
+
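+	/*
+	 * The device reports each counter as two 32-bit halves; merge them
+	 * into 64-bit values.
+	 */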
+ stats->tx_bytes = ((uint64_t)ena_stats.tx_bytes_high << 32) |
+ ena_stats.tx_bytes_low;
+ stats->rx_bytes = ((uint64_t)ena_stats.rx_bytes_high << 32) |
+ ena_stats.rx_bytes_low;
+
+ stats->rx_packets = ((uint64_t)ena_stats.rx_pkts_high << 32) |
+ ena_stats.rx_pkts_low;
+ stats->tx_packets = ((uint64_t)ena_stats.tx_pkts_high << 32) |
+ ena_stats.tx_pkts_low;
+
+ stats->rx_drops = ((uint64_t)ena_stats.rx_drops_high << 32) |
+ ena_stats.rx_drops_low;
+
+ return (0);
+}
+
+static uint64_t
+ena_get_counter(if_t ifp, ift_counter cnt)
+{
+ struct ena_adapter *adapter;
+ struct ena_hw_stats *stats;
+
+ adapter = if_getsoftc(ifp);
+ stats = &adapter->hw_stats;
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (stats->rx_packets);
+ case IFCOUNTER_OPACKETS:
+ return (stats->tx_packets);
+ case IFCOUNTER_IBYTES:
+ return (stats->rx_bytes);
+ case IFCOUNTER_OBYTES:
+ return (stats->tx_bytes);
+ case IFCOUNTER_IQDROPS:
+ return (stats->rx_drops);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
+static int
+ena_media_change(if_t ifp)
+{
+ /* Media Change is not supported by firmware */
+ return (0);
+}
+
+static void
+ena_media_status(if_t ifp, struct ifmediareq *ifmr)
+{
+ struct ena_adapter *adapter = if_getsoftc(ifp);
+ ena_trace(ENA_DBG, "enter");
+
+ ENA_DEV_LOCK;
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!adapter->link_status) {
+ ENA_DEV_UNLOCK;
+ ena_trace(ENA_WARNING, "link_status = false");
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
+
+ ENA_DEV_UNLOCK;
+
+ return;
+}
+
+static void
+ena_init(void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+
+ if (adapter->up == false) {
+ sx_xlock(&adapter->ioctl_sx);
+ ena_up(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+ }
+
+ return;
+}
+
+static int
+ena_ioctl(if_t ifp, u_long command, caddr_t data)
+{
+ struct ena_adapter *adapter;
+ struct ifreq *ifr;
+ int rc;
+
+ adapter = ifp->if_softc;
+ ifr = (struct ifreq *)data;
+
+ /*
+	 * Acquire the lock to prevent the up and down routines from running
+	 * in parallel.
+ */
+ rc = 0;
+ switch (command) {
+ case SIOCSIFMTU:
+ sx_xlock(&adapter->ioctl_sx);
+ ena_down(adapter);
+
+ ena_change_mtu(ifp, ifr->ifr_mtu);
+
+ rc = ena_up(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+ break;
+
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (ifp->if_flags & (IFF_PROMISC |
+ IFF_ALLMULTI)) {
+ device_printf(adapter->pdev,
+ "ioctl promisc/allmulti\n");
+ }
+ } else {
+ sx_xlock(&adapter->ioctl_sx);
+ rc = ena_up(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ sx_xlock(&adapter->ioctl_sx);
+ ena_down(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+ }
+ }
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+ break;
+
+ case SIOCSIFCAP:
+ {
+ int reinit = 0;
+
+ if (ifr->ifr_reqcap != ifp->if_capenable) {
+ ifp->if_capenable = ifr->ifr_reqcap;
+ reinit = 1;
+ }
+
+ if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ sx_xlock(&adapter->ioctl_sx);
+ ena_down(adapter);
+ rc = ena_up(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+ }
+ }
+
+ break;
+ default:
+ rc = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (rc);
+}
+
+static int
+ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
+{
+ int caps = 0;
+
+ if (feat->offload.tx &
+ (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK))
+ caps |= IFCAP_TXCSUM;
+
+ if (feat->offload.tx &
+ (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK))
+ caps |= IFCAP_TXCSUM_IPV6;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ caps |= IFCAP_TSO4;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
+ caps |= IFCAP_TSO6;
+
+ if (feat->offload.rx_supported &
+ (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK))
+ caps |= IFCAP_RXCSUM;
+
+ if (feat->offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+ caps |= IFCAP_RXCSUM_IPV6;
+
+ caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
+
+ return (caps);
+}
+
+static void
+ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
+{
+
+ host_info->supported_network_features[0] =
+ (uint32_t)if_getcapabilities(ifp);
+}
+
+static void
+ena_update_hwassist(struct ena_adapter *adapter)
+{
+ if_t ifp = adapter->ifp;
+ uint32_t feat = adapter->tx_offload_cap;
+ int cap = if_getcapenable(ifp);
+ int flags = 0;
+
+ if_clearhwassist(ifp);
+
+ if (cap & IFCAP_TXCSUM) {
+ if (feat & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
+ flags |= CSUM_IP;
+ if (feat &
+ (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK))
+ flags |= CSUM_IP_UDP | CSUM_IP_TCP;
+ }
+
+ if (cap & IFCAP_TXCSUM_IPV6)
+ flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
+
+ if (cap & IFCAP_TSO4)
+ flags |= CSUM_IP_TSO;
+
+ if (cap & IFCAP_TSO6)
+ flags |= CSUM_IP6_TSO;
+
+ if_sethwassistbits(ifp, flags, 0);
+}
+
+static int
+ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *feat)
+{
+ if_t ifp;
+ int caps = 0;
+
+ ena_trace(ENA_DBG, "enter");
+
+ ifp = adapter->ifp = if_gethandle(IFT_ETHER);
+ if (ifp == 0) {
+ device_printf(pdev, "can not allocate ifnet structure\n");
+ return (ENXIO);
+ }
+ if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
+ if_setdev(ifp, pdev);
+ if_setsoftc(ifp, adapter);
+
+ if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+ if_setinitfn(ifp, ena_init);
+ if_settransmitfn(ifp, ena_mq_start);
+ if_setqflushfn(ifp, ena_qflush);
+ if_setioctlfn(ifp, ena_ioctl);
+ if_setgetcounterfn(ifp, ena_get_counter);
+
+ if_setsendqlen(ifp, adapter->tx_ring_size);
+ if_setsendqready(ifp);
+ if_setmtu(ifp, ETHERMTU);
+ if_setbaudrate(ifp, 0);
+ /* Zeroize capabilities... */
+ if_setcapabilities(ifp, 0);
+ if_setcapenable(ifp, 0);
+ /* check hardware support */
+ caps = ena_get_dev_offloads(feat);
+ /* ... and set them */
+ if_setcapabilitiesbit(ifp, caps, 0);
+
+ /* TSO parameters */
+ ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
+ (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+ ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
+ ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
+
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
+ if_setcapenable(ifp, if_getcapabilities(ifp));
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&adapter->media, IFM_IMASK,
+ ena_media_change, ena_media_status);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+ ether_ifattach(ifp, adapter->mac_addr);
+
+ return (0);
+}
+
+static void
+ena_down(struct ena_adapter *adapter)
+{
+
+ if (adapter->up) {
+ device_printf(adapter->pdev, "device is going DOWN\n");
+
+ callout_drain(&adapter->timer_service);
+
+ adapter->up = false;
+ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
+ IFF_DRV_RUNNING);
+
+ /* Drain task responsible for updating hw stats */
+ while (taskqueue_cancel(adapter->stats_tq, &adapter->stats_task, NULL))
+ taskqueue_drain(adapter->stats_tq, &adapter->stats_task);
+
+ ena_free_io_irq(adapter);
+
+ ena_destroy_all_io_queues(adapter);
+
+ ena_free_all_tx_bufs(adapter);
+ ena_free_all_rx_bufs(adapter);
+ ena_free_all_tx_resources(adapter);
+ ena_free_all_rx_resources(adapter);
+
+ counter_u64_add(adapter->dev_stats.interface_down, 1);
+ }
+
+ return;
+}
+
+static void
+ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf)
+{
+ struct ena_com_tx_meta *ena_meta;
+ struct ether_vlan_header *eh;
+ u32 mss;
+ bool offload;
+ uint16_t etype;
+ int ehdrlen;
+ struct ip *ip;
+ int iphlen;
+ struct tcphdr *th;
+
+ offload = false;
+ ena_meta = &ena_tx_ctx->ena_meta;
+ mss = mbuf->m_pkthdr.tso_segsz;
+
+ if (mss != 0)
+ offload = true;
+
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
+ offload = true;
+
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
+ offload = true;
+
+ if (offload == false) {
+ ena_tx_ctx->meta_valid = 0;
+ return;
+ }
+
+ /* Determine where frame payload starts. */
+ eh = mtod(mbuf, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ etype = ntohs(eh->evl_proto);
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ etype = ntohs(eh->evl_encap_proto);
+ ehdrlen = ETHER_HDR_LEN;
+ }
+
+ ip = (struct ip *)(mbuf->m_data + ehdrlen);
+ iphlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + iphlen);
+
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
+ ena_tx_ctx->l3_csum_enable = 1;
+ }
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
+ ena_tx_ctx->tso_enable = 1;
+ ena_meta->l4_hdr_len = (th->th_off);
+ }
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+ if (ip->ip_off == 0)
+ ena_tx_ctx->df = 1;
+ break;
+ case ETHERTYPE_IPV6:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
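+		/* FALLTHROUGH: nothing more to set for IPv6 */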
+
+ default:
+ break;
+ }
+
+ if (ip->ip_p == IPPROTO_TCP) {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
+ if (mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
+ ena_tx_ctx->l4_csum_enable = 1;
+ else
+ ena_tx_ctx->l4_csum_enable = 0;
+ } else if (ip->ip_p == IPPROTO_UDP) {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
+ if (mbuf->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
+ ena_tx_ctx->l4_csum_enable = 1;
+ else
+ ena_tx_ctx->l4_csum_enable = 0;
+ } else {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
+ ena_tx_ctx->l4_csum_enable = 0;
+ }
+
+ ena_meta->mss = mss;
+ ena_meta->l3_hdr_len = iphlen;
+ ena_meta->l3_hdr_offset = ehdrlen;
+ ena_tx_ctx->meta_valid = 1;
+}
+
+static int
+ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
+{
+ struct ena_adapter *adapter;
+ struct mbuf *collapsed_mbuf;
+ int num_frags;
+
+ adapter = tx_ring->adapter;
+ num_frags = ena_mbuf_count(*mbuf);
+
+ /* One segment must be reserved for configuration descriptor. */
+ if (num_frags < adapter->max_tx_sgl_size)
+ return (0);
+ counter_u64_add(tx_ring->tx_stats.collapse, 1);
+
+ collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
+ adapter->max_tx_sgl_size - 1);
+ if (collapsed_mbuf == NULL) {
+ counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
+ return (ENOMEM);
+ }
+
+	/* If the mbuf was collapsed successfully, the original one was freed. */
+ *mbuf = collapsed_mbuf;
+
+ return (0);
+}
+
+static int
+ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
+{
+ struct ena_adapter *adapter;
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_buf *ena_buf;
+ struct ena_com_io_sq* io_sq;
+ bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
+ void *push_hdr;
+ uint16_t next_to_use;
+ uint16_t req_id;
+ uint16_t push_len;
+ uint16_t ena_qid;
+ uint32_t len, nsegs, header_len;
+ int i, rc;
+ int nb_hw_desc;
+
+ ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
+ adapter = tx_ring->que->adapter;
+ ena_dev = adapter->ena_dev;
+ io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
+
+ ENA_ASSERT(*mbuf, "mbuf is NULL\n");
+
+ rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
+ if (rc) {
+ ena_trace(ENA_WARNING,
+ "Failed to collapse mbuf! err: %d", rc);
+ return (rc);
+ }
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ tx_info->mbuf = *mbuf;
+ tx_info->num_of_bufs = 0;
+
+ ena_buf = tx_info->bufs;
+ len = (*mbuf)->m_len;
+
+ ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len);
+
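+	/*
+	 * No push header is used; the device is configured for host-memory
+	 * placement (see ena_attach), so push_hdr stays NULL.
+	 */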
+ push_len = 0;
+ header_len = min_t(uint32_t, len, tx_ring->tx_max_header_size);
+ push_hdr = NULL;
+
+ rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map,
+ *mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (rc || (nsegs == 0)) {
+ ena_trace(ENA_WARNING,
+ "dmamap load failed! err: %d nsegs: %d", rc, nsegs);
+ counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
+ tx_info->mbuf = NULL;
+ if (rc == ENOMEM)
+ return (ENA_COM_NO_MEM);
+ else
+ return (ENA_COM_INVAL);
+ }
+
+ for (i = 0; i < nsegs; i++) {
+ ena_buf->len = segs[i].ds_len;
+ ena_buf->paddr = segs[i].ds_addr;
+ ena_buf++;
+ }
+ tx_info->num_of_bufs = nsegs;
+
+ memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = header_len;
+
+ /* Set flags and meta data */
+ ena_tx_csum(&ena_tx_ctx, *mbuf);
+ /* Prepare the packet's descriptors and send them to device */
+ rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
+ if (rc != 0) {
+ ena_trace(ENA_WARNING, "failed to prepare tx bufs\n");
+ counter_enter();
+ counter_u64_add_protected(tx_ring->tx_stats.queue_stop, 1);
+ counter_u64_add_protected(tx_ring->tx_stats.prepare_ctx_err, 1);
+ counter_exit();
+ goto dma_error;
+ }
+
+ counter_enter();
+ counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
+ counter_u64_add_protected(tx_ring->tx_stats.bytes, (*mbuf)->m_pkthdr.len);
+ counter_exit();
+
+ tx_info->tx_descs = nb_hw_desc;
+ getbinuptime(&tx_info->timestamp);
+ tx_info->print_once = true;
+
+ tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ tx_ring->ring_size);
+
+ bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map, BUS_DMASYNC_PREWRITE);
+
+ return (0);
+
+dma_error:
+ tx_info->mbuf = NULL;
+ bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
+
+ return (rc);
+}
+
+static void
+ena_start_xmit(struct ena_ring *tx_ring)
+{
+ struct mbuf *mbuf;
+ struct ena_adapter *adapter = tx_ring->adapter;
+ struct ena_com_io_sq* io_sq;
+ int ena_qid;
+ int acum_pkts = 0;
+ int ret = 0;
+
+ if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ if (!adapter->link_status)
+ return;
+
+ ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
+ io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
+
+ while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
+ ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
+ " header csum flags %#jx",
+ mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags);
+
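+		/* Reclaim completed Tx descriptors when free SQ space runs low */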
+ if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
+ ena_tx_cleanup(tx_ring);
+
+ if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) {
+ if (ret == ENA_COM_NO_MEM) {
+ drbr_putback(adapter->ifp, tx_ring->br, mbuf);
+ } else if (ret == ENA_COM_NO_SPACE) {
+ drbr_putback(adapter->ifp, tx_ring->br, mbuf);
+ } else {
+ m_freem(mbuf);
+ drbr_advance(adapter->ifp, tx_ring->br);
+ }
+
+ break;
+ }
+
+ drbr_advance(adapter->ifp, tx_ring->br);
+
+ if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ acum_pkts++;
+
+ BPF_MTAP(adapter->ifp, mbuf);
+
+ if (acum_pkts == DB_THRESHOLD) {
+ acum_pkts = 0;
+ wmb();
+ /* Trigger the dma engine */
+ ena_com_write_sq_doorbell(io_sq);
+ counter_u64_add(tx_ring->tx_stats.doorbells, 1);
+ }
+
+ }
+
+ if (acum_pkts) {
+ wmb();
+ /* Trigger the dma engine */
+ ena_com_write_sq_doorbell(io_sq);
+ counter_u64_add(tx_ring->tx_stats.doorbells, 1);
+ }
+
+ if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
+ ena_tx_cleanup(tx_ring);
+}
+
+static void
+ena_deferred_mq_start(void *arg, int pending)
+{
+ struct ena_ring *tx_ring = (struct ena_ring *)arg;
+ struct ifnet *ifp = tx_ring->adapter->ifp;
+
+ while (drbr_empty(ifp, tx_ring->br) == FALSE &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ ENA_RING_MTX_LOCK(tx_ring);
+ ena_start_xmit(tx_ring);
+ ENA_RING_MTX_UNLOCK(tx_ring);
+ }
+}
+
+static int
+ena_mq_start(if_t ifp, struct mbuf *m)
+{
+ struct ena_adapter *adapter = ifp->if_softc;
+ struct ena_ring *tx_ring;
+ int ret, is_drbr_empty;
+ uint32_t i;
+
+ if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return (ENODEV);
+
+	/*
+	 * Determine which Tx queue to use. If everything is set up
+	 * correctly, it should map to the same bucket as the CPU we are
+	 * currently running on, which should improve performance.
+	 */
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+#ifdef RSS
+ if (rss_hash2bucket(m->m_pkthdr.flowid,
+ M_HASHTYPE_GET(m), &i) == 0) {
+ i = i % adapter->num_queues;
+
+ } else
+#endif
+ {
+ i = m->m_pkthdr.flowid % adapter->num_queues;
+ }
+ } else {
+ i = curcpu % adapter->num_queues;
+ }
+ tx_ring = &adapter->tx_ring[i];
+
+ /* Check if drbr is empty before putting packet */
+ is_drbr_empty = drbr_empty(ifp, tx_ring->br);
+ ret = drbr_enqueue(ifp, tx_ring->br, m);
+ if (ret) {
+ taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
+ return (ret);
+ }
+
+ if (is_drbr_empty && ENA_RING_MTX_TRYLOCK(tx_ring)) {
+ ena_start_xmit(tx_ring);
+ ENA_RING_MTX_UNLOCK(tx_ring);
+ } else {
+ taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
+ }
+
+ return (0);
+}
+
+static void
+ena_qflush(if_t ifp)
+{
+ struct ena_adapter *adapter = ifp->if_softc;
+ struct ena_ring *tx_ring = adapter->tx_ring;
+ int i;
+
+ for(i = 0; i < adapter->num_queues; ++i, ++tx_ring)
+ if (drbr_empty(ifp, tx_ring->br) == FALSE) {
+ ENA_RING_MTX_LOCK(tx_ring);
+ drbr_flush(ifp, tx_ring->br);
+ ENA_RING_MTX_UNLOCK(tx_ring);
+ }
+
+ if_qflush(ifp);
+
+ return;
+}
+
+static int ena_calc_io_queue_num(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_cq_num, io_queue_num;
+
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_cq_num = get_feat_ctx->max_queues.max_sq_num;
+
+ io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
+ io_queue_num = min_t(int, io_queue_num, io_sq_num);
+ io_queue_num = min_t(int, io_queue_num, io_cq_num);
+	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
+ io_queue_num = min_t(int, io_queue_num,
+ pci_msix_count(adapter->pdev) - 1);
+#ifdef RSS
+ io_queue_num = min_t(int, io_queue_num, rss_getnumbuckets());
+#endif
+
+ return io_queue_num;
+}
+
+static int ena_calc_queue_size(struct ena_adapter *adapter,
+ uint16_t *max_tx_sgl_size, uint16_t *max_rx_sgl_size,
+ struct ena_com_dev_get_features_ctx *feat)
+{
+ uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
+ uint32_t v;
+ uint32_t q;
+
+ queue_size = min_t(uint32_t, queue_size,
+ feat->max_queues.max_cq_depth);
+ queue_size = min_t(uint32_t, queue_size,
+ feat->max_queues.max_sq_depth);
+
+ /* round down to the nearest power of 2 */
+ v = queue_size;
+ while (v != 0) {
+ if (powerof2(queue_size))
+ break;
+ v /= 2;
+ q = rounddown2(queue_size, v);
+ if (q != 0) {
+ queue_size = q;
+ break;
+ }
+ }
+
+ if (unlikely(!queue_size)) {
+ device_printf(adapter->pdev, "Invalid queue size\n");
+ return ENA_COM_FAULT;
+ }
+
+ *max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
+ feat->max_queues.max_packet_tx_descs);
+ *max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
+ feat->max_queues.max_packet_rx_descs);
+
+ return queue_size;
+}
+
+static int ena_rss_init_default(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ device_t dev = adapter->pdev;
+ int qid, rc, i;
+
+ rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+ if (unlikely(rc)) {
+ device_printf(dev, "Cannot init RSS\n");
+ goto err_rss_init;
+ }
+
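+	/* Spread the indirection table entries across the available Rx queues */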
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+#ifdef RSS
+ qid = rss_get_indirection_to_bucket(i);
+ qid = qid % adapter->num_queues;
+#else
+ qid = i % adapter->num_queues;
+#endif
+ rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ ENA_IO_RXQ_IDX(qid));
+ if (unlikely(rc && (rc != EPERM))) {
+ device_printf(dev, "Cannot fill indirect table\n");
+ goto err_fill_indir;
+ }
+ }
+
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ if (unlikely(rc && (rc != EPERM))) {
+ device_printf(dev, "Cannot fill hash function\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_set_default_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != EPERM))) {
+ device_printf(dev, "Cannot fill hash control\n");
+ goto err_fill_indir;
+ }
+
+ return (0);
+
+err_fill_indir:
+ ena_com_rss_destroy(ena_dev);
+err_rss_init:
+ return (rc);
+}
+
+static void
+ena_rss_init_default_deferred(void *arg)
+{
+ struct ena_adapter *adapter;
+ devclass_t dc;
+ int max;
+ int rc;
+
+ dc = devclass_find("ena");
+ if (dc == NULL) {
+ ena_trace(ENA_DBG, "No devclass ena\n");
+ return;
+ }
+
+ max = devclass_get_maxunit(dc);
+ while (max-- >= 0) {
+ adapter = devclass_get_softc(dc, max);
+ if (adapter != NULL) {
+ rc = ena_rss_init_default(adapter);
+ adapter->rss_support = true;
+ if (rc) {
+ device_printf(adapter->pdev,
+ "WARNING: RSS was not properly initialized,"
+				    " it will affect bandwidth\n");
+ adapter->rss_support = false;
+ }
+ }
+ }
+}
+SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
+
+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_host_info *host_info;
+ int rc;
+
+ /* Allocate only the host info */
+ rc = ena_com_allocate_host_info(ena_dev);
+ if (rc) {
+ ena_trace(ENA_ALERT, "Cannot allocate host info\n");
+ return;
+ }
+
+ host_info = ena_dev->host_attr.host_info;
+
+ host_info->os_type = ENA_ADMIN_OS_FREEBSD;
+ host_info->kernel_ver = osreldate;
+
+ sprintf(host_info->kernel_ver_str, "%d", osreldate);
+ host_info->os_dist = 0;
+ strncpy(host_info->os_dist_str, osrelease,
+ sizeof(host_info->os_dist_str) - 1);
+
+ host_info->driver_version =
+ (DRV_MODULE_VER_MAJOR) |
+ (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+ rc = ena_com_set_host_attributes(ena_dev);
+ if (rc) {
+ if (rc == EPERM)
+ ena_trace(ENA_WARNING, "Cannot set host attributes\n");
+ else
+ ena_trace(ENA_ALERT, "Cannot set host attributes\n");
+
+ goto err;
+ }
+
+ return;
+
+err:
+ ena_com_delete_host_info(ena_dev);
+}
+
+static int
+ena_device_init(struct ena_adapter *adapter, device_t pdev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
+{
+ struct ena_com_dev* ena_dev = adapter->ena_dev;
+ bool readless_supported;
+ uint32_t aenq_groups;
+ int dma_width;
+ int rc;
+
+ rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ if (rc) {
+ device_printf(pdev, "failed to init mmio read less\n");
+ return rc;
+ }
+
+ /*
+	 * The PCIe configuration space revision id indicates whether mmio
+	 * register read is disabled
+ */
+ readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
+ ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ device_printf(pdev, "Can not reset device\n");
+ goto err_mmio_read_less;
+ }
+
+ rc = ena_com_validate_version(ena_dev);
+ if (rc) {
+ device_printf(pdev, "device version is too low\n");
+ goto err_mmio_read_less;
+ }
+
+ dma_width = ena_com_get_dma_width(ena_dev);
+ if (dma_width < 0) {
+ device_printf(pdev, "Invalid dma width value %d", dma_width);
+ rc = dma_width;
+ goto err_mmio_read_less;
+ }
+ adapter->dma_width = dma_width;
+
+ /* ENA admin level init */
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ if (rc) {
+ device_printf(pdev,
+ "Can not initialize ena admin queue with device\n");
+ goto err_mmio_read_less;
+ }
+
+ /*
+	 * To enable MSI-X interrupts, the driver needs to know the number
+	 * of queues, so it uses polling mode to retrieve this information.
+ */
+ ena_com_set_admin_polling_mode(ena_dev, true);
+
+ ena_config_host_info(ena_dev);
+
+ /* Get Device Attributes */
+ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
+ if (rc) {
+ device_printf(pdev,
+ "Cannot get attribute for ena device rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
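+	/*
+	 * Enable only the AENQ groups that both the driver and the device
+	 * support.
+	 */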
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
+ return 0;
+
+err_admin_init:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+err_mmio_read_less:
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ return rc;
+}
+
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
+ int io_vectors)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc;
+
+ rc = ena_enable_msix(adapter);
+ if (rc) {
+ device_printf(adapter->pdev, "Error with MSI-X enablement\n");
+ return rc;
+ }
+
+ ena_setup_mgmnt_intr(adapter);
+
+ rc = ena_request_mgmnt_irq(adapter);
+ if (rc) {
+ device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
+ goto err_disable_msix;
+ }
+
+ ena_com_set_admin_polling_mode(ena_dev, false);
+
+ ena_com_admin_aenq_enable(ena_dev);
+
+ return 0;
+
+err_disable_msix:
+ ena_disable_msix(adapter);
+
+ return rc;
+}
+
+/* Function called on ENA_ADMIN_KEEP_ALIVE event */
+static void ena_keep_alive_wd(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ sbintime_t stime;
+
+ stime = getsbinuptime();
+ atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
+}
+
+/* Check for keep alive expiration */
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ sbintime_t timestamp, time;
+
+ if (adapter->wd_active == 0)
+ return;
+
+ if (adapter->keep_alive_timeout == 0)
+ return;
+
+ timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
+ time = getsbinuptime() - timestamp;
+ if (unlikely(time > adapter->keep_alive_timeout)) {
+ device_printf(adapter->pdev,
+ "Keep alive watchdog timeout.\n");
+ counter_u64_add(adapter->dev_stats.wd_expired, 1);
+ adapter->trigger_reset = true;
+ }
+}
+
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
+ device_printf(adapter->pdev,
+ "ENA admin queue is not in running state!\n");
+ counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
+ adapter->trigger_reset = true;
+ }
+}
+
+/*
+ * Check for TX packets which were not completed on time.
+ * The timeout is defined by "missing_tx_timeout".
+ * A reset will be performed if the number of uncompleted
+ * transactions exceeds "missing_tx_threshold".
+ */
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
+ struct ena_ring *tx_ring;
+ struct ena_tx_buffer *tx_info;
+ struct bintime curtime, time;
+ int i, j, budget, missed_tx;
+
+	/* Make sure the device is not being brought down by another process */
+ rmb();
+
+ if (!adapter->up)
+ return;
+
+ if (adapter->trigger_reset)
+ return;
+
+ if (adapter->missing_tx_timeout == 0)
+ return;
+
+ budget = adapter->missing_tx_max_queues;
+ getbinuptime(&curtime);
+
+ for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+
+ missed_tx = 0;
+
+ for (j = 0; j < tx_ring->ring_size; j++) {
+ tx_info = &tx_ring->tx_buffer_info[j];
+
+ if (!bintime_isset(&tx_info->timestamp))
+ continue;
+
+ time = curtime;
+ bintime_sub(&time, &tx_info->timestamp);
+
+ /* Check again if packet is still waiting */
+ if (bintime_isset(&tx_info->timestamp) && unlikely(
+ bttosbt(time) > adapter->missing_tx_timeout)) {
+ if (tx_info->print_once)
+ device_printf(adapter->pdev,
+ "Found a Tx that wasn't completed "
+ "on time, qid %d, index %d.\n",
+ tx_ring->qid, j);
+
+ tx_info->print_once = false;
+ missed_tx++;
+
+ if (unlikely(missed_tx >
+ adapter->missing_tx_threshold)) {
+ device_printf(adapter->pdev,
+					    "The number of lost tx completions "
+ "is above the threshold (%d > %d). "
+ "Reset the device\n", missed_tx,
+ adapter->missing_tx_threshold);
+ adapter->trigger_reset = true;
+ return;
+ }
+ }
+ }
+
+ budget--;
+ if (!budget) {
+ i++;
+ break;
+ }
+ }
+
+ adapter->next_monitored_tx_qid = i % adapter->num_queues;
+}
+
+
+static void
+ena_timer_service(void *data)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_admin_host_info *host_info =
+ adapter->ena_dev->host_attr.host_info;
+
+ check_for_missing_keep_alive(adapter);
+
+ check_for_admin_com_state(adapter);
+
+ check_for_missing_tx_completions(adapter);
+
+ if (host_info)
+ ena_update_host_info(host_info, adapter->ifp);
+
+ if (unlikely(adapter->trigger_reset)) {
+ device_printf(adapter->pdev, "Trigger reset is on\n");
+ taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
+ return;
+ }
+
+ /*
+ * Schedule another timeout one second from now.
+ */
+ callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
+}
+
+static void
+ena_reset_task(void *arg, int pending)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ bool dev_up;
+ int rc;
+
+ if (unlikely(!adapter->trigger_reset)) {
+ device_printf(adapter->pdev,
+ "device reset scheduled but trigger_reset is off\n");
+ return;
+ }
+
+ sx_xlock(&adapter->ioctl_sx);
+
+ callout_drain(&adapter->timer_service);
+
+ dev_up = adapter->up;
+
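+	/*
+	 * Destroy phase: stop admin queue processing, release IRQs and
+	 * bring the device down before re-initializing it.
+	 */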
+ ena_com_set_admin_running_state(ena_dev, false);
+ ena_free_mgmnt_irq(adapter);
+ ena_down(adapter);
+ ena_com_dev_reset(ena_dev);
+ ena_disable_msix(adapter);
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ adapter->trigger_reset = false;
+
+ /* Finished destroy part. Restart the device */
+ rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx,
+ &adapter->wd_active);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "ENA device init failed! (err: %d)\n", rc);
+ goto err_dev_free;
+ }
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter,
+ adapter->num_queues);
+ if (rc) {
+ device_printf(adapter->pdev, "Enable MSI-X failed\n");
+ goto err_com_free;
+ }
+
+	/* If the interface was up before the reset, bring it up again */
+ if (dev_up) {
+ rc = ena_up(adapter);
+ if (rc) {
+ device_printf(adapter->pdev,
+ "Failed to create I/O queues\n");
+ goto err_msix_free;
+ }
+ }
+
+ callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
+ ena_timer_service, (void *)adapter, 0);
+
+ sx_unlock(&adapter->ioctl_sx);
+
+ return;
+
+err_msix_free:
+ ena_com_dev_reset(ena_dev);
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_com_free:
+ ena_com_admin_destroy(ena_dev);
+err_dev_free:
+ device_printf(adapter->pdev, "ENA reset failed!\n");
+ adapter->running = false;
+ sx_unlock(&adapter->ioctl_sx);
+}
+
+/**
+ * ena_attach - Device Initialization Routine
+ * @pdev: device information struct
+ *
+ * Returns 0 on success, or an error code on failure.
+ *
+ * ena_attach initializes an adapter identified by a device structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int
+ena_attach(device_t pdev)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ static int version_printed;
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev = NULL;
+ uint16_t tx_sgl_size = 0;
+ uint16_t rx_sgl_size = 0;
+ int io_queue_num;
+ int queue_size;
+ int rc;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children;
+
+ adapter = device_get_softc(pdev);
+ adapter->pdev = pdev;
+ ctx = device_get_sysctl_ctx(pdev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(pdev));
+
+ mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF);
+ sx_init(&adapter->ioctl_sx, "ENA ioctl sx");
+
+ /* Sysctl calls for Watchdog service */
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "wd_active",
+ CTLFLAG_RWTUN, &adapter->wd_active, 0,
+ "Watchdog is active");
+
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "keep_alive_timeout",
+ CTLFLAG_RWTUN, &adapter->keep_alive_timeout,
+ "Timeout for Keep Alive messages");
+
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "missing_tx_timeout",
+ CTLFLAG_RWTUN, &adapter->missing_tx_timeout,
+ "Timeout for TX completion");
+
+ SYSCTL_ADD_U32(ctx, children, OID_AUTO, "missing_tx_max_queues",
+ CTLFLAG_RWTUN, &adapter->missing_tx_max_queues, 0,
+ "Number of TX queues to check per run");
+
+ SYSCTL_ADD_U32(ctx, children, OID_AUTO, "missing_tx_threshold",
+ CTLFLAG_RWTUN, &adapter->missing_tx_threshold, 0,
+	    "Maximum number of timed out packets");
+
+ /* Set up the timer service */
+ callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0);
+ adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
+ adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
+ adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
+ adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
+
+ if (version_printed++ == 0)
+ device_printf(pdev, "%s\n", ena_version);
+
+ rc = ena_allocate_pci_resources(adapter);
+ if (rc) {
+ device_printf(pdev, "PCI resource allocation failed!\n");
+ ena_free_pci_resources(adapter);
+ goto err_pci_res;
+ }
+
+ /* Allocate memory for ena_dev structure */
+ ena_dev = ENA_MEM_ALLOC(pdev, sizeof(struct ena_com_dev));
+ if (!ena_dev) {
+ device_printf(pdev, "allocating ena_dev failed\n");
+ rc = ENOMEM;
+ goto err_select_region;
+ }
+
+ adapter->ena_dev = ena_dev;
+ ena_dev->dmadev = pdev;
+ ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
+ M_WAITOK | M_ZERO);
+
+ /* Store register resources */
+ ((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
+ rman_get_bustag(adapter->registers);
+ ((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
+ rman_get_bushandle(adapter->registers);
+
+ if (((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0) {
+ device_printf(pdev, "failed to pmap registers bar\n");
+ rc = ENXIO;
+ goto err_dev_free;
+ }
+
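+	/* Place the Tx submission queues in host memory */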
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+
+ /* Device initialization */
+ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
+ if (rc) {
+ device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
+ rc = ENXIO;
+ goto err_bus_free;
+ }
+
+ adapter->keep_alive_timestamp = getsbinuptime();
+
+ adapter->tx_offload_cap = get_feat_ctx.offload.tx;
+
+	/* Make sure that the interface is marked as not up */
+ adapter->up = false;
+
+ memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
+ ETHER_ADDR_LEN);
+
+ adapter->small_copy_len =
+ ENA_DEFAULT_SMALL_PACKET_LEN;
+
+ /* calculate IO queue number to create */
+ io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx);
+
+ ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
+ io_queue_num);
+ adapter->num_queues = io_queue_num;
+
+	/* calculate ring sizes */
+ queue_size = ena_calc_queue_size(adapter,&tx_sgl_size,
+ &rx_sgl_size, &get_feat_ctx);
+ if ((queue_size <= 0) || (io_queue_num <= 0)) {
+ rc = ENA_COM_FAULT;
+ goto err_com_free;
+ }
+
+ adapter->tx_ring_size = queue_size;
+ adapter->rx_ring_size = queue_size;
+
+ adapter->max_tx_sgl_size = tx_sgl_size;
+ adapter->max_rx_sgl_size = rx_sgl_size;
+
+ /* set up dma tags for rx and tx buffers */
+ rc = ena_setup_tx_dma_tag(adapter);
+ if (rc)
+ goto dma_tx_err;
+
+ rc = ena_setup_rx_dma_tag(adapter);
+ if (rc)
+ goto dma_rx_err;
+
+ /* initialize rings basic information */
+	device_printf(pdev, "initialize %d io queues\n", io_queue_num);
+ rc = ena_init_io_rings(adapter);
+ if (rc) {
+ device_printf(pdev,"Error with initialization of IO rings\n");
+ goto err_io_init;
+ }
+
+ /* setup network interface */
+ rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
+ if (rc) {
+ device_printf(pdev,"Error with network interface setup\n");
+ goto err_com_free;
+ }
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ if (rc) {
+ device_printf(pdev,
+ "Failed to enable and set the admin interrupts\n");
+ goto err_ifp_free;
+ }
+
+ /* Initialize reset task queue */
+ TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
+ adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
+ M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
+ if (adapter->reset_tq == NULL) {
+ device_printf(adapter->pdev,
+ "Unable to create reset task queue\n");
+ goto err_reset_tq;
+ }
+ taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
+ "%s rstq", device_get_nameunit(adapter->pdev));
+
+ /* Initialize task queue responsible for updating hw stats */
+ TASK_INIT(&adapter->stats_task, 0, ena_update_hw_stats, adapter);
+ adapter->stats_tq = taskqueue_create_fast("ena_stats_update",
+ M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->stats_tq);
+ if (adapter->stats_tq == NULL) {
+ device_printf(adapter->pdev,
+ "Unable to create taskqueue for updating hw stats\n");
+ goto err_stats_tq;
+ }
+ taskqueue_start_threads(&adapter->stats_tq, 1, PI_REALTIME,
+ "%s stats tq", device_get_nameunit(adapter->pdev));
+
+ /* Initialize statistics */
+ ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
+ sizeof(struct ena_stats_dev));
+ ena_update_stats_counters(adapter);
+ ena_sysctl_add_nodes(adapter);
+
+ /* Tell the stack that the interface is not active */
+ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
+
+ adapter->running = true;
+ return (0);
+
+err_stats_tq:
+ taskqueue_free(adapter->reset_tq);
+err_reset_tq:
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_ifp_free:
+ if_detach(adapter->ifp);
+ if_free(adapter->ifp);
+err_com_free:
+ ena_free_all_io_rings_resources(adapter);
+err_io_init:
+ ena_free_rx_dma_tag(adapter);
+dma_rx_err:
+ ena_free_tx_dma_tag(adapter);
+dma_tx_err:
+ ena_com_admin_destroy(ena_dev);
+ ena_com_delete_host_info(ena_dev);
+err_bus_free:
+ free(ena_dev->bus, M_DEVBUF);
+err_dev_free:
+ free(ena_dev, M_DEVBUF);
+err_select_region:
+ ena_free_pci_resources(adapter);
+err_pci_res:
+ return (rc);
+}
+
+/**
+ * ena_detach - Device Removal Routine
+ * @pdev: device information struct
+ *
+ * ena_detach is called by the device subsystem to alert the driver
+ * that it should release a PCI device.
+ **/
+static int
+ena_detach(device_t pdev)
+{
+ struct ena_adapter *adapter = device_get_softc(pdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc;
+
+ /* Make sure VLANs are not using the driver */
+ if (adapter->ifp->if_vlantrunk != NULL) {
+ device_printf(adapter->pdev, "VLAN is in use, detach first\n");
+ return (EBUSY);
+ }
+
+ /* Free reset task and callout */
+ callout_drain(&adapter->timer_service);
+ while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
+ taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
+ taskqueue_free(adapter->reset_tq);
+
+ sx_xlock(&adapter->ioctl_sx);
+ ena_down(adapter);
+ sx_unlock(&adapter->ioctl_sx);
+
+ taskqueue_free(adapter->stats_tq);
+
+ if (adapter->ifp != NULL) {
+ ether_ifdetach(adapter->ifp);
+ if_free(adapter->ifp);
+ }
+
+ ena_free_all_io_rings_resources(adapter);
+
+ ena_free_counters((counter_u64_t *)&adapter->dev_stats,
+ sizeof(struct ena_stats_dev));
+
+ if (adapter->rss_support)
+ ena_com_rss_destroy(ena_dev);
+
+ rc = ena_free_rx_dma_tag(adapter);
+ if (rc)
+ device_printf(adapter->pdev,
+ "Failed to destroy RX DMA tag: mappings still in use\n");
+
+ rc = ena_free_tx_dma_tag(adapter);
+ if (rc)
+ device_printf(adapter->pdev,
+ "Failed to destroy TX DMA tag: mappings still in use\n");
+
+ /* Reset the device only if the device is running. */
+ if (adapter->running)
+ ena_com_dev_reset(ena_dev);
+
+ ena_com_delete_host_info(ena_dev);
+
+ ena_com_admin_destroy(ena_dev);
+
+ ena_free_irqs(adapter);
+
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ ena_free_pci_resources(adapter);
+
+ mtx_destroy(&adapter->global_mtx);
+ sx_destroy(&adapter->ioctl_sx);
+
+ if (ena_dev->bus != NULL)
+ free(ena_dev->bus, M_DEVBUF);
+
+ if (ena_dev != NULL)
+ free(ena_dev, M_DEVBUF);
+
+ return (bus_generic_detach(pdev));
+}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+/**
+ * ena_update_on_link_change:
+ * Notify the network interface about the change in link status
+ **/
+static void
+ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_link_change_desc *aenq_desc;
+ int status;
+ if_t ifp;
+
+ aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ ifp = adapter->ifp;
+ status = aenq_desc->flags &
+ ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+
+ if (status != 0) {
+ device_printf(adapter->pdev, "link is UP\n");
+ if_link_state_change(ifp, LINK_STATE_UP);
+ } else {
+ device_printf(adapter->pdev, "link is DOWN\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
+
+ adapter->link_status = status;
+
+ return;
+}
+
+/**
+ * This handler is called for an unknown event group or for events with no implemented handler
+ **/
+static void
+unimplemented_aenq_handler(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ return;
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
+
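The table above is consumed by the shared ena-com layer, which routes each AENQ entry by its group id and falls back to the stub handler. A minimal sketch of that dispatch, assuming the ena_aenq_handler typedef, the ENA_MAX_HANDLERS bound and the aenq_common_desc.group field from the ena-com headers; this is illustrative, not the actual ena-com code:

static void
example_aenq_dispatch(void *data, struct ena_admin_aenq_entry *aenq_e)
{
	ena_aenq_handler handler;
	uint16_t group;

	/* Route the event by its group id, falling back to the stub. */
	group = aenq_e->aenq_common_desc.group;
	if (group < ENA_MAX_HANDLERS &&
	    aenq_handlers.handlers[group] != NULL)
		handler = aenq_handlers.handlers[group];
	else
		handler = aenq_handlers.unimplemented_handler;

	handler(data, aenq_e);
}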
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ena_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ena_probe),
+ DEVMETHOD(device_attach, ena_attach),
+ DEVMETHOD(device_detach, ena_detach),
+ DEVMETHOD_END
+};
+
+static driver_t ena_driver = {
+ "ena", ena_methods, sizeof(struct ena_adapter),
+};
+
+devclass_t ena_devclass;
+DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
+MODULE_DEPEND(ena, pci, 1, 1, 1);
+MODULE_DEPEND(ena, ether, 1, 1, 1);
+
+/*********************************************************************/
diff --git a/sys/dev/ena/ena.h b/sys/dev/ena/ena.h
new file mode 100644
index 0000000..22701d8
--- /dev/null
+++ b/sys/dev/ena/ena.h
@@ -0,0 +1,440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef ENA_H
+#define ENA_H
+
+#include <sys/types.h>
+
+#include "ena-com/ena_com.h"
+#include "ena-com/ena_eth_com.h"
+
+#define DRV_MODULE_VER_MAJOR 0
+#define DRV_MODULE_VER_MINOR 7
+#define DRV_MODULE_VER_SUBMINOR 0
+
+#define DRV_MODULE_NAME "ena"
+
+#ifndef DRV_MODULE_VERSION
+#define DRV_MODULE_VERSION \
+ __XSTRING(DRV_MODULE_VER_MAJOR) "." \
+ __XSTRING(DRV_MODULE_VER_MINOR) "." \
+ __XSTRING(DRV_MODULE_VER_SUBMINOR)
+#endif
+#define DEVICE_NAME "Elastic Network Adapter (ENA)"
+#define DEVICE_DESC "ENA adapter"
+
+/* Calculate DMA mask - width for ena cannot exceed 48, so it is safe */
+#define ENA_DMA_BIT_MASK(x) ((1ULL << (x)) - 1ULL)
+
+/* 1 for AENQ + ADMIN */
+#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
+
+#define ENA_REG_BAR 0
+#define ENA_MEM_BAR 2
+
+#define ENA_BUS_DMA_SEGS 32
+
+#define ENA_DEFAULT_RING_SIZE 1024
+#define ENA_DEFAULT_SMALL_PACKET_LEN 128
+#define ENA_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE 1536
+
+#define ENA_RX_REFILL_THRESH_DEVIDER 8
+
+#define ENA_MAX_PUSH_PKT_SIZE 128
+
+#define ENA_NAME_MAX_LEN 20
+#define ENA_IRQNAME_SIZE 40
+
+#define ENA_PKT_MAX_BUFS 19
+#define ENA_STALL_TIMEOUT 100
+
+#define ENA_RX_RSS_TABLE_LOG_SIZE 7
+#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
+
+#define ENA_HASH_KEY_SIZE 40
+
+#define ENA_DMA_BITS_MASK 40
+#define ENA_MAX_FRAME_LEN 10000
+#define ENA_MIN_FRAME_LEN 60
+#define ENA_RX_HASH_KEY_NUM 10
+#define ENA_RX_THASH_TABLE_SIZE (1 << 8)
+
+#define ENA_TX_CLEANUP_TRESHOLD 128
+
+#define DB_THRESHOLD 64
+
+#define TX_COMMIT 32
+ /*
+ * TX budget for cleaning. It should be half of the RX budget to reduce the
+ * number of TCP retransmissions.
+ */
+#define TX_BUDGET 128
+/* RX cleanup budget. A value of -1 stands for an unlimited budget. */
+#define RX_BUDGET 256
+/*
+ * How many times we can repeat cleanup in the io irq handling routine if the
+ * RX or TX budget was depleted.
+ */
+#define CLEAN_BUDGET 8
+
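These budgets bound the work done per interrupt: each TX and RX cleanup pass stops after TX_BUDGET or RX_BUDGET packets, and the whole pass repeats at most CLEAN_BUDGET times while either budget keeps being exhausted. A simplified sketch of that loop; the cleanup helpers and their return convention are stand-ins, not the driver's actual routines:

/* Stub cleanup helpers standing in for the driver's real routines. */
static int example_tx_cleanup(struct ena_ring *r) { (void)r; return (0); }
static int example_rx_cleanup(struct ena_ring *r) { (void)r; return (0); }

static void
example_io_cleanup(struct ena_que *que)
{
	int i, tx_done, rx_done;

	for (i = 0; i < CLEAN_BUDGET; i++) {
		/* Each pass cleans at most TX_BUDGET / RX_BUDGET packets. */
		tx_done = example_tx_cleanup(que->tx_ring);
		rx_done = example_rx_cleanup(que->rx_ring);

		/* Neither budget exhausted: the rings are drained, stop. */
		if (tx_done < TX_BUDGET && rx_done < RX_BUDGET)
			break;
	}
}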
+#define RX_IRQ_INTERVAL 20
+#define TX_IRQ_INTERVAL 50
+
+#define ENA_MAX_MTU 9216
+#define ENA_TSO_MAXSIZE 65536
+#define ENA_TSO_NSEGS ENA_PKT_MAX_BUFS
+#define ENA_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+
+#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+
+#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
+ (((idx) + (n)) & ((ring_size) - 1))
+
+#define ENA_IO_TXQ_IDX(q) (2 * (q))
+#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+
+#define ENA_MGMNT_IRQ_IDX 0
+#define ENA_IO_IRQ_FIRST_IDX 1
+#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+
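Worked values for the indexing macros above, assuming a 1024-entry ring and IO queue 3; this is plain arithmetic on the definitions, shown only for clarity:

/* Ring indices wrap with a power-of-two mask; queue q uses SQ/CQ pair
 * 2q (TX) and 2q+1 (RX), and MSI-X vector q+1 (vector 0 is management). */
uint16_t next_idx = ENA_TX_RING_IDX_NEXT(1023, 1024);   /* == 0 */
uint16_t add_idx  = ENA_RX_RING_IDX_ADD(1020, 8, 1024); /* == 4 */
int txq_idx       = ENA_IO_TXQ_IDX(3);                  /* == 6 */
int rxq_idx       = ENA_IO_RXQ_IDX(3);                  /* == 7 */
int irq_idx       = ENA_IO_IRQ_IDX(3);                  /* == 4 */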
+/*
+ * The ENA device should send a keep-alive message every second.
+ * We wait for 6 seconds just to be on the safe side.
+ */
+#define DEFAULT_KEEP_ALIVE_TO (SBT_1S * 6)
+
+/* Time, as an sbintime_t interval, before concluding the transmitter is hung. */
+#define DEFAULT_TX_CMP_TO (SBT_1S * 5)
+
+/* Number of queues to check for missing TX completions per timer tick */
+#define DEFAULT_TX_MONITORED_QUEUES (4)
+
+/* Max number of timed-out packets before device reset */
+#define DEFAULT_TX_CMP_THRESHOLD (128)
+
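A hedged sketch of how the keep-alive timeout is meant to be applied by the timer service; the adapter fields match the structure defined later in this header, but the check itself is simplified and illustrative:

static bool
example_keep_alive_expired(struct ena_adapter *adapter)
{
	sbintime_t deadline;

	/* keep_alive_timestamp is refreshed by the keep-alive AENQ event. */
	deadline = adapter->keep_alive_timestamp + DEFAULT_KEEP_ALIVE_TO;

	/* No keep-alive seen for 6 seconds: the watchdog should fire. */
	return (getsbinuptime() > deadline);
}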
+/*
+ * Supported PCI vendor and devices IDs
+ */
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+
+#define PCI_DEV_ID_ENA_PF 0x0ec2
+#define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2
+#define PCI_DEV_ID_ENA_VF 0xec20
+#define PCI_DEV_ID_ENA_LLQ_VF 0xec21
+
+struct msix_entry {
+ int entry;
+ int vector;
+};
+
+typedef struct _ena_vendor_info_t {
+ unsigned int vendor_id;
+ unsigned int device_id;
+ unsigned int index;
+} ena_vendor_info_t;
+
+struct ena_irq {
+ /* Interrupt resources */
+ struct resource *res;
+ driver_intr_t *handler;
+ void *data;
+ void *cookie;
+ unsigned int vector;
+ bool requested;
+ int cpu;
+ char name[ENA_IRQNAME_SIZE];
+};
+
+struct ena_que {
+ struct ena_adapter *adapter;
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+ uint32_t id;
+ int cpu;
+};
+
+struct ena_tx_buffer {
+ struct mbuf *mbuf;
+ /* Number of ena descriptors for this specific mbuf
+ * (includes data descriptors and metadata descriptors) */
+ unsigned int tx_descs;
+ /* # of buffers used by this mbuf */
+ unsigned int num_of_bufs;
+ bus_dmamap_t map;
+
+ /* Used to detect missing tx packets */
+ struct bintime timestamp;
+ bool print_once;
+
+ struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
+} __aligned(CACHE_LINE_SIZE);
+
+struct ena_rx_buffer {
+ struct mbuf *mbuf;
+ bus_dmamap_t map;
+ struct ena_com_buf ena_buf;
+} __aligned(CACHE_LINE_SIZE);
+
+
+struct ena_stats_tx {
+ counter_u64_t cnt;
+ counter_u64_t bytes;
+ counter_u64_t queue_stop;
+ counter_u64_t prepare_ctx_err;
+ counter_u64_t queue_wakeup;
+ counter_u64_t dma_mapping_err;
+ /* Not counted */
+ counter_u64_t unsupported_desc_num;
+ /* Not counted */
+ counter_u64_t napi_comp;
+ /* Not counted */
+ counter_u64_t tx_poll;
+ counter_u64_t doorbells;
+ counter_u64_t missing_tx_comp;
+ counter_u64_t bad_req_id;
+ counter_u64_t collapse;
+ counter_u64_t collapse_err;
+};
+
+struct ena_stats_rx {
+ counter_u64_t cnt;
+ counter_u64_t bytes;
+ counter_u64_t refil_partial;
+ counter_u64_t bad_csum;
+ /* Not counted */
+ counter_u64_t page_alloc_fail;
+ counter_u64_t mbuf_alloc_fail;
+ counter_u64_t dma_mapping_err;
+ counter_u64_t bad_desc_num;
+ /* Not counted */
+ counter_u64_t small_copy_len_pkt;
+};
+
+
+struct ena_ring {
+ /* Holds the free request IDs for TX out-of-order completions */
+ uint16_t *free_tx_ids;
+ struct ena_com_dev *ena_dev;
+ struct ena_adapter *adapter;
+ struct ena_com_io_cq *ena_com_io_cq;
+ struct ena_com_io_sq *ena_com_io_sq;
+
+ /* The maximum length the driver can push to the device (For LLQ) */
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ uint16_t rx_small_copy_len;
+ uint16_t qid;
+ uint16_t mtu;
+ uint8_t tx_max_header_size;
+
+ struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
+ uint32_t smoothed_interval;
+ enum ena_intr_moder_level moder_tbl_idx;
+
+ struct ena_que *que;
+ struct lro_ctrl lro;
+
+ uint16_t next_to_use;
+ uint16_t next_to_clean;
+
+ union {
+ struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
+ struct ena_rx_buffer *rx_buffer_info; /* context of rx packet */
+ };
+ int ring_size; /* number of tx/rx_buffer_info's entries */
+
+ struct buf_ring *br; /* only for TX */
+ struct mtx ring_mtx;
+ char mtx_name[16];
+ struct task enqueue_task;
+ struct taskqueue *enqueue_tq;
+ struct task cmpl_task;
+ struct taskqueue *cmpl_tq;
+
+ union {
+ struct ena_stats_tx tx_stats;
+ struct ena_stats_rx rx_stats;
+ };
+
+} __aligned(CACHE_LINE_SIZE);
+
+struct ena_stats_dev {
+ /* Not counted */
+ counter_u64_t tx_timeout;
+ /* Not counted */
+ counter_u64_t io_suspend;
+ /* Not counted */
+ counter_u64_t io_resume;
+ /* Not counted */
+ counter_u64_t wd_expired;
+ counter_u64_t interface_up;
+ counter_u64_t interface_down;
+ /* Not counted */
+ counter_u64_t admin_q_pause;
+};
+
+struct ena_hw_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+
+ uint64_t rx_drops;
+};
+
+/* Board specific private data structure */
+struct ena_adapter {
+ struct ena_com_dev *ena_dev;
+
+ /* OS defined structs */
+ if_t ifp;
+ device_t pdev;
+ struct ifmedia media;
+
+ /* OS resources */
+ struct resource *memory;
+ struct resource *registers;
+
+ struct mtx global_mtx;
+ struct sx ioctl_sx;
+
+ /* MSI-X */
+ uint32_t msix_enabled;
+ struct msix_entry *msix_entries;
+ int msix_vecs;
+
+ /* DMA tags used throughout the driver for Tx and Rx buffers */
+ bus_dma_tag_t tx_buf_tag;
+ bus_dma_tag_t rx_buf_tag;
+ int dma_width;
+ /*
+ * RX packets shorter than this length will be copied into the
+ * mbuf header
+ */
+ unsigned int small_copy_len;
+
+ uint16_t max_tx_sgl_size;
+ uint16_t max_rx_sgl_size;
+
+ uint32_t tx_offload_cap;
+
+ /* Tx fast path data */
+ int num_queues;
+
+ unsigned int tx_usecs, rx_usecs; /* Interrupt coalescing */
+
+ unsigned int tx_ring_size;
+ unsigned int rx_ring_size;
+
+ /* RSS */
+ uint8_t rss_ind_tbl[ENA_RX_RSS_TABLE_SIZE];
+ bool rss_support;
+
+ uint32_t msg_enable;
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ /* mdio and phy */
+
+ char name[ENA_NAME_MAX_LEN];
+ bool link_status;
+ bool trigger_reset;
+ bool up;
+ bool running;
+
+ uint32_t wol;
+
+ /* Each queue represents one TX ring and one RX ring */
+ struct ena_que que[ENA_MAX_NUM_IO_QUEUES]
+ __aligned(CACHE_LINE_SIZE);
+
+ /* TX */
+ struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
+ __aligned(CACHE_LINE_SIZE);
+
+ /* RX */
+ struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
+ __aligned(CACHE_LINE_SIZE);
+
+ struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)];
+
+ /* Timer service */
+ struct callout timer_service;
+ sbintime_t keep_alive_timestamp;
+ uint32_t next_monitored_tx_qid;
+ struct task reset_task;
+ struct taskqueue *reset_tq;
+ int wd_active;
+ sbintime_t keep_alive_timeout;
+ sbintime_t missing_tx_timeout;
+ uint32_t missing_tx_max_queues;
+ uint32_t missing_tx_threshold;
+
+ /* Task updating hw stats */
+ struct task stats_task;
+ struct taskqueue *stats_tq;
+
+ /* Statistics */
+ struct ena_stats_dev dev_stats;
+ struct ena_hw_stats hw_stats;
+};
+
+
+#define ENA_DEV_LOCK mtx_lock(&adapter->global_mtx)
+#define ENA_DEV_UNLOCK mtx_unlock(&adapter->global_mtx)
+
+#define ENA_RING_MTX_LOCK(_ring) mtx_lock(&(_ring)->ring_mtx)
+#define ENA_RING_MTX_TRYLOCK(_ring) mtx_trylock(&(_ring)->ring_mtx)
+#define ENA_RING_MTX_UNLOCK(_ring) mtx_unlock(&(_ring)->ring_mtx)
+
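An illustrative use of the per-ring lock wrappers around a transmit enqueue; the drbr call and the EBUSY fallback are assumptions about how a caller might defer work to the enqueue taskqueue, not the driver's actual transmit path:

static int
example_ring_enqueue(struct ena_ring *tx_ring, struct mbuf *m)
{
	int rc;

	/* Do not sleep on the ring lock in the hot path. */
	if (ENA_RING_MTX_TRYLOCK(tx_ring) == 0)
		return (EBUSY);

	rc = drbr_enqueue(tx_ring->adapter->ifp, tx_ring->br, m);
	ENA_RING_MTX_UNLOCK(tx_ring);

	return (rc);
}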
+struct ena_dev *ena_efa_enadev_get(device_t pdev);
+
+int ena_register_adapter(struct ena_adapter *adapter);
+void ena_unregister_adapter(struct ena_adapter *adapter);
+
+int ena_update_stats_counters(struct ena_adapter *adapter);
+
+static inline int ena_mbuf_count(struct mbuf *mbuf)
+{
+ int count = 1;
+
+ while ((mbuf = mbuf->m_next) != NULL)
+ ++count;
+
+ return (count);
+}
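
For example, the chain length returned by ena_mbuf_count() can be compared against the adapter's TX SGL limit to decide whether a chain must be collapsed before DMA mapping; this is a sketch, and the driver's actual defragmentation policy may differ:

static inline struct mbuf *
example_maybe_collapse(struct ena_adapter *adapter, struct mbuf *m)
{
	/* Collapse chains that would exceed the device's SGL limit. */
	if (ena_mbuf_count(m) > adapter->max_tx_sgl_size)
		m = m_collapse(m, M_NOWAIT, adapter->max_tx_sgl_size);

	return (m);
}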
+
+#endif /* !(ENA_H) */
diff --git a/sys/dev/ena/ena_sysctl.c b/sys/dev/ena/ena_sysctl.c
new file mode 100644
index 0000000..859ef2a
--- /dev/null
+++ b/sys/dev/ena/ena_sysctl.c
@@ -0,0 +1,251 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "ena_sysctl.h"
+
+static int ena_sysctl_update_stats(SYSCTL_HANDLER_ARGS);
+static void ena_sysctl_add_stats(struct ena_adapter *);
+
+void
+ena_sysctl_add_nodes(struct ena_adapter *adapter)
+{
+ ena_sysctl_add_stats(adapter);
+}
+
+static void
+ena_sysctl_add_stats(struct ena_adapter *adapter)
+{
+ device_t dev;
+
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+
+ struct ena_hw_stats *hw_stats;
+ struct ena_stats_dev *dev_stats;
+ struct ena_stats_tx *tx_stats;
+ struct ena_stats_rx *rx_stats;
+ struct ena_com_stats_admin *admin_stats;
+
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+
+ struct sysctl_oid *queue_node, *tx_node, *rx_node, *hw_node;
+ struct sysctl_oid *admin_node;
+ struct sysctl_oid_list *queue_list, *tx_list, *rx_list, *hw_list;
+ struct sysctl_oid_list *admin_list;
+
+#define QUEUE_NAME_LEN 32
+ char namebuf[QUEUE_NAME_LEN];
+ int i;
+
+ dev = adapter->pdev;
+
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ child = SYSCTL_CHILDREN(tree);
+
+ tx_ring = adapter->tx_ring;
+ rx_ring = adapter->rx_ring;
+
+ hw_stats = &adapter->hw_stats;
+ dev_stats = &adapter->dev_stats;
+ admin_stats = &adapter->ena_dev->admin_queue.stats;
+
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "tx_timeout",
+ CTLFLAG_RD, &dev_stats->tx_timeout,
+ "Driver TX timeouts");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "io_suspend",
+ CTLFLAG_RD, &dev_stats->io_suspend,
+ "IO queue suspends");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "io_resume",
+ CTLFLAG_RD, &dev_stats->io_resume,
+ "IO queue resumes");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "wd_expired",
+ CTLFLAG_RD, &dev_stats->wd_expired,
+ "Watchdog expiry count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_up",
+ CTLFLAG_RD, &dev_stats->interface_up,
+ "Network interface up count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_down",
+ CTLFLAG_RD, &dev_stats->interface_down,
+ "Network interface down count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "admin_q_pause",
+ CTLFLAG_RD, &dev_stats->admin_q_pause,
+ "Admin queue pauses");
+
+ for (i = 0; i < adapter->num_queues; ++i, ++tx_ring, ++rx_ring) {
+ snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
+ namebuf, CTLFLAG_RD, NULL, "Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ /* TX specific stats */
+ tx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
+ "tx_ring", CTLFLAG_RD, NULL, "TX ring");
+ tx_list = SYSCTL_CHILDREN(tx_node);
+
+ tx_stats = &tx_ring->tx_stats;
+
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "count", CTLFLAG_RD,
+ &tx_stats->cnt, "Packets sent");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "bytes", CTLFLAG_RD,
+ &tx_stats->bytes, "Bytes sent");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "prepare_ctx_err", CTLFLAG_RD,
+ &tx_stats->prepare_ctx_err,
+ "TX buffer preparation failures");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "queue_wakeup", CTLFLAG_RD,
+ &tx_stats->queue_wakeup, "Queue wakeups");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "dma_mapping_err", CTLFLAG_RD,
+ &tx_stats->dma_mapping_err, "DMA mapping failures");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "unsupported_desc_num", CTLFLAG_RD,
+ &tx_stats->unsupported_desc_num,
+ "Excessive descriptor packet discards");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "napi_comp", CTLFLAG_RD,
+ &tx_stats->napi_comp, "Napi completions");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_poll", CTLFLAG_RD,
+ &tx_stats->tx_poll, "TX poll count");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "doorbells", CTLFLAG_RD,
+ &tx_stats->doorbells, "Queue doorbells");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "missing_tx_comp", CTLFLAG_RD,
+ &tx_stats->missing_tx_comp, "TX completions missed");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "bad_req_id", CTLFLAG_RD,
+ &tx_stats->bad_req_id, "Bad request id count");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "stops", CTLFLAG_RD,
+ &tx_stats->queue_stop, "Queue stops");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "mbuf_collapses", CTLFLAG_RD,
+ &tx_stats->collapse,
+ "Mbuf collapse count");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "mbuf_collapse_err", CTLFLAG_RD,
+ &tx_stats->collapse_err,
+ "Mbuf collapse failures");
+
+ /* RX specific stats */
+ rx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
+ "rx_ring", CTLFLAG_RD, NULL, "RX ring");
+ rx_list = SYSCTL_CHILDREN(rx_node);
+
+ rx_stats = &rx_ring->rx_stats;
+
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "count", CTLFLAG_RD,
+ &rx_stats->cnt, "Packets received");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "bytes", CTLFLAG_RD,
+ &rx_stats->bytes, "Bytes received");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "refil_partial", CTLFLAG_RD,
+ &rx_stats->refil_partial, "Partial RX ring refills");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "bad_csum", CTLFLAG_RD,
+ &rx_stats->bad_csum, "Bad RX checksum");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "page_alloc_fail", CTLFLAG_RD,
+ &rx_stats->page_alloc_fail, "Failed page allocs");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "mbuf_alloc_fail", CTLFLAG_RD,
+ &rx_stats->mbuf_alloc_fail, "Failed mbuf allocs");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "dma_mapping_err", CTLFLAG_RD,
+ &rx_stats->dma_mapping_err, "DMA mapping errors");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "bad_desc_num", CTLFLAG_RD,
+ &rx_stats->bad_desc_num, "Bad descriptor count");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "small_copy_len_pkt", CTLFLAG_RD,
+ &rx_stats->small_copy_len_pkt, "Small copy packet count");
+ }
+
+ /* Stats read from device */
+ hw_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hw_stats",
+ CTLFLAG_RD, NULL, "Statistics from hardware");
+ hw_list = SYSCTL_CHILDREN(hw_node);
+
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
+ &hw_stats->rx_packets, 0, "Packets received");
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
+ &hw_stats->tx_packets, 0, "Packets transmitted");
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
+ &hw_stats->rx_bytes, 0, "Bytes received");
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
+ &hw_stats->tx_bytes, 0, "Bytes transmitted");
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_drops", CTLFLAG_RD,
+ &hw_stats->rx_drops, 0, "Receive packet drops");
+
+ SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "update_stats",
+ CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, ena_sysctl_update_stats,
+ "A", "Update stats from hardware");
+ /* ENA Admin queue stats */
+ admin_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "admin_stats",
+ CTLFLAG_RD, NULL, "ENA Admin Queue statistics");
+ admin_list = SYSCTL_CHILDREN(admin_node);
+
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "aborted_cmd", CTLFLAG_RD,
+ &admin_stats->aborted_cmd, 0, "Aborted commands");
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "submitted_cmd", CTLFLAG_RD,
+ &admin_stats->submitted_cmd, 0, "Submitted commands");
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "completed_cmd", CTLFLAG_RD,
+ &admin_stats->completed_cmd, 0, "Completed commands");
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "out_of_space", CTLFLAG_RD,
+ &admin_stats->out_of_space, 0, "Queue out of space");
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "no_completion", CTLFLAG_RD,
+ &admin_stats->no_completion, 0, "Commands not completed");
+}
+
+static int
+ena_sysctl_update_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg1;
+ int rc;
+
+ if (adapter->up)
+ ena_update_stats_counters(adapter);
+
+ rc = sysctl_handle_string(oidp, "", 1, req);
+ return (rc);
+}
+
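Once the driver is attached, these nodes appear under the device's sysctl tree. A minimal userland reader for one of the hardware counters registered above; the OID string assumes unit 0, i.e. dev.ena.0:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t rx_packets;
	size_t len = sizeof(rx_packets);

	/* Reads the counter registered as hw_stats.rx_packets above. */
	if (sysctlbyname("dev.ena.0.hw_stats.rx_packets",
	    &rx_packets, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}

	printf("rx_packets: %ju\n", (uintmax_t)rx_packets);
	return (0);
}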
diff --git a/sys/dev/ena/ena_sysctl.h b/sys/dev/ena/ena_sysctl.h
new file mode 100644
index 0000000..b2c5ccc
--- /dev/null
+++ b/sys/dev/ena/ena_sysctl.h
@@ -0,0 +1,44 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef ENA_SYSCTL_H
+#define ENA_SYSCTL_H
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include "ena.h"
+
+void ena_sysctl_add_nodes(struct ena_adapter *);
+
+#endif /* !(ENA_SYSCTL_H) */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index efc9b25..2b93223 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -109,6 +109,7 @@ SUBDIR= \
${_efirt} \
${_elink} \
${_em} \
+ ${_ena} \
en \
${_ep} \
${_epic} \
@@ -575,6 +576,7 @@ _drm= drm
_drm2= drm2
_ed= ed
_em= em
+_ena= ena
_ep= ep
_et= et
_exca= exca
diff --git a/sys/modules/ena/Makefile b/sys/modules/ena/Makefile
new file mode 100644
index 0000000..fc70205
--- /dev/null
+++ b/sys/modules/ena/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+.PATH: ${SRCTOP}/sys/dev/ena \
+ ${SRCTOP}/sys/contrib/ena-com
+
+KMOD = if_ena
+SRCS = ena.c ena_com.c ena_eth_com.c ena_sysctl.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += -I${SRCTOP}/sys/contrib
+
+.include <bsd.kmod.mk>