kernel: Bring in FreeBSD's ena(4) driver unchanged and unhooked.
author     Sascha Wildner <saw@online.de>
Wed, 18 Jul 2018 16:54:07 +0000 (18:54 +0200)
committer  Sascha Wildner <saw@online.de>
Sun, 22 Jul 2018 13:22:03 +0000 (15:22 +0200)
This is a driver for Amazon's Elastic Network Adapter family,
for EC2 instances that support it.
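
The driver is not hooked into the build yet. For reference, the accompanying
ena.4 manual page describes how it will be enabled once it is: either
statically, with "device ena" in the kernel configuration file, or as a
module loaded at boot time by putting

	if_ena_load="YES"

into loader.conf(5).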

21 files changed:
share/man/man4/ena.4 [new file with mode: 0644]
sys/dev/virtual/amazon/ena/Makefile [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_admin_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_com.c [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_com.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_common_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_admin_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_common_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_eth_io_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_gen_info.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_includes.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_regs_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.c [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_eth_io_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_plat.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena-com/ena_regs_defs.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena.c [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena.h [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena_sysctl.c [new file with mode: 0644]
sys/dev/virtual/amazon/ena/ena_sysctl.h [new file with mode: 0644]

diff --git a/share/man/man4/ena.4 b/share/man/man4/ena.4
new file mode 100644 (file)
index 0000000..85ca697
--- /dev/null
@@ -0,0 +1,255 @@
+.\" Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in
+.\"    the documentation and/or other materials provided with the
+.\"    distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+.\" OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+.\" SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+.\" LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+.\" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: head/share/man/man4/ena.4 322683 2017-08-19 00:51:45Z emaste $
+.\"
+.Dd May 04, 2017
+.Dt ENA 4
+.Os
+.Sh NAME
+.Nm ena
+.Nd "FreeBSD kernel driver for Elastic Network Adapter (ENA) family"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following line in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device ena"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_ena_load="YES"
+.Ed
+.Sh DESCRIPTION
+The ENA is a networking interface designed to make good use of modern CPU
+features and system architectures.
+.Pp
+The ENA device exposes a lightweight management interface with a
+minimal set of memory mapped registers and extendable command set
+through an Admin Queue.
+.Pp
+The driver supports a range of ENA devices, is link-speed independent
+(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has
+a negotiated and extendable feature set.
+.Pp
+Some ENA devices support SR-IOV. This driver is used for both the
+SR-IOV Physical Function (PF) and Virtual Function (VF) devices.
+.Pp
+The ENA devices enable high speed and low overhead network traffic
+processing by providing multiple Tx/Rx queue pairs (the maximum number
+is advertised by the device via the Admin Queue), a dedicated MSI-X
+interrupt vector per Tx/Rx queue pair, and CPU cacheline optimized
+data placement.
+.Pp
+The
+.Nm
+driver supports industry standard TCP/IP offload features such
+as checksum offload and TCP transmit segmentation offload (TSO).
+Receive-side scaling (RSS) is supported for multi-core scaling.
+.Pp
+The
+.Nm
+driver and its corresponding devices implement health
+monitoring mechanisms, such as a watchdog, enabling the device and driver
+to recover in a manner transparent to the application, as well as
+debug logs.
+.Pp
+Some of the ENA devices support a working mode called Low-latency
+Queue (LLQ), which saves several more microseconds. This feature will
+be implemented in the driver in future releases.
+.Sh HARDWARE
+Supported PCI vendor ID/device IDs:
+.Pp
+.Bl -bullet -compact
+.It
+1d0f:0ec2 - ENA PF
+.It
+1d0f:1ec2 - ENA PF with LLQ support
+.It
+1d0f:ec20 - ENA VF
+.It
+1d0f:ec21 - ENA VF with LLQ support
+.El
+.Sh DIAGNOSTICS
+.Ss Device initialization phase:
+.Bl -diag
+.It ena%d: failed to init mmio read less
+.Pp
+Error occurred during initialization of the mmio register read request.
+.It ena%d: Can not reset device
+.Pp
+Device could not be reset; the device may not be responding or is
+already being reset.
+.It ena%d: device version is too low
+.Pp
+Version of the controller is too low and it is not supported by the driver.
+.It ena%d: Invalid dma width value %d
+.Pp
+The controller is able to request the DMA transaction width. The device
+stopped responding or it demanded an invalid value.
+.It ena%d: Can not initialize ena admin queue with device
+.Pp
+Initialization of the Admin Queue failed; device may not be responding or there
+was a problem with initialization of the resources.
+.It ena%d: Cannot get attribute for ena device rc: %d
+.Pp
+Failed to get attributes of the device from the controller.
+.It ena%d: Cannot configure aenq groups rc: %d
+.Pp
+Errors occurred when trying to configure AENQ groups.
+.El
+.Ss Driver initialization/shutdown phase:
+.Bl -diag
+.It ena%d: PCI resource allocation failed!
+.It ena%d: allocating ena_dev failed
+.It ena%d: failed to pmap registers bar
+.It ena%d: Error while setting up bufring
+.It ena%d: Error with initialization of IO rings
+.It ena%d: can not allocate ifnet structure
+.It ena%d: Error with network interface setup
+.It ena%d: Failed to enable and set the admin interrupts
+.It ena%d: Failed to allocate %d, vectors %d
+.It ena%d: Failed to enable MSIX, vectors %d rc %d
+.It ena%d: Error with MSI-X enablement
+.It ena%d: could not allocate irq vector: %d
+.It ena%d: Unable to allocate bus resource: registers
+.Pp
+Resource allocation failed when initializing the device; driver will not
+be attached.
+.It ena%d: ENA device init failed (err: %d)
+.Pp
+Device initialization failed; driver will not be attached.
+.It ena%d: could not activate irq vector: %d
+.Pp
+Error occurred when trying to activate interrupt vectors for Admin Queue.
+.It ena%d: failed to register interrupt handler for irq %ju: %d
+.Pp
+Error occurred when trying to register Admin Queue interrupt handler.
+.It ena%d: Cannot setup mgmnt queue intr
+.Pp
+Error occurred during configuration of the Admin Queue interrupts.
+.It ena%d: Enable MSI-X failed
+.Pp
+Configuration of MSI-X for the Admin Queue failed; there could be a lack
+of resources, or the interrupts could not be configured; the driver will
+not be attached.
+.It ena%d: VLAN is in use, detach first
+.Pp
+VLANs are still in use when trying to detach the driver; the VLANs should be
+detached first and then the detach routine should be called again.
+.It ena%d: Unmapped RX DMA tag associations
+.It ena%d: Unmapped TX DMA tag associations
+.Pp
+Error occurred when trying to destroy RX/TX DMA tag.
+.It ena%d: Cannot init RSS
+.It ena%d: Cannot fill indirect table
+.It ena%d: Cannot fill indirect table
+.It ena%d: Cannot fill hash function
+.It ena%d: Cannot fill hash control
+.It ena%d: WARNING: RSS was not properly initialized, it will affect bandwidth
+.Pp
+Error occurred during initialization of one of the RSS resources; the device
+will still work, but performance will suffer because all RX packets will be
+passed to queue 0 and no hash information will be available.
+.It ena%d: failed to tear down irq: %d
+.It ena%d: dev has no parent while releasing res for irq: %d
+Release of the interrupts failed.
+.El
+.Ss Additional diagnostics:
+.Bl -diag
+.It ena%d: Cannot get attribute for ena device
+.Pp
+This message appears when trying to change the MTU and the driver is unable
+to get attributes from the device.
+.It ena%d: Invalid MTU setting. new_mtu: %d
+.Pp
+Requested MTU value is not supported and will not be set.
+.It ena%d: keep alive watchdog timeout
+.Pp
+Device stopped responding and will be reset.
+.It ena%d: Found a Tx that wasn't completed on time, qid %d, index %d.
+.Pp
+The packet was pushed to the NIC but not sent within the given time limit; it
+may be caused by a hang of the IO queue.
+.It ena%d: The number of lost tx completion is above the threshold (%d > %d). Reset the device
+.Pp
+If too many Tx completions were not received on time, the device is going to
+be reset; it may be caused by a hung queue or device.
+.It ena%d: trigger reset is on
+.Pp
+Device will be reset; reset is triggered either by watchdog or if too many TX
+packets were not completed on time.
+.It ena%d: invalid value recvd
+.Pp
+Link status received from the device in the AENQ handler is invalid.
+.It ena%d: Allocation for Tx Queue %u failed
+.It ena%d: Allocation for Rx Queue %u failed
+.It ena%d: Unable to create Rx DMA map for buffer %d
+.It ena%d: Failed to create io TX queue #%d rc: %d
+.It ena%d: Failed to get TX queue handlers. TX queue num %d rc: %d
+.It ena%d: Failed to create io RX queue[%d] rc: %d
+.It ena%d: Failed to get RX queue handlers. RX queue num %d rc: %d
+.It ena%d: failed to request irq
+.It ena%d: could not allocate irq vector: %d
+.It ena%d: failed to register interrupt handler for irq %ju: %d
+.Pp
+IO resources initialization failed. Interface will not be brought up.
+.It ena%d: LRO[%d] Initialization failed!
+.Pp
+Initialization of the LRO for the RX ring failed.
+.It ena%d: failed to alloc buffer for rx queue
+.It ena%d: failed to add buffer for rx queue %d
+.It ena%d: refilled rx queue %d with %d pages only
+.Pp
+Allocation of resources used on the RX path failed; if this happened during
+initialization of the IO queue, the interface will not be brought up.
+.It ena%d: ioctl promisc/allmulti
+.Pp
+IOCTL request for the device to work in promiscuous/allmulti mode; see
+.Xr ifconfig 8
+for more details.
+.It ena%d: too many fragments. Last fragment: %d!
+.Pp
+A packet with an unsupported number of segments was queued for sending to the
+device; the packet will be dropped.
+.Sh SUPPORT
+If an issue is identified with the released source code on a supported adapter,
+email the specific information related to the issue to
+.Aq Mt mk@semihalf.com
+and
+.Aq Mt mw@semihalf.com .
+.Sh SEE ALSO
+.Xr vlan 4 ,
+.Xr ifconfig 8
+.Sh AUTHORS
+The
+.Nm
+driver was written by
+.An Semihalf.
diff --git a/sys/dev/virtual/amazon/ena/Makefile b/sys/dev/virtual/amazon/ena/Makefile
new file mode 100644 (file)
index 0000000..c8858b9
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# $FreeBSD: head/sys/modules/ena/Makefile 318647 2017-05-22 14:46:13Z zbb $
+#
+
+.PATH: ${SRCTOP}/sys/dev/ena \
+       ${SRCTOP}/sys/contrib/ena-com
+
+KMOD   = if_ena
+SRCS   = ena.c ena_com.c ena_eth_com.c ena_sysctl.c
+SRCS   += device_if.h bus_if.h pci_if.h
+CFLAGS  += -I${SRCTOP}/sys/contrib
+
+.include <bsd.kmod.mk>
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_admin_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_admin_defs.h
new file mode 100644 (file)
index 0000000..1d845c0
--- /dev/null
@@ -0,0 +1,1412 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+       ENA_ADMIN_CREATE_SQ     = 1,
+
+       ENA_ADMIN_DESTROY_SQ    = 2,
+
+       ENA_ADMIN_CREATE_CQ     = 3,
+
+       ENA_ADMIN_DESTROY_CQ    = 4,
+
+       ENA_ADMIN_GET_FEATURE   = 8,
+
+       ENA_ADMIN_SET_FEATURE   = 9,
+
+       ENA_ADMIN_GET_STATS     = 11,
+};
+
+enum ena_admin_aq_completion_status {
+       ENA_ADMIN_SUCCESS                       = 0,
+
+       ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE   = 1,
+
+       ENA_ADMIN_BAD_OPCODE                    = 2,
+
+       ENA_ADMIN_UNSUPPORTED_OPCODE            = 3,
+
+       ENA_ADMIN_MALFORMED_REQUEST             = 4,
+
+       /* Additional status is provided in ACQ entry extended_status */
+       ENA_ADMIN_ILLEGAL_PARAMETER             = 5,
+
+       ENA_ADMIN_UNKNOWN_ERROR                 = 6,
+};
+
+enum ena_admin_aq_feature_id {
+       ENA_ADMIN_DEVICE_ATTRIBUTES             = 1,
+
+       ENA_ADMIN_MAX_QUEUES_NUM                = 2,
+
+       ENA_ADMIN_HW_HINTS                      = 3,
+
+       ENA_ADMIN_RSS_HASH_FUNCTION             = 10,
+
+       ENA_ADMIN_STATELESS_OFFLOAD_CONFIG      = 11,
+
+       ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG  = 12,
+
+       ENA_ADMIN_MTU                           = 14,
+
+       ENA_ADMIN_RSS_HASH_INPUT                = 18,
+
+       ENA_ADMIN_INTERRUPT_MODERATION          = 20,
+
+       ENA_ADMIN_AENQ_CONFIG                   = 26,
+
+       ENA_ADMIN_LINK_CONFIG                   = 27,
+
+       ENA_ADMIN_HOST_ATTR_CONFIG              = 28,
+
+       ENA_ADMIN_FEATURES_OPCODE_NUM           = 32,
+};
+
+enum ena_admin_placement_policy_type {
+       /* descriptors and headers are in host memory */
+       ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+       /* descriptors and headers are in device memory (a.k.a Low Latency
+        * Queue)
+        */
+       ENA_ADMIN_PLACEMENT_POLICY_DEV  = 3,
+};
+
+enum ena_admin_link_types {
+       ENA_ADMIN_LINK_SPEED_1G         = 0x1,
+
+       ENA_ADMIN_LINK_SPEED_2_HALF_G   = 0x2,
+
+       ENA_ADMIN_LINK_SPEED_5G         = 0x4,
+
+       ENA_ADMIN_LINK_SPEED_10G        = 0x8,
+
+       ENA_ADMIN_LINK_SPEED_25G        = 0x10,
+
+       ENA_ADMIN_LINK_SPEED_40G        = 0x20,
+
+       ENA_ADMIN_LINK_SPEED_50G        = 0x40,
+
+       ENA_ADMIN_LINK_SPEED_100G       = 0x80,
+
+       ENA_ADMIN_LINK_SPEED_200G       = 0x100,
+
+       ENA_ADMIN_LINK_SPEED_400G       = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+       /* completion queue entry for each sq descriptor */
+       ENA_ADMIN_COMPLETION_POLICY_DESC                = 0,
+
+       /* completion queue entry upon request in sq descriptor */
+       ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND      = 1,
+
+       /* current queue head pointer is updated in OS memory upon sq
+        * descriptor request
+        */
+       ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND      = 2,
+
+       /* current queue head pointer is updated in OS memory for each sq
+        * descriptor
+        */
+       ENA_ADMIN_COMPLETION_POLICY_HEAD                = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+       ENA_ADMIN_GET_STATS_TYPE_BASIC          = 0,
+
+       ENA_ADMIN_GET_STATS_TYPE_EXTENDED       = 1,
+};
+
+enum ena_admin_get_stats_scope {
+       ENA_ADMIN_SPECIFIC_QUEUE        = 0,
+
+       ENA_ADMIN_ETH_TRAFFIC           = 1,
+};
+
+struct ena_admin_aq_common_desc {
+       /* 11:0 : command_id
+        * 15:12 : reserved12
+        */
+       uint16_t command_id;
+
+       /* as appears in ena_admin_aq_opcode */
+       uint8_t opcode;
+
+       /* 0 : phase
+        * 1 : ctrl_data - control buffer address valid
+        * 2 : ctrl_data_indirect - control buffer address
+        *    points to list of pages with addresses of control
+        *    buffers
+        * 7:3 : reserved3
+        */
+       uint8_t flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+       uint32_t length;
+
+       struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+       uint16_t sq_idx;
+
+       /* 4:0 : reserved
+        * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+        */
+       uint8_t sq_identity;
+
+       uint8_t reserved1;
+};
+
+struct ena_admin_aq_entry {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       union {
+               uint32_t inline_data_w1[3];
+
+               struct ena_admin_ctrl_buff_info control_buffer;
+       } u;
+
+       uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+       /* command identifier to associate it with the aq descriptor
+        * 11:0 : command_id
+        * 15:12 : reserved12
+        */
+       uint16_t command;
+
+       uint8_t status;
+
+       /* 0 : phase
+        * 7:1 : reserved1
+        */
+       uint8_t flags;
+
+       uint16_t extended_status;
+
+       /* serves as a hint what AQ entries can be revoked */
+       uint16_t sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+       struct ena_admin_acq_common_desc acq_common_descriptor;
+
+       uint32_t response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       /* 4:0 : reserved0_w1
+        * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+        */
+       uint8_t sq_identity;
+
+       uint8_t reserved8_w1;
+
+       /* 3:0 : placement_policy - Describing where the SQ
+        *    descriptor ring and the SQ packet headers reside:
+        *    0x1 - descriptors and headers are in OS memory,
+        *    0x3 - descriptors and headers in device memory
+        *    (a.k.a Low Latency Queue)
+        * 6:4 : completion_policy - Describing what policy
+        *    to use for generating a completion entry (cqe) in
+        *    the CQ associated with this SQ: 0x0 - cqe for each
+        *    sq descriptor, 0x1 - cqe upon request in sq
+        *    descriptor, 0x2 - current queue head pointer is
+        *    updated in OS memory upon sq descriptor request
+        *    0x3 - current queue head pointer is updated in OS
+        *    memory for each sq descriptor
+        * 7 : reserved15_w1
+        */
+       uint8_t sq_caps_2;
+
+       /* 0 : is_physically_contiguous - Describes whether the
+        *    queue ring memory is allocated in physically
+        *    contiguous pages or split.
+        * 7:1 : reserved17_w1
+        */
+       uint8_t sq_caps_3;
+
+       /* associated completion queue id. This CQ must be created prior to
+        *    SQ creation
+        */
+       uint16_t cq_idx;
+
+       /* submission queue depth in entries */
+       uint16_t sq_depth;
+
+       /* SQ physical base address in OS memory. This field should not be
+        * used for Low Latency queues. Has to be page aligned.
+        */
+       struct ena_common_mem_addr sq_ba;
+
+       /* specifies queue head writeback location in OS memory. Valid if
+        * completion_policy is set to completion_policy_head_on_demand or
+        * completion_policy_head. Has to be cache aligned
+        */
+       struct ena_common_mem_addr sq_head_writeback;
+
+       uint32_t reserved0_w7;
+
+       uint32_t reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+       ENA_ADMIN_SQ_DIRECTION_TX       = 1,
+
+       ENA_ADMIN_SQ_DIRECTION_RX       = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       uint16_t sq_idx;
+
+       uint16_t reserved;
+
+       /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+       uint32_t sq_doorbell_offset;
+
+       /* low latency queue ring base address as an offset to PCIe MMIO
+        * LLQ_MEM BAR
+        */
+       uint32_t llq_descriptors_offset;
+
+       /* low latency queue headers' memory as an offset to PCIe MMIO
+        * LLQ_MEM BAR
+        */
+       uint32_t llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       /* 4:0 : reserved5
+        * 5 : interrupt_mode_enabled - if set, cq operates
+        *    in interrupt mode, otherwise - polling
+        * 7:6 : reserved6
+        */
+       uint8_t cq_caps_1;
+
+       /* 4:0 : cq_entry_size_words - size of CQ entry in
+        *    32-bit words, valid values: 4, 8.
+        * 7:5 : reserved7
+        */
+       uint8_t cq_caps_2;
+
+       /* completion queue depth in # of entries. must be power of 2 */
+       uint16_t cq_depth;
+
+       /* msix vector assigned to this cq */
+       uint32_t msix_vector;
+
+       /* cq physical base address in OS memory. CQ must be physically
+        * contiguous
+        */
+       struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       uint16_t cq_idx;
+
+       /* actual cq depth in number of entries */
+       uint16_t cq_actual_depth;
+
+       uint32_t numa_node_register_offset;
+
+       uint32_t cq_head_db_register_offset;
+
+       uint32_t cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       uint16_t cq_idx;
+
+       uint16_t reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       union {
+               /* command specific inline data */
+               uint32_t inline_data_w1[3];
+
+               struct ena_admin_ctrl_buff_info control_buffer;
+       } u;
+
+       /* stats type as defined in enum ena_admin_get_stats_type */
+       uint8_t type;
+
+       /* stats scope defined in enum ena_admin_get_stats_scope */
+       uint8_t scope;
+
+       uint16_t reserved3;
+
+       /* queue id. used when scope is specific_queue */
+       uint16_t queue_idx;
+
+       /* device id, value 0xFFFF means mine. only privileged device can get
+        *    stats of other device
+        */
+       uint16_t device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+       uint32_t tx_bytes_low;
+
+       uint32_t tx_bytes_high;
+
+       uint32_t tx_pkts_low;
+
+       uint32_t tx_pkts_high;
+
+       uint32_t rx_bytes_low;
+
+       uint32_t rx_bytes_high;
+
+       uint32_t rx_pkts_low;
+
+       uint32_t rx_pkts_high;
+
+       uint32_t rx_drops_low;
+
+       uint32_t rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+       /* 1:0 : select - 0x1 - current value; 0x3 - default
+        *    value
+        * 7:3 : reserved3
+        */
+       uint8_t flags;
+
+       /* as appears in ena_admin_aq_feature_id */
+       uint8_t feature_id;
+
+       uint16_t reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+       uint32_t impl_id;
+
+       uint32_t device_version;
+
+       /* bitmap of ena_admin_aq_feature_id */
+       uint32_t supported_features;
+
+       uint32_t reserved3;
+
+       /* Indicates how many bits are used for physical address access. */
+       uint32_t phys_addr_width;
+
+       /* Indicates how many bits are used for virtual address access. */
+       uint32_t virt_addr_width;
+
+       /* unicast MAC address (in Network byte order) */
+       uint8_t mac_addr[6];
+
+       uint8_t reserved7[2];
+
+       uint32_t max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+       /* including LLQs */
+       uint32_t max_sq_num;
+
+       uint32_t max_sq_depth;
+
+       uint32_t max_cq_num;
+
+       uint32_t max_cq_depth;
+
+       uint32_t max_llq_num;
+
+       uint32_t max_llq_depth;
+
+       uint32_t max_header_size;
+
+       /* Maximum number of descriptors, including the meta descriptor,
+        *    allowed for a single Tx packet
+        */
+       uint16_t max_packet_tx_descs;
+
+       /* Maximum number of descriptors allowed for a single Rx packet */
+       uint16_t max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+       /* exclude L2 */
+       uint32_t mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+       /* host OS info base address in OS memory. host info is 4KB of
+        * physically contiguous memory
+        */
+       struct ena_common_mem_addr os_info_ba;
+
+       /* host debug area base address in OS memory. debug area must be
+        * physically contiguous
+        */
+       struct ena_common_mem_addr debug_ba;
+
+       /* debug area size */
+       uint32_t debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+       /* interrupt delay granularity in usec */
+       uint16_t intr_delay_resolution;
+
+       uint16_t reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+       /* Link speed in Mb */
+       uint32_t speed;
+
+       /* bit field of enum ena_admin_link_types */
+       uint32_t supported;
+
+       /* 0 : autoneg
+        * 1 : duplex - Full Duplex
+        * 31:2 : reserved2
+        */
+       uint32_t flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+       /* bitmask for AENQ groups the device can report */
+       uint32_t supported_groups;
+
+       /* bitmask for AENQ groups to report */
+       uint32_t enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+       /* 0 : TX_L3_csum_ipv4
+        * 1 : TX_L4_ipv4_csum_part - The checksum field
+        *    should be initialized with pseudo header checksum
+        * 2 : TX_L4_ipv4_csum_full
+        * 3 : TX_L4_ipv6_csum_part - The checksum field
+        *    should be initialized with pseudo header checksum
+        * 4 : TX_L4_ipv6_csum_full
+        * 5 : tso_ipv4
+        * 6 : tso_ipv6
+        * 7 : tso_ecn
+        */
+       uint32_t tx;
+
+       /* Receive side supported stateless offload
+        * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+        * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+        * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+        * 3 : RX_hash - Hash calculation
+        */
+       uint32_t rx_supported;
+
+       uint32_t rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+       ENA_ADMIN_TOEPLITZ      = 1,
+
+       ENA_ADMIN_CRC32         = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+       uint32_t keys_num;
+
+       uint32_t reserved;
+
+       uint32_t key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+       /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+       uint32_t supported_func;
+
+       /* 7:0 : selected_func - bitmask of
+        *    ena_admin_hash_functions
+        */
+       uint32_t selected_func;
+
+       /* initial value */
+       uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+       ENA_ADMIN_RSS_TCP4      = 0,
+
+       ENA_ADMIN_RSS_UDP4      = 1,
+
+       ENA_ADMIN_RSS_TCP6      = 2,
+
+       ENA_ADMIN_RSS_UDP6      = 3,
+
+       ENA_ADMIN_RSS_IP4       = 4,
+
+       ENA_ADMIN_RSS_IP6       = 5,
+
+       ENA_ADMIN_RSS_IP4_FRAG  = 6,
+
+       ENA_ADMIN_RSS_NOT_IP    = 7,
+
+       /* TCPv6 with extension header */
+       ENA_ADMIN_RSS_TCP6_EX   = 8,
+
+       /* IPv6 with extension header */
+       ENA_ADMIN_RSS_IP6_EX    = 9,
+
+       ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+       /* Ethernet Dest Addr */
+       ENA_ADMIN_RSS_L2_DA     = BIT(0),
+
+       /* Ethernet Src Addr */
+       ENA_ADMIN_RSS_L2_SA     = BIT(1),
+
+       /* ipv4/6 Dest Addr */
+       ENA_ADMIN_RSS_L3_DA     = BIT(2),
+
+       /* ipv4/6 Src Addr */
+       ENA_ADMIN_RSS_L3_SA     = BIT(3),
+
+       /* tcp/udp Dest Port */
+       ENA_ADMIN_RSS_L4_DP     = BIT(4),
+
+       /* tcp/udp Src Port */
+       ENA_ADMIN_RSS_L4_SP     = BIT(5),
+};
+
+struct ena_admin_proto_input {
+       /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+       uint16_t fields;
+
+       uint16_t reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+       struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+       /* supported hash input sorting
+        * 1 : L3_sort - support swap L3 addresses if DA is
+        *    smaller than SA
+        * 2 : L4_sort - support swap L4 ports if DP is
+        *    smaller than SP
+        */
+       uint16_t supported_input_sort;
+
+       /* enabled hash input sorting
+        * 1 : enable_L3_sort - enable swap L3 addresses if
+        *    DA smaller than SA
+        * 2 : enable_L4_sort - enable swap L4 ports if DP
+        *    smaller than SP
+        */
+       uint16_t enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+       ENA_ADMIN_OS_LINUX      = 1,
+
+       ENA_ADMIN_OS_WIN        = 2,
+
+       ENA_ADMIN_OS_DPDK       = 3,
+
+       ENA_ADMIN_OS_FREEBSD    = 4,
+
+       ENA_ADMIN_OS_IPXE       = 5,
+};
+
+struct ena_admin_host_info {
+       /* defined in enum ena_admin_os_type */
+       uint32_t os_type;
+
+       /* os distribution string format */
+       uint8_t os_dist_str[128];
+
+       /* OS distribution numeric format */
+       uint32_t os_dist;
+
+       /* kernel version string format */
+       uint8_t kernel_ver_str[32];
+
+       /* Kernel version numeric format */
+       uint32_t kernel_ver;
+
+       /* 7:0 : major
+        * 15:8 : minor
+        * 23:16 : sub_minor
+        */
+       uint32_t driver_version;
+
+       /* features bitmap */
+       uint32_t supported_network_features[4];
+};
+
+struct ena_admin_rss_ind_table_entry {
+       uint16_t cq_idx;
+
+       uint16_t reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+       /* min supported table size (2^min_size) */
+       uint16_t min_size;
+
+       /* max supported table size (2^max_size) */
+       uint16_t max_size;
+
+       /* table size (2^size) */
+       uint16_t size;
+
+       uint16_t reserved;
+
+       /* index of the inline entry. 0xFFFFFFFF means invalid */
+       uint32_t inline_index;
+
+       /* used for updating single entry, ignored when setting the entire
+        * table through the control buffer.
+        */
+       struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+       /* value in ms */
+       uint16_t mmio_read_timeout;
+
+       /* value in ms */
+       uint16_t driver_watchdog_timeout;
+
+       /* Per packet tx completion timeout. value in ms */
+       uint16_t missing_tx_completion_timeout;
+
+       uint16_t missed_tx_completion_count_threshold_to_reset;
+
+       /* value in ms */
+       uint16_t admin_completion_tx_timeout;
+
+       uint16_t netdev_wd_timeout;
+
+       uint16_t max_tx_sgl_size;
+
+       uint16_t max_rx_sgl_size;
+
+       uint16_t reserved[8];
+};
+
+struct ena_admin_get_feat_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_ctrl_buff_info control_buffer;
+
+       struct ena_admin_get_set_feature_common_desc feat_common;
+
+       uint32_t raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       union {
+               uint32_t raw[14];
+
+               struct ena_admin_device_attr_feature_desc dev_attr;
+
+               struct ena_admin_queue_feature_desc max_queue;
+
+               struct ena_admin_feature_aenq_desc aenq;
+
+               struct ena_admin_get_feature_link_desc link;
+
+               struct ena_admin_feature_offload_desc offload;
+
+               struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+               struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+               struct ena_admin_feature_rss_ind_table ind_table;
+
+               struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+               struct ena_admin_ena_hw_hints hw_hints;
+       } u;
+};
+
+struct ena_admin_set_feat_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_ctrl_buff_info control_buffer;
+
+       struct ena_admin_get_set_feature_common_desc feat_common;
+
+       union {
+               uint32_t raw[11];
+
+               /* mtu size */
+               struct ena_admin_set_feature_mtu_desc mtu;
+
+               /* host attributes */
+               struct ena_admin_set_feature_host_attr_desc host_attr;
+
+               /* AENQ configuration */
+               struct ena_admin_feature_aenq_desc aenq;
+
+               /* rss flow hash function */
+               struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+               /* rss flow hash input */
+               struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+               /* rss indirection table */
+               struct ena_admin_feature_rss_ind_table ind_table;
+       } u;
+};
+
+struct ena_admin_set_feat_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       union {
+               uint32_t raw[14];
+       } u;
+};
+
+struct ena_admin_aenq_common_desc {
+       uint16_t group;
+
+       uint16_t syndrom;
+
+       /* 0 : phase */
+       uint8_t flags;
+
+       uint8_t reserved1[3];
+
+       uint32_t timestamp_low;
+
+       uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+       ENA_ADMIN_LINK_CHANGE           = 0,
+
+       ENA_ADMIN_FATAL_ERROR           = 1,
+
+       ENA_ADMIN_WARNING               = 2,
+
+       ENA_ADMIN_NOTIFICATION          = 3,
+
+       ENA_ADMIN_KEEP_ALIVE            = 4,
+
+       ENA_ADMIN_AENQ_GROUPS_NUM       = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+       ENA_ADMIN_SUSPEND       = 0,
+
+       ENA_ADMIN_RESUME        = 1,
+
+       ENA_ADMIN_UPDATE_HINTS  = 2,
+};
+
+struct ena_admin_aenq_entry {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       /* command specific inline data */
+       uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       /* 0 : link_status */
+       uint32_t flags;
+};
+
+struct ena_admin_aenq_keep_alive_desc {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       uint32_t rx_drops_low;
+
+       uint32_t rx_drops_high;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+       uint16_t req_id;
+
+       uint16_t reg_off;
+
+       /* value is valid when poll is cleared */
+       uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
+{
+       return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
+{
+       p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
+{
+       return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
+{
+       return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+       return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+       p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
+{
+       return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
+{
+       p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
+{
+       return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+       p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
+{
+       return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+       p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
+{
+       return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+       p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
+{
+       return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+       p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+       return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+       p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+       return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+       p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+       return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+       return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+       return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
+
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
+{
+       return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
+{
+       p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /* _ENA_ADMIN_H_ */
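The accessors above all follow the same MASK/SHIFT bit-field idiom. A minimal standalone sketch of that idiom (illustrative only, not part of the imported sources; the FOO_BAR_* names are hypothetical):

#include <assert.h>
#include <stdint.h>

#define FOO_BAR_MASK  0x00000070u   /* a 3-bit field at bit offset 4 */
#define FOO_BAR_SHIFT 4

static inline uint32_t get_foo_bar(uint32_t word)
{
        return (word & FOO_BAR_MASK) >> FOO_BAR_SHIFT;
}

static inline void set_foo_bar(uint32_t *word, uint32_t val)
{
        /* Like the driver's setters, this only ORs the value in; it assumes
         * the field was previously zero and does not clear stale bits. */
        *word |= (val << FOO_BAR_SHIFT) & FOO_BAR_MASK;
}

int main(void)
{
        uint32_t w = 0;

        set_foo_bar(&w, 5);
        assert(get_foo_bar(w) == 5);
        return 0;
}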
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_com.c b/sys/dev/virtual/amazon/ena/ena-com/ena_com.c
new file mode 100644 (file)
index 0000000..4314b31
--- /dev/null
@@ -0,0 +1,2980 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ena_com.h"
+#ifdef ENA_INTERNAL
+#include "ena_gen_info.h"
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (3000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 16
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+               ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+               | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR         0
+#define ENA_CTRL_MINOR         0
+#define ENA_CTRL_SUB_MINOR     1
+
+#define MIN_ENA_CTRL_VER \
+       (((ENA_CTRL_MAJOR) << \
+       (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+       ((ENA_CTRL_MINOR) << \
+       (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+       (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x)  ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT        4
+
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+       ENA_CMD_SUBMITTED,
+       ENA_CMD_COMPLETED,
+       /* Abort - canceled by the driver */
+       ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+       ena_wait_event_t wait_event;
+       struct ena_admin_acq_entry *user_cqe;
+       u32 comp_size;
+       enum ena_cmd_status status;
+       /* status from the device */
+       u8 comp_status;
+       u8 cmd_opcode;
+       bool occupied;
+};
+
+struct ena_com_stats_ctx {
+       struct ena_admin_aq_get_stats_cmd get_cmd;
+       struct ena_admin_acq_get_stats_resp get_resp;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+                                      struct ena_common_mem_addr *ena_addr,
+                                      dma_addr_t addr)
+{
+       if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+               ena_trc_err("dma address has more bits that the device supports\n");
+               return ENA_COM_INVAL;
+       }
+
+       ena_addr->mem_addr_low = (u32)addr;
+       ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
+
+       return 0;
+}
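ena_com_mem_addr_set() above rejects addresses wider than the device's dma_addr_bits and then splits the 64-bit DMA address into a 32-bit low half and a 16-bit high half. A standalone sketch of that check and split (illustrative only; GENMASK_ULL is re-defined here so the snippet compiles outside the kernel):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1ULL))

static int split_dma_addr(uint64_t addr, unsigned dma_addr_bits,
                          uint32_t *low, uint16_t *high)
{
        /* reject an address wider than the device can express */
        if ((addr & GENMASK_ULL(dma_addr_bits - 1, 0)) != addr)
                return -1;

        *low = (uint32_t)addr;          /* bits 31..0  */
        *high = (uint16_t)(addr >> 32); /* bits 47..32 */
        return 0;
}

int main(void)
{
        uint32_t lo;
        uint16_t hi;

        if (split_dma_addr(0x0000123456789ABCULL, 48, &lo, &hi) == 0)
                printf("low=0x%08x high=0x%04x\n", (unsigned)lo, (unsigned)hi);
        return 0;
}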
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+       struct ena_com_admin_sq *sq = &queue->sq;
+       u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+       ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+                              sq->mem_handle);
+
+       if (!sq->entries) {
+               ena_trc_err("memory allocation failed");
+               return ENA_COM_NO_MEM;
+       }
+
+       sq->head = 0;
+       sq->tail = 0;
+       sq->phase = 1;
+
+       sq->db_addr = NULL;
+
+       return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+       struct ena_com_admin_cq *cq = &queue->cq;
+       u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+       ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+                              cq->mem_handle);
+
+       if (!cq->entries)  {
+               ena_trc_err("memory allocation failed");
+               return ENA_COM_NO_MEM;
+       }
+
+       cq->head = 0;
+       cq->phase = 1;
+
+       return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+                                  struct ena_aenq_handlers *aenq_handlers)
+{
+       struct ena_com_aenq *aenq = &dev->aenq;
+       u32 addr_low, addr_high, aenq_caps;
+       u16 size;
+
+       dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+       size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+       ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+                       aenq->entries,
+                       aenq->dma_addr,
+                       aenq->mem_handle);
+
+       if (!aenq->entries) {
+               ena_trc_err("memory allocation failed");
+               return ENA_COM_NO_MEM;
+       }
+
+       aenq->head = aenq->q_depth;
+       aenq->phase = 1;
+
+       addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+       addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+       ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+       ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+
+       aenq_caps = 0;
+       aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+       aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
+               ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+               ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+       ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+       if (unlikely(!aenq_handlers)) {
+               ena_trc_err("aenq handlers pointer is NULL\n");
+               return ENA_COM_INVAL;
+       }
+
+       aenq->aenq_handlers = aenq_handlers;
+
+       return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+                                    struct ena_comp_ctx *comp_ctx)
+{
+       comp_ctx->occupied = false;
+       ATOMIC32_DEC(&queue->outstanding_cmds);
+}
+
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+                                         u16 command_id, bool capture)
+{
+       if (unlikely(command_id >= queue->q_depth)) {
+               ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+                           command_id, queue->q_depth);
+               return NULL;
+       }
+
+       if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+               ena_trc_err("Completion context is occupied\n");
+               return NULL;
+       }
+
+       if (capture) {
+               ATOMIC32_INC(&queue->outstanding_cmds);
+               queue->comp_ctx[command_id].occupied = true;
+       }
+
+       return &queue->comp_ctx[command_id];
+}
+
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+                                                      struct ena_admin_aq_entry *cmd,
+                                                      size_t cmd_size_in_bytes,
+                                                      struct ena_admin_acq_entry *comp,
+                                                      size_t comp_size_in_bytes)
+{
+       struct ena_comp_ctx *comp_ctx;
+       u16 tail_masked, cmd_id;
+       u16 queue_size_mask;
+       u16 cnt;
+
+       queue_size_mask = admin_queue->q_depth - 1;
+
+       tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+       /* In case of queue FULL */
+       cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
+       if (cnt >= admin_queue->q_depth) {
+               ena_trc_dbg("admin queue is full.\n");
+               admin_queue->stats.out_of_space++;
+               return ERR_PTR(ENA_COM_NO_SPACE);
+       }
+
+       cmd_id = admin_queue->curr_cmd_id;
+
+       cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+               ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+       cmd->aq_common_descriptor.command_id |= cmd_id &
+               ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+       comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+       if (unlikely(!comp_ctx))
+               return ERR_PTR(ENA_COM_INVAL);
+
+       comp_ctx->status = ENA_CMD_SUBMITTED;
+       comp_ctx->comp_size = (u32)comp_size_in_bytes;
+       comp_ctx->user_cqe = comp;
+       comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+       ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
+
+       memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+       admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+               queue_size_mask;
+
+       admin_queue->sq.tail++;
+       admin_queue->stats.submitted_cmd++;
+
+       if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+               admin_queue->sq.phase = !admin_queue->sq.phase;
+
+       ENA_DB_SYNC(&admin_queue->sq.mem_handle);
+       ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+                       admin_queue->sq.db_addr);
+
+       return comp_ctx;
+}
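__ena_com_submit_admin_cmd() above relies on the admin SQ depth being a power of two: the slot index is tail & (depth - 1) and the phase bit flips whenever the tail wraps. A reduced, standalone sketch of just that ring arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define DEPTH 8         /* must be a power of two, like the admin queue depth */

struct ring {
        uint16_t tail;
        uint8_t phase;
        uint8_t slot_phase[DEPTH];
};

static void produce(struct ring *r)
{
        uint16_t idx = r->tail & (DEPTH - 1);

        r->slot_phase[idx] = r->phase;  /* entry is stamped with current phase */
        r->tail++;
        if ((r->tail & (DEPTH - 1)) == 0)       /* wrapped: toggle the phase */
                r->phase = !r->phase;
}

int main(void)
{
        struct ring r = { .tail = 0, .phase = 1 };
        int i;

        for (i = 0; i < 2 * DEPTH; i++)
                produce(&r);
        printf("tail=%u phase=%u\n", r.tail, r.phase);  /* tail=16 phase=1 */
        return 0;
}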
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+       size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+       struct ena_comp_ctx *comp_ctx;
+       u16 i;
+
+       queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
+       if (unlikely(!queue->comp_ctx)) {
+               ena_trc_err("memory allocation failed");
+               return ENA_COM_NO_MEM;
+       }
+
+       for (i = 0; i < queue->q_depth; i++) {
+               comp_ctx = get_comp_ctxt(queue, i, false);
+               if (comp_ctx)
+                       ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
+       }
+
+       return 0;
+}
+
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+                                                    struct ena_admin_aq_entry *cmd,
+                                                    size_t cmd_size_in_bytes,
+                                                    struct ena_admin_acq_entry *comp,
+                                                    size_t comp_size_in_bytes)
+{
+       unsigned long flags;
+       struct ena_comp_ctx *comp_ctx;
+
+       ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+       if (unlikely(!admin_queue->running_state)) {
+               ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+               return ERR_PTR(ENA_COM_NO_DEVICE);
+       }
+       comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+                                             cmd_size_in_bytes,
+                                             comp,
+                                             comp_size_in_bytes);
+       if (unlikely(IS_ERR(comp_ctx)))
+               admin_queue->running_state = false;
+       ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+       return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+                             struct ena_com_create_io_ctx *ctx,
+                             struct ena_com_io_sq *io_sq)
+{
+       size_t size;
+       int dev_node = 0;
+
+       memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+       io_sq->desc_entry_size =
+               (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+               sizeof(struct ena_eth_io_tx_desc) :
+               sizeof(struct ena_eth_io_rx_desc);
+
+       size = io_sq->desc_entry_size * io_sq->q_depth;
+       io_sq->bus = ena_dev->bus;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+               ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+                                           size,
+                                           io_sq->desc_addr.virt_addr,
+                                           io_sq->desc_addr.phys_addr,
+                                           io_sq->desc_addr.mem_handle,
+                                           ctx->numa_node,
+                                           dev_node);
+               if (!io_sq->desc_addr.virt_addr) {
+                       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                                              size,
+                                              io_sq->desc_addr.virt_addr,
+                                              io_sq->desc_addr.phys_addr,
+                                              io_sq->desc_addr.mem_handle);
+               }
+
+               if (!io_sq->desc_addr.virt_addr) {
+                       ena_trc_err("memory allocation failed");
+                       return ENA_COM_NO_MEM;
+               }
+       }
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Allocate bounce buffers */
+               io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
+               io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+               io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+               size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+
+               ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
+                                  size,
+                                  io_sq->bounce_buf_ctrl.base_buffer,
+                                  ctx->numa_node,
+                                  dev_node);
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+
+               if (!io_sq->bounce_buf_ctrl.base_buffer) {
+                       ena_trc_err("bounce buffer memory allocation failed");
+                       return ENA_COM_NO_MEM;
+               }
+
+               memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+
+               /* Initiate the first bounce buffer */
+               io_sq->llq_buf_ctrl.curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, io_sq->llq_info.desc_list_entry_size);
+               io_sq->llq_buf_ctrl.descs_left_in_line =
+                       io_sq->llq_info.descs_num_before_header;
+       }
+
+       io_sq->tail = 0;
+       io_sq->next_to_comp = 0;
+       io_sq->phase = 1;
+
+       return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+                             struct ena_com_create_io_ctx *ctx,
+                             struct ena_com_io_cq *io_cq)
+{
+       size_t size;
+       int prev_node = 0;
+
+       memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+
+       /* Use the basic completion descriptor for Rx */
+       io_cq->cdesc_entry_size_in_bytes =
+               (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+               sizeof(struct ena_eth_io_tx_cdesc) :
+               sizeof(struct ena_eth_io_rx_cdesc_base);
+
+       size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+       io_cq->bus = ena_dev->bus;
+
+       ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+                       size,
+                       io_cq->cdesc_addr.virt_addr,
+                       io_cq->cdesc_addr.phys_addr,
+                       io_cq->cdesc_addr.mem_handle,
+                       ctx->numa_node,
+                       prev_node);
+       if (!io_cq->cdesc_addr.virt_addr) {
+               ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                                      size,
+                                      io_cq->cdesc_addr.virt_addr,
+                                      io_cq->cdesc_addr.phys_addr,
+                                      io_cq->cdesc_addr.mem_handle);
+       }
+
+       if (!io_cq->cdesc_addr.virt_addr) {
+               ena_trc_err("memory allocation failed");
+               return ENA_COM_NO_MEM;
+       }
+
+       io_cq->phase = 1;
+       io_cq->head = 0;
+
+       return 0;
+}
+
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+                                                  struct ena_admin_acq_entry *cqe)
+{
+       struct ena_comp_ctx *comp_ctx;
+       u16 cmd_id;
+
+       cmd_id = cqe->acq_common_descriptor.command &
+               ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+       comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+       if (unlikely(!comp_ctx)) {
+               ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
+               admin_queue->running_state = false;
+               return;
+       }
+
+       comp_ctx->status = ENA_CMD_COMPLETED;
+       comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+       if (comp_ctx->user_cqe)
+               memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+       if (!admin_queue->polling)
+               ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+}
+
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+       struct ena_admin_acq_entry *cqe = NULL;
+       u16 comp_num = 0;
+       u16 head_masked;
+       u8 phase;
+
+       head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+       phase = admin_queue->cq.phase;
+
+       cqe = &admin_queue->cq.entries[head_masked];
+
+       /* Go over all the completions */
+       while ((cqe->acq_common_descriptor.flags &
+                       ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Do not read the rest of the completion entry until the
+                * phase bit has been validated
+                */
+               rmb();
+               ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+               head_masked++;
+               comp_num++;
+               if (unlikely(head_masked == admin_queue->q_depth)) {
+                       head_masked = 0;
+                       phase = !phase;
+               }
+
+               cqe = &admin_queue->cq.entries[head_masked];
+       }
+
+       admin_queue->cq.head += comp_num;
+       admin_queue->cq.phase = phase;
+       admin_queue->sq.head += comp_num;
+       admin_queue->stats.completed_cmd += comp_num;
+}
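ena_com_handle_admin_completion() above consumes completions by comparing each entry's phase bit against the expected phase and toggling the expectation on wrap-around. A standalone sketch of that consumer loop (illustrative only; the entry_phase array stands in for the real descriptors):

#include <stdint.h>
#include <stdio.h>

#define DEPTH 8

struct cq {
        uint8_t entry_phase[DEPTH];     /* phase bit of each posted entry */
        uint16_t head;
        uint8_t phase;
};

static unsigned drain(struct cq *q)
{
        uint16_t idx = q->head & (DEPTH - 1);
        uint8_t phase = q->phase;
        unsigned comp = 0;

        /* entries are valid while their phase matches the expected phase */
        while (q->entry_phase[idx] == phase) {
                comp++;                 /* process the completion here */
                if (++idx == DEPTH) {   /* wrap: expect the opposite phase */
                        idx = 0;
                        phase = !phase;
                }
        }
        q->head += comp;
        q->phase = phase;
        return comp;
}

int main(void)
{
        struct cq q = { .phase = 1 };
        int i;

        for (i = 0; i < 3; i++)         /* pretend the device posted 3 entries */
                q.entry_phase[i] = 1;
        printf("drained %u completions\n", drain(&q));
        return 0;
}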
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+       if (unlikely(comp_status != 0))
+               ena_trc_err("admin command failed[%u]\n", comp_status);
+
+       if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+               return ENA_COM_INVAL;
+
+       switch (comp_status) {
+       case ENA_ADMIN_SUCCESS:
+               return 0;
+       case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+               return ENA_COM_NO_MEM;
+       case ENA_ADMIN_UNSUPPORTED_OPCODE:
+               return ENA_COM_UNSUPPORTED;
+       case ENA_ADMIN_BAD_OPCODE:
+       case ENA_ADMIN_MALFORMED_REQUEST:
+       case ENA_ADMIN_ILLEGAL_PARAMETER:
+       case ENA_ADMIN_UNKNOWN_ERROR:
+               return ENA_COM_INVAL;
+       }
+
+       return 0;
+}
+
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+                                                    struct ena_com_admin_queue *admin_queue)
+{
+       unsigned long flags, timeout;
+       int ret;
+
+       timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+       while (1) {
+               ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+               ena_com_handle_admin_completion(admin_queue);
+               ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+               if (comp_ctx->status != ENA_CMD_SUBMITTED)
+                       break;
+
+               if (ENA_TIME_EXPIRE(timeout)) {
+                       ena_trc_err("Wait for completion (polling) timeout\n");
+                       /* ENA didn't have any completion */
+                       ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+                       admin_queue->stats.no_completion++;
+                       admin_queue->running_state = false;
+                       ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+                       ret = ENA_COM_TIMER_EXPIRED;
+                       goto err;
+               }
+
+               ENA_MSLEEP(100);
+       }
+
+       if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+               ena_trc_err("Command was aborted\n");
+               ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+               admin_queue->stats.aborted_cmd++;
+               ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+               ret = ENA_COM_NO_DEVICE;
+               goto err;
+       }
+
+       ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+                "Invalid comp status %d\n", comp_ctx->status);
+
+       ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+       comp_ctxt_release(admin_queue, comp_ctx);
+       return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                                  struct ena_admin_feature_llq_desc *llq_desc)
+{
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+
+       memset(llq_info, 0, sizeof(*llq_info));
+
+       switch (llq_desc->header_location_ctrl) {
+       case ENA_ADMIN_INLINE_HEADER:
+               llq_info->inline_header = true;
+               break;
+       case ENA_ADMIN_HEADER_RING:
+               llq_info->inline_header = false;
+               break;
+       default:
+               ena_trc_err("Invalid header location control\n");
+               return -EINVAL;
+       }
+
+       switch (llq_desc->entry_size_ctrl) {
+       case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
+               llq_info->desc_list_entry_size = 128;
+               break;
+       case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
+               llq_info->desc_list_entry_size = 192;
+               break;
+       case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
+               llq_info->desc_list_entry_size = 256;
+               break;
+       default:
+               ena_trc_err("Invalid entry_size_ctrl %d\n",
+                           llq_desc->entry_size_ctrl);
+               return -EINVAL;
+       }
+
+       if ((llq_info->desc_list_entry_size & 0x7)) {
+               /* The desc list entry size should be a whole multiple of 8
+                * This requirement comes from __iowrite64_copy()
+                */
+               ena_trc_err("illegal entry size %d\n",
+                           llq_info->desc_list_entry_size);
+               return -EINVAL;
+       }
+
+       if (llq_info->inline_header) {
+               llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
+               if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
+                   (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
+                       ena_trc_err("Invalid desc_stride_ctrl %d\n",
+                                   llq_info->desc_stride_ctrl);
+                       return -EINVAL;
+               }
+       } else {
+               llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+       }
+
+       if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+               llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+                       sizeof(struct ena_eth_io_tx_desc);
+       else
+               llq_info->descs_per_entry = 1;
+
+       llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
+
+       return 0;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+                                                       struct ena_com_admin_queue *admin_queue)
+{
+       unsigned long flags;
+       int ret;
+
+       ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
+                           admin_queue->completion_timeout);
+
+       /* In case the command wasn't completed, find out the root cause.
+        * There might be 2 kinds of errors:
+        * 1) No completion (timeout reached)
+        * 2) There is a completion but the driver didn't get any msi-x interrupt.
+        */
+       if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+               ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+               ena_com_handle_admin_completion(admin_queue);
+               admin_queue->stats.no_completion++;
+               ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+               if (comp_ctx->status == ENA_CMD_COMPLETED)
+                       ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+                                   comp_ctx->cmd_opcode);
+               else
+                       ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+                                   comp_ctx->cmd_opcode, comp_ctx->status);
+
+               admin_queue->running_state = false;
+               ret = ENA_COM_TIMER_EXPIRED;
+               goto err;
+       }
+
+       ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+       comp_ctxt_release(admin_queue, comp_ctx);
+       return ret;
+}
+
+/* This method reads a hardware device register by posting a write and
+ * waiting for the response.
+ * On timeout the function will return ENA_MMIO_READ_TIMEOUT.
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+       volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+               mmio_read->read_resp;
+       u32 mmio_read_reg, ret, i;
+       unsigned long flags;
+       u32 timeout = mmio_read->reg_read_to;
+
+       ENA_MIGHT_SLEEP();
+
+       if (timeout == 0)
+               timeout = ENA_REG_READ_TIMEOUT;
+
+       /* If readless is disabled, perform regular read */
+       if (!mmio_read->readless_supported)
+               return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
+
+       ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
+       mmio_read->seq_num++;
+
+       read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+       mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+                       ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+       mmio_read_reg |= mmio_read->seq_num &
+                       ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+       /* make sure read_resp->req_id gets updated before the hw can write
+        * to it
+        */
+       wmb();
+
+       ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+       for (i = 0; i < timeout; i++) {
+               if (read_resp->req_id == mmio_read->seq_num)
+                       break;
+
+               ENA_UDELAY(1);
+       }
+
+       if (unlikely(i == timeout)) {
+               ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+                           mmio_read->seq_num,
+                           offset,
+                           read_resp->req_id,
+                           read_resp->reg_off);
+               ret = ENA_MMIO_READ_TIMEOUT;
+               goto err;
+       }
+
+       if (read_resp->reg_off != offset) {
+               ena_trc_err("Read failure: wrong offset provided");
+               ret = ENA_MMIO_READ_TIMEOUT;
+       } else {
+               ret = read_resp->reg_val;
+       }
+err:
+       ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
+
+       return ret;
+}
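ena_com_reg_bar_read32() above implements the "readless" register read: the host posts a request word that encodes the register offset and a sequence number, then polls a DMA response buffer until the device echoes the sequence number back. A standalone sketch of the idea (illustrative only; the field layout here is hypothetical, the real one comes from ena_regs_defs.h):

#include <stdint.h>
#include <stdio.h>

struct read_resp {              /* host memory the device writes into */
        uint16_t req_id;
        uint16_t reg_off;
        uint32_t reg_val;
};

static uint32_t pack_request(uint16_t offset, uint16_t seq)
{
        return ((uint32_t)offset << 16) | seq;  /* offset in high, id in low */
}

/* fake "device": answers the request immediately */
static void device_answer(uint32_t req, volatile struct read_resp *resp)
{
        resp->reg_off = (uint16_t)(req >> 16);
        resp->req_id = (uint16_t)req;
        resp->reg_val = 0xCAFEBABE;
}

int main(void)
{
        volatile struct read_resp resp = { 0 };
        uint16_t seq = 1;
        uint32_t req = pack_request(0x58, seq);
        unsigned i, timeout = 1000;

        device_answer(req, &resp);              /* stands in for the doorbell write */
        for (i = 0; i < timeout; i++)           /* poll until the id is echoed */
                if (resp.req_id == seq)
                        break;
        if (i < timeout && resp.reg_off == 0x58)
                printf("reg 0x58 = 0x%08x\n", resp.reg_val);
        return 0;
}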
+
+/* There are two ways to wait for completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on a wait queue until the completion is ready
+ * (or the timeout expires).
+ * It is expected that the IRQ handler calls ena_com_handle_admin_completion
+ * to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+                                            struct ena_com_admin_queue *admin_queue)
+{
+       if (admin_queue->polling)
+               return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+                                                                admin_queue);
+
+       return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+                                                           admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+                                struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+       struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+       u8 direction;
+       int ret;
+
+       memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+       if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+               direction = ENA_ADMIN_SQ_DIRECTION_TX;
+       else
+               direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+       destroy_cmd.sq.sq_identity |= (direction <<
+               ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+               ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+       destroy_cmd.sq.sq_idx = io_sq->idx;
+       destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&destroy_cmd,
+                                           sizeof(destroy_cmd),
+                                           (struct ena_admin_acq_entry *)&destroy_resp,
+                                           sizeof(destroy_resp));
+
+       if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+               ena_trc_err("failed to destroy io sq error: %d\n", ret);
+
+       return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+                                 struct ena_com_io_sq *io_sq,
+                                 struct ena_com_io_cq *io_cq)
+{
+       size_t size;
+
+       if (io_cq->cdesc_addr.virt_addr) {
+               size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     size,
+                                     io_cq->cdesc_addr.virt_addr,
+                                     io_cq->cdesc_addr.phys_addr,
+                                     io_cq->cdesc_addr.mem_handle);
+
+               io_cq->cdesc_addr.virt_addr = NULL;
+       }
+
+       if (io_sq->desc_addr.virt_addr) {
+               size = io_sq->desc_entry_size * io_sq->q_depth;
+
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     size,
+                                     io_sq->desc_addr.virt_addr,
+                                     io_sq->desc_addr.phys_addr,
+                                     io_sq->desc_addr.mem_handle);
+
+               io_sq->desc_addr.virt_addr = NULL;
+       }
+
+       if (io_sq->bounce_buf_ctrl.base_buffer) {
+               size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+               ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+               io_sq->bounce_buf_ctrl.base_buffer = NULL;
+       }
+}
+
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+                               u16 exp_state)
+{
+       u32 val, i;
+
+       for (i = 0; i < timeout; i++) {
+               val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+               if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+                       ena_trc_err("Reg read timeout occurred\n");
+                       return ENA_COM_TIMER_EXPIRED;
+               }
+
+               if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+                       exp_state)
+                       return 0;
+
+               /* The resolution of the timeout is 100ms */
+               ENA_MSLEEP(100);
+       }
+
+       return ENA_COM_TIMER_EXPIRED;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+                                              enum ena_admin_aq_feature_id feature_id)
+{
+       u32 feature_mask = 1 << feature_id;
+
+       /* Device attributes are always supported */
+       if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+           !(ena_dev->supported_features & feature_mask))
+               return false;
+
+       return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+                                 struct ena_admin_get_feat_resp *get_resp,
+                                 enum ena_admin_aq_feature_id feature_id,
+                                 dma_addr_t control_buf_dma_addr,
+                                 u32 control_buff_size)
+{
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_get_feat_cmd get_cmd;
+       int ret;
+
+       if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+               ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       memset(&get_cmd, 0x0, sizeof(get_cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+       if (control_buff_size)
+               get_cmd.aq_common_descriptor.flags =
+                       ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+       else
+               get_cmd.aq_common_descriptor.flags = 0;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &get_cmd.control_buffer.address,
+                                  control_buf_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
+       get_cmd.control_buffer.length = control_buff_size;
+
+       get_cmd.feat_common.feature_id = feature_id;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)
+                                           &get_cmd,
+                                           sizeof(get_cmd),
+                                           (struct ena_admin_acq_entry *)
+                                           get_resp,
+                                           sizeof(*get_resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+                           feature_id, ret);
+
+       return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+                              struct ena_admin_get_feat_resp *get_resp,
+                              enum ena_admin_aq_feature_id feature_id)
+{
+       return ena_com_get_feature_ex(ena_dev,
+                                     get_resp,
+                                     feature_id,
+                                     0,
+                                     0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                              sizeof(*rss->hash_key),
+                              rss->hash_key,
+                              rss->hash_key_dma_addr,
+                              rss->hash_key_mem_handle);
+
+       if (unlikely(!rss->hash_key))
+               return ENA_COM_NO_MEM;
+
+       return 0;
+}
+
+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+
+       if (rss->hash_key)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     sizeof(*rss->hash_key),
+                                     rss->hash_key,
+                                     rss->hash_key_dma_addr,
+                                     rss->hash_key_mem_handle);
+       rss->hash_key = NULL;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                              sizeof(*rss->hash_ctrl),
+                              rss->hash_ctrl,
+                              rss->hash_ctrl_dma_addr,
+                              rss->hash_ctrl_mem_handle);
+
+       if (unlikely(!rss->hash_ctrl))
+               return ENA_COM_NO_MEM;
+
+       return 0;
+}
+
+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+
+       if (rss->hash_ctrl)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     sizeof(*rss->hash_ctrl),
+                                     rss->hash_ctrl,
+                                     rss->hash_ctrl_dma_addr,
+                                     rss->hash_ctrl_mem_handle);
+       rss->hash_ctrl = NULL;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+                                          u16 log_size)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_get_feat_resp get_resp;
+       size_t tbl_size;
+       int ret;
+
+       ret = ena_com_get_feature(ena_dev, &get_resp,
+                                 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+       if (unlikely(ret))
+               return ret;
+
+       if ((get_resp.u.ind_table.min_size > log_size) ||
+           (get_resp.u.ind_table.max_size < log_size)) {
+               ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+                           1 << log_size,
+                           1 << get_resp.u.ind_table.min_size,
+                           1 << get_resp.u.ind_table.max_size);
+               return ENA_COM_INVAL;
+       }
+
+       tbl_size = (1ULL << log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                            tbl_size,
+                            rss->rss_ind_tbl,
+                            rss->rss_ind_tbl_dma_addr,
+                            rss->rss_ind_tbl_mem_handle);
+       if (unlikely(!rss->rss_ind_tbl))
+               goto mem_err1;
+
+       tbl_size = (1ULL << log_size) * sizeof(u16);
+       rss->host_rss_ind_tbl =
+               ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
+       if (unlikely(!rss->host_rss_ind_tbl))
+               goto mem_err2;
+
+       rss->tbl_log_size = log_size;
+
+       return 0;
+
+mem_err2:
+       tbl_size = (1ULL << log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+
+       ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                             tbl_size,
+                             rss->rss_ind_tbl,
+                             rss->rss_ind_tbl_dma_addr,
+                             rss->rss_ind_tbl_mem_handle);
+       rss->rss_ind_tbl = NULL;
+mem_err1:
+       rss->tbl_log_size = 0;
+       return ENA_COM_NO_MEM;
+}
+
+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       size_t tbl_size = (1ULL << rss->tbl_log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+
+       if (rss->rss_ind_tbl)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     tbl_size,
+                                     rss->rss_ind_tbl,
+                                     rss->rss_ind_tbl_dma_addr,
+                                     rss->rss_ind_tbl_mem_handle);
+       rss->rss_ind_tbl = NULL;
+
+       if (rss->host_rss_ind_tbl)
+               ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+       rss->host_rss_ind_tbl = NULL;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+                               struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_admin_aq_create_sq_cmd create_cmd;
+       struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+       u8 direction;
+       int ret;
+
+       memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+       create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+       if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+               direction = ENA_ADMIN_SQ_DIRECTION_TX;
+       else
+               direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+       create_cmd.sq_identity |= (direction <<
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+       create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+       create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+       create_cmd.sq_caps_3 |=
+               ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+       create_cmd.cq_idx = cq_idx;
+       create_cmd.sq_depth = io_sq->q_depth;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+               ret = ena_com_mem_addr_set(ena_dev,
+                                          &create_cmd.sq_ba,
+                                          io_sq->desc_addr.phys_addr);
+               if (unlikely(ret)) {
+                       ena_trc_err("memory address set failed\n");
+                       return ret;
+               }
+       }
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&create_cmd,
+                                           sizeof(create_cmd),
+                                           (struct ena_admin_acq_entry *)&cmd_completion,
+                                           sizeof(cmd_completion));
+       if (unlikely(ret)) {
+               ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+               return ret;
+       }
+
+       io_sq->idx = cmd_completion.sq_idx;
+
+       io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+               (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+                               + cmd_completion.llq_headers_offset);
+
+               io_sq->desc_addr.pbuf_dev_addr =
+                       (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+                       cmd_completion.llq_descriptors_offset);
+       }
+
+       ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+       return ret;
+}
+
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_com_io_sq *io_sq;
+       u16 qid;
+       int i;
+
+       for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+               qid = rss->host_rss_ind_tbl[i];
+               if (qid >= ENA_TOTAL_NUM_QUEUES)
+                       return ENA_COM_INVAL;
+
+               io_sq = &ena_dev->io_sq_queues[qid];
+
+               if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+                       return ENA_COM_INVAL;
+
+               rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+       }
+
+       return 0;
+}
+
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+       u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+       struct ena_rss *rss = &ena_dev->rss;
+       u8 idx;
+       u16 i;
+
+       for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+               dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+       for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+               if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
+                       return ENA_COM_INVAL;
+               idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+
+               if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+                       return ENA_COM_INVAL;
+
+               rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+       }
+
+       return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+       size_t size;
+
+       size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+       ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+       if (!ena_dev->intr_moder_tbl)
+               return ENA_COM_NO_MEM;
+
+       ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+       return 0;
+}
+
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+                                                u16 intr_delay_resolution)
+{
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+       unsigned int i;
+
+       if (!intr_delay_resolution) {
+               ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+               intr_delay_resolution = 1;
+       }
+       ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+       /* update Rx */
+       for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+               intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+       /* update Tx */
+       ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/*******************************      API       ******************************/
+/*****************************************************************************/
+
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+                                 struct ena_admin_aq_entry *cmd,
+                                 size_t cmd_size,
+                                 struct ena_admin_acq_entry *comp,
+                                 size_t comp_size)
+{
+       struct ena_comp_ctx *comp_ctx;
+       int ret;
+
+       comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+                                           comp, comp_size);
+       if (unlikely(IS_ERR(comp_ctx))) {
+               if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+                       ena_trc_dbg("Failed to submit command [%ld]\n",
+                                   PTR_ERR(comp_ctx));
+               else
+                       ena_trc_err("Failed to submit command [%ld]\n",
+                                   PTR_ERR(comp_ctx));
+
+               return PTR_ERR(comp_ctx);
+       }
+
+       ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+       if (unlikely(ret)) {
+               if (admin_queue->running_state)
+                       ena_trc_err("Failed to process command. ret = %d\n",
+                                   ret);
+               else
+                       ena_trc_dbg("Failed to process command. ret = %d\n",
+                                   ret);
+       }
+       return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+                        struct ena_com_io_cq *io_cq)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_admin_aq_create_cq_cmd create_cmd;
+       struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+       int ret;
+
+       memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+       create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+       create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+               ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+       create_cmd.cq_caps_1 |=
+               ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+       create_cmd.msix_vector = io_cq->msix_vector;
+       create_cmd.cq_depth = io_cq->q_depth;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &create_cmd.cq_ba,
+                                  io_cq->cdesc_addr.phys_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&create_cmd,
+                                           sizeof(create_cmd),
+                                           (struct ena_admin_acq_entry *)&cmd_completion,
+                                           sizeof(cmd_completion));
+       if (unlikely(ret)) {
+               ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+               return ret;
+       }
+
+       io_cq->idx = cmd_completion.cq_idx;
+
+       io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+               cmd_completion.cq_interrupt_unmask_register_offset);
+
+       if (cmd_completion.cq_head_db_register_offset)
+               io_cq->cq_head_db_reg =
+                       (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+                       cmd_completion.cq_head_db_register_offset);
+
+       if (cmd_completion.numa_node_register_offset)
+               io_cq->numa_node_cfg_reg =
+                       (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+                       cmd_completion.numa_node_register_offset);
+
+       ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+       return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+                           struct ena_com_io_sq **io_sq,
+                           struct ena_com_io_cq **io_cq)
+{
+       if (qid >= ENA_TOTAL_NUM_QUEUES) {
+               ena_trc_err("Invalid queue number %d but the max is %d\n",
+                           qid, ENA_TOTAL_NUM_QUEUES);
+               return ENA_COM_INVAL;
+       }
+
+       *io_sq = &ena_dev->io_sq_queues[qid];
+       *io_cq = &ena_dev->io_cq_queues[qid];
+
+       return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_comp_ctx *comp_ctx;
+       u16 i;
+
+       if (!admin_queue->comp_ctx)
+               return;
+
+       for (i = 0; i < admin_queue->q_depth; i++) {
+               comp_ctx = get_comp_ctxt(admin_queue, i, false);
+               if (unlikely(!comp_ctx))
+                       break;
+
+               comp_ctx->status = ENA_CMD_ABORTED;
+
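+               /* Wake any waiter so it can observe the aborted status */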
+               ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+       }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       unsigned long flags;
+
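+       /* Poll until all outstanding admin commands have completed or been
+        * aborted, releasing the queue lock while sleeping.
+        */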
+       ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+       while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
+               ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+               ENA_MSLEEP(20);
+               ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+       }
+       ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+                         struct ena_com_io_cq *io_cq)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+       struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+       int ret;
+
+       memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+       destroy_cmd.cq_idx = io_cq->idx;
+       destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&destroy_cmd,
+                                           sizeof(destroy_cmd),
+                                           (struct ena_admin_acq_entry *)&destroy_resp,
+                                           sizeof(destroy_resp));
+
+       if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+               ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+       return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+       return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       unsigned long flags;
+
+       ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+       ena_dev->admin_queue.running_state = state;
+       ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+       u16 depth = ena_dev->aenq.q_depth;
+
+       ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+
+       /* Init head_db to mark that all entries in the queue
+        * are initially available
+        */
+       ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       struct ena_admin_get_feat_resp get_resp;
+       int ret;
+
+       ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+       if (ret) {
+               ena_trc_info("Can't get aenq configuration\n");
+               return ret;
+       }
+
+       if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+               ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+                            get_resp.u.aenq.supported_groups,
+                            groups_flag);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       memset(&cmd, 0x0, sizeof(cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.aq_common_descriptor.flags = 0;
+       cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+       cmd.u.aenq.enabled_groups = groups_flag;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+
+       return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+       u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+       int width;
+
+       if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+               ena_trc_err("Reg read timeout occurred\n");
+               return ENA_COM_TIMER_EXPIRED;
+       }
+
+       width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+               ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+       ena_trc_dbg("ENA dma width: %d\n", width);
+
+       if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
+               ena_trc_err("DMA width illegal value: %d\n", width);
+               return ENA_COM_INVAL;
+       }
+
+       ena_dev->dma_addr_bits = width;
+
+       return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+       u32 ver;
+       u32 ctrl_ver;
+       u32 ctrl_ver_masked;
+
+       /* Make sure the ENA version and the controller version are at least
+        * the versions the driver expects
+        */
+       ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+       ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+                                         ENA_REGS_CONTROLLER_VERSION_OFF);
+
+       if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+                    (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+               ena_trc_err("Reg read timeout occurred\n");
+               return ENA_COM_TIMER_EXPIRED;
+       }
+
+       ena_trc_info("ena device version: %d.%d\n",
+                    (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+                    ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+                    ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+       if (ver < MIN_ENA_VER) {
+               ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
+               return -1;
+       }
+
+       ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+                    (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
+                    >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+                    (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
+                    >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+                    (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+                    (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+                    ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+       ctrl_ver_masked =
+               (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+               (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+               (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+       /* Validate the ctrl version without the implementation ID */
+       if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+               ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_com_admin_cq *cq = &admin_queue->cq;
+       struct ena_com_admin_sq *sq = &admin_queue->sq;
+       struct ena_com_aenq *aenq = &ena_dev->aenq;
+       u16 size;
+
+       if (admin_queue->comp_ctx)
+               ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
+
+       ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
+
+       if (admin_queue->comp_ctx)
+               ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+       admin_queue->comp_ctx = NULL;
+       size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+       if (sq->entries)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+                                     sq->dma_addr, sq->mem_handle);
+       sq->entries = NULL;
+
+       size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+       if (cq->entries)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+                                     cq->dma_addr, cq->mem_handle);
+       cq->entries = NULL;
+
+       size = ADMIN_AENQ_SIZE(aenq->q_depth);
+       if (ena_dev->aenq.entries)
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+                                     aenq->dma_addr, aenq->mem_handle);
+       aenq->entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+       u32 mask_value = 0;
+
+       if (polling)
+               mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+       ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
+       ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+       ENA_SPINLOCK_INIT(mmio_read->lock);
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                              sizeof(*mmio_read->read_resp),
+                              mmio_read->read_resp,
+                              mmio_read->read_resp_dma_addr,
+                              mmio_read->read_resp_mem_handle);
+       if (unlikely(!mmio_read->read_resp))
+               return ENA_COM_NO_MEM;
+
+       ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+       mmio_read->read_resp->req_id = 0x0;
+       mmio_read->seq_num = 0x0;
+       mmio_read->readless_supported = true;
+
+       return 0;
+}
+
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+       mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+       ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+       ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+       ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                             sizeof(*mmio_read->read_resp),
+                             mmio_read->read_resp,
+                             mmio_read->read_resp_dma_addr,
+                             mmio_read->read_resp_mem_handle);
+
+       mmio_read->read_resp = NULL;
+
+       ENA_SPINLOCK_DESTROY(mmio_read->lock);
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+       u32 addr_low, addr_high;
+
+       addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+       addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+       ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+       ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+                      struct ena_aenq_handlers *aenq_handlers,
+                      bool init_spinlock)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+       int ret;
+
+#ifdef ENA_INTERNAL
+       ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
+                    ENA_GEN_COMMIT, ENA_GEN_DATE);
+#endif
+       dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+       if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+               ena_trc_err("Reg read timeout occurred\n");
+               return ENA_COM_TIMER_EXPIRED;
+       }
+
+       if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+               ena_trc_err("Device isn't ready, abort com init\n");
+               return ENA_COM_NO_DEVICE;
+       }
+
+       admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+       admin_queue->bus = ena_dev->bus;
+       admin_queue->q_dmadev = ena_dev->dmadev;
+       admin_queue->polling = false;
+       admin_queue->curr_cmd_id = 0;
+
+       ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
+
+       if (init_spinlock)
+               ENA_SPINLOCK_INIT(admin_queue->q_lock);
+
+       ret = ena_com_init_comp_ctxt(admin_queue);
+       if (ret)
+               goto error;
+
+       ret = ena_com_admin_init_sq(admin_queue);
+       if (ret)
+               goto error;
+
+       ret = ena_com_admin_init_cq(admin_queue);
+       if (ret)
+               goto error;
+
+       admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+               ENA_REGS_AQ_DB_OFF);
+
+       addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+       addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+       ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+       ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
+
+       addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+       addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+       ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+       ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
+
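+       /* Encode the admin queue depth and entry sizes into the AQ/ACQ
+        * capability registers.
+        */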
+       aq_caps = 0;
+       aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+       aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+                       ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+                       ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+       acq_caps = 0;
+       acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+       acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+               ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+               ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+       ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+       ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
+       ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+       if (ret)
+               goto error;
+
+       admin_queue->running_state = true;
+
+       return 0;
+error:
+       ena_com_admin_destroy(ena_dev);
+
+       return ret;
+}
+
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+                           struct ena_com_create_io_ctx *ctx)
+{
+       struct ena_com_io_sq *io_sq;
+       struct ena_com_io_cq *io_cq;
+       int ret;
+
+       if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+               ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+                           ctx->qid, ENA_TOTAL_NUM_QUEUES);
+               return ENA_COM_INVAL;
+       }
+
+       io_sq = &ena_dev->io_sq_queues[ctx->qid];
+       io_cq = &ena_dev->io_cq_queues[ctx->qid];
+
+       memset(io_sq, 0x0, sizeof(*io_sq));
+       memset(io_cq, 0x0, sizeof(*io_cq));
+
+       /* Init CQ */
+       io_cq->q_depth = ctx->queue_size;
+       io_cq->direction = ctx->direction;
+       io_cq->qid = ctx->qid;
+
+       io_cq->msix_vector = ctx->msix_vector;
+
+       io_sq->q_depth = ctx->queue_size;
+       io_sq->direction = ctx->direction;
+       io_sq->qid = ctx->qid;
+
+       io_sq->mem_queue_type = ctx->mem_queue_type;
+
+       if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+               /* header length is limited to 8 bits */
+               io_sq->tx_max_header_size =
+                       ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
+
+       ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+       if (ret)
+               goto error;
+       ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
+       if (ret)
+               goto error;
+
+       ret = ena_com_create_io_cq(ena_dev, io_cq);
+       if (ret)
+               goto error;
+
+       ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+       if (ret)
+               goto destroy_io_cq;
+
+       return 0;
+
+destroy_io_cq:
+       ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+       ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+       return ret;
+}
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+       struct ena_com_io_sq *io_sq;
+       struct ena_com_io_cq *io_cq;
+
+       if (qid >= ENA_TOTAL_NUM_QUEUES) {
+               ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+                           qid, ENA_TOTAL_NUM_QUEUES);
+               return;
+       }
+
+       io_sq = &ena_dev->io_sq_queues[qid];
+       io_cq = &ena_dev->io_cq_queues[qid];
+
+       ena_com_destroy_io_sq(ena_dev, io_sq);
+       ena_com_destroy_io_cq(ena_dev, io_cq);
+
+       ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+                           struct ena_admin_get_feat_resp *resp)
+{
+       return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+                             struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+       struct ena_admin_get_feat_resp get_resp;
+       int rc;
+
+       rc = ena_com_get_feature(ena_dev, &get_resp,
+                                ENA_ADMIN_DEVICE_ATTRIBUTES);
+       if (rc)
+               return rc;
+
+       memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+              sizeof(get_resp.u.dev_attr));
+       ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+       rc = ena_com_get_feature(ena_dev, &get_resp,
+                                ENA_ADMIN_MAX_QUEUES_NUM);
+       if (rc)
+               return rc;
+
+       memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+              sizeof(get_resp.u.max_queue));
+       ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+       rc = ena_com_get_feature(ena_dev, &get_resp,
+                                ENA_ADMIN_AENQ_CONFIG);
+       if (rc)
+               return rc;
+
+       memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+              sizeof(get_resp.u.aenq));
+
+       rc = ena_com_get_feature(ena_dev, &get_resp,
+                                ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+       if (rc)
+               return rc;
+
+       memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+              sizeof(get_resp.u.offload));
+
+       /* The driver hints command isn't mandatory. If the device doesn't
+        * support it, zero the driver hints instead.
+        */
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+       if (!rc)
+               memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+                      sizeof(get_resp.u.hw_hints));
+       else if (rc == ENA_COM_UNSUPPORTED)
+               memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+       else
+               return rc;
+
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+       if (!rc)
+               memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+                      sizeof(get_resp.u.llq));
+       else if (rc == ENA_COM_UNSUPPORTED)
+               memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+       else
+               return rc;
+
+       return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+       ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_com_get_specific_aenq_cb:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+                                                    u16 group)
+{
+       struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+       if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+               return aenq_handlers->handlers[group];
+
+       return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_com_aenq_intr_handler:
+ * handles incoming AENQ events:
+ * pops events from the queue and applies the matching handler
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+       struct ena_admin_aenq_entry *aenq_e;
+       struct ena_admin_aenq_common_desc *aenq_common;
+       struct ena_com_aenq *aenq  = &dev->aenq;
+       ena_aenq_handler handler_cb;
+       unsigned long long timestamp;
+       u16 masked_head, processed = 0;
+       u8 phase;
+
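+       /* The head is masked with (q_depth - 1), which assumes the AENQ
+        * depth is a power of two.
+        */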
+       masked_head = aenq->head & (aenq->q_depth - 1);
+       phase = aenq->phase;
+       aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+       aenq_common = &aenq_e->aenq_common_desc;
+
+       /* Go over all the events */
+       while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+               phase) {
+               timestamp = (unsigned long long)aenq_common->timestamp_low |
+                       ((unsigned long long)aenq_common->timestamp_high << 32);
+               ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+                           aenq_common->group,
+                           aenq_common->syndrom,
+                           timestamp);
+
+               /* Handle the specific event */
+               handler_cb = ena_com_get_specific_aenq_cb(dev,
+                                                         aenq_common->group);
+               handler_cb(data, aenq_e); /* call the actual event handler */
+
+               /* Get next event entry */
+               masked_head++;
+               processed++;
+
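+               /* Wrapped around the queue; flip the expected phase bit */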
+               if (unlikely(masked_head == aenq->q_depth)) {
+                       masked_head = 0;
+                       phase = !phase;
+               }
+               aenq_e = &aenq->entries[masked_head];
+               aenq_common = &aenq_e->aenq_common_desc;
+       }
+
+       aenq->head += processed;
+       aenq->phase = phase;
+
+       /* Don't update aenq doorbell if there weren't any processed events */
+       if (!processed)
+               return;
+
+       /* write the aenq doorbell after all AENQ descriptors were read */
+       mb();
+       ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+#ifdef ENA_EXTENDED_STATS
+/*
+ * Sets the function index and queue index to be used by the
+ * get-full-statistics feature.
+ */
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+                                         u32 func_queue)
+{
+       /* The function and queue are received from the user in the following
+        * format:
+        * Bottom half-word:    function
+        * Top half-word:       queue
+        */
+       ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
+       ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
+
+       return 0;
+}
+
+#endif /* ENA_EXTENDED_STATS */
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+                     enum ena_regs_reset_reason_types reset_reason)
+{
+       u32 stat, timeout, cap, reset_val;
+       int rc;
+
+       stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+       cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+       if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+                    (cap == ENA_MMIO_READ_TIMEOUT))) {
+               ena_trc_err("Reg read32 timeout occurred\n");
+               return ENA_COM_TIMER_EXPIRED;
+       }
+
+       if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+               ena_trc_err("Device isn't ready, can't reset device\n");
+               return ENA_COM_INVAL;
+       }
+
+       timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+                       ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+       if (timeout == 0) {
+               ena_trc_err("Invalid timeout value\n");
+               return ENA_COM_INVAL;
+       }
+
+       /* start reset */
+       reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+       reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+                       ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+       ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+
+       /* Write again the MMIO read request address */
+       ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+       rc = wait_for_reset_state(ena_dev, timeout,
+                                 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+       if (rc != 0) {
+               ena_trc_err("Reset indication didn't turn on\n");
+               return rc;
+       }
+
+       /* reset done */
+       ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+       rc = wait_for_reset_state(ena_dev, timeout, 0);
+       if (rc != 0) {
+               ena_trc_err("Reset indication didn't turn off\n");
+               return rc;
+       }
+
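+       /* Pick up the admin command completion timeout granted by the device
+        * capabilities; fall back to the default when it is unset.
+        */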
+       timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+               ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+       if (timeout)
+               /* the resolution of timeout reg is 100ms */
+               ena_dev->admin_queue.completion_timeout = timeout * 100000;
+       else
+               ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+       return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+                            struct ena_com_stats_ctx *ctx,
+                            enum ena_admin_get_stats_type type)
+{
+       struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+       struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+       struct ena_com_admin_queue *admin_queue;
+       int ret;
+
+       admin_queue = &ena_dev->admin_queue;
+
+       get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+       get_cmd->aq_common_descriptor.flags = 0;
+       get_cmd->type = type;
+
+       ret =  ena_com_execute_admin_command(admin_queue,
+                                            (struct ena_admin_aq_entry *)get_cmd,
+                                            sizeof(*get_cmd),
+                                            (struct ena_admin_acq_entry *)get_resp,
+                                            sizeof(*get_resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to get stats. error: %d\n", ret);
+
+       return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+                               struct ena_admin_basic_stats *stats)
+{
+       struct ena_com_stats_ctx ctx;
+       int ret;
+
+       memset(&ctx, 0x0, sizeof(ctx));
+       ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+       if (likely(ret == 0))
+               memcpy(stats, &ctx.get_resp.basic_stats,
+                      sizeof(ctx.get_resp.basic_stats));
+
+       return ret;
+}
+#ifdef ENA_EXTENDED_STATS
+
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+                                  u32 len)
+{
+       struct ena_com_stats_ctx ctx;
+       struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
+       ena_mem_handle_t mem_handle;
+       void *virt_addr;
+       dma_addr_t phys_addr;
+       int ret;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+                              virt_addr, phys_addr, mem_handle);
+       if (!virt_addr) {
+               ret = ENA_COM_NO_MEM;
+               goto done;
+       }
+       memset(&ctx, 0x0, sizeof(ctx));
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &get_cmd->u.control_buffer.address,
+                                  phys_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               goto free_ext_stats_mem;
+       }
+       get_cmd->u.control_buffer.length = len;
+
+       get_cmd->device_id = ena_dev->stats_func;
+       get_cmd->queue_idx = ena_dev->stats_queue;
+
+       ret = ena_get_dev_stats(ena_dev, &ctx,
+                               ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
+       if (ret < 0)
+               goto free_ext_stats_mem;
+
+       ret = snprintf(buff, len, "%s", (char *)virt_addr);
+
+free_ext_stats_mem:
+       ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
+                             mem_handle);
+done:
+       return ret;
+}
+#endif
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       int ret;
+
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+               ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       memset(&cmd, 0x0, sizeof(cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.aq_common_descriptor.flags = 0;
+       cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+       cmd.u.mtu.mtu = mtu;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+
+       return ret;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+                                struct ena_admin_feature_offload_desc *offload)
+{
+       int ret;
+       struct ena_admin_get_feat_resp resp;
+
+       ret = ena_com_get_feature(ena_dev, &resp,
+                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+       if (unlikely(ret)) {
+               ena_trc_err("Failed to get offload capabilities %d\n", ret);
+               return ret;
+       }
+
+       memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+       return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       struct ena_admin_get_feat_resp get_resp;
+       int ret;
+
+       if (!ena_com_check_supported_feature_id(ena_dev,
+                                               ENA_ADMIN_RSS_HASH_FUNCTION)) {
+               ena_trc_dbg("Feature %d isn't supported\n",
+                           ENA_ADMIN_RSS_HASH_FUNCTION);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       /* Validate hash function is supported */
+       ret = ena_com_get_feature(ena_dev, &get_resp,
+                                 ENA_ADMIN_RSS_HASH_FUNCTION);
+       if (unlikely(ret))
+               return ret;
+
+       if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
+               ena_trc_err("Func hash %d isn't supported by device, abort\n",
+                           rss->hash_func);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       memset(&cmd, 0x0, sizeof(cmd));
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.aq_common_descriptor.flags =
+               ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+       cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+       cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+       cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &cmd.control_buffer.address,
+                                  rss->hash_key_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
+       cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+       if (unlikely(ret)) {
+               ena_trc_err("Failed to set hash function %d. error: %d\n",
+                           rss->hash_func, ret);
+               return ENA_COM_INVAL;
+       }
+
+       return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+                              enum ena_admin_hash_functions func,
+                              const u8 *key, u16 key_len, u32 init_val)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_get_feat_resp get_resp;
+       struct ena_admin_feature_rss_flow_hash_control *hash_key =
+               rss->hash_key;
+       int rc;
+
+       /* Make sure the key length is a multiple of DWORDs */
+       if (unlikely(key_len & 0x3))
+               return ENA_COM_INVAL;
+
+       rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+                                   ENA_ADMIN_RSS_HASH_FUNCTION,
+                                   rss->hash_key_dma_addr,
+                                   sizeof(*rss->hash_key));
+       if (unlikely(rc))
+               return rc;
+
+       if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+               ena_trc_err("Flow hash function %d isn't supported\n", func);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       switch (func) {
+       case ENA_ADMIN_TOEPLITZ:
+               if (key_len > sizeof(hash_key->key)) {
+                       ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
+                                   key_len, sizeof(hash_key->key));
+                       return ENA_COM_INVAL;
+               }
+
+               memcpy(hash_key->key, key, key_len);
+               rss->hash_init_val = init_val;
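+               /* keys_num is counted in 32-bit words */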
+               hash_key->keys_num = key_len >> 2;
+               break;
+       case ENA_ADMIN_CRC32:
+               rss->hash_init_val = init_val;
+               break;
+       default:
+               ena_trc_err("Invalid hash function (%d)\n", func);
+               return ENA_COM_INVAL;
+       }
+
+       rc = ena_com_set_hash_function(ena_dev);
+
+       /* Restore the old function */
+       if (unlikely(rc))
+               ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+       return rc;
+}
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+                             enum ena_admin_hash_functions *func,
+                             u8 *key)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_get_feat_resp get_resp;
+       struct ena_admin_feature_rss_flow_hash_control *hash_key =
+               rss->hash_key;
+       int rc;
+
+       rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+                                   ENA_ADMIN_RSS_HASH_FUNCTION,
+                                   rss->hash_key_dma_addr,
+                                   sizeof(*rss->hash_key));
+       if (unlikely(rc))
+               return rc;
+
+       rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+       if (func)
+               *func = rss->hash_func;
+
+       if (key)
+               memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+
+       return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+                         enum ena_admin_flow_hash_proto proto,
+                         u16 *fields)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_get_feat_resp get_resp;
+       int rc;
+
+       rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+                                   ENA_ADMIN_RSS_HASH_INPUT,
+                                   rss->hash_ctrl_dma_addr,
+                                   sizeof(*rss->hash_ctrl));
+       if (unlikely(rc))
+               return rc;
+
+       if (fields)
+               *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+       return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       int ret;
+
+       if (!ena_com_check_supported_feature_id(ena_dev,
+                                               ENA_ADMIN_RSS_HASH_INPUT)) {
+               ena_trc_dbg("Feature %d isn't supported\n",
+                           ENA_ADMIN_RSS_HASH_INPUT);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       memset(&cmd, 0x0, sizeof(cmd));
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.aq_common_descriptor.flags =
+               ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+       cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+       cmd.u.flow_hash_input.enabled_input_sort =
+               ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+               ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &cmd.control_buffer.address,
+                                  rss->hash_ctrl_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+       cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+       if (unlikely(ret))
+               ena_trc_err("Failed to set hash input. error: %d\n", ret);
+
+       return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_feature_rss_hash_control *hash_ctrl =
+               rss->hash_ctrl;
+       u16 available_fields = 0;
+       int rc, i;
+
+       /* Get the supported hash input */
+       rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+       if (unlikely(rc))
+               return rc;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+               ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+               ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+               ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+               ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+               ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+               ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+       for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+               available_fields = hash_ctrl->selected_fields[i].fields &
+                               hash_ctrl->supported_fields[i].fields;
+               if (available_fields != hash_ctrl->selected_fields[i].fields) {
+                       ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
+                                   i, hash_ctrl->supported_fields[i].fields,
+                                   hash_ctrl->selected_fields[i].fields);
+                       return ENA_COM_UNSUPPORTED;
+               }
+       }
+
+       rc = ena_com_set_hash_ctrl(ena_dev);
+
+       /* In case of failure, restore the old hash ctrl */
+       if (unlikely(rc))
+               ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+       return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+                          enum ena_admin_flow_hash_proto proto,
+                          u16 hash_fields)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+       u16 supported_fields;
+       int rc;
+
+       if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+               ena_trc_err("Invalid proto num (%u)\n", proto);
+               return ENA_COM_INVAL;
+       }
+
+       /* Get the ctrl table */
+       rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+       if (unlikely(rc))
+               return rc;
+
+       /* Make sure all the fields are supported */
+       supported_fields = hash_ctrl->supported_fields[proto].fields;
+       if ((hash_fields & supported_fields) != hash_fields) {
+               ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+                           proto, hash_fields, supported_fields);
+       }
+
+       hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+       rc = ena_com_set_hash_ctrl(ena_dev);
+
+       /* In case of failure, restore the old hash ctrl */
+       if (unlikely(rc))
+               ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+       return rc;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+                                     u16 entry_idx, u16 entry_value)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+
+       if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+               return ENA_COM_INVAL;
+
+       if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
+               return ENA_COM_INVAL;
+
+       rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+       return 0;
+}
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       int ret;
+
+       if (!ena_com_check_supported_feature_id(ena_dev,
+                                               ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+               ena_trc_dbg("Feature %d isn't supported\n",
+                           ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+               return ENA_COM_UNSUPPORTED;
+       }
+
+       ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+       if (ret) {
+               ena_trc_err("Failed to convert host indirection table to device table\n");
+               return ret;
+       }
+
+       memset(&cmd, 0x0, sizeof(cmd));
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.aq_common_descriptor.flags =
+               ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+       cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+       cmd.u.ind_table.size = rss->tbl_log_size;
+       cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &cmd.control_buffer.address,
+                                  rss->rss_ind_tbl_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
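+       /* The indirection table holds (1 << tbl_log_size) entries */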
+       cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to set indirect table. error: %d\n", ret);
+
+       return ret;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_get_feat_resp get_resp;
+       u32 tbl_size;
+       int i, rc;
+
+       tbl_size = (1ULL << rss->tbl_log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+
+       rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+                                   rss->rss_ind_tbl_dma_addr,
+                                   tbl_size);
+       if (unlikely(rc))
+               return rc;
+
+       if (!ind_tbl)
+               return 0;
+
+       rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+       if (unlikely(rc))
+               return rc;
+
+       for (i = 0; i < (1 << rss->tbl_log_size); i++)
+               ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+       return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+       int rc;
+
+       memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+       rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+       if (unlikely(rc))
+               goto err_indr_tbl;
+
+       rc = ena_com_hash_key_allocate(ena_dev);
+       if (unlikely(rc))
+               goto err_hash_key;
+
+       rc = ena_com_hash_ctrl_init(ena_dev);
+       if (unlikely(rc))
+               goto err_hash_ctrl;
+
+       return 0;
+
+err_hash_ctrl:
+       ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+       ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+       return rc;
+}
+
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+       ena_com_indirect_table_destroy(ena_dev);
+       ena_com_hash_key_destroy(ena_dev);
+       ena_com_hash_ctrl_destroy(ena_dev);
+
+       memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+}
+
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+{
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                              SZ_4K,
+                              host_attr->host_info,
+                              host_attr->host_info_dma_addr,
+                              host_attr->host_info_dma_handle);
+       if (unlikely(!host_attr->host_info))
+               return ENA_COM_NO_MEM;
+
+       return 0;
+}
+
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+                               u32 debug_area_size)
+{
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+                              debug_area_size,
+                              host_attr->debug_area_virt_addr,
+                              host_attr->debug_area_dma_addr,
+                              host_attr->debug_area_dma_handle);
+       if (unlikely(!host_attr->debug_area_virt_addr)) {
+               host_attr->debug_area_size = 0;
+               return ENA_COM_NO_MEM;
+       }
+
+       host_attr->debug_area_size = debug_area_size;
+
+       return 0;
+}
+
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
+{
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+       if (host_attr->host_info) {
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     SZ_4K,
+                                     host_attr->host_info,
+                                     host_attr->host_info_dma_addr,
+                                     host_attr->host_info_dma_handle);
+               host_attr->host_info = NULL;
+       }
+}
+
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+{
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+       if (host_attr->debug_area_virt_addr) {
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     host_attr->debug_area_size,
+                                     host_attr->debug_area_virt_addr,
+                                     host_attr->debug_area_dma_addr,
+                                     host_attr->debug_area_dma_handle);
+               host_attr->debug_area_virt_addr = NULL;
+       }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+
+       int ret;
+
+       /* Host attribute config is called before ena_com_get_dev_attr_feat
+        * so ena_com can't check if the feature is supported.
+        */
+
+       memset(&cmd, 0x0, sizeof(cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &cmd.u.host_attr.debug_ba,
+                                  host_attr->debug_area_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &cmd.u.host_attr.os_info_ba,
+                                  host_attr->host_info_dma_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+
+       cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               ena_trc_err("Failed to set host attributes: %d\n", ret);
+
+       return ret;
+}
+
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+       return ena_com_check_supported_feature_id(ena_dev,
+                                                 ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+                                                     u32 tx_coalesce_usecs)
+{
+       if (!ena_dev->intr_delay_resolution) {
+               ena_trc_err("Illegal interrupt delay granularity value\n");
+               return ENA_COM_FAULT;
+       }
+
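+       /* Convert from microseconds to device interrupt-delay units */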
+       ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+               ena_dev->intr_delay_resolution;
+
+       return 0;
+}
+
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+                                                     u32 rx_coalesce_usecs)
+{
+       if (!ena_dev->intr_delay_resolution) {
+               ena_trc_err("Illegal interrupt delay granularity value\n");
+               return ENA_COM_FAULT;
+       }
+
+       /* We use the LOWEST entry of the moderation table for storing the
+        * nonadaptive interrupt coalescing values.
+        */
+       ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+               rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+       return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+       if (ena_dev->intr_moder_tbl)
+               ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+       ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+       struct ena_admin_get_feat_resp get_resp;
+       u16 delay_resolution;
+       int rc;
+
+       rc = ena_com_get_feature(ena_dev, &get_resp,
+                                ENA_ADMIN_INTERRUPT_MODERATION);
+
+       if (rc) {
+               if (rc == ENA_COM_UNSUPPORTED) {
+                       ena_trc_dbg("Feature %d isn't supported\n",
+                                   ENA_ADMIN_INTERRUPT_MODERATION);
+                       rc = 0;
+               } else {
+                       ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+                                   rc);
+               }
+
+               /* no moderation supported, disable adaptive support */
+               ena_com_disable_adaptive_moderation(ena_dev);
+               return rc;
+       }
+
+       rc = ena_com_init_interrupt_moderation_table(ena_dev);
+       if (rc)
+               goto err;
+
+       /* if moderation is supported by device we set adaptive moderation */
+       delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+       ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+       ena_com_enable_adaptive_moderation(ena_dev);
+
+       return 0;
+err:
+       ena_com_destroy_interrupt_moderation(ena_dev);
+       return rc;
+}
+
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+       if (!intr_moder_tbl)
+               return;
+
+       intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+               ENA_INTR_LOWEST_USECS;
+       intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+               ENA_INTR_LOWEST_PKTS;
+       intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+               ENA_INTR_LOWEST_BYTES;
+
+       intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+               ENA_INTR_LOW_USECS;
+       intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+               ENA_INTR_LOW_PKTS;
+       intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+               ENA_INTR_LOW_BYTES;
+
+       intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+               ENA_INTR_MID_USECS;
+       intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+               ENA_INTR_MID_PKTS;
+       intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+               ENA_INTR_MID_BYTES;
+
+       intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+               ENA_INTR_HIGH_USECS;
+       intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+               ENA_INTR_HIGH_PKTS;
+       intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+               ENA_INTR_HIGH_BYTES;
+
+       intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+               ENA_INTR_HIGHEST_USECS;
+       intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+               ENA_INTR_HIGHEST_PKTS;
+       intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+               ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+       return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+       if (intr_moder_tbl)
+               return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+       return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+                                       enum ena_intr_moder_level level,
+                                       struct ena_intr_moder_entry *entry)
+{
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+       if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+               return;
+
+       intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+       if (ena_dev->intr_delay_resolution)
+               intr_moder_tbl[level].intr_moder_interval /=
+                       ena_dev->intr_delay_resolution;
+       intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+
+       /* use hardcoded value until ethtool supports bytecount parameter */
+       if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+               intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+                                      enum ena_intr_moder_level level,
+                                      struct ena_intr_moder_entry *entry)
+{
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+       if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+               return;
+
+       entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+       if (ena_dev->intr_delay_resolution)
+               entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+       entry->pkts_per_interval =
+       intr_moder_tbl[level].pkts_per_interval;
+       entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq)
+{
+       int rc;
+       int size;
+
+       if (llq->max_llq_num == 0) {
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               return 0;
+       }
+
+       rc = ena_com_config_llq_info(ena_dev, llq);
+       if (rc)
+               return rc;
+
+       /* Validate the descriptor is not too big */
+       size = ena_dev->tx_max_header_size;
+       size += ena_dev->llq_info.descs_num_before_header *
+               sizeof(struct ena_eth_io_tx_desc);
+
+       if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+               ena_trc_err("the size of the LLQ entry is smaller than needed\n");
+               return ENA_COM_INVAL;
+       }
+
+       ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+       return 0;
+}
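+
+/*
+ * Worked example of the size check above (a sketch, assuming the usual
+ * 16-byte struct ena_eth_io_tx_desc and a 128-byte LLQ entry): with
+ * descs_num_before_header = 2 and tx_max_header_size = 96, the required
+ * size is 96 + 2 * 16 = 128 bytes, which still fits in the entry, so the
+ * device placement policy is selected; a larger header would fail the
+ * check and return ENA_COM_INVAL.
+ */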
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_com.h b/sys/dev/virtual/amazon/ena/ena-com/ena_com.h
new file mode 100644 (file)
index 0000000..9425205
--- /dev/null
@@ -0,0 +1,1120 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#ifndef ENA_INTERNAL
+#include "ena_plat.h"
+#else
+#include "ena_plat.h"
+#include "ena_includes.h"
+#endif
+
+#define ENA_MAX_NUM_IO_QUEUES          128U
+/* We need two queues for each IO (one for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES           (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth)   ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth)   ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS           (0)
+#define ENA_INTR_LOWEST_PKTS            (3)
+#define ENA_INTR_LOWEST_BYTES           (2 * 1524)
+
+#define ENA_INTR_LOW_USECS              (32)
+#define ENA_INTR_LOW_PKTS               (12)
+#define ENA_INTR_LOW_BYTES              (16 * 1024)
+
+#define ENA_INTR_MID_USECS              (80)
+#define ENA_INTR_MID_PKTS               (48)
+#define ENA_INTR_MID_BYTES              (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS             (128)
+#define ENA_INTR_HIGH_PKTS              (96)
+#define ENA_INTR_HIGH_BYTES             (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS          (192)
+#define ENA_INTR_HIGHEST_PKTS           (128)
+#define ENA_INTR_HIGHEST_BYTES          (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS             196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS             4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT                        6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT                        4
+#define ENA_INTR_MODER_LEVEL_STRIDE                    1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED              0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT                                0xFFFF
+
+enum ena_intr_moder_level {
+       ENA_INTR_MODER_LOWEST = 0,
+       ENA_INTR_MODER_LOW,
+       ENA_INTR_MODER_MID,
+       ENA_INTR_MODER_HIGH,
+       ENA_INTR_MODER_HIGHEST,
+       ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+       unsigned int intr_moder_interval;
+       unsigned int pkts_per_interval;
+       unsigned int bytes_per_interval;
+};
+
+enum queue_direction {
+       ENA_COM_IO_QUEUE_DIRECTION_TX,
+       ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+       dma_addr_t paddr; /**< Buffer physical address */
+       u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+       u16 len;
+       u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+       u8 __iomem *pbuf_dev_addr; /* LLQ address */
+       u8 *virt_addr;
+       dma_addr_t phys_addr;
+       ena_mem_handle_t mem_handle;
+};
+
+struct ena_com_tx_meta {
+       u16 mss;
+       u16 l3_hdr_len;
+       u16 l3_hdr_offset;
+       u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_llq_info {
+       bool inline_header;
+       u16 desc_stride_ctrl;
+
+       u16 desc_list_entry_size;
+       u16 descs_num_before_header;
+       u16 descs_per_entry;
+};
+
+struct ena_com_io_cq {
+       struct ena_com_io_desc_addr cdesc_addr;
+       void *bus;
+
+       /* Interrupt unmask register */
+       u32 __iomem *unmask_reg;
+
+       /* The completion queue head doorbell register */
+       u32 __iomem *cq_head_db_reg;
+
+       /* numa configuration register (for TPH) */
+       u32 __iomem *numa_node_cfg_reg;
+
+       /* The value to write to the above register to unmask
+        * the interrupt of this queue
+        */
+       u32 msix_vector;
+
+       enum queue_direction direction;
+
+       /* holds the number of cdesc of the current packet */
+       u16 cur_rx_pkt_cdesc_count;
+       /* save the first cdesc idx of the current packet */
+       u16 cur_rx_pkt_cdesc_start_idx;
+
+       u16 q_depth;
+       /* Caller qid */
+       u16 qid;
+
+       /* Device queue index */
+       u16 idx;
+       u16 head;
+       u16 last_head_update;
+       u8 phase;
+       u8 cdesc_entry_size_in_bytes;
+
+} ____cacheline_aligned;
+
+struct ena_com_io_bounce_buffer_control {
+       u8 *base_buffer;
+       u16 next_to_use;
+       u16 buffer_size;
+       u16 buffers_num;  /* Must be a power of 2 */
+};
+
+/* This struct keeps track of the current location of the next LLQ entry */
+struct ena_com_llq_pkt_ctrl {
+       u8 *curr_bounce_buf;
+       u16 idx;
+       u16 descs_left_in_line;
+};
+
+struct ena_com_io_sq {
+       struct ena_com_io_desc_addr desc_addr;
+       void *bus;
+
+       u32 __iomem *db_addr;
+       u8 __iomem *header_addr;
+
+       enum queue_direction direction;
+       enum ena_admin_placement_policy_type mem_queue_type;
+
+       u32 msix_vector;
+       struct ena_com_tx_meta cached_tx_meta;
+       struct ena_com_llq_info llq_info;
+       struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+       struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
+
+       u16 q_depth;
+       u16 qid;
+
+       u16 idx;
+       u16 tail;
+       u16 next_to_comp;
+       u16 llq_last_copy_tail;
+       u32 tx_max_header_size;
+       u8 phase;
+       u8 desc_entry_size;
+       u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+       struct ena_admin_acq_entry *entries;
+       ena_mem_handle_t mem_handle;
+       dma_addr_t dma_addr;
+
+       u16 head;
+       u8 phase;
+};
+
+struct ena_com_admin_sq {
+       struct ena_admin_aq_entry *entries;
+       ena_mem_handle_t mem_handle;
+       dma_addr_t dma_addr;
+
+       u32 __iomem *db_addr;
+
+       u16 head;
+       u16 tail;
+       u8 phase;
+
+};
+
+struct ena_com_stats_admin {
+       u32 aborted_cmd;
+       u32 submitted_cmd;
+       u32 completed_cmd;
+       u32 out_of_space;
+       u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+       void *q_dmadev;
+       void *bus;
+       ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
+       struct ena_comp_ctx *comp_ctx;
+       u32 completion_timeout;
+       u16 q_depth;
+       struct ena_com_admin_cq cq;
+       struct ena_com_admin_sq sq;
+
+       /* Indicate if the admin queue should poll for completion */
+       bool polling;
+
+       u16 curr_cmd_id;
+
+       /* Indicate that the ena was initialized and can
+        * process new admin commands
+        */
+       bool running_state;
+
+       /* Count the number of outstanding admin commands */
+       ena_atomic32_t outstanding_cmds;
+
+       struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+       u16 head;
+       u8 phase;
+       struct ena_admin_aenq_entry *entries;
+       dma_addr_t dma_addr;
+       ena_mem_handle_t mem_handle;
+       u16 q_depth;
+       struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+       struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+       dma_addr_t read_resp_dma_addr;
+       ena_mem_handle_t read_resp_mem_handle;
+       u32 reg_read_to; /* in us */
+       u16 seq_num;
+       bool readless_supported;
+       /* spin lock to ensure a single outstanding read */
+       ena_spinlock_t lock;
+};
+
+struct ena_rss {
+       /* Indirect table */
+       u16 *host_rss_ind_tbl;
+       struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+       dma_addr_t rss_ind_tbl_dma_addr;
+       ena_mem_handle_t rss_ind_tbl_mem_handle;
+       u16 tbl_log_size;
+
+       /* Hash key */
+       enum ena_admin_hash_functions hash_func;
+       struct ena_admin_feature_rss_flow_hash_control *hash_key;
+       dma_addr_t hash_key_dma_addr;
+       ena_mem_handle_t hash_key_mem_handle;
+       u32 hash_init_val;
+
+       /* Flow Control */
+       struct ena_admin_feature_rss_hash_control *hash_ctrl;
+       dma_addr_t hash_ctrl_dma_addr;
+       ena_mem_handle_t hash_ctrl_mem_handle;
+
+};
+
+struct ena_host_attribute {
+       /* Debug area */
+       u8 *debug_area_virt_addr;
+       dma_addr_t debug_area_dma_addr;
+       ena_mem_handle_t debug_area_dma_handle;
+       u32 debug_area_size;
+
+       /* Host information */
+       struct ena_admin_host_info *host_info;
+       dma_addr_t host_info_dma_addr;
+       ena_mem_handle_t host_info_dma_handle;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+       struct ena_com_admin_queue admin_queue;
+       struct ena_com_aenq aenq;
+       struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+       struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+       u8 __iomem *reg_bar;
+       void __iomem *mem_bar;
+       void *dmadev;
+       void *bus;
+
+       enum ena_admin_placement_policy_type tx_mem_queue_type;
+       u32 tx_max_header_size;
+       u16 stats_func; /* Selected function for extended statistic dump */
+       u16 stats_queue; /* Selected queue for extended statistic dump */
+
+       struct ena_com_mmio_read mmio_read;
+
+       struct ena_rss rss;
+       u32 supported_features;
+       u32 dma_addr_bits;
+
+       struct ena_host_attribute host_attr;
+       bool adaptive_coalescing;
+       u16 intr_delay_resolution;
+       u32 intr_moder_tx_interval;
+       struct ena_intr_moder_entry *intr_moder_tbl;
+
+       struct ena_com_llq_info llq_info;
+};
+
+struct ena_com_dev_get_features_ctx {
+       struct ena_admin_queue_feature_desc max_queues;
+       struct ena_admin_device_attr_feature_desc dev_attr;
+       struct ena_admin_feature_aenq_desc aenq;
+       struct ena_admin_feature_offload_desc offload;
+       struct ena_admin_ena_hw_hints hw_hints;
+       struct ena_admin_feature_llq_desc llq;
+};
+
+struct ena_com_create_io_ctx {
+       enum ena_admin_placement_policy_type mem_queue_type;
+       enum queue_direction direction;
+       int numa_node;
+       u32 msix_vector;
+       u16 queue_size;
+       u16 qid;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+       struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+       ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+       ena_aenq_handler unimplemented_handler;
+};
+
+/*****************************************************************************/
+/*****************************************************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+                               bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
+ * value physical address.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: The handlers to be called upon an event.
+ * @init_spinlock: Indicates whether this method should init the admin spinlock
+ * or whether the spinlock was initialized before (for example, in case of FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+                      struct ena_aenq_handlers *aenq_handlers,
+                      bool init_spinlock);
+
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/aenq.
+ * To achieve that, a FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform an FLR (function-level reset) on the device.
+ * @ena_dev: ENA communication layer struct
+ * @reset_reason: Specifies the trigger for the reset in case of an error.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+                     enum ena_regs_reset_reason_types reset_reason);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @ctx - create context structure
+ *
+ * Create the submission and the completion queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+                           struct ena_com_create_io_ctx *ctx);
+
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+                           struct ena_com_io_sq **io_sq,
+                           struct ena_com_io_cq **io_cq);
+
+/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method, aenq event can be received via AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current polling mode (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will perform a
+ * polling on the admin completion queue for the commands completion,
+ * otherwise it will wait on wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up all the
+ * pending threads that wait on the commands wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the async event notification queue and calls the
+ * proper AENQ handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands have completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validates that the device parameters are the same as the
+ * parameters saved in ena_dev.
+ * It is useful after a device reset, to validate that the device MAC address
+ * and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success, negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+                           struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve the physical DMA address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups_flag: bit field flags of enum ena_admin_aenq_group.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contains the get features.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+                             struct ena_com_dev_get_features_ctx *get_feat_ctx);
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+                               struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offload: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+                                struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller then can configure rss using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ */
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desired hash function, hash key, key_len
+ * and key initial value (if needed by the hash function).
+ * To flush the key into the device the caller should call
+ * ena_com_set_hash_function.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+                              enum ena_admin_hash_functions func,
+                              const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and its dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and its dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
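+
+/*
+ * Usage sketch (ENA_ADMIN_TOEPLITZ and a 40-byte "rss_key" array are assumed
+ * by this example): fill the ena_dev resources first, then flush them to the
+ * device.
+ *
+ *     rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
+ *         rss_key, sizeof(rss_key), 0);
+ *     if (rc == 0)
+ *             rc = ena_com_set_hash_function(ena_dev);
+ */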
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+                             enum ena_admin_hash_functions *func,
+                             u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desired hash control (the ethernet
+ * fields that take part of the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+                          enum ena_admin_flow_hash_proto proto,
+                          u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the ethernet fields that take part of the hash)
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+                         enum ena_admin_flow_hash_proto proto,
+                         u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
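+
+/*
+ * Hash-control flow sketch (the protocol and field identifiers below are
+ * assumed from ena_admin_defs.h): fill the desired fields per protocol, then
+ * flush once with ena_com_set_hash_ctrl().
+ *
+ *     rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
+ *         ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ *         ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
+ *     if (rc == 0)
+ *             rc = ena_com_set_hash_ctrl(ena_dev);
+ */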
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the caller should call
+ * ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+                                     u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection hash control to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
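+
+/*
+ * Indirection-table flow sketch ("log_size", "num_io_queues" and the
+ * queue-id mapping are hypothetical caller values): allocate, fill every
+ * entry, then flush the table to the device.
+ *
+ *     rc = ena_com_rss_init(ena_dev, log_size);
+ *     for (i = 0; rc == 0 && i < (1 << log_size); i++)
+ *             rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ *                 i % num_io_queues);
+ *     if (rc == 0)
+ *             rc = ena_com_indirect_table_set(ena_dev);
+ */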
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
+
+/* ena_com_allocate_host_info - Allocate host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_allocate_debug_area - Allocate debug area.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size - debug area size.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+                               u32 debug_area_size);
+
+/* ena_com_delete_debug_area - Free the debug area resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated debug area.
+ */
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_host_info - Free the host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated host info.
+ */
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes (debug area and host info) base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+                        struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+                         struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+ *
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+                                 struct ena_admin_aq_entry *cmd,
+                                 size_t cmd_size,
+                                 struct ena_admin_acq_entry *cmd_comp,
+                                 size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return whether the interrupt
+ * moderation capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+                                                     u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+                                                     u32 rx_coalesce_usecs);
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+                                       enum ena_intr_moder_level level,
+                                       struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+                                      enum ena_intr_moder_level level,
+                                      struct ena_intr_moder_entry *entry);
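+
+/*
+ * Sketch of overriding a single adaptive-moderation level (the numbers are
+ * illustrative, not tuned defaults; intervals are in usecs and are scaled by
+ * intr_delay_resolution internally):
+ *
+ *     struct ena_intr_moder_entry entry = {
+ *             .intr_moder_interval = 64,
+ *             .pkts_per_interval = 32,
+ *             .bytes_per_interval = 32 * 1024,
+ *     };
+ *
+ *     ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
+ */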
+
+
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq: LLQ feature descriptor, retrieved via ena_com_get_dev_attr_feat.
+ *
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq);
+
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+       return ena_dev->adaptive_coalescing;
+}
+
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+       ena_dev->adaptive_coalescing = true;
+}
+
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+       ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input; updated to the new level on
+ * return.
+ */
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+                                                    unsigned int pkts,
+                                                    unsigned int bytes,
+                                                    unsigned int *smoothed_interval,
+                                                    unsigned int *moder_tbl_idx)
+{
+       enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+       struct ena_intr_moder_entry *curr_moder_entry;
+       struct ena_intr_moder_entry *pred_moder_entry;
+       struct ena_intr_moder_entry *new_moder_entry;
+       struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+       unsigned int interval;
+
+       /* We apply adaptive moderation on Rx path only.
+        * Tx uses static interrupt moderation.
+        */
+       if (!pkts || !bytes)
+               /* Tx interrupt, or spurious interrupt,
+                * in both cases we just use the same delay values
+                */
+               return;
+
+       curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+       if (unlikely(curr_moder_idx >=  ENA_INTR_MAX_NUM_OF_LEVELS)) {
+               ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
+               return;
+       }
+
+       curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+       new_moder_idx = curr_moder_idx;
+
+       if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+               if ((pkts > curr_moder_entry->pkts_per_interval) ||
+                   (bytes > curr_moder_entry->bytes_per_interval))
+                       new_moder_idx =
+                               (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+       } else {
+               pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+               if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+                   (bytes <= pred_moder_entry->bytes_per_interval))
+                       new_moder_idx =
+                               (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+               else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+                        (bytes > curr_moder_entry->bytes_per_interval)) {
+                       if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+                               new_moder_idx =
+                                       (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+               }
+       }
+       new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+       interval = new_moder_entry->intr_moder_interval;
+       *smoothed_interval = (
+               (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+               ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+               10;
+
+       *moder_tbl_idx = new_moder_idx;
+}
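+
+/*
+ * Worked example of the smoothing above: with the default 6/4 old/new
+ * weights, a previous smoothed interval of 32 usec and a new table entry of
+ * 80 usec yield (80 * 4 + 6 * 32 + 5) / 10 = 51 usec; the "+ 5" rounds the
+ * weighted average to the nearest microsecond.
+ */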
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+                                          u32 rx_delay_interval,
+                                          u32 tx_delay_interval,
+                                          bool unmask)
+{
+       intr_reg->intr_control = 0;
+       intr_reg->intr_control |= rx_delay_interval &
+               ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+       intr_reg->intr_control |=
+               (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+               & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+       if (unmask)
+               intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
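+
+/*
+ * Rx completion-path sketch ("rx_ring" is a hypothetical per-queue context
+ * carrying the interval/level state; "tx_usecs" is the static Tx interval):
+ *
+ *     struct ena_eth_io_intr_reg intr_reg;
+ *
+ *     ena_com_calculate_interrupt_delay(ena_dev, pkts, bytes,
+ *         &rx_ring->smoothed_interval, &rx_ring->moder_tbl_idx);
+ *     ena_com_update_intr_reg(&intr_reg, rx_ring->smoothed_interval,
+ *         tx_usecs, true);
+ */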
+
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+       u16 size, buffers_num;
+       u8 *buf;
+
+       size = bounce_buf_ctrl->buffer_size;
+       buffers_num = bounce_buf_ctrl->buffers_num;
+
+       buf = bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+       prefetch(bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+       return buf;
+}
+
+#ifdef ENA_EXTENDED_STATS
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+                                  u32 len);
+
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+                                         u32 funct_queue);
+#endif
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+#endif /* !(ENA_COM) */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_common_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_common_defs.h
new file mode 100644 (file)
index 0000000..f2d8189
--- /dev/null
@@ -0,0 +1,50 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR  0 /*  */
+#define ENA_COMMON_SPEC_VERSION_MINOR  10 /*  */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+       uint32_t mem_addr_low;
+
+       uint16_t mem_addr_high;
+
+       /* MBZ */
+       uint16_t reserved16;
+};
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_admin_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_admin_defs.h
new file mode 100644 (file)
index 0000000..f32bfcc
--- /dev/null
@@ -0,0 +1,1484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+       ENA_ADMIN_CREATE_SQ     = 1,
+
+       ENA_ADMIN_DESTROY_SQ    = 2,
+
+       ENA_ADMIN_CREATE_CQ     = 3,
+
+       ENA_ADMIN_DESTROY_CQ    = 4,
+
+       ENA_ADMIN_GET_FEATURE   = 8,
+
+       ENA_ADMIN_SET_FEATURE   = 9,
+
+       ENA_ADMIN_GET_STATS     = 11,
+};
+
+enum ena_admin_aq_completion_status {
+       ENA_ADMIN_SUCCESS                       = 0,
+
+       ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE   = 1,
+
+       ENA_ADMIN_BAD_OPCODE                    = 2,
+
+       ENA_ADMIN_UNSUPPORTED_OPCODE            = 3,
+
+       ENA_ADMIN_MALFORMED_REQUEST             = 4,
+
+       /* Additional status is provided in ACQ entry extended_status */
+       ENA_ADMIN_ILLEGAL_PARAMETER             = 5,
+
+       ENA_ADMIN_UNKNOWN_ERROR                 = 6,
+};
+
+enum ena_admin_aq_feature_id {
+       ENA_ADMIN_DEVICE_ATTRIBUTES             = 1,
+
+       ENA_ADMIN_MAX_QUEUES_NUM                = 2,
+
+       ENA_ADMIN_HW_HINTS                      = 3,
+
+       ENA_ADMIN_LLQ                           = 4,
+
+       ENA_ADMIN_RSS_HASH_FUNCTION             = 10,
+
+       ENA_ADMIN_STATELESS_OFFLOAD_CONFIG      = 11,
+
+       ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG  = 12,
+
+       ENA_ADMIN_MTU                           = 14,
+
+       ENA_ADMIN_RSS_HASH_INPUT                = 18,
+
+       ENA_ADMIN_INTERRUPT_MODERATION          = 20,
+
+       ENA_ADMIN_AENQ_CONFIG                   = 26,
+
+       ENA_ADMIN_LINK_CONFIG                   = 27,
+
+       ENA_ADMIN_HOST_ATTR_CONFIG              = 28,
+
+       ENA_ADMIN_FEATURES_OPCODE_NUM           = 32,
+};
+
+enum ena_admin_placement_policy_type {
+       /* descriptors and headers are in host memory */
+       ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+       /* descriptors and headers are in device memory (a.k.a Low Latency
+        * Queue)
+        */
+       ENA_ADMIN_PLACEMENT_POLICY_DEV  = 3,
+};
+
+enum ena_admin_link_types {
+       ENA_ADMIN_LINK_SPEED_1G         = 0x1,
+
+       ENA_ADMIN_LINK_SPEED_2_HALF_G   = 0x2,
+
+       ENA_ADMIN_LINK_SPEED_5G         = 0x4,
+
+       ENA_ADMIN_LINK_SPEED_10G        = 0x8,
+
+       ENA_ADMIN_LINK_SPEED_25G        = 0x10,
+
+       ENA_ADMIN_LINK_SPEED_40G        = 0x20,
+
+       ENA_ADMIN_LINK_SPEED_50G        = 0x40,
+
+       ENA_ADMIN_LINK_SPEED_100G       = 0x80,
+
+       ENA_ADMIN_LINK_SPEED_200G       = 0x100,
+
+       ENA_ADMIN_LINK_SPEED_400G       = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+       /* completion queue entry for each sq descriptor */
+       ENA_ADMIN_COMPLETION_POLICY_DESC                = 0,
+
+       /* completion queue entry upon request in sq descriptor */
+       ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND      = 1,
+
+       /* current queue head pointer is updated in OS memory upon sq
+        * descriptor request
+        */
+       ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND      = 2,
+
+       /* current queue head pointer is updated in OS memory for each sq
+        * descriptor
+        */
+       ENA_ADMIN_COMPLETION_POLICY_HEAD                = 3,
+};
+
+/* Basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+       ENA_ADMIN_GET_STATS_TYPE_BASIC          = 0,
+
+       ENA_ADMIN_GET_STATS_TYPE_EXTENDED       = 1,
+};
+
+enum ena_admin_get_stats_scope {
+       ENA_ADMIN_SPECIFIC_QUEUE        = 0,
+
+       ENA_ADMIN_ETH_TRAFFIC           = 1,
+};
+
+struct ena_admin_aq_common_desc {
+       /* 11:0 : command_id
+        * 15:12 : reserved12
+        */
+       uint16_t command_id;
+
+       /* as appears in ena_admin_aq_opcode */
+       uint8_t opcode;
+
+       /* 0 : phase
+        * 1 : ctrl_data - control buffer address valid
+        * 2 : ctrl_data_indirect - control buffer address
+        *    points to list of pages with addresses of control
+        *    buffers
+        * 7:3 : reserved3
+        */
+       uint8_t flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+       uint32_t length;
+
+       struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+       uint16_t sq_idx;
+
+       /* 4:0 : reserved
+        * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+        */
+       uint8_t sq_identity;
+
+       uint8_t reserved1;
+};
+
+struct ena_admin_aq_entry {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       union {
+               uint32_t inline_data_w1[3];
+
+               struct ena_admin_ctrl_buff_info control_buffer;
+       } u;
+
+       uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+       /* command identifier to associate it with the aq descriptor
+        * 11:0 : command_id
+        * 15:12 : reserved12
+        */
+       uint16_t command;
+
+       uint8_t status;
+
+       /* 0 : phase
+        * 7:1 : reserved1
+        */
+       uint8_t flags;
+
+       uint16_t extended_status;
+
+       /* serves as a hint what AQ entries can be revoked */
+       uint16_t sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+       struct ena_admin_acq_common_desc acq_common_descriptor;
+
+       uint32_t response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       /* 4:0 : reserved0_w1
+        * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+        */
+       uint8_t sq_identity;
+
+       uint8_t reserved8_w1;
+
+       /* 3:0 : placement_policy - Describing where the SQ
+        *    descriptor ring and the SQ packet headers reside:
+        *    0x1 - descriptors and headers are in OS memory,
+        *    0x3 - descriptors and headers in device memory
+        *    (a.k.a Low Latency Queue)
+        * 6:4 : completion_policy - Describing what policy
+        *    to use for generation completion entry (cqe) in
+        *    the CQ associated with this SQ: 0x0 - cqe for each
+        *    sq descriptor, 0x1 - cqe upon request in sq
+        *    descriptor, 0x2 - current queue head pointer is
+        *    updated in OS memory upon sq descriptor request
+        *    0x3 - current queue head pointer is updated in OS
+        *    memory for each sq descriptor
+        * 7 : reserved15_w1
+        */
+       uint8_t sq_caps_2;
+
+       /* 0 : is_physically_contiguous - Describes whether the
+        *    queue ring memory is allocated in physically
+        *    contiguous pages or split.
+        * 7:1 : reserved17_w1
+        */
+       uint8_t sq_caps_3;
+
+       /* associated completion queue id. This CQ must be created prior to
+        *    SQ creation
+        */
+       uint16_t cq_idx;
+
+       /* submission queue depth in entries */
+       uint16_t sq_depth;
+
+       /* SQ physical base address in OS memory. This field should not be
+        * used for Low Latency queues. Has to be page aligned.
+        */
+       struct ena_common_mem_addr sq_ba;
+
+       /* specifies queue head writeback location in OS memory. Valid if
+        * completion_policy is set to completion_policy_head_on_demand or
+        * completion_policy_head. Has to be cache aligned
+        */
+       struct ena_common_mem_addr sq_head_writeback;
+
+       uint32_t reserved0_w7;
+
+       uint32_t reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+       ENA_ADMIN_SQ_DIRECTION_TX       = 1,
+
+       ENA_ADMIN_SQ_DIRECTION_RX       = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       uint16_t sq_idx;
+
+       uint16_t reserved;
+
+       /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+       uint32_t sq_doorbell_offset;
+
+       /* low latency queue ring base address as an offset to PCIe MMIO
+        * LLQ_MEM BAR
+        */
+       uint32_t llq_descriptors_offset;
+
+       /* low latency queue headers' memory as an offset to PCIe MMIO
+        * LLQ_MEM BAR
+        */
+       uint32_t llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       /* 4:0 : reserved5
+        * 5 : interrupt_mode_enabled - if set, cq operates
+        *    in interrupt mode, otherwise - polling
+        * 7:6 : reserved6
+        */
+       uint8_t cq_caps_1;
+
+       /* 4:0 : cq_entry_size_words - size of CQ entry in
+        *    32-bit words, valid values: 4, 8.
+        * 7:5 : reserved7
+        */
+       uint8_t cq_caps_2;
+
+       /* completion queue depth in # of entries. must be power of 2 */
+       uint16_t cq_depth;
+
+       /* msix vector assigned to this cq */
+       uint32_t msix_vector;
+
+       /* cq physical base address in OS memory. CQ must be physically
+        * contiguous
+        */
+       struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       uint16_t cq_idx;
+
+       /* actual cq depth in number of entries */
+       uint16_t cq_actual_depth;
+
+       uint32_t numa_node_register_offset;
+
+       uint32_t cq_head_db_register_offset;
+
+       uint32_t cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       uint16_t cq_idx;
+
+       uint16_t reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+       struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       union {
+               /* command specific inline data */
+               uint32_t inline_data_w1[3];
+
+               struct ena_admin_ctrl_buff_info control_buffer;
+       } u;
+
+       /* stats type as defined in enum ena_admin_get_stats_type */
+       uint8_t type;
+
+       /* stats scope defined in enum ena_admin_get_stats_scope */
+       uint8_t scope;
+
+       uint16_t reserved3;
+
+       /* queue id. used when scope is specific_queue */
+       uint16_t queue_idx;
+
+       /* device id, value 0xFFFF means mine. Only a privileged device can get
+        *    stats of another device
+        */
+       uint16_t device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+       uint32_t tx_bytes_low;
+
+       uint32_t tx_bytes_high;
+
+       uint32_t tx_pkts_low;
+
+       uint32_t tx_pkts_high;
+
+       uint32_t rx_bytes_low;
+
+       uint32_t rx_bytes_high;
+
+       uint32_t rx_pkts_low;
+
+       uint32_t rx_pkts_high;
+
+       uint32_t rx_drops_low;
+
+       uint32_t rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+       /* 1:0 : select - 0x1 - current value; 0x3 - default
+        *    value
+        * 7:3 : reserved3
+        */
+       uint8_t flags;
+
+       /* as appears in ena_admin_aq_feature_id */
+       uint8_t feature_id;
+
+       uint16_t reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+       uint32_t impl_id;
+
+       uint32_t device_version;
+
+       /* bitmap of ena_admin_aq_feature_id */
+       uint32_t supported_features;
+
+       uint32_t reserved3;
+
+       /* Indicates how many bits are used for physical address access. */
+       uint32_t phys_addr_width;
+
+       /* Indicates how many bits are used for virtual address access. */
+       uint32_t virt_addr_width;
+
+       /* unicast MAC address (in Network byte order) */
+       uint8_t mac_addr[6];
+
+       uint8_t reserved7[2];
+
+       uint32_t max_mtu;
+};
+
+enum ena_admin_llq_header_location {
+       /* header is in descriptor list */
+       ENA_ADMIN_INLINE_HEADER = 1,
+
+       /* header in a separate ring, implies 16B descriptor list entry */
+       ENA_ADMIN_HEADER_RING   = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+       ENA_ADMIN_LIST_ENTRY_SIZE_128B  = 1,
+
+       ENA_ADMIN_LIST_ENTRY_SIZE_192B  = 2,
+
+       ENA_ADMIN_LIST_ENTRY_SIZE_256B  = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+       ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+
+       ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+
+       ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+
+       ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+
+       ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* A packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located at the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+       ENA_ADMIN_SINGLE_DESC_PER_ENTRY         = 1,
+
+       ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY      = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+       uint32_t max_llq_num;
+
+       uint32_t max_llq_depth;
+
+       /* use enum ena_admin_llq_header_location */
+       uint16_t header_location_ctrl;
+
+       /* if inline header is specified - this is the size of descriptor
+        *    list entry. If header in a separate ring is specified - this is
+        *    the size of header ring entry. use enum
+        *    ena_admin_llq_ring_entry_size
+        */
+       uint16_t entry_size_ctrl;
+
+       /* valid only if inline header is specified. First entry associated
+        *    with the packet includes descriptors and header. The rest of the
+        *    entries are occupied by descriptors. This parameter defines the
+        *    max number of descriptors preceding the header in the first
+        *    entry. Values: use enum ena_admin_llq_num_descs_before_header
+        */
+       uint16_t desc_num_before_header_ctrl;
+
+       /* valid only if inline header is specified. Note: use enum
+        *    ena_admin_llq_stride_ctrl
+        */
+       uint16_t descriptors_stride_ctrl;
+};
+
+struct ena_admin_queue_feature_desc {
+       /* including LLQs */
+       uint32_t max_sq_num;
+
+       uint32_t max_sq_depth;
+
+       uint32_t max_cq_num;
+
+       uint32_t max_cq_depth;
+
+       uint32_t max_legacy_llq_num;
+
+       uint32_t max_legacy_llq_depth;
+
+       uint32_t max_header_size;
+
+       /* Maximum number of descriptors, including the meta descriptor,
+        *    allowed for a single Tx packet
+        */
+       uint16_t max_packet_tx_descs;
+
+       /* Maximum number of descriptors allowed for a single Rx packet */
+       uint16_t max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+       /* exclude L2 */
+       uint32_t mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+       /* host OS info base address in OS memory. host info is 4KB of
+        * physically contiguous memory
+        */
+       struct ena_common_mem_addr os_info_ba;
+
+       /* host debug area base address in OS memory. debug area must be
+        * physically contiguous
+        */
+       struct ena_common_mem_addr debug_ba;
+
+       /* debug area size */
+       uint32_t debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+       /* interrupt delay granularity in usec */
+       uint16_t intr_delay_resolution;
+
+       uint16_t reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+       /* Link speed in Mb/s */
+       uint32_t speed;
+
+       /* bit field of enum ena_admin_link types */
+       uint32_t supported;
+
+       /* 0 : autoneg
+        * 1 : duplex - Full Duplex
+        * 31:2 : reserved2
+        */
+       uint32_t flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+       /* bitmask for AENQ groups the device can report */
+       uint32_t supported_groups;
+
+       /* bitmask for AENQ groups to report */
+       uint32_t enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+       /* 0 : TX_L3_csum_ipv4
+        * 1 : TX_L4_ipv4_csum_part - The checksum field
+        *    should be initialized with pseudo header checksum
+        * 2 : TX_L4_ipv4_csum_full
+        * 3 : TX_L4_ipv6_csum_part - The checksum field
+        *    should be initialized with pseudo header checksum
+        * 4 : TX_L4_ipv6_csum_full
+        * 5 : tso_ipv4
+        * 6 : tso_ipv6
+        * 7 : tso_ecn
+        */
+       uint32_t tx;
+
+       /* Receive side supported stateless offload
+        * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+        * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+        * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+        * 3 : RX_hash - Hash calculation
+        */
+       uint32_t rx_supported;
+
+       uint32_t rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+       ENA_ADMIN_TOEPLITZ      = 1,
+
+       ENA_ADMIN_CRC32         = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+       uint32_t keys_num;
+
+       uint32_t reserved;
+
+       uint32_t key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+       /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+       uint32_t supported_func;
+
+       /* 7:0 : selected_func - bitmask of
+        *    ena_admin_hash_functions
+        */
+       uint32_t selected_func;
+
+       /* initial value */
+       uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+       ENA_ADMIN_RSS_TCP4      = 0,
+
+       ENA_ADMIN_RSS_UDP4      = 1,
+
+       ENA_ADMIN_RSS_TCP6      = 2,
+
+       ENA_ADMIN_RSS_UDP6      = 3,
+
+       ENA_ADMIN_RSS_IP4       = 4,
+
+       ENA_ADMIN_RSS_IP6       = 5,
+
+       ENA_ADMIN_RSS_IP4_FRAG  = 6,
+
+       ENA_ADMIN_RSS_NOT_IP    = 7,
+
+       /* TCPv6 with extension header */
+       ENA_ADMIN_RSS_TCP6_EX   = 8,
+
+       /* IPv6 with extension header */
+       ENA_ADMIN_RSS_IP6_EX    = 9,
+
+       ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+       /* Ethernet Dest Addr */
+       ENA_ADMIN_RSS_L2_DA     = BIT(0),
+
+       /* Ethernet Src Addr */
+       ENA_ADMIN_RSS_L2_SA     = BIT(1),
+
+       /* ipv4/6 Dest Addr */
+       ENA_ADMIN_RSS_L3_DA     = BIT(2),
+
+       /* ipv4/6 Src Addr */
+       ENA_ADMIN_RSS_L3_SA     = BIT(3),
+
+       /* tcp/udp Dest Port */
+       ENA_ADMIN_RSS_L4_DP     = BIT(4),
+
+       /* tcp/udp Src Port */
+       ENA_ADMIN_RSS_L4_SP     = BIT(5),
+};
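+
+/* Illustrative sketch (the exact selection is driver policy, the names below
+ * are only the enum values defined above): a conventional 4-tuple hash for a
+ * TCP/IPv4 flow would select the L3 and L4 source/destination fields, e.g.
+ *
+ *     uint16_t fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ *                       ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;
+ */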
+
+struct ena_admin_proto_input {
+       /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+       uint16_t fields;
+
+       uint16_t reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+       struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+       struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+       /* supported hash input sorting
+        * 1 : L3_sort - support swapping L3 addresses if DA
+        *    is smaller than SA
+        * 2 : L4_sort - support swapping L4 ports if DP is
+        *    smaller than SP
+        */
+       uint16_t supported_input_sort;
+
+       /* enabled hash input sorting
+        * 1 : enable_L3_sort - enable swapping L3 addresses
+        *    if DA is smaller than SA
+        * 2 : enable_L4_sort - enable swapping L4 ports if
+        *    DP is smaller than SP
+        */
+       uint16_t enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+       ENA_ADMIN_OS_LINUX      = 1,
+
+       ENA_ADMIN_OS_WIN        = 2,
+
+       ENA_ADMIN_OS_DPDK       = 3,
+
+       ENA_ADMIN_OS_FREEBSD    = 4,
+
+       ENA_ADMIN_OS_IPXE       = 5,
+};
+
+struct ena_admin_host_info {
+       /* defined in enum ena_admin_os_type */
+       uint32_t os_type;
+
+       /* os distribution string format */
+       uint8_t os_dist_str[128];
+
+       /* OS distribution numeric format */
+       uint32_t os_dist;
+
+       /* kernel version string format */
+       uint8_t kernel_ver_str[32];
+
+       /* Kernel version numeric format */
+       uint32_t kernel_ver;
+
+       /* 7:0 : major
+        * 15:8 : minor
+        * 23:16 : sub_minor
+        */
+       uint32_t driver_version;
+
+       /* features bitmap */
+       uint32_t supported_network_features[4];
+};
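+
+/* Illustrative sketch: with the set_ena_admin_host_info_*() helpers defined
+ * further below, a hypothetical driver version 2.4.1 would be packed into the
+ * driver_version field roughly as follows (the struct must start zeroed,
+ * since the helpers only OR bits in):
+ *
+ *     struct ena_admin_host_info hinfo = { 0 };
+ *
+ *     set_ena_admin_host_info_major(&hinfo, 2);      // bits 7:0
+ *     set_ena_admin_host_info_minor(&hinfo, 4);      // bits 15:8
+ *     set_ena_admin_host_info_sub_minor(&hinfo, 1);  // bits 23:16
+ *     // hinfo.driver_version is now 0x00010402
+ */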
+
+struct ena_admin_rss_ind_table_entry {
+       uint16_t cq_idx;
+
+       uint16_t reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+       /* min supported table size (2^min_size) */
+       uint16_t min_size;
+
+       /* max supported table size (2^max_size) */
+       uint16_t max_size;
+
+       /* table size (2^size) */
+       uint16_t size;
+
+       uint16_t reserved;
+
+       /* index of the inline entry. 0xFFFFFFFF means invalid */
+       uint32_t inline_index;
+
+       /* used for updating single entry, ignored when setting the entire
+        * table through the control buffer.
+        */
+       struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+       /* value in ms */
+       uint16_t mmio_read_timeout;
+
+       /* value in ms */
+       uint16_t driver_watchdog_timeout;
+
+       /* Per packet tx completion timeout. value in ms */
+       uint16_t missing_tx_completion_timeout;
+
+       uint16_t missed_tx_completion_count_threshold_to_reset;
+
+       /* value in ms */
+       uint16_t admin_completion_tx_timeout;
+
+       uint16_t netdev_wd_timeout;
+
+       uint16_t max_tx_sgl_size;
+
+       uint16_t max_rx_sgl_size;
+
+       uint16_t reserved[8];
+};
+
+struct ena_admin_get_feat_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_ctrl_buff_info control_buffer;
+
+       struct ena_admin_get_set_feature_common_desc feat_common;
+
+       uint32_t raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       union {
+               uint32_t raw[14];
+
+               struct ena_admin_device_attr_feature_desc dev_attr;
+
+               struct ena_admin_feature_llq_desc llq;
+
+               struct ena_admin_queue_feature_desc max_queue;
+
+               struct ena_admin_feature_aenq_desc aenq;
+
+               struct ena_admin_get_feature_link_desc link;
+
+               struct ena_admin_feature_offload_desc offload;
+
+               struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+               struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+               struct ena_admin_feature_rss_ind_table ind_table;
+
+               struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+               struct ena_admin_ena_hw_hints hw_hints;
+       } u;
+};
+
+struct ena_admin_set_feat_cmd {
+       struct ena_admin_aq_common_desc aq_common_descriptor;
+
+       struct ena_admin_ctrl_buff_info control_buffer;
+
+       struct ena_admin_get_set_feature_common_desc feat_common;
+
+       union {
+               uint32_t raw[11];
+
+               /* mtu size */
+               struct ena_admin_set_feature_mtu_desc mtu;
+
+               /* host attributes */
+               struct ena_admin_set_feature_host_attr_desc host_attr;
+
+               /* AENQ configuration */
+               struct ena_admin_feature_aenq_desc aenq;
+
+               /* rss flow hash function */
+               struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+               /* rss flow hash input */
+               struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+               /* rss indirection table */
+               struct ena_admin_feature_rss_ind_table ind_table;
+       } u;
+};
+
+struct ena_admin_set_feat_resp {
+       struct ena_admin_acq_common_desc acq_common_desc;
+
+       union {
+               uint32_t raw[14];
+       } u;
+};
+
+struct ena_admin_aenq_common_desc {
+       uint16_t group;
+
+       uint16_t syndrom;
+
+       /* 0 : phase */
+       uint8_t flags;
+
+       uint8_t reserved1[3];
+
+       uint32_t timestamp_low;
+
+       uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+       ENA_ADMIN_LINK_CHANGE           = 0,
+
+       ENA_ADMIN_FATAL_ERROR           = 1,
+
+       ENA_ADMIN_WARNING               = 2,
+
+       ENA_ADMIN_NOTIFICATION          = 3,
+
+       ENA_ADMIN_KEEP_ALIVE            = 4,
+
+       ENA_ADMIN_AENQ_GROUPS_NUM       = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+       ENA_ADMIN_SUSPEND       = 0,
+
+       ENA_ADMIN_RESUME        = 1,
+
+       ENA_ADMIN_UPDATE_HINTS  = 2,
+};
+
+struct ena_admin_aenq_entry {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       /* command specific inline data */
+       uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       /* 0 : link_status */
+       uint32_t flags;
+};
+
+struct ena_admin_aenq_keep_alive_desc {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       uint32_t rx_drops_low;
+
+       uint32_t rx_drops_high;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+       uint16_t req_id;
+
+       uint16_t reg_off;
+
+       /* value is valid when poll is cleared */
+       uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
+{
+       return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
+{
+       p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
+{
+       return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
+{
+       return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+       p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+       return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+       p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
+{
+       return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
+{
+       p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
+{
+       return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+       p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
+{
+       return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+       p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
+{
+       return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+       p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
+{
+       return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+       p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
+{
+       return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+       p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+       return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
+{
+       return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+       p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+       return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+       p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+       return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+       p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+       return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+       p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+       return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+       return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+       return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+       p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
+
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
+{
+       return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
+{
+       return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
+{
+       p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ADMIN_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_common_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_common_defs.h
new file mode 100644 (file)
index 0000000..66b381b
--- /dev/null
@@ -0,0 +1,49 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR  0 /*  */
+#define ENA_COMMON_SPEC_VERSION_MINOR  10 /*  */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+       uint32_t mem_addr_low;
+
+       uint16_t mem_addr_high;
+
+       /* MBZ */
+       uint16_t reserved16;
+};
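+
+/* Illustrative sketch: a 48-bit DMA address such as 0x123456789abc (a made-up
+ * value) would be split across the two fields as
+ *
+ *     addr.mem_addr_low  = 0x56789abc;  // paddr[31:0]
+ *     addr.mem_addr_high = 0x1234;      // paddr[47:32]
+ *     addr.reserved16    = 0;           // MBZ
+ */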
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_eth_io_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_eth_io_defs.h
new file mode 100644 (file)
index 0000000..f2cc0f0
--- /dev/null
@@ -0,0 +1,959 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+       ENA_ETH_IO_L3_PROTO_UNKNOWN     = 0,
+
+       ENA_ETH_IO_L3_PROTO_IPV4        = 8,
+
+       ENA_ETH_IO_L3_PROTO_IPV6        = 11,
+
+       ENA_ETH_IO_L3_PROTO_FCOE        = 21,
+
+       ENA_ETH_IO_L3_PROTO_ROCE        = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+       ENA_ETH_IO_L4_PROTO_UNKNOWN             = 0,
+
+       ENA_ETH_IO_L4_PROTO_TCP                 = 12,
+
+       ENA_ETH_IO_L4_PROTO_UDP                 = 13,
+
+       ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE      = 23,
+};
+
+struct ena_eth_io_tx_desc {
+       /* 15:0 : length - Buffer length in bytes, must
+        *    include any packet trailers that the ENA is
+        *    supposed to update, like End-to-End CRC,
+        *    Authentication GMAC, etc. This length must not
+        *    include the 'Push_Buffer' length. This length must
+        *    not include the 4 bytes added at the end for the
+        *    802.3 Ethernet FCS
+        * 21:16 : req_id_hi - Request ID[15:10]
+        * 22 : reserved22 - MBZ
+        * 23 : meta_desc - MBZ
+        * 24 : phase
+        * 25 : reserved1 - MBZ
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 28 : comp_req - Indicates whether completion
+        *    should be posted, after packet is transmitted.
+        *    Valid only for first descriptor
+        * 30:29 : reserved29 - MBZ
+        * 31 : reserved31 - MBZ
+        */
+       uint32_t len_ctrl;
+
+       /* 3:0 : l3_proto_idx - L3 protocol. This field is
+        *    required when l3_csum_en, l3_csum or tso_en are set.
+        * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
+        *    DF flags of the IPv4 header is 0. Otherwise must
+        *    be set to 1
+        * 6:5 : reserved5
+        * 7 : tso_en - Enable TSO, For TCP only.
+        * 12:8 : l4_proto_idx - L4 protocol. This field needs
+        *    to be set when l4_csum_en or tso_en are set.
+        * 13 : l3_csum_en - enable IPv4 header checksum.
+        * 14 : l4_csum_en - enable TCP/UDP checksum.
+        * 15 : ethernet_fcs_dis - when set, the controller
+        *    will not append the 802.3 Ethernet Frame Check
+        *    Sequence to the packet
+        * 16 : reserved16
+        * 17 : l4_csum_partial - L4 partial checksum. When
+        *    set to 0, the ENA calculates the L4 checksum,
+        *    where the Destination Address required for the
+        *    TCP/UDP pseudo-header is taken from the actual
+        *    packet L3 header. When set to 1, the ENA doesn't
+        *    calculate the pseudo-header sum; the checksum
+        *    field of the L4 header is used instead. When TSO
+        *    is enabled, the checksum of the pseudo-header
+        *    must not include the TCP length field. L4 partial
+        *    checksum should be used for IPv6 packets that
+        *    contain Routing Headers.
+        * 20:18 : reserved18 - MBZ
+        * 21 : reserved21 - MBZ
+        * 31:22 : req_id_lo - Request ID[9:0]
+        */
+       uint32_t meta_ctrl;
+
+       uint32_t buff_addr_lo;
+
+       /* address high and header size
+        * 15:0 : addr_hi - Buffer Pointer[47:32]
+        * 23:16 : reserved16_w2
+        * 31:24 : header_length - Header length. For Low
+        *    Latency Queues, this field indicates the number
+        *    of bytes written to the headers' memory. For
+        *    normal queues, if the packet is TCP or UDP and
+        *    longer than max_header_size, then this field
+        *    should be set to the sum of the L4 header offset
+        *    and L4 header size (without options); otherwise,
+        *    this field
+        *    should be set to 0. For both modes, this field
+        *    must not exceed the max_header_size.
+        *    max_header_size value is reported by the Max
+        *    Queues Feature descriptor
+        */
+       uint32_t buff_addr_hi_hdr_sz;
+};
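+
+/* Illustrative sketch: for a hypothetical buffer at 48-bit address
+ * 0x123456789abc whose first 64 bytes were pushed to the LLQ header memory,
+ * the address/header fields would be filled roughly as
+ *
+ *     desc.buff_addr_lo        = 0x56789abc;            // addr[31:0]
+ *     desc.buff_addr_hi_hdr_sz = (64u << 24) | 0x1234;  // header_length | addr[47:32]
+ */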
+
+struct ena_eth_io_tx_meta_desc {
+       /* 9:0 : req_id_lo - Request ID[9:0]
+        * 11:10 : reserved10 - MBZ
+        * 12 : reserved12 - MBZ
+        * 13 : reserved13 - MBZ
+        * 14 : ext_valid - if set, offset fields in Word2
+        *    are valid, as are MSS High in Word 0 and bits
+        *    [31:24] in Word 3
+        * 15 : reserved15
+        * 19:16 : mss_hi
+        * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+        *    Extended Metadata Descriptor
+        * 21 : meta_store - Store extended metadata in queue
+        *    cache
+        * 22 : reserved22 - MBZ
+        * 23 : meta_desc - MBO
+        * 24 : phase
+        * 25 : reserved25 - MBZ
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 28 : comp_req - Indicates whether completion
+        *    should be posted, after packet is transmitted.
+        *    Valid only for first descriptor
+        * 30:29 : reserved29 - MBZ
+        * 31 : reserved31 - MBZ
+        */
+       uint32_t len_ctrl;
+
+       /* 5:0 : req_id_hi
+        * 31:6 : reserved6 - MBZ
+        */
+       uint32_t word1;
+
+       /* 7:0 : l3_hdr_len
+        * 15:8 : l3_hdr_off
+        * 21:16 : l4_hdr_len_in_words - counts the L4 header
+        *    length in words. There is an explicit assumption
+        *    that the L4 header appears right after the L3
+        *    header and the L4 offset is based on
+        *    l3_hdr_off+l3_hdr_len
+        * 31:22 : mss_lo
+        */
+       uint32_t word2;
+
+       uint32_t reserved;
+};
+
+struct ena_eth_io_tx_cdesc {
+       /* Request ID[15:0] */
+       uint16_t req_id;
+
+       uint8_t status;
+
+       /* flags
+        * 0 : phase
+        * 7:1 : reserved1
+        */
+       uint8_t flags;
+
+       uint16_t sub_qid;
+
+       uint16_t sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+       /* In bytes. 0 means 64KB */
+       uint16_t length;
+
+       /* MBZ */
+       uint8_t reserved2;
+
+       /* 0 : phase
+        * 1 : reserved1 - MBZ
+        * 2 : first - Indicates first descriptor in
+        *    transaction
+        * 3 : last - Indicates last descriptor in transaction
+        * 4 : comp_req
+        * 5 : reserved5 - MBO
+        * 7:6 : reserved6 - MBZ
+        */
+       uint8_t ctrl;
+
+       uint16_t req_id;
+
+       /* MBZ */
+       uint16_t reserved6;
+
+       uint32_t buff_addr_lo;
+
+       uint16_t buff_addr_hi;
+
+       /* MBZ */
+       uint16_t reserved16_w3;
+};
+
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+       /* 4:0 : l3_proto_idx
+        * 6:5 : src_vlan_cnt
+        * 7 : reserved7 - MBZ
+        * 12:8 : l4_proto_idx
+        * 13 : l3_csum_err - when set, either an L3
+        *    checksum error was detected, or the controller
+        *    didn't validate the checksum. This bit is valid
+        *    only when l3_proto_idx indicates an IPv4 packet
+        * 14 : l4_csum_err - when set, either an L4
+        *    checksum error was detected, or the controller
+        *    didn't validate the checksum. This bit is valid
+        *    only when l4_proto_idx indicates a TCP/UDP packet
+        *    and ipv4_frag is not set
+        * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+        * 23:16 : reserved16
+        * 24 : phase
+        * 25 : l3_csum2 - second checksum engine result
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 29:28 : reserved28
+        * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+        *    Descriptor was used
+        * 31 : reserved31
+        */
+       uint32_t status;
+
+       uint16_t length;
+
+       uint16_t req_id;
+
+       /* 32-bit hash result */
+       uint32_t hash;
+
+       uint16_t sub_qid;
+
+       uint16_t reserved;
+};
+
+/* 8-word format */
+struct ena_eth_io_rx_cdesc_ext {
+       struct ena_eth_io_rx_cdesc_base base;
+
+       uint32_t buff_addr_lo;
+
+       uint16_t buff_addr_hi;
+
+       uint16_t reserved16;
+
+       uint32_t reserved_w6;
+
+       uint32_t reserved_w7;
+};
+
+struct ena_eth_io_intr_reg {
+       /* 14:0 : rx_intr_delay
+        * 29:15 : tx_intr_delay
+        * 30 : intr_unmask
+        * 31 : reserved
+        */
+       uint32_t intr_control;
+};
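+
+/* Illustrative sketch: unmasking the interrupt while requesting RX/TX delays
+ * of 32 and 64 (assumed to be in the device's intr_delay_resolution units)
+ * would be encoded as
+ *
+ *     reg.intr_control = 32 | (64u << 15) | (1u << 30);
+ */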
+
+struct ena_eth_io_numa_node_cfg_reg {
+       /* 7:0 : numa
+        * 30:8 : reserved
+        * 31 : enabled
+        */
+       uint32_t numa_cfg;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+/* numa_node_cfg_reg */
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
+{
+       return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+       return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
+{
+       return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
+{
+       return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+       p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+       return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+       p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
+{
+       return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
+{
+       p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
+{
+       return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+       p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
+{
+       return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+       p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
+{
+       return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+       p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
+{
+       return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+       p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
+{
+       return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+       p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+       return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+       p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+       return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+       p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
+{
+       return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+       p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+       return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+       p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+       return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+       p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /* _ENA_ETH_IO_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_gen_info.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_gen_info.h
new file mode 100644 (file)
index 0000000..0ff4787
--- /dev/null
@@ -0,0 +1,34 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#define        ENA_GEN_DATE    "Sun Nov 20 11:22:05 IST 2016"
+#define        ENA_GEN_COMMIT  "44da4e8"
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_includes.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_includes.h
new file mode 100644 (file)
index 0000000..5ea312f
--- /dev/null
@@ -0,0 +1,4 @@
+#include "ena_common_defs.h"
+#include "ena_regs_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_regs_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_defs/ena_regs_defs.h
new file mode 100644 (file)
index 0000000..5a540d8
--- /dev/null
@@ -0,0 +1,168 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+enum ena_regs_reset_reason_types {
+       ENA_REGS_RESET_NORMAL                   = 0,
+
+       ENA_REGS_RESET_KEEP_ALIVE_TO            = 1,
+
+       ENA_REGS_RESET_ADMIN_TO                 = 2,
+
+       ENA_REGS_RESET_MISS_TX_CMPL             = 3,
+
+       ENA_REGS_RESET_INV_RX_REQ_ID            = 4,
+
+       ENA_REGS_RESET_INV_TX_REQ_ID            = 5,
+
+       ENA_REGS_RESET_TOO_MANY_RX_DESCS        = 6,
+
+       ENA_REGS_RESET_INIT_ERR                 = 7,
+
+       ENA_REGS_RESET_DRIVER_INVALID_STATE     = 8,
+
+       ENA_REGS_RESET_OS_TRIGGER               = 9,
+
+       ENA_REGS_RESET_OS_NETDEV_WD             = 10,
+
+       ENA_REGS_RESET_SHUTDOWN                 = 11,
+
+       ENA_REGS_RESET_USER_TRIGGER             = 12,
+
+       ENA_REGS_RESET_GENERIC                  = 13,
+};
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF           0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF                0x4
+#define ENA_REGS_CAPS_OFF              0x8
+#define ENA_REGS_CAPS_EXT_OFF          0xc
+#define ENA_REGS_AQ_BASE_LO_OFF                0x10
+#define ENA_REGS_AQ_BASE_HI_OFF                0x14
+#define ENA_REGS_AQ_CAPS_OFF           0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF               0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF               0x24
+#define ENA_REGS_ACQ_CAPS_OFF          0x28
+#define ENA_REGS_AQ_DB_OFF             0x2c
+#define ENA_REGS_ACQ_TAIL_OFF          0x30
+#define ENA_REGS_AENQ_CAPS_OFF         0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF              0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF              0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF              0x40
+#define ENA_REGS_AENQ_TAIL_OFF         0x44
+#define ENA_REGS_INTR_MASK_OFF         0x4c
+#define ENA_REGS_DEV_CTL_OFF           0x54
+#define ENA_REGS_DEV_STS_OFF           0x58
+#define ENA_REGS_MMIO_REG_READ_OFF             0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF              0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF              0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF              0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK            0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT           8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK            0xff00
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK              0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT                8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK         0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT                16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK         0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT              24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK               0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK           0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT              1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK               0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT             8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK              0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT               16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK                0xf0000
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK         0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT           16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK            0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK               0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT         16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK          0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK             0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT               16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK                0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK                0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT              1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK               0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT               2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK                0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT               3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK                0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT            28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK             0xf0000000
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK            0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT          1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK           0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT             2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK              0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT               3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK                0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT          4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK           0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT             5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK              0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT             6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK              0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT                7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK         0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK             0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT           16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK            0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK               0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT             16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK              0xffff0000
+
+#endif /* _ENA_REGS_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.c b/sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.c
new file mode 100644 (file)
index 0000000..190de1f
--- /dev/null
@@ -0,0 +1,624 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ena_eth_com.h"
+
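+/* Return the RX completion descriptor at the (masked) CQ head, or NULL if
+ * its phase bit does not match the expected phase, i.e. the device has not
+ * written a new completion there yet.
+ */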
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+       struct ena_com_io_cq *io_cq)
+{
+       struct ena_eth_io_rx_cdesc_base *cdesc;
+       u16 expected_phase, head_masked;
+       u16 desc_phase;
+
+       head_masked = io_cq->head & (io_cq->q_depth - 1);
+       expected_phase = io_cq->phase;
+
+       cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+                       + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+       desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+                       ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+       if (desc_phase != expected_phase)
+               return NULL;
+
+       return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+       io_cq->head++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+               io_cq->phase ^= 1;
+}
+
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
+{
+       u16 tail_masked;
+       u32 offset;
+
+       tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+       offset = tail_masked * io_sq->desc_entry_size;
+
+       return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
+}
+
+static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+                                                     u8 *bounce_buffer)
+{
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+       u16 dst_tail_mask;
+       u32 dst_offset;
+
+       dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+       dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+       /* Make sure everything was written into the bounce buffer before
+        * writing the bounce buffer to the device
+        */
+       wmb();
+
+       /* The line is completed. Copy it to dev */
+       ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+                               bounce_buffer,
+                               llq_info->desc_list_entry_size);
+
+       io_sq->tail++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+               io_sq->phase ^= 1;
+}
+
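+/* For LLQ queues, copy the packet header into the bounce buffer at the
+ * offset reserved after the descriptors; a no-op for host-placement queues.
+ */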
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+                                                u8 *header_src,
+                                                u16 header_len)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+       u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+       u16 header_offset;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+               return 0;
+
+       header_offset =
+               llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+       if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+               ena_trc_err("trying to write a header larger than the llq entry can accommodate\n");
+               return ENA_COM_FAULT;
+       }
+
+       if (unlikely(!bounce_buffer)) {
+               ena_trc_err("bounce buffer is NULL\n");
+               return ENA_COM_FAULT;
+       }
+
+       memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+       return 0;
+}
+
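+/* Return the next free descriptor slot inside the current LLQ bounce-buffer
+ * line and advance the per-line bookkeeping.
+ */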
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       u8 *bounce_buffer;
+       void *sq_desc;
+
+       bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+       if (unlikely(!bounce_buffer)) {
+               ena_trc_err("bounce buffer is NULL\n");
+               return NULL;
+       }
+
+       sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+       pkt_ctrl->idx++;
+       pkt_ctrl->descs_left_in_line--;
+
+       return sq_desc;
+}
+
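+/* Flush a partially filled bounce-buffer line to the device and start a new
+ * one; only meaningful for LLQ queues.
+ */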
+static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+               return;
+
+       /* bounce buffer was used, so write it and get a new one */
+       if (pkt_ctrl->idx) {
+               ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                  pkt_ctrl->curr_bounce_buf);
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, llq_info->desc_list_entry_size);
+       }
+
+       pkt_ctrl->idx = 0;
+       pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+               return get_sq_desc_llq(io_sq);
+
+       return get_sq_desc_regular_queue(io_sq);
+}
+
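+/* LLQ variant of the tail update: once the current bounce-buffer line has no
+ * descriptor slots left, write it to device memory and start a new line.
+ */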
+static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+       if (!pkt_ctrl->descs_left_in_line) {
+               ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                  pkt_ctrl->curr_bounce_buf);
+
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, llq_info->desc_list_entry_size);
+
+               pkt_ctrl->idx = 0;
+               if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+                       pkt_ctrl->descs_left_in_line = 1;
+               else
+                       pkt_ctrl->descs_left_in_line =
+                       llq_info->desc_list_entry_size / io_sq->desc_entry_size;
+       }
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               ena_com_sq_update_llq_tail(io_sq);
+               return;
+       }
+
+       io_sq->tail++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+               io_sq->phase ^= 1;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+       ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+       idx &= (io_cq->q_depth - 1);
+       return (struct ena_eth_io_rx_cdesc_base *)
+               ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+               idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
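+/* Walk the RX completion queue from the current head, counting descriptors
+ * until one with the LAST bit set is found.  Returns the number of
+ * descriptors making up the completed packet (0 if it is not complete yet)
+ * and, via first_cdesc_idx, where the packet starts.
+ */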
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+                                          u16 *first_cdesc_idx)
+{
+       struct ena_eth_io_rx_cdesc_base *cdesc;
+       u16 count = 0, head_masked;
+       u32 last = 0;
+
+       do {
+               cdesc = ena_com_get_next_rx_cdesc(io_cq);
+               if (!cdesc)
+                       break;
+
+               ena_com_cq_inc_head(io_cq);
+               count++;
+               last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+                       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+       } while (!last);
+
+       if (last) {
+               *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+               count += io_cq->cur_rx_pkt_cdesc_count;
+
+               head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+               io_cq->cur_rx_pkt_cdesc_count = 0;
+               io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+               ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+                           io_cq->qid, *first_cdesc_idx, count);
+       } else {
+               io_cq->cur_rx_pkt_cdesc_count += count;
+               count = 0;
+       }
+
+       return count;
+}
+
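+/* Return true when the TX meta data differs from the cached copy, i.e. a new
+ * meta descriptor has to be created.
+ */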
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+                                            struct ena_com_tx_ctx *ena_tx_ctx)
+{
+       int rc;
+
+       if (ena_tx_ctx->meta_valid) {
+               rc = memcmp(&io_sq->cached_tx_meta,
+                           &ena_tx_ctx->ena_meta,
+                           sizeof(struct ena_com_tx_meta));
+
+               if (unlikely(rc != 0))
+                       return true;
+       }
+
+       return false;
+}
+
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+                                                        struct ena_com_tx_ctx *ena_tx_ctx)
+{
+       struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+       struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+       meta_desc = get_sq_desc(io_sq);
+       memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+       /* bits 0-9 of the mss */
+       meta_desc->word2 |= (ena_meta->mss <<
+               ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+               ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+       /* bits 10-13 of the mss */
+       meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+               ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+               ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+
+       /* Extended meta desc */
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+       meta_desc->len_ctrl |= (io_sq->phase <<
+               ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+               ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+       meta_desc->word2 |= ena_meta->l3_hdr_len &
+               ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+       meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+               ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+               ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+       meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+               ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+               ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+       /* Cache the meta desc */
+       memcpy(&io_sq->cached_tx_meta, ena_meta,
+              sizeof(struct ena_com_tx_meta));
+
+       ena_com_sq_update_tail(io_sq);
+}
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+                                       struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+       ena_rx_ctx->l3_proto = cdesc->status &
+               ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+       ena_rx_ctx->l4_proto =
+               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+       ena_rx_ctx->l3_csum_err =
+               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+       ena_rx_ctx->l4_csum_err =
+               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+       ena_rx_ctx->hash = cdesc->hash;
+       ena_rx_ctx->frag =
+               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+       ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+                   ena_rx_ctx->l3_proto,
+                   ena_rx_ctx->l4_proto,
+                   ena_rx_ctx->l3_csum_err,
+                   ena_rx_ctx->l4_csum_err,
+                   ena_rx_ctx->hash,
+                   ena_rx_ctx->frag,
+                   cdesc->status);
+}
+
+/*****************************************************************************/
+/*****************************     API      **********************************/
+/*****************************************************************************/
+
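+/* Build the hardware descriptors for one TX packet: copy any pushed header
+ * into the LLQ bounce buffer, emit a meta descriptor if the cached TX meta
+ * changed, then fill one buffer descriptor per element of ena_bufs.  On
+ * success *nb_hw_desc holds the number of descriptors consumed.
+ */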
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+                      struct ena_com_tx_ctx *ena_tx_ctx,
+                      int *nb_hw_desc)
+{
+       struct ena_eth_io_tx_desc *desc = NULL;
+       struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+       void *buffer_to_push = ena_tx_ctx->push_header;
+       u16 header_len = ena_tx_ctx->header_len;
+       u16 num_bufs = ena_tx_ctx->num_bufs;
+       u16 start_tail = io_sq->tail;
+       int i, rc;
+       bool have_meta;
+       u64 addr_hi;
+
+       ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+                "wrong Q type");
+
+       /* num_bufs + 1 for a potential meta desc */
+       if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
+               ena_trc_err("Not enough space in the tx queue\n");
+               return ENA_COM_NO_MEM;
+       }
+
+       if (unlikely(header_len > io_sq->tx_max_header_size)) {
+               ena_trc_err("header size is too large %d max header: %d\n",
+                           header_len, io_sq->tx_max_header_size);
+               return ENA_COM_INVAL;
+       }
+
+       if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
+               return ENA_COM_INVAL;
+
+       rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
+       if (unlikely(rc))
+               return rc;
+
+       have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+                       ena_tx_ctx);
+       if (have_meta)
+               ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+       /* If the caller doesn't want to send packets */
+       if (unlikely(!num_bufs && !header_len)) {
+               ena_com_close_bounce_buffer(io_sq);
+               *nb_hw_desc = io_sq->tail - start_tail;
+               return 0;
+       }
+
+       desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return ENA_COM_FAULT;
+       memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+       /* Set the first desc when we don't have a meta descriptor */
+       if (!have_meta)
+               desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+       desc->buff_addr_hi_hdr_sz |= (header_len <<
+               ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+               ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+       desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+               ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+       desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+       /* Bits 0-9 */
+       desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+               ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+               ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+       desc->meta_ctrl |= (ena_tx_ctx->df <<
+               ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+               ENA_ETH_IO_TX_DESC_DF_MASK;
+
+       /* Bits 10-15 */
+       desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+               ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+               ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+       if (ena_tx_ctx->meta_valid) {
+               desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+                       ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+                       ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+               desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+                       ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+               desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+                       ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+                       ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+               desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+                       ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+                       ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+               desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+                       ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+                       ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+               desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+                       ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+                       ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+       }
+
+       for (i = 0; i < num_bufs; i++) {
+               /* The first buffer shares the same desc as the header */
+               if (likely(i != 0)) {
+                       ena_com_sq_update_tail(io_sq);
+
+                       desc = get_sq_desc(io_sq);
+                       if (unlikely(!desc))
+                               return ENA_COM_FAULT;
+
+                       memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+                       desc->len_ctrl |= (io_sq->phase <<
+                               ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+                               ENA_ETH_IO_TX_DESC_PHASE_MASK;
+               }
+
+               desc->len_ctrl |= ena_bufs->len &
+                       ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+               addr_hi = ((ena_bufs->paddr &
+                       GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+               desc->buff_addr_lo = (u32)ena_bufs->paddr;
+               desc->buff_addr_hi_hdr_sz |= addr_hi &
+                       ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+               ena_bufs++;
+       }
+
+       /* set the last desc indicator */
+       desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+       ena_com_sq_update_tail(io_sq);
+
+       ena_com_close_bounce_buffer(io_sq);
+
+       *nb_hw_desc = io_sq->tail - start_tail;
+       return 0;
+}
+
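+/* Gather the completion descriptors of one received packet, record each
+ * buffer's length and request id in ena_rx_ctx->ena_bufs, and fill the RX
+ * flags (protocol indices, checksum errors, hash, fragment bit) from the
+ * last descriptor.
+ */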
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+                  struct ena_com_io_sq *io_sq,
+                  struct ena_com_rx_ctx *ena_rx_ctx)
+{
+       struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+       struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+       u16 cdesc_idx = 0;
+       u16 nb_hw_desc;
+       u16 i;
+
+       ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+                "wrong Q type");
+
+       nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+       if (nb_hw_desc == 0) {
+               ena_rx_ctx->descs = nb_hw_desc;
+               return 0;
+       }
+
+       ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
+                   io_cq->qid, nb_hw_desc);
+
+       if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+               ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
+                           nb_hw_desc, ena_rx_ctx->max_bufs);
+               return ENA_COM_NO_SPACE;
+       }
+
+       for (i = 0; i < nb_hw_desc; i++) {
+               cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+               ena_buf->len = cdesc->length;
+               ena_buf->req_id = cdesc->req_id;
+               ena_buf++;
+       }
+
+       /* Update SQ head ptr */
+       io_sq->next_to_comp += nb_hw_desc;
+
+       ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+                   io_sq->qid, io_sq->next_to_comp);
+
+       /* Get rx flags from the last pkt */
+       ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+       ena_rx_ctx->descs = nb_hw_desc;
+       return 0;
+}
+
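+/* Post a single receive buffer to the RX submission queue. */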
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+                              struct ena_com_buf *ena_buf,
+                              u16 req_id)
+{
+       struct ena_eth_io_rx_desc *desc;
+
+       ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+                "wrong Q type");
+
+       if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
+               return ENA_COM_NO_SPACE;
+
+       desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return ENA_COM_FAULT;
+
+       memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+       desc->length = ena_buf->len;
+
+       desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+       desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+       desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+       desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+       desc->req_id = req_id;
+
+       desc->buff_addr_lo = (u32)ena_buf->paddr;
+       desc->buff_addr_hi =
+               ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+       ena_com_sq_update_tail(io_sq);
+
+       return 0;
+}
+
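+/* Reap the next TX completion, if its phase bit shows the device has already
+ * written it, and return the completed request id in *req_id.
+ */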
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+       u8 expected_phase, cdesc_phase;
+       struct ena_eth_io_tx_cdesc *cdesc;
+       u16 masked_head;
+
+       masked_head = io_cq->head & (io_cq->q_depth - 1);
+       expected_phase = io_cq->phase;
+
+       cdesc = (struct ena_eth_io_tx_cdesc *)
+               ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+               (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+       /* When the current completion descriptor's phase isn't the same as the
+        * expected one, it means that the device hasn't updated
+        * this completion yet.
+        */
+       cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+       if (cdesc_phase != expected_phase)
+               return ENA_COM_TRY_AGAIN;
+
+       if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+               ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+               return ENA_COM_INVAL;
+       }
+
+       ena_com_cq_inc_head(io_cq);
+
+       *req_id = READ_ONCE(cdesc->req_id);
+
+       return 0;
+}
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.h b/sys/dev/virtual/amazon/ena/ena-com/ena_eth_com.h
new file mode 100644 (file)
index 0000000..d0c8b90
--- /dev/null
@@ -0,0 +1,186 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
+
+struct ena_com_tx_ctx {
+       struct ena_com_tx_meta ena_meta;
+       struct ena_com_buf *ena_bufs;
+       /* For LLQ, header buffer - pushed to the device mem space */
+       void *push_header;
+
+       enum ena_eth_io_l3_proto_index l3_proto;
+       enum ena_eth_io_l4_proto_index l4_proto;
+       u16 num_bufs;
+       u16 req_id;
+       /* For regular queue, indicate the size of the header
+        * For LLQ, indicate the size of the pushed buffer
+        */
+       u16 header_len;
+
+       u8 meta_valid;
+       u8 tso_enable;
+       u8 l3_csum_enable;
+       u8 l4_csum_enable;
+       u8 l4_csum_partial;
+       u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+       struct ena_com_rx_buf_info *ena_bufs;
+       enum ena_eth_io_l3_proto_index l3_proto;
+       enum ena_eth_io_l4_proto_index l4_proto;
+       bool l3_csum_err;
+       bool l4_csum_err;
+       /* fragmented packet */
+       bool frag;
+       u32 hash;
+       u16 descs;
+       int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+                      struct ena_com_tx_ctx *ena_tx_ctx,
+                      int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+                  struct ena_com_io_sq *io_sq,
+                  struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+                              struct ena_com_buf *ena_buf,
+                              u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+                                      struct ena_eth_io_intr_reg *intr_reg)
+{
+       ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
+{
+       u16 tail, next_to_comp, cnt;
+
+       next_to_comp = io_sq->next_to_comp;
+       tail = io_sq->tail;
+       cnt = tail - next_to_comp;
+
+       return io_sq->q_depth - 1 - cnt;
+}
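+
+/*
+ * Worked example (illustrative): tail and next_to_comp are free-running u16
+ * counters, so "tail - next_to_comp" stays correct across wrap-around.  With
+ * q_depth == 256, tail == 0x0005 and next_to_comp == 0xfffd, the subtraction
+ * yields 8 in-flight descriptors and 256 - 1 - 8 == 247 free entries; one
+ * slot is kept unused so a completely full queue can be distinguished from
+ * an empty one.
+ */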
+
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+                                               u16 required_buffers)
+{
+       int temp;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+               return ena_com_free_desc(io_sq) >= required_buffers;
+
+       /* This calculation doesn't need to be 100% accurate, so to reduce
+        * the overhead just subtract 2 lines from the free descriptors
+        * (one for the header line and one to compensate for the division
+        * rounding down).
+        */
+       temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+       return ena_com_free_desc(io_sq) > temp;
+}
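+
+/*
+ * Worked example (illustrative): for an LLQ with descs_per_entry == 2 and
+ * required_buffers == 5, temp == 5 / 2 + 2 == 4, so the check passes only
+ * while more than 4 LLQ lines remain free.  The two extra lines account for
+ * the pushed header line and for the loss from rounding the division down,
+ * which is why the estimate is deliberately conservative.
+ */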
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+       u16 tail;
+
+       tail = io_sq->tail;
+
+       ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
+                   io_sq->qid, tail);
+
+       ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
+
+       return 0;
+}
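+
+/*
+ * Illustrative TX submit sketch (an assumption about the caller, not
+ * upstream code):
+ *
+ *	rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
+ *	if (rc == 0)
+ *		ena_com_write_sq_doorbell(io_sq);
+ *
+ * The doorbell write publishes the new tail; the device is not expected to
+ * fetch the freshly written descriptors before it happens.
+ */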
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+       u16 unreported_comp, head;
+       bool need_update;
+
+       head = io_cq->head;
+       unreported_comp = head - io_cq->last_head_update;
+       need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+       if (io_cq->cq_head_db_reg && need_update) {
+               ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+                           io_cq->qid, head);
+               ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
+               io_cq->last_head_update = head;
+       }
+
+       return 0;
+}
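+
+/*
+ * Worked example (illustrative): with q_depth == 1024 and
+ * ENA_COMP_HEAD_THRESH == 4, the completion-queue head is reported back to
+ * the device only after more than 256 completions have accumulated since the
+ * last update, keeping this doorbell write off the per-packet fast path.
+ */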
+
+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
+                                           u8 numa_node)
+{
+       struct ena_eth_io_numa_node_cfg_reg numa_cfg;
+
+       if (!io_cq->numa_node_cfg_reg)
+               return;
+
+       numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
+               | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+
+       ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+       io_sq->next_to_comp += elem;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* ENA_ETH_COM_H_ */
diff --git a/sys/dev/virtual/amazon/ena/ena-com/ena_eth_io_defs.h b/sys/dev/virtual/amazon/ena/ena-com/ena_eth_io_defs.h
new file mode 100644 (file)
index 0000000..c16fed8
--- /dev/null
@@ -0,0 +1,960 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+       ENA_ETH_IO_L3_PROTO_UNKNOWN     = 0,
+
+       ENA_ETH_IO_L3_PROTO_IPV4        = 8,
+
+       ENA_ETH_IO_L3_PROTO_IPV6        = 11,
+
+       ENA_ETH_IO_L3_PROTO_FCOE        = 21,
+
+       ENA_ETH_IO_L3_PROTO_ROCE        = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+       ENA_ETH_IO_L4_PROTO_UNKNOWN             = 0,
+
+       ENA_ETH_IO_L4_PROTO_TCP                 = 12,
+
+       ENA_ETH_IO_L4_PROTO_UDP                 = 13,
+
+       ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE      = 23,
+};
+
+struct ena_eth_io_tx_desc {
+       /* 15:0 : length - Buffer length in bytes, must
+        *    include any packet trailers that the ENA is
+        *    supposed to update, like End-to-End CRC,
+        *    Authentication GMAC, etc. This length must not
+        *    include the 'Push_Buffer' length. This length must
+        *    not include the 4 bytes added at the end for the
+        *    802.3 Ethernet FCS
+        * 21:16 : req_id_hi - Request ID[15:10]
+        * 22 : reserved22 - MBZ
+        * 23 : meta_desc - MBZ
+        * 24 : phase
+        * 25 : reserved1 - MBZ
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 28 : comp_req - Indicates whether a completion
+        *    should be posted after the packet is transmitted.
+        *    Valid only for the first descriptor
+        * 30:29 : reserved29 - MBZ
+        * 31 : reserved31 - MBZ
+        */
+       uint32_t len_ctrl;
+
+       /* 3:0 : l3_proto_idx - L3 protocol. This field is
+        *    required when l3_csum_en, l3_csum or tso_en are set.
+        * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4
+        *    and the DF flag of the IPv4 header is 0;
+        *    otherwise it must be set to 1
+        * 6:5 : reserved5
+        * 7 : tso_en - Enable TSO, for TCP only.
+        * 12:8 : l4_proto_idx - L4 protocol. This field needs
+        *    to be set when l4_csum_en or tso_en are set.
+        * 13 : l3_csum_en - enable IPv4 header checksum.
+        * 14 : l4_csum_en - enable TCP/UDP checksum.
+        * 15 : ethernet_fcs_dis - when set, the controller
+        *    will not append the 802.3 Ethernet Frame Check
+        *    Sequence to the packet
+        * 16 : reserved16
+        * 17 : l4_csum_partial - L4 partial checksum. When
+        *    set to 0, the ENA calculates the L4 checksum,
+        *    where the Destination Address required for the
+        *    TCP/UDP pseudo-header is taken from the actual
+        *    packet L3 header. When set to 1, the ENA doesn't
+        *    calculate the pseudo-header sum; the checksum
+        *    field of the L4 header is used instead. When TSO
+        *    is enabled, the checksum of the pseudo-header
+        *    must not include the TCP length field. L4 partial
+        *    checksum should be used for IPv6 packets that
+        *    contain Routing Headers.
+        * 20:18 : reserved18 - MBZ
+        * 21 : reserved21 - MBZ
+        * 31:22 : req_id_lo - Request ID[9:0]
+        */
+       uint32_t meta_ctrl;
+
+       uint32_t buff_addr_lo;
+
+       /* address high and header size
+        * 15:0 : addr_hi - Buffer Pointer[47:32]
+        * 23:16 : reserved16_w2
+        * 31:24 : header_length - Header length. For Low
+        *    Latency Queues, this field indicates the number
+        *    of bytes written to the headers' memory. For
+        *    normal queues, if the packet is TCP or UDP and
+        *    longer than max_header_size, then this field
+        *    should be set to the sum of the L4 header offset
+        *    and the L4 header size (without options);
+        *    otherwise, this field should be set to 0. For
+        *    both modes, this field must not exceed the
+        *    max_header_size.
+        *    max_header_size value is reported by the Max
+        *    Queues Feature descriptor
+        */
+       uint32_t buff_addr_hi_hdr_sz;
+};
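+
+/* Illustrative note (not part of the upstream definitions): the 16-bit
+ * request ID is split across the two control words above.  For example,
+ * req_id == 0x05ab would be encoded as
+ *
+ *	desc->len_ctrl  |= ((0x05ab >> 10) & 0x3f) << 16;	req_id_hi = ID[15:10]
+ *	desc->meta_ctrl |= (0x05ab & 0x3ff) << 22;		req_id_lo = ID[9:0]
+ *
+ * (the driver normally uses the matching mask/shift defines for this rather
+ * than open-coded constants).
+ */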
+
+struct ena_eth_io_tx_meta_desc {
+       /* 9:0 : req_id_lo - Request ID[9:0]
+        * 11:10 : reserved10 - MBZ
+        * 12 : reserved12 - MBZ
+        * 13 : reserved13 - MBZ
+        * 14 : ext_valid - if set, the offset fields in Word2
+        *    are valid, as are MSS High in Word 0 and bits
+        *    [31:24] in Word 3
+        * 15 : reserved15
+        * 19:16 : mss_hi
+        * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+        *    Extended Metadata Descriptor
+        * 21 : meta_store - Store extended metadata in queue
+        *    cache
+        * 22 : reserved22 - MBZ
+        * 23 : meta_desc - MBO
+        * 24 : phase
+        * 25 : reserved25 - MBZ
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 28 : comp_req - Indicates whether a completion
+        *    should be posted after the packet is transmitted.
+        *    Valid only for the first descriptor
+        * 30:29 : reserved29 - MBZ
+        * 31 : reserved31 - MBZ
+        */
+       uint32_t len_ctrl;
+
+       /* 5:0 : req_id_hi
+        * 31:6 : reserved6 - MBZ
+        */
+       uint32_t word1;
+
+       /* 7:0 : l3_hdr_len
+        * 15:8 : l3_hdr_off
+        * 21:16 : l4_hdr_len_in_words - counts the L4 header
+        *    length in words. There is an explicit assumption
+        *    that the L4 header appears right after the L3
+        *    header and the L4 offset is based on
+        *    l3_hdr_off+l3_hdr_len
+        * 31:22 : mss_lo
+        */
+       uint32_t word2;
+
+       uint32_t reserved;
+};
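+
+/* Illustrative note (not part of the upstream definitions): the TSO MSS is
+ * split the same way, with mss_hi in bits 19:16 of len_ctrl and mss_lo in
+ * bits 31:22 of word2, for a 14-bit value.  An MSS of 1448 (0x5a8) is thus
+ * stored as mss_hi == 0x1 and mss_lo == 0x1a8.
+ */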
+
+struct ena_eth_io_tx_cdesc {
+       /* Request ID[15:0] */
+       uint16_t req_id;
+
+       uint8_t status;
+
+       /* flags
+        * 0 : phase
+        * 7:1 : reserved1
+        */
+       uint8_t flags;
+
+       uint16_t sub_qid;
+
+       uint16_t sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+       /* In bytes. 0 means 64KB */
+       uint16_t length;
+
+       /* MBZ */
+       uint8_t reserved2;
+
+       /* 0 : phase
+        * 1 : reserved1 - MBZ
+        * 2 : first - Indicates first descriptor in
+        *    transaction
+        * 3 : last - Indicates last descriptor in transaction
+        * 4 : comp_req
+        * 5 : reserved5 - MBO
+        * 7:6 : reserved6 - MBZ
+        */
+       uint8_t ctrl;
+
+       uint16_t req_id;
+
+       /* MBZ */
+       uint16_t reserved6;
+
+       uint32_t buff_addr_lo;
+
+       uint16_t buff_addr_hi;
+
+       /* MBZ */
+       uint16_t reserved16_w3;
+};
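+
+/* Illustrative note (not part of the upstream definitions): because length
+ * is a 16-bit field, a full 64KB buffer cannot be expressed directly;
+ * posting length == 0 advertises a 65536-byte receive buffer, while any
+ * other value is the buffer size in bytes (e.g. length == 2048 for a 2KB
+ * cluster).
+ */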
+
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+       /* 4:0 : l3_proto_idx
+        * 6:5 : src_vlan_cnt
+        * 7 : reserved7 - MBZ
+        * 12:8 : l4_proto_idx
+        * 13 : l3_csum_err - when set, either an L3
+        *    checksum error was detected or the controller
+        *    didn't validate the checksum. This bit is valid
+        *    only when l3_proto_idx indicates an IPv4 packet
+        * 14 : l4_csum_err - when set, either an L4
+        *    checksum error was detected or the controller
+        *    didn't validate the checksum. This bit is valid
+        *    only when l4_proto_idx indicates a TCP/UDP packet
+        *    and ipv4_frag is not set
+        * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+        * 23:16 : reserved16
+        * 24 : phase
+        * 25 : l3_csum2 - second checksum engine result
+        * 26 : first - Indicates first descriptor in
+        *    transaction
+        * 27 : last - Indicates last descriptor in
+        *    transaction
+        * 29:28 : reserved28
+        * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+        *    Descriptor was used
+        * 31 : reserved31
+        */
+       uint32_t status;
+
+       uint16_t length;
+
+       uint16_t req_id;
+
+       /* 32-bit hash result */
+       uint32_t hash;
+
+       uint16_t sub_qid;
+
+       uint16_t