/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 expected_phase, head_masked;
        u16 desc_phase;

        head_masked = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
                        + (head_masked * io_cq->cdesc_entry_size_in_bytes));

        desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

        if (desc_phase != expected_phase)
                return NULL;

        return cdesc;
}

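/* A note on the phase-bit scheme used above and throughout this file
 * (illustrative; the initial phase value is set when the queue is created
 * elsewhere in ena_com, typically to 1): the device writes each completion
 * descriptor with the phase value of the current lap over the ring, so an
 * entry whose phase bit differs from io_cq->phase is a stale entry left over
 * from the previous lap. For example, with q_depth = 8, once head wraps from
 * 7 back to 0 the driver flips its expected phase, and old descriptors still
 * carrying the previous phase are not mistaken for new completions.
 */
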
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
        io_cq->head++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
                io_cq->phase ^= 1;
}

static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
        u16 tail_masked;
        u32 offset;

        tail_masked = io_sq->tail & (io_sq->q_depth - 1);

        offset = tail_masked * io_sq->desc_entry_size;

        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
                                                      u8 *bounce_buffer)
{
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u16 dst_tail_mask;
        u32 dst_offset;

        dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
        dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

        /* Make sure everything was written into the bounce buffer before
         * writing the bounce buffer to the device
         */
        wmb();

        /* The line is completed. Copy it to dev */
        ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
                                bounce_buffer,
                                llq_info->desc_list_entry_size);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;
}

static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
                                                 u8 *header_src,
                                                 u16 header_len)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
        u16 header_offset;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                return 0;

        header_offset =
                llq_info->descs_num_before_header * io_sq->desc_entry_size;

        if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
                ena_trc_err("trying to write header larger than llq entry can accommodate\n");
                return ENA_COM_FAULT;
        }

        if (unlikely(!bounce_buffer)) {
                ena_trc_err("bounce buffer is NULL\n");
                return ENA_COM_FAULT;
        }

        memcpy(bounce_buffer + header_offset, header_src, header_len);

        return 0;
}

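/* Worked example for the header placement above (the numbers are assumptions
 * for illustration, not a specific device profile): with
 * desc_list_entry_size = 128, desc_entry_size = 16 and
 * descs_num_before_header = 2, the packet header lands in the bounce buffer
 * at offset 2 * 16 = 32, leaving the first two descriptor slots intact, and
 * any header longer than 128 - 32 = 96 bytes is rejected with ENA_COM_FAULT.
 */
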
static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        u8 *bounce_buffer;
        void *sq_desc;

        bounce_buffer = pkt_ctrl->curr_bounce_buf;

        if (unlikely(!bounce_buffer)) {
                ena_trc_err("bounce buffer is NULL\n");
                return NULL;
        }

        sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
        pkt_ctrl->idx++;
        pkt_ctrl->descs_left_in_line--;

        return sq_desc;
}

static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                return;

        /* bounce buffer was used, so write it and get a new one */
        if (pkt_ctrl->idx) {
                ena_com_write_bounce_buffer_to_dev(io_sq,
                                                   pkt_ctrl->curr_bounce_buf);
                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);
        }

        pkt_ctrl->idx = 0;
        pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return get_sq_desc_llq(io_sq);

        return get_sq_desc_regular_queue(io_sq);
}

static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;

        if (!pkt_ctrl->descs_left_in_line) {
                ena_com_write_bounce_buffer_to_dev(io_sq,
                                                   pkt_ctrl->curr_bounce_buf);

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);

                pkt_ctrl->idx = 0;
                if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
                        pkt_ctrl->descs_left_in_line = 1;
                else
                        pkt_ctrl->descs_left_in_line =
                                llq_info->desc_list_entry_size / io_sq->desc_entry_size;
        }
}

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                ena_com_sq_update_llq_tail(io_sq);
                return;
        }

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;
}

static inline struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
        idx &= (io_cq->q_depth - 1);
        return (struct ena_eth_io_rx_cdesc_base *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                idx * io_cq->cdesc_entry_size_in_bytes);
}

static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
                                           u16 *first_cdesc_idx)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 count = 0, head_masked;
        u32 last = 0;

        do {
                cdesc = ena_com_get_next_rx_cdesc(io_cq);
                if (!cdesc)
                        break;

                ena_com_cq_inc_head(io_cq);
                count++;
                last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
        } while (!last);

        if (last) {
                *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
                count += io_cq->cur_rx_pkt_cdesc_count;

                head_masked = io_cq->head & (io_cq->q_depth - 1);

                io_cq->cur_rx_pkt_cdesc_count = 0;
                io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

                ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
                            io_cq->qid, *first_cdesc_idx, count);
        } else {
                io_cq->cur_rx_pkt_cdesc_count += count;
                count = 0;
        }

        return count;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
                                             struct ena_com_tx_ctx *ena_tx_ctx)
{
        int rc;

        if (ena_tx_ctx->meta_valid) {
                rc = memcmp(&io_sq->cached_tx_meta,
                            &ena_tx_ctx->ena_meta,
                            sizeof(struct ena_com_tx_meta));

                if (unlikely(rc != 0))
                        return true;
        }

        return false;
}

static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                                                          struct ena_com_tx_ctx *ena_tx_ctx)
{
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

        meta_desc = get_sq_desc(io_sq);
        memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

        /* bits 0-9 of the mss */
        meta_desc->word2 |= (ena_meta->mss <<
                ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
        /* bits 10-13 of the mss */
        meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
                ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

        /* Extended meta desc */
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
        meta_desc->len_ctrl |= (io_sq->phase <<
                ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
        meta_desc->word2 |= ena_meta->l3_hdr_len &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
        meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

        meta_desc->word2 |= (ena_meta->l4_hdr_len <<
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

        /* Cache the meta desc */
        memcpy(&io_sq->cached_tx_meta, ena_meta,
               sizeof(struct ena_com_tx_meta));

        ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                                        struct ena_eth_io_rx_cdesc_base *cdesc)
{
        ena_rx_ctx->l3_proto = cdesc->status &
                ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
        ena_rx_ctx->l4_proto =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
        ena_rx_ctx->l4_csum_err =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

        ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
                    ena_rx_ctx->l3_proto,
                    ena_rx_ctx->l4_proto,
                    ena_rx_ctx->l3_csum_err,
                    ena_rx_ctx->l4_csum_err,
                    ena_rx_ctx->hash,
                    ena_rx_ctx->frag,
                    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc)
{
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
        void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
        u16 start_tail = io_sq->tail;
        int i, rc;
        bool have_meta;
        u64 addr_hi;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
                 "wrong Q type");

        /* num_bufs +1 for potential meta desc */
        if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
                ena_trc_err("Not enough space in the tx queue\n");
                return ENA_COM_NO_MEM;
        }

        if (unlikely(header_len > io_sq->tx_max_header_size)) {
                ena_trc_err("header size is too large %d max header: %d\n",
                            header_len, io_sq->tx_max_header_size);
                return ENA_COM_INVAL;
        }

        if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
                return ENA_COM_INVAL;

        rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;

        have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
                        ena_tx_ctx);
        if (have_meta)
                ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

        /* If the caller doesn't want to send packets */
        if (unlikely(!num_bufs && !header_len)) {
                ena_com_close_bounce_buffer(io_sq);
                *nb_hw_desc = io_sq->tail - start_tail;
                return 0;
        }

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

        /* Set first desc when we don't have meta descriptor */
        if (!have_meta)
                desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

        desc->buff_addr_hi_hdr_sz |= (header_len <<
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
        desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_DESC_PHASE_MASK;

        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

        /* Bits 0-9 of the req_id */
        desc->meta_ctrl |= (ena_tx_ctx->req_id <<
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

        desc->meta_ctrl |= (ena_tx_ctx->df <<
                ENA_ETH_IO_TX_DESC_DF_SHIFT) &
                ENA_ETH_IO_TX_DESC_DF_MASK;

        /* Bits 10-15 of the req_id */
        desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

        if (ena_tx_ctx->meta_valid) {
                desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
                        ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
                desc->meta_ctrl |= ena_tx_ctx->l3_proto &
                        ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
        }

        for (i = 0; i < num_bufs; i++) {
                /* The first desc shares the same desc as the header */
                if (likely(i != 0)) {
                        ena_com_sq_update_tail(io_sq);

                        desc = get_sq_desc(io_sq);
                        if (unlikely(!desc))
                                return ENA_COM_FAULT;

                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

                        desc->len_ctrl |= (io_sq->phase <<
                                ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                                ENA_ETH_IO_TX_DESC_PHASE_MASK;
                }

                desc->len_ctrl |= ena_bufs->len &
                        ENA_ETH_IO_TX_DESC_LENGTH_MASK;

                addr_hi = ((ena_bufs->paddr &
                        GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

                desc->buff_addr_lo = (u32)ena_bufs->paddr;
                desc->buff_addr_hi_hdr_sz |= addr_hi &
                        ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
                ena_bufs++;
        }

        /* set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

        ena_com_sq_update_tail(io_sq);

        ena_com_close_bounce_buffer(io_sq);

        *nb_hw_desc = io_sq->tail - start_tail;
        return 0;
}

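/* Minimal usage sketch for ena_com_prepare_tx() (illustrative only; tx_ring,
 * buf_dma_addr, frame_len and req_id are hypothetical driver-side names, not
 * part of ena_com):
 *
 *      struct ena_com_buf ena_buf;
 *      struct ena_com_tx_ctx tx_ctx = {0};
 *      int nb_hw_desc = 0;
 *      int rc;
 *
 *      ena_buf.paddr = buf_dma_addr;   // DMA address of the mapped frame
 *      ena_buf.len = frame_len;
 *      tx_ctx.ena_bufs = &ena_buf;
 *      tx_ctx.num_bufs = 1;
 *      tx_ctx.req_id = req_id;         // driver-chosen id, echoed back on completion
 *      tx_ctx.header_len = 0;          // no pushed header (host placement policy)
 *
 *      rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &tx_ctx, &nb_hw_desc);
 *      if (rc)
 *              return rc;
 *      // nb_hw_desc now holds the number of SQ entries the packet consumed;
 *      // the doorbell is rung separately (e.g. via ena_com_write_sq_doorbell()).
 */
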
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx)
{
        struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
        struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
        u16 cdesc_idx = 0;
        u16 nb_hw_desc;
        u16 i;

        ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 "wrong Q type");

        nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
        if (nb_hw_desc == 0) {
                ena_rx_ctx->descs = nb_hw_desc;
                return 0;
        }

        ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
                    io_cq->qid, nb_hw_desc);

        if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
                ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
                            nb_hw_desc, ena_rx_ctx->max_bufs);
                return ENA_COM_NO_SPACE;
        }

        for (i = 0; i < nb_hw_desc; i++) {
                cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

                ena_buf->len = cdesc->length;
                ena_buf->req_id = cdesc->req_id;
                ena_buf++;
        }

        /* Update SQ head ptr */
        io_sq->next_to_comp += nb_hw_desc;

        ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
                    io_sq->qid, io_sq->next_to_comp);

        /* Get rx flags from the last pkt */
        ena_com_rx_set_flags(ena_rx_ctx, cdesc);

        ena_rx_ctx->descs = nb_hw_desc;
        return 0;
}

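/* Minimal RX polling sketch (illustrative; rx_ring and MAX_BUFS_PER_PKT are
 * hypothetical driver-side names):
 *
 *      struct ena_com_rx_buf_info bufs[MAX_BUFS_PER_PKT];
 *      struct ena_com_rx_ctx rx_ctx = {0};
 *      int rc;
 *
 *      rx_ctx.ena_bufs = bufs;
 *      rx_ctx.max_bufs = MAX_BUFS_PER_PKT;
 *
 *      rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq, &rx_ctx);
 *      if (rc)
 *              return rc;
 *      if (rx_ctx.descs == 0)
 *              return 0;       // no complete packet yet, poll again later
 *      // bufs[0..rx_ctx.descs - 1] now describe one received packet; each req_id
 *      // maps back to a buffer previously posted with ena_com_add_single_rx_desc().
 */
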
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id)
{
        struct ena_eth_io_rx_desc *desc;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 "wrong Q type");

        if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return ENA_COM_NO_SPACE;

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;

        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

        desc->length = ena_buf->len;

        desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
        desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

        desc->req_id = req_id;

        desc->buff_addr_lo = (u32)ena_buf->paddr;
        desc->buff_addr_hi =
                ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

        ena_com_sq_update_tail(io_sq);

        return 0;
}

int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
        u8 expected_phase, cdesc_phase;
        struct ena_eth_io_tx_cdesc *cdesc;
        u16 masked_head;

        masked_head = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_tx_cdesc *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                (masked_head * io_cq->cdesc_entry_size_in_bytes));

        /* When the current completion descriptor phase isn't the same as the
         * expected, it means that the device still didn't update
         * this completion.
         */
        cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
        if (cdesc_phase != expected_phase)
                return ENA_COM_TRY_AGAIN;

        if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
                ena_trc_err("Invalid req id %d\n", cdesc->req_id);
                return ENA_COM_INVAL;
        }

        ena_com_cq_inc_head(io_cq);

        *req_id = READ_ONCE(cdesc->req_id);

        return 0;
}
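
/* Illustrative TX completion reaping loop (tx_ring and tx_info are
 * hypothetical driver-side bookkeeping, not part of ena_com):
 *
 *      u16 req_id;
 *
 *      while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) == 0) {
 *              // req_id is the id the driver passed in ena_com_prepare_tx();
 *              // unmap and free that packet's buffers here, then release the
 *              // SQ entries it consumed (the nb_hw_desc saved at prepare time).
 *              ena_com_comp_ack(tx_ring->ena_com_io_sq, tx_info[req_id].tx_descs);
 *      }
 *      // A return value of ENA_COM_TRY_AGAIN means no further completions are
 *      // ready yet; the caller typically stops polling at that point.
 */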