/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vppinfra/vector/ip_csum.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <vnet/devices/devices.h>

#include <avf/avf.h>

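/* Read the descriptor type (DTYP) field from the low 4 bits of the
 * second quadword. Descriptors the hardware has completed read back
 * with DTYP 0xF, which the cleanup path in the TX function below uses
 * to detect finished transmissions. */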
static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
  return d->qword[1] & 0x0f;
}

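/* IPv4 and IPv6 pseudo-header layouts, used below to seed the TCP/UDP
 * checksum field before the packet is handed to the hardware
 * checksum/TSO offload engine. */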
struct avf_ip4_psh
{
  u32 src;
  u32 dst;
  u8 zero;
  u8 proto;
  u16 l4len;
};

struct avf_ip6_psh
{
  ip6_address_t src;
  ip6_address_t dst;
  u32 l4len;
  u32 proto;
};

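/* Build the descriptor command and offset flags for checksum offload
 * and TSO: L2/L3/L4 header lengths, IP version and L4 protocol
 * selection. The L4 checksum field is pre-seeded with the one's
 * complement sum of the IP pseudo-header, which the hardware then
 * folds into the final checksum. */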
static_always_inline u64
avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
{
  u64 flags = 0;
  if (!is_tso && !(b->flags & VNET_BUFFER_F_OFFLOAD))
    return 0;

  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
  u32 is_tcp = is_tso || oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  u32 is_udp = !is_tso && oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  if (!is_tcp && !is_udp)
    return 0;

  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;

  ASSERT (!(is_tcp && is_udp));
  ASSERT (is_ip4 || is_ip6);
  i16 l2_hdr_offset = b->current_data;
  i16 l3_hdr_offset = vnet_buffer (b)->l3_hdr_offset;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset;
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  ip4_header_t *ip4 = (void *) (b->data + l3_hdr_offset);
  ip6_header_t *ip6 = (void *) (b->data + l3_hdr_offset);
  tcp_header_t *tcp = (void *) (b->data + l4_hdr_offset);
  udp_header_t *udp = (void *) (b->data + l4_hdr_offset);
  u16 l4_len = is_tcp ? tcp_header_bytes (tcp) : sizeof (udp_header_t);
  u16 sum = 0;

  flags |= AVF_TXD_OFFSET_MACLEN (l2_len) |
    AVF_TXD_OFFSET_IPLEN (l3_len) | AVF_TXD_OFFSET_L4LEN (l4_len);
  flags |= is_ip4 ? AVF_TXD_CMD_IIPT_IPV4 : AVF_TXD_CMD_IIPT_IPV6;
  flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : AVF_TXD_CMD_L4T_UDP;

  if (is_ip4)
    ip4->checksum = 0;

  if (is_tso)
    {
      if (is_ip4)
        ip4->length = 0;
      else
        ip6->payload_length = 0;
    }

  if (is_ip4)
    {
      struct avf_ip4_psh psh = { 0 };
      psh.src = ip4->src_address.as_u32;
      psh.dst = ip4->dst_address.as_u32;
      psh.proto = ip4->protocol;
      psh.l4len =
        is_tso ? 0 :
        clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
                              (l4_hdr_offset - l3_hdr_offset));
      sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
    }
  else
    {
      struct avf_ip6_psh psh = { 0 };
      psh.src = ip6->src_address;
      psh.dst = ip6->dst_address;
      psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
      psh.l4len = is_tso ? 0 : ip6->payload_length;
      sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
    }

  if (is_tcp)
    tcp->checksum = sum;
  else
    udp->checksum = sum;
  return flags;
}

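/* TSO packets need an extra context descriptor carrying the segment
 * size (MSS) and total L4 payload length. A context descriptor
 * occupies a ring slot but carries no packet buffer, so a
 * reference-counted placeholder buffer from txq->ph_bufs is recorded
 * in its slot, letting the completion path free every slot uniformly.
 * Returns the placeholder buffer index to store alongside the
 * descriptor. */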
static_always_inline u32
avf_tx_fill_ctx_desc (vlib_main_t *vm, avf_txq_t *txq, avf_tx_desc_t *d,
                      vlib_buffer_t *b)
{
  vlib_buffer_t *ctx_ph;
  u32 *bi = txq->ph_bufs;

next:
  ctx_ph = vlib_get_buffer (vm, bi[0]);
  if (PREDICT_FALSE (ctx_ph->ref_count == 255))
    {
      bi++;
      goto next;
    }

  /* Acquire a reference on the placeholder buffer */
  ctx_ph->ref_count++;

  u16 l234hdr_sz = vnet_buffer (b)->l4_hdr_offset - b->current_data +
                   vnet_buffer2 (b)->gso_l4_hdr_sz;
  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
  d[0].qword[0] = 0;
  d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
    | AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
    AVF_TXD_CTX_SEG_TLEN (tlen);
  return bi[0];
}

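/* Copy 16-byte descriptors from the staging area into the descriptor
 * ring using the widest unaligned vector loads/stores the build
 * supports (512-, 256- or 128-bit), with a scalar loop for the
 * remainder. */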
static_always_inline void
avf_tx_copy_desc (avf_tx_desc_t *d, avf_tx_desc_t *s, u32 n_descs)
{
#if defined CLIB_HAVE_VEC512
  while (n_descs >= 8)
    {
      u64x8u *dv = (u64x8u *) d;
      u64x8u *sv = (u64x8u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 8;
      s += 8;
      n_descs -= 8;
    }
#elif defined CLIB_HAVE_VEC256
  while (n_descs >= 4)
    {
      u64x4u *dv = (u64x4u *) d;
      u64x4u *sv = (u64x4u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 4;
      s += 4;
      n_descs -= 4;
    }
#elif defined CLIB_HAVE_VEC128
  while (n_descs >= 2)
    {
      u64x2u *dv = (u64x2u *) d;
      u64x2u *sv = (u64x2u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 2;
      s += 2;
      n_descs -= 2;
    }
#endif
  while (n_descs)
    {
      d[0].qword[0] = s[0].qword[0];
      d[0].qword[1] = s[0].qword[1];
      d++;
      s++;
      n_descs--;
    }
}

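/* Fill one data descriptor: qword[0] carries the buffer DMA address
 * (virtual when VA DMA is usable, physical otherwise) and qword[1]
 * packs the buffer length (shifted into bits 34 and up) together with
 * the command flags. */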
static_always_inline void
avf_tx_fill_data_desc (vlib_main_t *vm, avf_tx_desc_t *d, vlib_buffer_t *b,
                       u64 cmd, int use_va_dma)
{
  if (use_va_dma)
    d->qword[0] = vlib_buffer_get_current_va (b);
  else
    d->qword[0] = vlib_buffer_get_current_pa (vm, b);
  d->qword[1] = (((u64) b->current_length) << 34 | cmd | AVF_TXD_CMD_RSV);
}
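
/* Stage descriptors for up to n_packets packets into txq->tmp_descs
 * and the matching buffer indices into txq->tmp_bufs. Simple
 * single-buffer packets without offloads are handled four (or, with
 * 512-bit vectors, eight) at a time; chained, offloaded and GSO
 * packets take the one-by-one path. Returns the number of packets
 * consumed and stores the number of descriptors staged in
 * *n_enq_descs. */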
static_always_inline u16
avf_tx_prepare (vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq,
                u32 *buffers, u32 n_packets, u16 *n_enq_descs, int use_va_dma)
{
  const u64 cmd_eop = AVF_TXD_CMD_EOP;
  u16 n_free_desc, n_desc_left, n_packets_left = n_packets;
#if defined CLIB_HAVE_VEC512
  vlib_buffer_t *b[8];
#else
  vlib_buffer_t *b[4];
#endif
  avf_tx_desc_t *d = txq->tmp_descs;
  u32 *tb = txq->tmp_bufs;

  n_free_desc = n_desc_left = txq->size - txq->n_enqueued - 8;

  if (n_desc_left == 0)
    return 0;

  while (n_packets_left && n_desc_left)
    {
#if defined CLIB_HAVE_VEC512
      u32 flags;
      u64x8 or_flags_vec512;
      u64x8 flags_mask_vec512;
#else
      u32 flags, or_flags;
#endif

#if defined CLIB_HAVE_VEC512
      if (n_packets_left < 8 || n_desc_left < 8)
#else
      if (n_packets_left < 8 || n_desc_left < 4)
#endif
        goto one_by_one;

#if defined CLIB_HAVE_VEC512
      u64x8 base_ptr = u64x8_splat (vm->buffer_main->buffer_mem_start);
      u32x8 buf_indices = u32x8_load_unaligned (buffers);

      *(u64x8 *) &b = base_ptr + u64x8_from_u32x8 (
                                   buf_indices << CLIB_LOG2_CACHE_LINE_BYTES);

      or_flags_vec512 = u64x8_i64gather (u64x8_load_unaligned (b), 0, 1);
#else
      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
#endif

#if defined CLIB_HAVE_VEC512
      flags_mask_vec512 = u64x8_splat (
        VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD | VNET_BUFFER_F_GSO);
      if (PREDICT_FALSE (
            !u64x8_is_all_zero (or_flags_vec512 & flags_mask_vec512)))
#else
      if (PREDICT_FALSE (or_flags &
                         (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD |
                          VNET_BUFFER_F_GSO)))
#endif
        goto one_by_one;

#if defined CLIB_HAVE_VEC512
      vlib_buffer_copy_indices (tb, buffers, 8);
      avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 4, b[4], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 5, b[5], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 6, b[6], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 7, b[7], cmd_eop, use_va_dma);

      buffers += 8;
      n_packets_left -= 8;
      n_desc_left -= 8;
      d += 8;
      tb += 8;
#else
      vlib_buffer_copy_indices (tb, buffers, 4);

      avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);

      buffers += 4;
      n_packets_left -= 4;
      n_desc_left -= 4;
      d += 4;
      tb += 4;
#endif

      continue;

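      /* Slow path: one packet at a time, handling buffer chains,
       * checksum offload and GSO. */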
    one_by_one:
      tb[0] = buffers[0];
      b[0] = vlib_get_buffer (vm, buffers[0]);
      flags = b[0]->flags;

      /* No chained buffers or TSO case */
      if (PREDICT_TRUE (
            (flags & (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_GSO)) == 0))
        {
          u64 cmd = cmd_eop;

          if (PREDICT_FALSE (flags & VNET_BUFFER_F_OFFLOAD))
            cmd |= avf_tx_prepare_cksum (b[0], 0 /* is_tso */);

          avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
        }
      else
        {
          u16 n_desc_needed = 1;
          u64 cmd = 0;

          if (flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              vlib_buffer_t *next = vlib_get_buffer (vm, b[0]->next_buffer);
              n_desc_needed = 2;
              while (next->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  next = vlib_get_buffer (vm, next->next_buffer);
                  n_desc_needed++;
                }
            }

          if (flags & VNET_BUFFER_F_GSO)
            {
              n_desc_needed++;
            }
          else if (PREDICT_FALSE (n_desc_needed > 8))
            {
              vlib_buffer_free_one (vm, buffers[0]);
              vlib_error_count (vm, node->node_index,
                                AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
              n_packets_left -= 1;
              buffers += 1;
              continue;
            }

          if (PREDICT_FALSE (n_desc_left < n_desc_needed))
            break;

          if (flags & VNET_BUFFER_F_GSO)
            {
              /* Enqueue a context descriptor */
              tb[1] = tb[0];
              tb[0] = avf_tx_fill_ctx_desc (vm, txq, d, b[0]);
              n_desc_left -= 1;
              d += 1;
              tb += 1;
              cmd = avf_tx_prepare_cksum (b[0], 1 /* is_tso */);
            }
          else if (flags & VNET_BUFFER_F_OFFLOAD)
            {
              cmd = avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
            }

          /* Deal with chain buffer if present */
          while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);

              n_desc_left -= 1;
              d += 1;
              tb += 1;

              tb[0] = b[0]->next_buffer;
              b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
            }

          avf_tx_fill_data_desc (vm, d, b[0], cmd_eop | cmd, use_va_dma);
        }

      buffers += 1;
      n_packets_left -= 1;
      n_desc_left -= 1;
      d += 1;
      tb += 1;
    }

  *n_enq_descs = n_free_desc - n_desc_left;
  return n_packets - n_packets_left;
}

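/* Device-class TX function: reclaim descriptors completed since the
 * last report-status (RS) writeback, stage new descriptors, copy them
 * into the ring (in two chunks when the ring wraps), request a status
 * writeback on the last descriptor of the batch and bump the tail
 * register. */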
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = avf_get_device (rd->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u8 qid = tf->queue_id;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid);
  u16 next;
  u16 mask = txq->size - 1;
  u32 *buffers = vlib_frame_vector_args (frame);
  u16 n_enq, n_left, n_desc, *slot;
  u16 n_retry = 2;

  if (tf->shared_queue)
    clib_spinlock_lock (&txq->lock);

  n_left = frame->n_vectors;

retry:
  next = txq->next;
  /* Release consumed buffers: walk the ring of report-status (RS)
   * slots; each slot whose descriptor reads back as DTYP 0xF has been
   * completed by the hardware, so every buffer from the oldest
   * enqueued entry up to and including that slot can be freed. */
  if (txq->n_enqueued)
    {
      i32 complete_slot = -1;
      while (1)
        {
          u16 *slot = clib_ring_get_first (txq->rs_slots);

          if (slot == 0)
            break;

          if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
            break;

          complete_slot = slot[0];

          clib_ring_deq (txq->rs_slots);
        }

      if (complete_slot >= 0)
        {
          u16 first, mask, n_free;
          mask = txq->size - 1;
          first = (txq->next - txq->n_enqueued) & mask;
          n_free = (complete_slot + 1 - first) & mask;

          txq->n_enqueued -= n_free;
          vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first, txq->size,
                                              n_free);
        }
    }

  n_desc = 0;
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 1);
  else
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 0);

  if (n_desc)
    {
      if (PREDICT_TRUE (next + n_desc <= txq->size))
        {
          /* no wrap */
          avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_desc);
          vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs, n_desc);
        }
      else
        {
          /* wrap */
          u32 n_not_wrap = txq->size - next;
          avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_not_wrap);
          avf_tx_copy_desc (txq->descs, txq->tmp_descs + n_not_wrap,
                            n_desc - n_not_wrap);
          vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs,
                                    n_not_wrap);
          vlib_buffer_copy_indices (txq->bufs, txq->tmp_bufs + n_not_wrap,
                                    n_desc - n_not_wrap);
        }

      next += n_desc;
      if ((slot = clib_ring_enq (txq->rs_slots)))
        {
          u16 rs_slot = slot[0] = (next - 1) & mask;
          txq->descs[rs_slot].qword[1] |= AVF_TXD_CMD_RS;
        }

      txq->next = next & mask;
      avf_tail_write (txq->qtx_tail, txq->next);
      txq->n_enqueued += n_desc;
      n_left -= n_enq;
    }

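  /* Some packets did not fit in the ring: advance past those already
   * enqueued and retry (the reclaim pass at the retry label may free
   * more slots); once retries are exhausted, drop the remainder and
   * count them as NO_FREE_SLOTS errors. */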
  if (n_left)
    {
      buffers += n_enq;

      if (n_retry--)
        goto retry;

      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index,
                        AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
    }

  if (tf->shared_queue)
    clib_spinlock_unlock (&txq->lock);

  return frame->n_vectors - n_left;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
