LCOV - code coverage report
Current view: top level - vnet/gso - node.c
Test:         coverage-filtered.info
Date:         2023-10-26 01:39:38

              Hit     Total    Coverage
  Lines:      240     406      59.1 %
  Functions:  30      37       81.1 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2018 Cisco and/or its affiliates.
       3             :  * Licensed under the Apache License, Version 2.0 (the "License");
       4             :  * you may not use this file except in compliance with the License.
       5             :  * You may obtain a copy of the License at:
       6             :  *
       7             :  *     http://www.apache.org/licenses/LICENSE-2.0
       8             :  *
       9             :  * Unless required by applicable law or agreed to in writing, software
      10             :  * distributed under the License is distributed on an "AS IS" BASIS,
      11             :  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      12             :  * See the License for the specific language governing permissions and
      13             :  * limitations under the License.
      14             :  */
      15             : 
      16             : #include <vlib/vlib.h>
      17             : #include <vnet/vnet.h>
      18             : #include <vppinfra/error.h>
      19             : #include <vnet/ethernet/ethernet.h>
      20             : #include <vnet/feature/feature.h>
      21             : #include <vnet/gso/gso.h>
      22             : #include <vnet/gso/hdr_offset_parser.h>
      23             : #include <vnet/ip/icmp46_packet.h>
      24             : #include <vnet/ip/ip4.h>
      25             : #include <vnet/ip/ip6.h>
      26             : #include <vnet/udp/udp_packet.h>
      27             : 
      28             : #define foreach_gso_error                                                     \
      29             :   _ (NO_BUFFERS, "no buffers to segment GSO")                                 \
      30             :   _ (UNHANDLED_TYPE, "unhandled gso type")
      31             : 
      32             : static char *gso_error_strings[] = {
      33             : #define _(sym, string) string,
      34             :   foreach_gso_error
      35             : #undef _
      36             : };
      37             : 
      38             : typedef enum
      39             : {
      40             : #define _(sym, str) GSO_ERROR_##sym,
      41             :   foreach_gso_error
      42             : #undef _
      43             :     GSO_N_ERROR,
      44             : } gso_error_t;
      45             : 
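For readers unfamiliar with the X-macro pattern above: after preprocessing, the two foreach_gso_error expansions produce roughly the following (an illustrative sketch, not part of the measured file):

    static char *gso_error_strings[] = {
      "no buffers to segment GSO",
      "unhandled gso type",
    };

    typedef enum
    {
      GSO_ERROR_NO_BUFFERS,
      GSO_ERROR_UNHANDLED_TYPE,
      GSO_N_ERROR,
    } gso_error_t;

Driving both expansions from the single foreach_gso_error list keeps the error enum and the error strings in sync.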
      46             : typedef enum
      47             : {
      48             :   GSO_NEXT_DROP,
      49             :   GSO_N_NEXT,
      50             : } gso_next_t;
      51             : 
      52             : typedef struct
      53             : {
      54             :   u32 flags;
      55             :   u16 gso_size;
      56             :   u8 gso_l4_hdr_sz;
      57             :   generic_header_offset_t gho;
      58             : } gso_trace_t;
      59             : 
      60             : static u8 *
      61         780 : format_gso_trace (u8 * s, va_list * args)
      62             : {
      63         780 :   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
      64         780 :   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
      65         780 :   gso_trace_t *t = va_arg (*args, gso_trace_t *);
      66             : 
      67         780 :   if (t->flags & VNET_BUFFER_F_GSO)
      68             :     {
      69         185 :       s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
      70         185 :                   t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
      71             :                   &t->gho);
      72             :     }
      73             :   else
      74             :     {
      75             :       s =
      76         595 :         format (s, "non-gso buffer\n%U", format_generic_header_offset,
      77             :                 &t->gho);
      78             :     }
      79             : 
      80         780 :   return s;
      81             : }
      82             : 
      83             : static_always_inline u16
      84          20 : tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
      85             :                                vnet_interface_per_thread_data_t * ptd,
      86             :                                vlib_buffer_t * sb0,
      87             :                                generic_header_offset_t * gho)
      88             : {
      89          20 :   u16 n_tx_bufs = vec_len (ptd->split_buffers);
      90          20 :   u16 i = 0, n_tx_bytes = 0;
      91             : 
      92         920 :   while (i < n_tx_bufs)
      93             :     {
      94         900 :       vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      95             : 
      96         900 :       ip4_header_t *ip4 =
      97         900 :         (ip4_header_t *) (vlib_buffer_get_current (b0) +
      98         900 :                           gho->outer_l3_hdr_offset);
      99         900 :       ip6_header_t *ip6 =
     100         900 :         (ip6_header_t *) (vlib_buffer_get_current (b0) +
     101         900 :                           gho->outer_l3_hdr_offset);
     102             : 
     103         900 :       if (gho->gho_flags & GHO_F_OUTER_IP4)
     104             :         {
     105         450 :           ip4->length =
     106         450 :             clib_host_to_net_u16 (b0->current_length -
     107         450 :                                   gho->outer_l3_hdr_offset);
     108         450 :           ip4->checksum = ip4_header_checksum (ip4);
     109             :         }
     110         450 :       else if (gho->gho_flags & GHO_F_OUTER_IP6)
     111             :         {
     112         450 :           ip6->payload_length =
     113         450 :             clib_host_to_net_u16 (b0->current_length -
     114         450 :                                   gho->outer_l4_hdr_offset);
     115             :         }
     116             : 
     117         900 :       n_tx_bytes += gho->outer_hdr_sz;
     118         900 :       i++;
     119             :     }
     120          20 :   return n_tx_bytes;
     121             : }
     122             : 
     123             : static_always_inline void
     124         900 : tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
     125             :                                         generic_header_offset_t * gho)
     126             : {
     127         900 :   u8 proto = 0;
     128         900 :   ip4_header_t *ip4 = 0;
     129         900 :   ip6_header_t *ip6 = 0;
     130         900 :   udp_header_t *udp = 0;
     131             : 
     132         900 :   ip4 =
     133         900 :     (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
     134         900 :   ip6 =
     135         900 :     (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
     136         900 :   udp =
     137         900 :     (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);
     138             : 
     139         900 :   if (gho->gho_flags & GHO_F_OUTER_IP4)
     140             :     {
     141         450 :       proto = ip4->protocol;
     142         450 :       ip4->length =
     143         450 :         clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
     144         450 :       ip4->checksum = ip4_header_checksum (ip4);
     145             :     }
     146         450 :   else if (gho->gho_flags & GHO_F_OUTER_IP6)
     147             :     {
     148         450 :       proto = ip6->protocol;
     149         450 :       ip6->payload_length =
     150         450 :         clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
     151             :     }
     152         900 :   if (proto == IP_PROTOCOL_UDP)
     153             :     {
     154             :       int bogus;
     155         900 :       udp->length =
     156         900 :         clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
     157         900 :       udp->checksum = 0;
     158         900 :       if (gho->gho_flags & GHO_F_OUTER_IP6)
     159             :         {
     160         450 :           udp->checksum =
     161         450 :             ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
     162             :         }
     163         450 :       else if (gho->gho_flags & GHO_F_OUTER_IP4)
     164             :         {
     165         450 :           udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
     166             :         }
     167             :       /* FIXME: it should be OUTER_UDP_CKSUM */
     168         900 :       vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
     169             :     }
     170         900 : }
     171             : 
     172             : static_always_inline u16
     173          20 : tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
     174             :                                 vnet_interface_per_thread_data_t * ptd,
     175             :                                 vlib_buffer_t * sb0,
     176             :                                 generic_header_offset_t * gho)
     177             : {
     178          20 :   u16 n_tx_bufs = vec_len (ptd->split_buffers);
     179          20 :   u16 i = 0, n_tx_bytes = 0;
     180             : 
     181         920 :   while (i < n_tx_bufs)
     182             :     {
     183         900 :       vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
     184             : 
     185         900 :       tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
     186         900 :       n_tx_bytes += gho->outer_hdr_sz;
     187         900 :       i++;
     188             :     }
     189          20 :   return n_tx_bytes;
     190             : }
     191             : 
     192             : static_always_inline u16
     193           0 : tso_alloc_tx_bufs (vlib_main_t * vm,
     194             :                    vnet_interface_per_thread_data_t * ptd,
     195             :                    vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
     196             :                    u16 gso_size, u16 first_data_size,
     197             :                    generic_header_offset_t * gho)
     198             : {
     199             :   u16 n_alloc, size;
     200           0 :   u16 first_packet_length = l234_sz + first_data_size;
     201             : 
     202             :   /*
     203             :    * size is the amount of data per segmented buffer except the 1st
     204             :    * segmented buffer.
     205             :    * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
     206             :    * l234_sz is hdr_sz from l2_hdr_offset.
     207             :    */
     208           0 :   size =
     209           0 :     clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
     210             :               - gho->l2_hdr_offset);
     211             : 
     212             :   /*
     213             :    * First segmented buffer length is calculated separately.
     214             :    * As it may contain less data than gso_size (when gso_size is
     215             :    * greater than current_length of 1st buffer from GSO chained
     216             :    * buffers) and/or size calculated above.
     217             :    */
     218           0 :   u16 n_bufs = 1;
     219             : 
     220             :   /*
     221             :    * Total packet length minus first packet length including l234 header.
     222             :    * rounded-up division
     223             :    */
     224           0 :   ASSERT (n_bytes_b0 > first_packet_length);
     225           0 :   n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);
     226             : 
     227           0 :   vec_validate (ptd->split_buffers, n_bufs - 1);
     228             : 
     229           0 :   n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
     230           0 :   if (n_alloc < n_bufs)
     231             :     {
     232           0 :       vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
     233           0 :       return 0;
     234             :     }
     235           0 :   return n_alloc;
     236             : }
     237             : 
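As a worked illustration of the rounded-up division in tso_alloc_tx_bufs above (the numbers below are hypothetical, chosen only to make the arithmetic concrete; they are not taken from this report):

    /* A 9014-byte GSO buffer with a 54-byte l2+l3+l4 header and a
     * gso_size of 1448, assuming the default buffer data size does
     * not further limit size.                                        */
    u32 n_bytes_b0 = 9014;
    u16 l234_sz = 54;
    u16 size = 1448;                     /* payload per later segment     */
    u16 first_data_size = 1448;          /* clib_min (1448, 9014 - 54)    */
    u16 first_packet_length = l234_sz + first_data_size;      /* 1502     */
    u16 n_bufs = 1 + (n_bytes_b0 - first_packet_length + (size - 1)) / size;
    /* n_bufs == 1 + (7512 + 1447) / 1448 == 1 + 6 == 7 buffers to allocate */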
     238             : static_always_inline void
     239           0 : tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
     240             :                                  u32 flags, u16 length)
     241             : {
     242             :   /* copying objects from cacheline 0 */
     243           0 :   nb0->current_data = b0->current_data;
     244           0 :   nb0->current_length = length;
     245           0 :   nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
     246           0 :   nb0->flow_id = b0->flow_id;
     247           0 :   nb0->error = b0->error;
     248           0 :   nb0->current_config_index = b0->current_config_index;
     249           0 :   clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
     250             : 
     251             :   /* copying objects from cacheline 1 */
     252           0 :   nb0->trace_handle = b0->trace_handle;
     253           0 :   nb0->total_length_not_including_first_buffer = 0;
     254             : 
     255             :   /* copying data */
     256           0 :   clib_memcpy_fast (vlib_buffer_get_current (nb0),
     257           0 :                     vlib_buffer_get_current (b0), length);
     258           0 : }
     259             : 
     260             : static_always_inline void
     261           0 : tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
     262             :                             vlib_buffer_t * b0, u16 template_data_sz,
     263             :                             u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
     264             :                             u32 next_tcp_seq, u32 flags,
     265             :                             generic_header_offset_t * gho)
     266             : {
     267           0 :   tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);
     268             : 
     269           0 :   *p_dst_left =
     270           0 :     clib_min (gso_size,
     271             :               vlib_buffer_get_default_data_size (vm) - (template_data_sz +
     272             :                                                         nb0->current_data));
     273           0 :   *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;
     274             : 
     275           0 :   tcp_header_t *tcp =
     276           0 :     (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
     277           0 :   tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
     278           0 : }
     279             : 
     280             : static_always_inline void
     281           0 : tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
     282             :                          int is_l2, int is_ip6, generic_header_offset_t * gho)
     283             : {
     284           0 :   ip4_header_t *ip4 =
     285           0 :     (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
     286           0 :   ip6_header_t *ip6 =
     287           0 :     (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
     288           0 :   tcp_header_t *tcp =
     289           0 :     (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);
     290             : 
     291           0 :   tcp->flags = tcp_flags;
     292             : 
     293           0 :   if (is_ip6)
     294             :     {
     295           0 :       ip6->payload_length =
     296           0 :         clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
     297           0 :       if (gho->gho_flags & GHO_F_TCP)
     298             :         {
     299           0 :           int bogus = 0;
     300           0 :           tcp->checksum = 0;
     301           0 :           tcp->checksum =
     302           0 :             ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
     303           0 :           vnet_buffer_offload_flags_clear (b0,
     304             :                                            VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
     305             :         }
     306             :     }
     307             :   else
     308             :     {
     309           0 :       ip4->length =
     310           0 :         clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
     311           0 :       if (gho->gho_flags & GHO_F_IP4)
     312           0 :         ip4->checksum = ip4_header_checksum (ip4);
     313           0 :       if (gho->gho_flags & GHO_F_TCP)
     314             :         {
     315           0 :           tcp->checksum = 0;
     316           0 :           tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
     317             :         }
     318           0 :       vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
     319             :                                             VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
     320             :     }
     321             : 
     322           0 :   if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
     323             :     {
     324           0 :       u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
     325             : 
     326           0 :       ip_adjacency_t *adj0 = adj_get (adj_index0);
     327             : 
     328           0 :       if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
     329           0 :           adj0->sub_type.midchain.fixup_func)
     330             :         /* calls e.g. ipip44_fixup */
     331           0 :         adj0->sub_type.midchain.fixup_func
     332             :           (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
     333             :     }
     334           0 : }
     335             : 
     336             : /**
     337             :  * Allocate the necessary number of ptd->split_buffers,
     338             :  * and segment the possibly chained buffer(s) from b0 into
     339             :  * there.
     340             :  *
     341             :  * Return the cumulative number of bytes sent or zero
     342             :  * if allocation failed.
     343             :  */
     344             : 
     345             : static_always_inline u32
     346           0 : tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
     347             :                     u32 sbi0, vlib_buffer_t * sb0,
     348             :                     generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
     349             :                     int is_ip6)
     350             : {
     351           0 :   u32 n_tx_bytes = 0;
     352           0 :   u16 gso_size = vnet_buffer2 (sb0)->gso_size;
     353             : 
     354           0 :   u8 save_tcp_flags = 0;
     355           0 :   u8 tcp_flags_no_fin_psh = 0;
     356           0 :   u32 next_tcp_seq = 0;
     357             : 
     358           0 :   tcp_header_t *tcp =
     359           0 :     (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
     360           0 :   next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
     361             :   /* store original flags for last packet and reset FIN and PSH */
     362           0 :   save_tcp_flags = tcp->flags;
     363           0 :   tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
     364           0 :   tcp->checksum = 0;
     365             : 
     366           0 :   u32 default_bflags =
     367           0 :     sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
     368           0 :   u16 l234_sz = gho->hdr_sz;
     369           0 :   int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
     370           0 :   next_tcp_seq += first_data_size;
     371             : 
     372           0 :   if (PREDICT_FALSE
     373             :       (!tso_alloc_tx_bufs
     374             :        (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
     375           0 :     return 0;
     376             : 
     377           0 :   vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
     378           0 :   tso_init_buf_from_template_base (b0, sb0, default_bflags,
     379           0 :                                    l234_sz + first_data_size);
     380             : 
     381           0 :   u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
     382           0 :   if (total_src_left)
     383             :     {
     384             :       /* Need to copy more segments */
     385             :       u8 *src_ptr, *dst_ptr;
     386             :       u16 src_left, dst_left;
     387             :       /* current source buffer */
     388           0 :       vlib_buffer_t *csb0 = sb0;
     389           0 :       u32 csbi0 = sbi0;
     390             :       /* current dest buffer */
     391             :       vlib_buffer_t *cdb0;
     392           0 :       u16 dbi = 1;              /* the buffer [0] is b0 */
     393             : 
     394           0 :       src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
     395           0 :       src_left = sb0->current_length - l234_sz - first_data_size;
     396             : 
     397           0 :       tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
     398             :                                gho);
     399             : 
     400             :       /* grab a second buffer and prepare the loop */
     401           0 :       ASSERT (dbi < vec_len (ptd->split_buffers));
     402           0 :       cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
     403           0 :       tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
     404             :                                   &dst_left, next_tcp_seq, default_bflags,
     405             :                                   gho);
     406             : 
     407             :       /* an arbitrary large number to catch the runaway loops */
     408           0 :       int nloops = 2000;
     409           0 :       while (total_src_left)
     410             :         {
     411           0 :           if (nloops-- <= 0)
     412           0 :             clib_panic ("infinite loop detected");
     413           0 :           u16 bytes_to_copy = clib_min (src_left, dst_left);
     414             : 
     415           0 :           clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);
     416             : 
     417           0 :           src_left -= bytes_to_copy;
     418           0 :           src_ptr += bytes_to_copy;
     419           0 :           total_src_left -= bytes_to_copy;
     420           0 :           dst_left -= bytes_to_copy;
     421           0 :           dst_ptr += bytes_to_copy;
     422           0 :           next_tcp_seq += bytes_to_copy;
     423           0 :           cdb0->current_length += bytes_to_copy;
     424             : 
     425           0 :           if (0 == src_left)
     426             :             {
     427           0 :               int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
     428           0 :               u32 next_bi = csb0->next_buffer;
     429             : 
     430             :               /* init src to the next buffer in chain */
     431           0 :               if (has_next)
     432             :                 {
     433           0 :                   csbi0 = next_bi;
     434           0 :                   csb0 = vlib_get_buffer (vm, csbi0);
     435           0 :                   src_left = csb0->current_length;
     436           0 :                   src_ptr = vlib_buffer_get_current (csb0);
     437             :                 }
     438             :               else
     439             :                 {
     440           0 :                   ASSERT (total_src_left == 0);
     441           0 :                   break;
     442             :                 }
     443             :             }
     444           0 :           if (0 == dst_left && total_src_left)
     445             :             {
     446           0 :               n_tx_bytes += cdb0->current_length;
     447           0 :               tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
     448             :                                        is_ip6, gho);
     449           0 :               ASSERT (dbi < vec_len (ptd->split_buffers));
     450           0 :               cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
     451           0 :               tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
     452             :                                           gso_size, &dst_ptr, &dst_left,
     453             :                                           next_tcp_seq, default_bflags, gho);
     454             :             }
     455             :         }
     456             : 
     457           0 :       tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);
     458             : 
     459           0 :       n_tx_bytes += cdb0->current_length;
     460             :     }
     461           0 :   n_tx_bytes += b0->current_length;
     462           0 :   return n_tx_bytes;
     463             : }
     464             : 
     465             : __clib_unused u32
     466           0 : gso_segment_buffer (vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd,
     467             :                     u32 bi, vlib_buffer_t *b, generic_header_offset_t *gho,
     468             :                     u32 n_bytes_b, u8 is_l2, u8 is_ip6)
     469             : {
     470             : 
     471           0 :   return tso_segment_buffer (vm, ptd, bi, b, gho, n_bytes_b, is_l2, is_ip6);
     472             : }
     473             : 
     474             : static_always_inline void
     475           0 : drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
     476             :                            vlib_node_runtime_t * node, u32 * pbi0,
     477             :                            u32 sw_if_index, u32 drop_error_code)
     478             : {
     479           0 :   u32 thread_index = vm->thread_index;
     480             : 
     481             :   vlib_simple_counter_main_t *cm;
     482           0 :   cm =
     483           0 :     vec_elt_at_index (vnm->interface_main.sw_if_counters,
     484             :                       VNET_INTERFACE_COUNTER_TX_ERROR);
     485           0 :   vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);
     486             : 
     487           0 :   vlib_error_drop_buffers (vm, node, pbi0,
     488             :                            /* buffer stride */ 1,
     489             :                            /* n_buffers */ 1, GSO_NEXT_DROP, node->node_index,
     490             :                            drop_error_code);
     491           0 : }
     492             : 
     493             : static_always_inline uword
     494      913293 : vnet_gso_node_inline (vlib_main_t * vm,
     495             :                       vlib_node_runtime_t * node,
     496             :                       vlib_frame_t * frame,
     497             :                       vnet_main_t * vnm,
     498             :                       vnet_hw_interface_t * hi,
     499             :                       int is_l2, int is_ip4, int is_ip6, int do_segmentation)
     500             : {
     501             :   u32 *to_next;
     502      913293 :   u32 next_index = node->cached_next_index;
     503      913293 :   u32 *from = vlib_frame_vector_args (frame);
     504      913293 :   u32 n_left_from = frame->n_vectors;
     505      913293 :   u32 *from_end = from + n_left_from;
     506      913293 :   u32 thread_index = vm->thread_index;
     507      913293 :   vnet_interface_main_t *im = &vnm->interface_main;
     508      913293 :   vnet_interface_per_thread_data_t *ptd =
     509      913293 :     vec_elt_at_index (im->per_thread_data, thread_index);
     510      913293 :   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
     511             : 
     512      913293 :   vlib_get_buffers (vm, from, b, n_left_from);
     513             : 
     514     1826640 :   while (n_left_from > 0)
     515             :     {
     516             :       u32 n_left_to_next;
     517             : 
     518      913351 :       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
     519             : 
     520      913351 :       if (!do_segmentation)
     521          50 :         while (from + 8 <= from_end && n_left_to_next >= 4)
     522             :           {
     523             :             u32 bi0, bi1, bi2, bi3;
     524             :             u32 next0, next1, next2, next3;
     525             :             u32 swif0, swif1, swif2, swif3;
     526             :             gso_trace_t *t0, *t1, *t2, *t3;
     527             :             vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;
     528             : 
     529             :             /* Prefetch next iteration. */
     530          48 :             vlib_prefetch_buffer_header (b[4], LOAD);
     531          48 :             vlib_prefetch_buffer_header (b[5], LOAD);
     532          48 :             vlib_prefetch_buffer_header (b[6], LOAD);
     533          48 :             vlib_prefetch_buffer_header (b[7], LOAD);
     534             : 
     535          48 :             bi0 = from[0];
     536          48 :             bi1 = from[1];
     537          48 :             bi2 = from[2];
     538          48 :             bi3 = from[3];
     539          48 :             to_next[0] = bi0;
     540          48 :             to_next[1] = bi1;
     541          48 :             to_next[2] = bi2;
     542          48 :             to_next[3] = bi3;
     543             : 
     544          48 :             swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
     545          48 :             swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
     546          48 :             swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
     547          48 :             swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
     548             : 
     549          48 :             if (PREDICT_FALSE (hi->sw_if_index != swif0))
     550             :               {
     551           0 :                 hi0 = vnet_get_sup_hw_interface (vnm, swif0);
     552           0 :                 if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
     553           0 :                     (b[0]->flags & VNET_BUFFER_F_GSO))
     554           0 :                   break;
     555             :               }
     556          48 :             if (PREDICT_FALSE (hi->sw_if_index != swif1))
     557             :               {
     558           0 :                 hi1 = vnet_get_sup_hw_interface (vnm, swif1);
     559           0 :                 if (!(hi1->caps & VNET_HW_IF_CAP_TCP_GSO) &&
     560           0 :                     (b[1]->flags & VNET_BUFFER_F_GSO))
     561           0 :                   break;
     562             :               }
     563          48 :             if (PREDICT_FALSE (hi->sw_if_index != swif2))
     564             :               {
     565           0 :                 hi2 = vnet_get_sup_hw_interface (vnm, swif2);
     566           0 :                 if ((hi2->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
     567           0 :                     (b[2]->flags & VNET_BUFFER_F_GSO))
     568           0 :                   break;
     569             :               }
     570          48 :             if (PREDICT_FALSE (hi->sw_if_index != swif3))
     571             :               {
     572           0 :                 hi3 = vnet_get_sup_hw_interface (vnm, swif3);
     573           0 :                 if (!(hi3->caps & VNET_HW_IF_CAP_TCP_GSO) &&
     574           0 :                     (b[3]->flags & VNET_BUFFER_F_GSO))
     575           0 :                   break;
     576             :               }
     577             : 
     578          48 :             if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
     579             :               {
     580          48 :                 t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
     581          48 :                 t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
     582          48 :                 t0->gso_size = vnet_buffer2 (b[0])->gso_size;
     583          48 :                 t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
     584          48 :                 clib_memset (&t0->gho, 0, sizeof (t0->gho));
     585          48 :                 vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
     586             :                                                    is_ip4, is_ip6);
     587             :               }
     588          48 :             if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
     589             :               {
     590          48 :                 t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
     591          48 :                 t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
     592          48 :                 t1->gso_size = vnet_buffer2 (b[1])->gso_size;
     593          48 :                 t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
     594          48 :                 clib_memset (&t1->gho, 0, sizeof (t1->gho));
     595          48 :                 vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
     596             :                                                    is_ip4, is_ip6);
     597             :               }
     598          48 :             if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
     599             :               {
     600          48 :                 t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
     601          48 :                 t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
     602          48 :                 t2->gso_size = vnet_buffer2 (b[2])->gso_size;
     603          48 :                 t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
     604          48 :                 clib_memset (&t2->gho, 0, sizeof (t2->gho));
     605          48 :                 vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
     606             :                                                    is_ip4, is_ip6);
     607             :               }
     608          48 :             if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
     609             :               {
     610          48 :                 t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
     611          48 :                 t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
     612          48 :                 t3->gso_size = vnet_buffer2 (b[3])->gso_size;
     613          48 :                 t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
     614          48 :                 clib_memset (&t3->gho, 0, sizeof (t3->gho));
     615          48 :                 vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
     616             :                                                    is_ip4, is_ip6);
     617             :               }
     618             : 
     619          48 :             from += 4;
     620          48 :             to_next += 4;
     621          48 :             n_left_to_next -= 4;
     622          48 :             n_left_from -= 4;
     623             : 
     624          48 :             next0 = next1 = 0;
     625          48 :             next2 = next3 = 0;
     626          48 :             vnet_feature_next (&next0, b[0]);
     627          48 :             vnet_feature_next (&next1, b[1]);
     628          48 :             vnet_feature_next (&next2, b[2]);
     629          48 :             vnet_feature_next (&next3, b[3]);
     630          48 :             vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
     631             :                                              n_left_to_next, bi0, bi1, bi2,
     632             :                                              bi3, next0, next1, next2, next3);
     633          48 :             b += 4;
     634             :           }
     635             : 
     636    36014400 :       while (from + 1 <= from_end && n_left_to_next > 0)
     637             :         {
     638             :           u32 bi0, swif0;
     639             :           gso_trace_t *t0;
     640             :           vnet_hw_interface_t *hi0;
     641    35101000 :           u32 next0 = 0;
     642    35101000 :           u32 do_segmentation0 = 0;
     643             : 
     644    35101000 :           swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
     645    35101000 :           if (PREDICT_FALSE (hi->sw_if_index != swif0))
     646             :             {
     647     5488400 :               hi0 = vnet_get_sup_hw_interface (vnm, swif0);
     648     5488400 :               if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
     649     5488400 :                   (b[0]->flags & VNET_BUFFER_F_GSO))
     650           0 :                 do_segmentation0 = 1;
     651             :             }
     652             :           else
     653    29612600 :             do_segmentation0 = do_segmentation;
     654             : 
     655             :           /* speculatively enqueue b0 to the current next frame */
     656    35101000 :           to_next[0] = bi0 = from[0];
     657    35101000 :           to_next += 1;
     658    35101000 :           n_left_to_next -= 1;
     659    35101000 :           from += 1;
     660    35101000 :           n_left_from -= 1;
     661             : 
     662    35101000 :           if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
     663             :             {
     664         672 :               t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
     665         672 :               t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
     666         672 :               t0->gso_size = vnet_buffer2 (b[0])->gso_size;
     667         672 :               t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
     668         672 :               clib_memset (&t0->gho, 0, sizeof (t0->gho));
     669         672 :               vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
     670             :                                                  is_ip4, is_ip6);
     671             :             }
     672             : 
     673    35101000 :           if (do_segmentation0)
     674             :             {
     675    29612600 :               if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
     676             :                 {
     677             :                   /*
     678             :                    * Undo the enqueue of the b0 - it is not going anywhere,
     679             :                    * and will be freed either after it's segmented or
     680             :                    * when dropped, if there is no buffers to segment into.
     681             :                    */
     682      174694 :                   to_next -= 1;
     683      174694 :                   n_left_to_next += 1;
     684             :                   /* undo the counting. */
     685      174694 :                   generic_header_offset_t gho = { 0 };
     686      174694 :                   u32 n_tx_bytes = 0;
     687      174694 :                   u32 inner_is_ip6 = is_ip6;
     688             : 
     689      174694 :                   vnet_generic_header_offset_parser (b[0], &gho, is_l2,
     690             :                                                      is_ip4, is_ip6);
     691             : 
     692      174694 :                   if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
     693             :                     {
     694          40 :                       if (PREDICT_FALSE
     695             :                           (gho.gho_flags & (GHO_F_GRE_TUNNEL |
     696             :                                             GHO_F_GENEVE_TUNNEL)))
     697             :                         {
     698             :                           /* not supported yet */
     699           0 :                           drop_one_buffer_and_count (vm, vnm, node, from - 1,
     700             :                                                      hi->sw_if_index,
     701             :                                                      GSO_ERROR_UNHANDLED_TYPE);
     702           0 :                           b += 1;
     703           0 :                           continue;
     704             :                         }
     705             : 
     706          40 :                       inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
     707             :                     }
     708             : 
     709      174694 :                   n_tx_bytes = gso_segment_buffer_inline (vm, ptd, b[0], &gho,
     710             :                                                           is_l2, inner_is_ip6);
     711             : 
     712      174694 :                   if (PREDICT_FALSE (n_tx_bytes == 0))
     713             :                     {
     714           0 :                       drop_one_buffer_and_count (vm, vnm, node, from - 1,
     715             :                                                  hi->sw_if_index,
     716             :                                                  GSO_ERROR_NO_BUFFERS);
     717           0 :                       b += 1;
     718           0 :                       continue;
     719             :                     }
     720             : 
     721             : 
     722      174694 :                   if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
     723             :                     {
     724          20 :                       n_tx_bytes +=
     725          20 :                         tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
     726             :                     }
     727             :                   else
     728      174674 :                     if (PREDICT_FALSE
     729             :                         (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
     730             :                                           GHO_F_IPIP6_TUNNEL)))
     731             :                     {
     732          20 :                       n_tx_bytes +=
     733          20 :                         tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
     734             :                     }
     735             : 
     736      174694 :                   u16 n_tx_bufs = vec_len (ptd->split_buffers);
     737      174694 :                   u32 *from_seg = ptd->split_buffers;
     738             : 
     739      349633 :                   while (n_tx_bufs > 0)
     740             :                     {
     741             :                       u32 sbi0;
     742             :                       vlib_buffer_t *sb0;
     743     5587080 :                       while (n_tx_bufs > 0 && n_left_to_next > 0)
     744             :                         {
     745     5412140 :                           sbi0 = to_next[0] = from_seg[0];
     746     5412140 :                           sb0 = vlib_get_buffer (vm, sbi0);
     747     5412140 :                           vnet_buffer_offload_flags_clear (sb0, 0x7F);
     748     5412140 :                           ASSERT (sb0->current_length > 0);
     749     5412140 :                           to_next += 1;
     750     5412140 :                           from_seg += 1;
     751     5412140 :                           n_left_to_next -= 1;
     752     5412140 :                           n_tx_bufs -= 1;
     753     5412140 :                           next0 = 0;
     754     5412140 :                           vnet_feature_next (&next0, sb0);
     755     5412140 :                           vlib_validate_buffer_enqueue_x1 (vm, node,
     756             :                                                            next_index,
     757             :                                                            to_next,
     758             :                                                            n_left_to_next,
     759             :                                                            sbi0, next0);
     760             :                         }
     761      174939 :                       vlib_put_next_frame (vm, node, next_index,
     762             :                                            n_left_to_next);
     763      174939 :                       if (n_tx_bufs > 0)
     764         245 :                         vlib_get_next_frame (vm, node, next_index,
     765             :                                              to_next, n_left_to_next);
     766             :                     }
     767             :                   /* The buffers were enqueued. Reset the length */
     768      174694 :                   vec_set_len (ptd->split_buffers, 0);
     769             :                   /* Free the now segmented buffer */
     770      174694 :                   vlib_buffer_free_one (vm, bi0);
     771      174694 :                   b += 1;
     772      174694 :                   continue;
     773             :                 }
     774             :             }
     775             : 
     776    34926300 :           vnet_feature_next (&next0, b[0]);
     777    34926300 :           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
     778             :                                            n_left_to_next, bi0, next0);
     779    34926300 :           b += 1;
     780             :         }
     781      913351 :       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     782             :     }
     783             : 
     784      913293 :   return frame->n_vectors;
     785             : }
     786             : 
     787             : static_always_inline uword
     788      913293 : vnet_gso_inline (vlib_main_t * vm,
     789             :                  vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
     790             :                  int is_ip4, int is_ip6)
     791             : {
     792      913293 :   vnet_main_t *vnm = vnet_get_main ();
     793             :   vnet_hw_interface_t *hi;
     794             : 
     795      913293 :   if (frame->n_vectors > 0)
     796             :     {
     797      913293 :       u32 *from = vlib_frame_vector_args (frame);
     798      913293 :       vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
     799      913293 :       hi = vnet_get_sup_hw_interface (vnm,
     800      913293 :                                       vnet_buffer (b)->sw_if_index[VLIB_TX]);
     801             : 
     802      913293 :       if (hi->caps & (VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_VXLAN_TNL_GSO))
     803           2 :         return vnet_gso_node_inline (vm, node, frame, vnm, hi,
     804             :                                      is_l2, is_ip4, is_ip6,
     805             :                                      /* do_segmentation */ 0);
     806             :       else
     807      913291 :         return vnet_gso_node_inline (vm, node, frame, vnm, hi,
     808             :                                      is_l2, is_ip4, is_ip6,
     809             :                                      /* do_segmentation */ 1);
     810             :     }
     811           0 :   return 0;
     812             : }
     813             : 
     814      324878 : VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
     815             :                                 vlib_frame_t * frame)
     816             : {
     817      324303 :   return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
     818             :                           0 /* ip6 */ );
     819             : }
     820             : 
     821      287794 : VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
     822             :                                 vlib_frame_t * frame)
     823             : {
     824      287219 :   return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
     825             :                           1 /* ip6 */ );
     826             : }
     827             : 
     828      163395 : VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
     829             :                              vlib_frame_t * frame)
     830             : {
     831      162820 :   return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
     832             :                           0 /* ip6 */ );
     833             : }
     834             : 
     835      139526 : VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
     836             :                              vlib_frame_t * frame)
     837             : {
     838      138951 :   return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
     839             :                           1 /* ip6 */ );
     840             : }
     841             : 
     842             : /* *INDENT-OFF* */
     843             : 
     844      183788 : VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
     845             :   .vector_size = sizeof (u32),
     846             :   .format_trace = format_gso_trace,
     847             :   .type = VLIB_NODE_TYPE_INTERNAL,
     848             :   .n_errors = ARRAY_LEN(gso_error_strings),
     849             :   .error_strings = gso_error_strings,
     850             :   .n_next_nodes = GSO_N_NEXT,
     851             :   .next_nodes = {
     852             :         [GSO_NEXT_DROP] = "error-drop",
     853             :   },
     854             :   .name = "gso-l2-ip4",
     855             : };
     856             : 
     857      183788 : VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
     858             :   .vector_size = sizeof (u32),
     859             :   .format_trace = format_gso_trace,
     860             :   .type = VLIB_NODE_TYPE_INTERNAL,
     861             :   .n_errors = ARRAY_LEN(gso_error_strings),
     862             :   .error_strings = gso_error_strings,
     863             :   .n_next_nodes = GSO_N_NEXT,
     864             :   .next_nodes = {
     865             :         [GSO_NEXT_DROP] = "error-drop",
     866             :   },
     867             :   .name = "gso-l2-ip6",
     868             : };
     869             : 
     870      183788 : VLIB_REGISTER_NODE (gso_ip4_node) = {
     871             :   .vector_size = sizeof (u32),
     872             :   .format_trace = format_gso_trace,
     873             :   .type = VLIB_NODE_TYPE_INTERNAL,
     874             :   .n_errors = ARRAY_LEN(gso_error_strings),
     875             :   .error_strings = gso_error_strings,
     876             :   .n_next_nodes = GSO_N_NEXT,
     877             :   .next_nodes = {
     878             :         [GSO_NEXT_DROP] = "error-drop",
     879             :   },
     880             :   .name = "gso-ip4",
     881             : };
     882             : 
     883      183788 : VLIB_REGISTER_NODE (gso_ip6_node) = {
     884             :   .vector_size = sizeof (u32),
     885             :   .format_trace = format_gso_trace,
     886             :   .type = VLIB_NODE_TYPE_INTERNAL,
     887             :   .n_errors = ARRAY_LEN(gso_error_strings),
     888             :   .error_strings = gso_error_strings,
     889             :   .n_next_nodes = GSO_N_NEXT,
     890             :   .next_nodes = {
     891             :         [GSO_NEXT_DROP] = "error-drop",
     892             :   },
     893             :   .name = "gso-ip6",
     894             : };
     895             : 
     896       76635 : VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
     897             :   .arc_name = "l2-output-ip4",
     898             :   .node_name = "gso-l2-ip4",
     899             :   .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
     900             : };
     901             : 
     902       76635 : VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
     903             :   .arc_name = "l2-output-ip6",
     904             :   .node_name = "gso-l2-ip6",
     905             :   .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
     906             : };
     907             : 
     908       76635 : VNET_FEATURE_INIT (gso_ip4_node, static) = {
     909             :   .arc_name = "ip4-output",
     910             :   .node_name = "gso-ip4",
     911             :   .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
     912             : };
     913             : 
     914       76635 : VNET_FEATURE_INIT (gso_ip6_node, static) = {
     915             :   .arc_name = "ip6-output",
     916             :   .node_name = "gso-ip6",
     917             :   .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
     918             : };
     919             : 
     920             : /*
     921             :  * fd.io coding-style-patch-verification: ON
     922             :  *
     923             :  * Local Variables:
     924             :  * eval: (c-set-style "gnu")
     925             :  * End:
     926             :  */

Generated by: LCOV version 1.14
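A report in this form is typically produced by capturing a tracefile with lcov --capture, pruning unwanted paths with lcov --remove into a filtered tracefile such as the coverage-filtered.info named in the header, and rendering the HTML with genhtml. The exact build directory and filter patterns used for this run are not recorded in the report.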